import warnings
import torch
from torch.nn import GroupNorm, LayerNorm
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
from ..utils import build_from_cfg
from .builder import OPTIMIZER_BUILDERS, OPTIMIZERS
@OPTIMIZER_BUILDERS.register_module
class DefaultOptimizerConstructor:
"""Default constructor for optimizers.
By default, each parameter shares the same optimizer settings, and we
provide an argument ``paramwise_cfg`` to specify parameter-wise settings.
It is a dict and may contain the following fields:
- ``custom_keys`` (dict): Specifies parameter-wise settings by keys. If
one of the keys in ``custom_keys`` is a substring of a parameter's
name, then the setting of that parameter will be taken from
``custom_keys[key]``, and other settings such as ``bias_lr_mult`` will
be ignored. Note that the matched ``key`` is the longest key that is a
substring of the parameter's name. If there are multiple matched keys
with the same length, the key with the lower alphabetical order is
chosen. ``custom_keys[key]`` should be a dict and may contain the
fields ``lr_mult`` and ``decay_mult``. See Example 2 below.
- ``bias_lr_mult`` (float): Multiplier applied to the learning rate of
all bias parameters (except for those in normalization layers and
offset layers of DCN).
- ``bias_decay_mult`` (float): Multiplier applied to the weight decay of
all bias parameters (except for those in normalization layers,
depthwise conv layers, and offset layers of DCN).
- ``norm_decay_mult`` (float): Multiplier applied to the weight decay of
the weight and bias parameters of normalization layers.
- ``dwconv_decay_mult`` (float): Multiplier applied to the weight decay
of the weight and bias parameters of depthwise conv layers.
- ``dcn_offset_lr_mult`` (float): Multiplier applied to the learning
rate of the parameters of the offset layers in a model's deformable
convs.
- ``bypass_duplicate`` (bool): If True, duplicated parameters will not
be added to the optimizer. Default: False.
Note:
1. If the option ``dcn_offset_lr_mult`` is used, the constructor will
override the effect of ``bias_lr_mult`` on the bias of the offset
layer. So be careful when using both ``bias_lr_mult`` and
``dcn_offset_lr_mult``. If you wish to apply both of them to the offset
layer in deformable convs, set ``dcn_offset_lr_mult`` to the original
``dcn_offset_lr_mult`` * ``bias_lr_mult``.
2. If the option ``dcn_offset_lr_mult`` is used, the constructor will
apply it to all the DCN layers in the model. So be careful when the
model contains multiple DCN layers in places other than the backbone.
Args:
model (:obj:`nn.Module`): The model with parameters to be optimized.
optimizer_cfg (dict): The config dict of the optimizer.
Positional fields are
- `type`: class name of the optimizer.
Optional fields are
- any arguments of the corresponding optimizer type, e.g.,
lr, weight_decay, momentum, etc.
paramwise_cfg (dict, optional): Parameter-wise options.
Example 1:
>>> model = torch.nn.modules.Conv1d(1, 1, 1)
>>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
>>> weight_decay=0.0001)
>>> paramwise_cfg = dict(norm_decay_mult=0.)
>>> optim_builder = DefaultOptimizerConstructor(
>>> optimizer_cfg, paramwise_cfg)
>>> optimizer = optim_builder(model)
Example 2:
>>> # assume the model has attributes model.backbone and model.cls_head
>>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95)
>>> paramwise_cfg = dict(custom_keys={
'.backbone': dict(lr_mult=0.1, decay_mult=0.9)})
>>> optim_builder = DefaultOptimizerConstructor(
>>> optimizer_cfg, paramwise_cfg)
>>> optimizer = optim_builder(model)
>>> # Then the `lr` and `weight_decay` for model.backbone are
>>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for
>>> # model.cls_head are (0.01, 0.95).
"""
def __init__(self, optimizer_cfg, paramwise_cfg=None):
if not isinstance(optimizer_cfg, dict):
raise TypeError(
"optimizer_cfg should be a dict", f"but got {type(optimizer_cfg)}"
)
self.optimizer_cfg = optimizer_cfg
self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg
self.base_lr = optimizer_cfg.get("lr", None)
self.base_wd = optimizer_cfg.get("weight_decay", None)
self._validate_cfg()
def _validate_cfg(self):
if not isinstance(self.paramwise_cfg, dict):
raise TypeError(
"paramwise_cfg should be None or a dict, "
f"but got {type(self.paramwise_cfg)}"
)
if "custom_keys" in self.paramwise_cfg:
if not isinstance(self.paramwise_cfg["custom_keys"], dict):
raise TypeError(
"If specified, custom_keys must be a dict, "
f'but got {type(self.paramwise_cfg["custom_keys"])}'
)
if self.base_wd is None:
for key in self.paramwise_cfg["custom_keys"]:
if "decay_mult" in self.paramwise_cfg["custom_keys"][key]:
raise ValueError("base_wd should not be None")
# base weight_decay must be explicitly specified if any decay mult is set
if (
"bias_decay_mult" in self.paramwise_cfg
or "norm_decay_mult" in self.paramwise_cfg
or "dwconv_decay_mult" in self.paramwise_cfg
):
if self.base_wd is None:
raise ValueError("base_wd should not be None")
def _is_in(self, param_group, param_group_list):
# assert is_list_of(param_group_list, dict)
param = set(param_group["params"])
param_set = set()
for group in param_group_list:
param_set.update(set(group["params"]))
return not param.isdisjoint(param_set)
def add_params(self, params, module, prefix="", is_dcn_module=None):
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module.
is_dcn_module (int|float|None): If the current module is a
submodule of DCN, `is_dcn_module` will be passed to
control conv_offset layer's learning rate. Defaults to None.
"""
# get param-wise options
custom_keys = self.paramwise_cfg.get("custom_keys", {})
# first sort with alphabet order and then sort with reversed len of str
sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)
bias_lr_mult = self.paramwise_cfg.get("bias_lr_mult", 1.0)
bias_decay_mult = self.paramwise_cfg.get("bias_decay_mult", 1.0)
norm_decay_mult = self.paramwise_cfg.get("norm_decay_mult", 1.0)
dwconv_decay_mult = self.paramwise_cfg.get("dwconv_decay_mult", 1.0)
bypass_duplicate = self.paramwise_cfg.get("bypass_duplicate", False)
dcn_offset_lr_mult = self.paramwise_cfg.get("dcn_offset_lr_mult", 1.0)
# special rules for norm layers and depth-wise conv layers
is_norm = isinstance(module, (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
is_dwconv = (
isinstance(module, torch.nn.Conv2d) and module.in_channels == module.groups
)
for name, param in module.named_parameters(recurse=False):
param_group = {"params": [param]}
if not param.requires_grad:
params.append(param_group)
continue
if bypass_duplicate and self._is_in(param_group, params):
warnings.warn(
f"{prefix} is duplicate. It is skipped since "
f"bypass_duplicate={bypass_duplicate}"
)
continue
# if the parameter matches one of the custom keys, ignore other rules
is_custom = False
for key in sorted_keys:
if key in f"{prefix}.{name}":
is_custom = True
lr_mult = custom_keys[key].get("lr_mult", 1.0)
param_group["lr"] = self.base_lr * lr_mult
if self.base_wd is not None:
decay_mult = custom_keys[key].get("decay_mult", 1.0)
param_group["weight_decay"] = self.base_wd * decay_mult
break
if not is_custom:
# bias_lr_mult affects all bias parameters
# except for norm.bias and dcn.conv_offset.bias
if name == "bias" and not (is_norm or is_dcn_module):
param_group["lr"] = self.base_lr * bias_lr_mult
if (
prefix.find("conv_offset") != -1
and is_dcn_module
and isinstance(module, torch.nn.Conv2d)
):
# deal with both dcn_offset's bias & weight
param_group["lr"] = self.base_lr * dcn_offset_lr_mult
# apply weight decay policies
if self.base_wd is not None:
# norm decay
if is_norm:
param_group["weight_decay"] = self.base_wd * norm_decay_mult
# depth-wise conv
elif is_dwconv:
param_group["weight_decay"] = self.base_wd * dwconv_decay_mult
# bias lr and decay
elif name == "bias" and not is_dcn_module:
# TODO: currently bias_decay_mult will have an effect on DCN
param_group["weight_decay"] = self.base_wd * bias_decay_mult
params.append(param_group)
# DCN module detection is not implemented in this version, so is_dcn_module
# stays False for all children and dcn_offset_lr_mult never takes effect.
is_dcn_module = False
for child_name, child_mod in module.named_children():
child_prefix = f"{prefix}.{child_name}" if prefix else child_name
self.add_params(
params, child_mod, prefix=child_prefix, is_dcn_module=is_dcn_module
)
def __call__(self, model):
if hasattr(model, "module"):
model = model.module
optimizer_cfg = self.optimizer_cfg.copy()
# if no paramwise option is specified, just use the global setting
if not self.paramwise_cfg:
optimizer_cfg["params"] = model.parameters()
return build_from_cfg(optimizer_cfg, OPTIMIZERS)
# set param-wise lr and weight decay recursively
params = []
self.add_params(params, model)
optimizer_cfg["params"] = params
return build_from_cfg(optimizer_cfg, OPTIMIZERS)
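# ---------------------------------------------------------------------------
# A minimal usage sketch of the constructor above (illustration only, not part
# of the library code). It assumes torch's SGD has been registered in the
# OPTIMIZERS registry, as the docstring examples also assume; the toy model
# below is made up for demonstration.
def _example_build():
    import torch.nn as nn

    toy_model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    builder = DefaultOptimizerConstructor(
        optimizer_cfg=dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=1e-4),
        paramwise_cfg=dict(bias_lr_mult=2.0, norm_decay_mult=0.0),
    )
    optimizer = builder(toy_model)
    # Expected: the conv bias gets lr = 0.01 * 2.0; the BatchNorm weight and
    # bias get weight_decay = 1e-4 * 0.0; all other parameters keep the base
    # lr and weight_decay.
    for group in optimizer.param_groups:
        print(len(group["params"]), group["lr"], group["weight_decay"])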
|
from pathlib import Path
import shutil
import os
import sys
def convert_repo_to_fork(path2repo):
path_fork = path2repo.parent / "yolov5-icevision" # path of our fork
if path_fork.is_dir():
shutil.rmtree(path_fork)
path_fork.mkdir(exist_ok=True)
_ = shutil.copytree(path2repo, path_fork, dirs_exist_ok=True)
(path_fork / "yolov5").mkdir(exist_ok=True)
sources = [
path_fork / d
for d in next(os.walk(path_fork))[1]
if "git" not in d and "yolo" not in d
]
dests = [(path_fork / "yolov5") / source.stem for source in sources]
for source, dest in zip(sources, dests):
_ = shutil.move(source, dest)
init_content = """
__version__ = "4.0.0"
import imp
_, path, _ = imp.find_module('yolov5')
"""
f = open(path_fork / "yolov5" / "__init__.py", "w+")
f.write(init_content)
f.close()
manifest_content = """
# Include the README
include *.md
# Include the license file
include LICENSE
# Include setup.py
include setup.py
# Include the data files
recursive-include yolov5/data *
recursive-include yolov5/models *
"""
f = open(path_fork / "MANIFEST.in", "w+")
f.write(manifest_content)
f.close()
pypi_release_content = """
name: PyPi Release
on:
release:
types: [created]
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Setup dependencies
run: python -m pip install --user --upgrade setuptools wheel
- name: Build package
run: python setup.py sdist bdist_wheel
- name: Publish package to TestPyPI
uses: pypa/gh-action-pypi-publish@master
with:
user: __token__
password: ${{ secrets.test_pypi_password }}
repository_url: https://test.pypi.org/legacy/
verbose: true
- name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@master
with:
user: __token__
password: ${{ secrets.pypi_password }}
verbose: true
"""
f = open(path_fork / ".github/workflows/pypi-release.yml", "w+")
f.write(pypi_release_content)
f.close()
setup_cfg_content = """
# reference: https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html
[metadata]
name = yolov5-icevision
version = 4.0.0
author = ultralytics
author_email = glenn.jocher@ultralytics.com
description = YOLOv5
long_description = file: README.md
long_description_content_type = text/markdown
keywords = object detection, machine learning
license = GNU General Public License v3.0
classifiers =
Development Status :: 4 - Beta
Intended Audience :: Developers
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Topic :: Scientific/Engineering :: Artificial Intelligence
Topic :: Scientific/Engineering :: Image Recognition
[options]
python_requires = >=3.6,<4
zip_safe = False
include_package_data = True
packages = find:
install_requires =
Cython
matplotlib>=3.2.2
numpy>=1.18.5
opencv-python>=4.1.2
Pillow
PyYAML>=5.3.1
scipy>=1.4.1
tensorboard>=2.2
torch>=1.7.0
torchvision>=0.8.1
tqdm>=4.41.0
seaborn>=0.11.0
[options.extras_require]
all =
pandas
"""
f = open(path_fork / "setup.cfg", "w+")
f.write(setup_cfg_content)
f.close()
setup_py_content = """
from setuptools import setup
if __name__ == "__main__":
setup()
"""
f = open(path_fork / "setup.py", "w+")
f.write(setup_py_content)
f.close()
def replace_imports(path):
bads = ["from utils", "from models", "import utils", "import models"]
goods = [
"from yolov5.utils",
"from yolov5.models",
"import yolov5.utils",
"import yolov5.models",
]
with open(path, "r") as file:
filedata = file.read()
if any(bad in filedata for bad in bads):
for bad, good in zip(bads, goods):
filedata = filedata.replace(bad, good)
with open(path, "w") as file:
file.write(filedata)
for root, _, files in os.walk(path_fork):
for f in files:
if f.endswith(".py"):
replace_imports(os.path.join(root, f))
with open(path_fork / ".gitignore", "r+") as f:
d = f.readlines()
f.seek(0)
for i in d:
if i.strip() != "*.cfg":
f.write(i)
f.truncate()
if __name__ == "__main__":
path2repo = Path(sys.argv[1])
convert_repo_to_fork(path2repo)
print(
f"Conversion completed successfully. Please find the fork here: {str(path2repo.parent / "yolov5-icevision")}"
)
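# ---------------------------------------------------------------------------
# Hedged sketch: the import-rewriting rule applied by replace_imports above,
# shown on an in-memory string instead of a file. The sample import line is
# invented for illustration; only the substitution pairs mirror the script.
def _demo_rewrite_imports(src):
    bads = ["from utils", "from models", "import utils", "import models"]
    goods = [
        "from yolov5.utils",
        "from yolov5.models",
        "import yolov5.utils",
        "import yolov5.models",
    ]
    for bad, good in zip(bads, goods):
        src = src.replace(bad, good)
    return src
# Example: _demo_rewrite_imports("from utils.general import xywh2xyxy")
# returns "from yolov5.utils.general import xywh2xyxy".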
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1149705583.09Shochet': {'Type': 'Island','Name': 'VegasIsland','File': '','Environment': 'Interior','Minimap': False,'Objects': {'1149705605.5Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1149705605.5Shochet0': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1149705607.02Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1149705607.63Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1149705619.08Shochet': {'Type': 'Cell Portal Area','Name': 'cell_pier','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1149705619.08Shochet0': {'Type': 'Parlor Game','Category': 'Blackjack','BetMultiplier': '1','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(421.053, -131.608, 5.287),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/Cardtable_HalfCircle'}},'1149705632.05Shochet': {'Type': 'Parlor Game','Category': 'Holdem','BetMultiplier': '1','Hpr': VBase3(45.0, 0.0, 0.0),'Pos': Point3(443.184, -123.256, 5.295),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/Cardtable_Pill'}},'1169451658.54Shochet': {'Type': 'Searchable Container','Aggro Radius': 5.0,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(508.36, -134.581, 5.213),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/desk_gov'},'searchTime': '6.0','type': 'Desk'},'1169451790.01Shochet': {'Type': 'Searchable Container','Aggro Radius': 5.0,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(480.699, -161.909, 5.213),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/wellA'},'searchTime': '6.0','type': 'WellA'},'1171348677.72Shochet': {'Type': 'Interactive Prop','Hpr': VBase3(96.302, 0.0, 0.0),'Pos': Point3(305.078, -115.012, 4.769),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/dummy_zero'},'interactAble': 'player','interactType': 'hit'},'1186785500.34Shochet': {'Type': 'Player Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Index': 1,'Pos': Point3(485.258, -86.884, 5.213),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1149706548.67Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1149706548.67Shochet0': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1149706577.28Shochet': {'Type': 'Cell Portal Area','Name': 'cell_spanish_town','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1149706632.97Shochet': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '2','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(500.886, 153.538, 45.292),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Scorpion','Start State': 'Ambush','StartFrame': '0','Team': '1','TrailFX': 
'None','TrailLeft': 'None','TrailRight': 'None','Visual': {'Model': 'models/misc/smiley'}},'1157596044.44jasyeung': {'Type': 'Townsperson','Category': 'MedicineMan','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(-173.418, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 12,'Pos': Point3(454.118, 103.165, 41.541),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1169192829.53Shochet': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(128.625, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 12,'Pos': Point3(572.566, 129.349, 42.792),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1169616338.61Shochet': {'Type': 'Townsperson','Category': 'Shipwright','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(95.14, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 12,'Pos': Point3(594.082, 77.364, 42.876),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1169616428.63Shochet': {'Type': 'Object Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(470.936, 243.777, 49.657),'Priority': '1','Scale': VBase3(1.0, 1.0, 1.0),'SpawnDelay': '10','Spawnables': 'Buried Treasure','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '5'},'1171691663.17Shochet': {'Type': 'Townsperson','Category': 'Gypsy','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(-161.55, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': 12,'Pos': Point3(446.386, 327.483, 54.071),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1154059325.91Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 
1.0)},'1154059325.91Shochet0': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1154059341.09Shochet': {'Type': 'Animal','Hpr': Point3(0.0, 0.0, 0.0),'Patrol Radius': 12,'Pos': Point3(223.47, -25.232, 5.178),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Pig','Start State': 'Walk','StartFrame': '0'},'1154059344.67Shochet': {'Type': 'Animal','Hpr': Point3(0.0, 0.0, 0.0),'Patrol Radius': 12,'Pos': Point3(230.53, -15.957, 5.073),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Chicken','Start State': 'Walk','StartFrame': '0'},'1154059351.97Shochet': {'Type': 'Animal','Hpr': Point3(0.0, 0.0, 0.0),'Patrol Radius': 12,'Pos': Point3(239.433, -6.269, 4.768),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Rooster','Start State': 'Walk','StartFrame': '0'},'1154059362.19Shochet': {'Type': 'Creature','Boss': False,'Boss Name': 'Anonymous','Hpr': Point3(0.0, 0.0, 0.0),'Level': '37','Patrol Radius': 12,'Pos': Point3(-93.119, -296.023, 1.391),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Crab','Start State': 'Idle','StartFrame': '0'},'1154059366.69Shochet': {'Type': 'Creature','Boss': False,'Boss Name': 'Anonymous','Hpr': Point3(0.0, 0.0, 0.0),'Level': '37','Patrol Radius': 12,'Pos': Point3(267.997, 4.319, 7.507),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'FlyTrap','Start State': 'Idle','StartFrame': '0'},'1157596022.35jasyeung': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1157596022.35jasyeung0': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1165018948.05sdnaik': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1165018950.47sdnaik': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1169192695.17Shochet': {'Type': 'Port Collision Sphere','Name': 'VegasPort','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(147.216, -168.582, 0.0),'Scale': VBase3(470.212, 470.212, 470.212),'VisSize': '','Visual': {'Color': (0.5, 0.5, 1.0, 0.2),'Model': 'models/misc/smiley'}},'1169192874.58Shochet': {'Type': 'Cell Portal Area','Name': 'cell_green_area','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1165019061.13sdnaik': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '2','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(119.094, -74.897, 4.167),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Bat','Start State': 'Ambush','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1169192882.95Shochet': {'Type': 'Cell Portal Area','Name': 'cell_shanty_town','Hpr': Point3(0.0, 0.0, 0.0),'Objects': 
{'1165019080.61sdnaik': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '2','Patrol Radius': 12,'Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(180.048, 221.152, 66.347),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Wasp','Start State': 'Patrol','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1169192926.38Shochet': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(640.128, -165.933, 6.871),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Alligator','Start State': 'Patrol','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1169193027.28Shochet': {'Type': 'Cell Portal Area','Name': 'cell_graveyard','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1169193004.98Shochet': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(-255.945, 98.577, 68.242),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Stump','Start State': 'Patrol','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}}},'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0)},'1169449533.4Shochet': {'Type': 'Searchable Container','Aggro Radius': 5.0,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(494.568, -149.091, 5.213),'Scale': VBase3(3.885, 3.885, 3.885),'Visual': {'Color': (1.0, 0.0, 0.0, 1.0),'Model': 'models/props/barrel'},'searchTime': '6.0','type': 'Barrel'},'1169616489.03Shochet': {'Type': 'Creature','Boss': False,'Boss Name': 'Anonymous','Hpr': Point3(0.0, 0.0, 0.0),'Level': '37','Patrol Radius': 12,'Pos': Point3(327.376, 1.76, 10.049),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Stump','Start State': 'Idle','StartFrame': '0'},'1179593088.0Shochet1': {'Type': 'Creature','Boss': False,'Boss Name': 'Anonymous','Hpr': Point3(0.0, 0.0, 0.0),'Level': '37','Patrol Radius': 12,'Pos': Point3(271.381, -60.852, 2.396),'PoseAnim': '','PoseFrame': '','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'Species': 'Scorpion','Start State': 'Idle','StartFrame': '0'},'1186784633.39Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1186784634.77Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1186784662.83Shochet': {'Type': 'Dinghy','Aggro Radius': '12.0000','Hpr': VBase3(90.585, 0.0, 0.0),'Location': 'Water','Pos': Point3(373.406, -138.477, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 
'models/shipparts/dingy-geometry_High'}},'1186785480.13Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1186785480.14Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1187140148.8gjeon': {'Type': 'Cannon','Hpr': Point3(0.0, 0.0, 0.0),'MaxPower': '1.0','MinPower': '0.2','Pos': Point3(376.873, -67.173, 4.441),'Scale': VBase3(2.833, 2.833, 2.833),'Visual': {'Model': 'models/shipparts/cannon_hi'}},'1187407776.73gjeon': {'Type': 'Building Exterior','File': 'A_GyedoInterior','ExtUid': '1187407776.73gjeon0','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1234924975.76caoconno': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-8.035, -1.826, 0.068),'Scale': VBase3(1.0, 1.0, 1.0)},'1234924977.02caoconno': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(8.421, -1.87, 0.156),'Scale': VBase3(1.0, 1.0, 1.0)}},'Pos': Point3(378.723, -30.806, 8.78),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Door': 'models/buildings/shanty_guildhall_door','Interior': 'models/buildings/interior_shanty_guildhall','Model': 'models/buildings/spanish_npc_house_a_exterior','SignFrame': '','SignImage': 'models/buildings/sign1_eng_a_icon_blacksmith'}},'1187634515.47gjeon': {'Type': 'Barrel','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(592.624, -103.901, 2.332),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/barrel_worn'}},'1187634559.22gjeon': {'Type': 'Barrel','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(604.356, -93.711, 2.764),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/barrel_worn'}},'1191607138.69gjeon': {'Type': 'Animated Avatar','Category': 'cast','Animation Track': 'shovel','AuraFX': 'None','Effect Type': 'None','Hpr': Point3(74.754, 0.0, 0.0),'Pos': Point3(640.247, -90.735, 2.301),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'StartFrame': '0','SubCategory': 'models/char/jr_2000','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None'},'1191607940.19gjeon': {'Type': 'Animated Avatar - Townfolk','Animation Track': 'flute','Holiday': '','Hpr': Point3(85.387, 0.0, 0.0),'Pos': Point3(630.709, -86.24, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Beard': 6,'Belt': 2,'BeltColor': 3,'Coat': 2,'CoatColor': 14,'Gender': 'f','Hair': 4,'HairColor': 1,'Hat': 2,'Mustache': 4,'Pants': 1,'PantsColor': 2,'Shape': 2,'Shirt': 1,'ShirtColor': 1,'Shoe': 2,'Skin': 11,'Sock': 0}},'1234924978.07caoconno': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(0.127, -4.354, 4.971),'Scale': VBase3(1.0, 1.0, 1.0)},'1234924978.08caoconno': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(8.421, -1.87, 0.156),'Scale': VBase3(1.0, 1.0, 1.0)},'1235676532.77gjeon': {'Type': 'Building Exterior','File': '','ExtUid': '1235676532.77gjeon0','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1235676532.83gjeon': {'Type': 'Door Locator Node','Name': 'door_locator','GridPos': Point3(656.644, -209.671, 11.583),'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(0.127, -4.354, 4.971),'Scale': VBase3(1.0, 1.0, 1.0)}},'Pos': Point3(656.517, 
-205.317, 6.612),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Door': 'models/buildings/shanty_guildhall_door','Model': 'models/buildings/shanty_npc_house_a_exterior','SignFrame': '','SignImage': 'models/buildings/sign1_eng_a_icon_barber'}},'1240961664.0gjeon1': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(639.664, -118.133, 3.765),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Noob Skeleton','Start State': 'Ambush','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1275692669.07gjeon': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','HelpID': 'NONE','Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '12.0000','Pos': Point3(662.545, -63.045, 0.0),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Walk','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','spawnTimeBegin': 9.25,'spawnTimeEnd': 18.5}},'Undockable': False,'Visibility': 'Grid','Visual': {'Model': 'models/islands/bilgewater_zero'}}},'Node Links': [],'Layers': {},'ObjectIds': {'1149705583.09Shochet': '["Objects"]["1149705583.09Shochet"]','1149705605.5Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705605.5Shochet"]','1149705605.5Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705605.5Shochet0"]','1149705607.02Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705607.02Shochet"]','1149705607.63Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705607.63Shochet"]','1149705619.08Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]','1149705619.08Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1149705619.08Shochet0"]','1149705632.05Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1149705632.05Shochet"]','1149706548.67Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706548.67Shochet"]','1149706548.67Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706548.67Shochet0"]','1149706577.28Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]','1149706632.97Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1149706632.97Shochet"]','1154059325.91Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059325.91Shochet"]','1154059325.91Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059325.91Shochet0"]','1154059341.09Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059341.09Shochet"]','1154059344.67Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059344.67Shochet"]','1154059351.97Shochet': 
'["Objects"]["1149705583.09Shochet"]["Objects"]["1154059351.97Shochet"]','1154059362.19Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059362.19Shochet"]','1154059366.69Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059366.69Shochet"]','1157596022.35jasyeung': '["Objects"]["1149705583.09Shochet"]["Objects"]["1157596022.35jasyeung"]','1157596022.35jasyeung0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1157596022.35jasyeung0"]','1157596044.44jasyeung': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1157596044.44jasyeung"]','1165018948.05sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1165018948.05sdnaik"]','1165018950.47sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1165018950.47sdnaik"]','1165019061.13sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192874.58Shochet"]["Objects"]["1165019061.13sdnaik"]','1165019080.61sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192882.95Shochet"]["Objects"]["1165019080.61sdnaik"]','1169192695.17Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192695.17Shochet"]','1169192829.53Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169192829.53Shochet"]','1169192874.58Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192874.58Shochet"]','1169192882.95Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192882.95Shochet"]','1169192926.38Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192926.38Shochet"]','1169193004.98Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169193027.28Shochet"]["Objects"]["1169193004.98Shochet"]','1169193027.28Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169193027.28Shochet"]','1169449533.4Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169449533.4Shochet"]','1169451658.54Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1169451658.54Shochet"]','1169451790.01Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1169451790.01Shochet"]','1169616338.61Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169616338.61Shochet"]','1169616428.63Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169616428.63Shochet"]','1169616489.03Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169616489.03Shochet"]','1171348677.72Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1171348677.72Shochet"]','1171691663.17Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1171691663.17Shochet"]','1179593088.0Shochet1': '["Objects"]["1149705583.09Shochet"]["Objects"]["1179593088.0Shochet1"]','1186784633.39Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784633.39Shochet"]','1186784634.77Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784634.77Shochet"]','1186784662.83Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784662.83Shochet"]','1186785480.13Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186785480.13Shochet"]','1186785480.14Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186785480.14Shochet"]','1186785500.34Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1186785500.34Shochet"]','1187140148.8gjeon': 
'["Objects"]["1149705583.09Shochet"]["Objects"]["1187140148.8gjeon"]','1187407776.73gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]','1187407776.73gjeon0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]','1187634515.47gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187634515.47gjeon"]','1187634559.22gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187634559.22gjeon"]','1191607138.69gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1191607138.69gjeon"]','1191607940.19gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1191607940.19gjeon"]','1234924975.76caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]["Objects"]["1234924975.76caoconno"]','1234924977.02caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]["Objects"]["1234924977.02caoconno"]','1234924978.07caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1234924978.07caoconno"]','1234924978.08caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1234924978.08caoconno"]','1235676532.77gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]','1235676532.77gjeon0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]','1235676532.83gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]["Objects"]["1235676532.83gjeon"]','1240961664.0gjeon1': '["Objects"]["1149705583.09Shochet"]["Objects"]["1240961664.0gjeon1"]','1275692669.07gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1275692669.07gjeon"]'}}
extraInfo = {'camPos': Point3(652.314, -154.956, 60.7173),'camHpr': VBase3(-5.3369, -32.9357, 0),'focalLength': 1.39999997616,'skyState': 2,'fog': 0}
'models/shipparts/dingy-geometry_High'}},'1186785480.13Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_1','Hpr': VBase3(-18.331, 0.0, 0.0),'Pos': Point3(-219.917, -319.235, 0.595),'Scale': VBase3(1.0, 1.0, 1.0)},'1186785480.14Shochet': {'Type': 'Locator Node','Name': 'portal_exterior_2','Hpr': VBase3(68.97, 0.0, 0.0),'Pos': Point3(-285.103, -58.817, 44.049),'Scale': VBase3(1.0, 1.0, 1.0)},'1187140148.8gjeon': {'Type': 'Cannon','Hpr': Point3(0.0, 0.0, 0.0),'MaxPower': '1.0','MinPower': '0.2','Pos': Point3(376.873, -67.173, 4.441),'Scale': VBase3(2.833, 2.833, 2.833),'Visual': {'Model': 'models/shipparts/cannon_hi'}},'1187407776.73gjeon': {'Type': 'Building Exterior','File': 'A_GyedoInterior','ExtUid': '1187407776.73gjeon0','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1234924975.76caoconno': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-8.035, -1.826, 0.068),'Scale': VBase3(1.0, 1.0, 1.0)},'1234924977.02caoconno': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(8.421, -1.87, 0.156),'Scale': VBase3(1.0, 1.0, 1.0)}},'Pos': Point3(378.723, -30.806, 8.78),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Door': 'models/buildings/shanty_guildhall_door','Interior': 'models/buildings/interior_shanty_guildhall','Model': 'models/buildings/spanish_npc_house_a_exterior','SignFrame': '','SignImage': 'models/buildings/sign1_eng_a_icon_blacksmith'}},'1187634515.47gjeon': {'Type': 'Barrel','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(592.624, -103.901, 2.332),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/barrel_worn'}},'1187634559.22gjeon': {'Type': 'Barrel','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(604.356, -93.711, 2.764),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/barrel_worn'}},'1191607138.69gjeon': {'Type': 'Animated Avatar','Category': 'cast','Animation Track': 'shovel','AuraFX': 'None','Effect Type': 'None','Hpr': Point3(74.754, 0.0, 0.0),'Pos': Point3(640.247, -90.735, 2.301),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'StartFrame': '0','SubCategory': 'models/char/jr_2000','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None'},'1191607940.19gjeon': {'Type': 'Animated Avatar - Townfolk','Animation Track': 'flute','Holiday': '','Hpr': Point3(85.387, 0.0, 0.0),'Pos': Point3(630.709, -86.24, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Beard': 6,'Belt': 2,'BeltColor': 3,'Coat': 2,'CoatColor': 14,'Gender': 'f','Hair': 4,'HairColor': 1,'Hat': 2,'Mustache': 4,'Pants': 1,'PantsColor': 2,'Shape': 2,'Shirt': 1,'ShirtColor': 1,'Shoe': 2,'Skin': 11,'Sock': 0}},'1234924978.07caoconno': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(0.127, -4.354, 4.971),'Scale': VBase3(1.0, 1.0, 1.0)},'1234924978.08caoconno': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(8.421, -1.87, 0.156),'Scale': VBase3(1.0, 1.0, 1.0)},'1235676532.77gjeon': {'Type': 'Building Exterior','File': '','ExtUid': '1235676532.77gjeon0','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1235676532.83gjeon': {'Type': 'Door Locator Node','Name': 'door_locator','GridPos': Point3(656.644, -209.671, 11.583),'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(0.127, -4.354, 4.971),'Scale': VBase3(1.0, 1.0, 1.0)}},'Pos': Point3(656.517, 
-205.317, 6.612),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Door': 'models/buildings/shanty_guildhall_door','Model': 'models/buildings/shanty_npc_house_a_exterior','SignFrame': '','SignImage': 'models/buildings/sign1_eng_a_icon_barber'}},'1240961664.0gjeon1': {'Type': 'Spawn Node','AnimSet': 'default','AuraFX': 'None','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(639.664, -118.133, 3.765),'PoseAnim': '','PoseFrame': '','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Noob Skeleton','Start State': 'Ambush','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1275692669.07gjeon': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','HelpID': 'NONE','Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '12.0000','Pos': Point3(662.545, -63.045, 0.0),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Requires Quest Interest': False,'Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Walk','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','spawnTimeBegin': 9.25,'spawnTimeEnd': 18.5}},'Undockable': False,'Visibility': 'Grid','Visual': {'Model': 'models/islands/bilgewater_zero'}}},'Node Links': [],'Layers': {},'ObjectIds': {'1149705583.09Shochet': '["Objects"]["1149705583.09Shochet"]','1149705605.5Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705605.5Shochet"]','1149705605.5Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705605.5Shochet0"]','1149705607.02Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705607.02Shochet"]','1149705607.63Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705607.63Shochet"]','1149705619.08Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]','1149705619.08Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1149705619.08Shochet0"]','1149705632.05Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1149705632.05Shochet"]','1149706548.67Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706548.67Shochet"]','1149706548.67Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706548.67Shochet0"]','1149706577.28Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]','1149706632.97Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1149706632.97Shochet"]','1154059325.91Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059325.91Shochet"]','1154059325.91Shochet0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059325.91Shochet0"]','1154059341.09Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059341.09Shochet"]','1154059344.67Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059344.67Shochet"]','1154059351.97Shochet': 
'["Objects"]["1149705583.09Shochet"]["Objects"]["1154059351.97Shochet"]','1154059362.19Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059362.19Shochet"]','1154059366.69Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1154059366.69Shochet"]','1157596022.35jasyeung': '["Objects"]["1149705583.09Shochet"]["Objects"]["1157596022.35jasyeung"]','1157596022.35jasyeung0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1157596022.35jasyeung0"]','1157596044.44jasyeung': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1157596044.44jasyeung"]','1165018948.05sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1165018948.05sdnaik"]','1165018950.47sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1165018950.47sdnaik"]','1165019061.13sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192874.58Shochet"]["Objects"]["1165019061.13sdnaik"]','1165019080.61sdnaik': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192882.95Shochet"]["Objects"]["1165019080.61sdnaik"]','1169192695.17Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192695.17Shochet"]','1169192829.53Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169192829.53Shochet"]','1169192874.58Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192874.58Shochet"]','1169192882.95Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192882.95Shochet"]','1169192926.38Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169192926.38Shochet"]','1169193004.98Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169193027.28Shochet"]["Objects"]["1169193004.98Shochet"]','1169193027.28Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169193027.28Shochet"]','1169449533.4Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169449533.4Shochet"]','1169451658.54Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1169451658.54Shochet"]','1169451790.01Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1169451790.01Shochet"]','1169616338.61Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169616338.61Shochet"]','1169616428.63Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1169616428.63Shochet"]','1169616489.03Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1169616489.03Shochet"]','1171348677.72Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1171348677.72Shochet"]','1171691663.17Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149706577.28Shochet"]["Objects"]["1171691663.17Shochet"]','1179593088.0Shochet1': '["Objects"]["1149705583.09Shochet"]["Objects"]["1179593088.0Shochet1"]','1186784633.39Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784633.39Shochet"]','1186784634.77Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784634.77Shochet"]','1186784662.83Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186784662.83Shochet"]','1186785480.13Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186785480.13Shochet"]','1186785480.14Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1186785480.14Shochet"]','1186785500.34Shochet': '["Objects"]["1149705583.09Shochet"]["Objects"]["1149705619.08Shochet"]["Objects"]["1186785500.34Shochet"]','1187140148.8gjeon': 
'["Objects"]["1149705583.09Shochet"]["Objects"]["1187140148.8gjeon"]','1187407776.73gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]','1187407776.73gjeon0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]','1187634515.47gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187634515.47gjeon"]','1187634559.22gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187634559.22gjeon"]','1191607138.69gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1191607138.69gjeon"]','1191607940.19gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1191607940.19gjeon"]','1234924975.76caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]["Objects"]["1234924975.76caoconno"]','1234924977.02caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1187407776.73gjeon"]["Objects"]["1234924977.02caoconno"]','1234924978.07caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1234924978.07caoconno"]','1234924978.08caoconno': '["Objects"]["1149705583.09Shochet"]["Objects"]["1234924978.08caoconno"]','1235676532.77gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]','1235676532.77gjeon0': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]','1235676532.83gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1235676532.77gjeon"]["Objects"]["1235676532.83gjeon"]','1240961664.0gjeon1': '["Objects"]["1149705583.09Shochet"]["Objects"]["1240961664.0gjeon1"]','1275692669.07gjeon': '["Objects"]["1149705583.09Shochet"]["Objects"]["1275692669.07gjeon"]'}}
extraInfo = {'camPos': Point3(652.314, -154.956, 60.7173),'camHpr': VBase3(-5.3369, -32.9357, 0),'focalLength': 1.39999997616,'skyState': 2,'fog': 0}
|
"""
Various ugly utility functions for twill.
Apart from various simple utility functions, twill's robust parsing
code is implemented in the ConfigurableParsingFactory class.
"""
import os
import re
from collections import namedtuple
from lxml import html
try:
import tidylib
except (ImportError, OSError):
# ImportError can be raised when PyTidyLib package is not installed
# OSError can be raised when the HTML Tidy shared library is not installed
tidylib = None
from . import log, twill_ext
from .errors import TwillException
Link = namedtuple('Link', 'text, url')
class Singleton:
"""A mixin class to create singleton objects."""
def __new__(cls, *args, **kwargs):
it = cls.__dict__.get('__it__')
if it is not None:
return it
cls.__it__ = it = object.__new__(cls)
return it
@classmethod
def reset(cls):
cls.__it__ = None
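# Illustrative usage sketch (added comment, not part of the original module): a
# hypothetical subclass such as `class Config(Singleton)` hands back the same
# cached instance on every call, so `Config() is Config()` is True until
# `Config.reset()` clears the cached object and the next call builds a new one.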
class ResultWrapper:
"""Deal with request results, and present them in a unified form.
These objects are returned by browser._journey()-wrapped functions.
"""
def __init__(self, response):
self.response = response
self.encoding = response.encoding
try:
self.tree = html.fromstring(self.text)
except ValueError:
# may happen when there is an XML encoding declaration
self.tree = html.fromstring(self.content)
self.xpath = self.tree.xpath
self._fix_forms()
@property
    def url(self):
        """Get the url of the result page."""
return self.response.url
@property
def http_code(self):
"""Get the http status code of the result page."""
return self.response.status_code
@property
def text(self):
"""Get the text of the result page."""
return self.response.text
@property
def content(self):
"""Get the binary content of the result page."""
return self.response.content
@property
def headers(self):
"""Get the headers of the result page."""
return self.response.headers
@property
def title(self):
"""Get the title of the result page."""
try:
return self.xpath('//title[1]/text()')[0]
except IndexError:
return None
@property
def links(self):
"""Get all links in the result page."""
return [Link(a.text_content(), a.get('href'))
for a in self.xpath('//a[@href]')]
def find_link(self, pattern):
"""Find a link with a given pattern on the result page."""
regex = re.compile(pattern)
for link in self.links:
if regex.search(link.text) or regex.search(link.url):
return link
return None
def form(self, formname=1):
"""Get the form with the given name on the result page"""
forms = self.forms
if isinstance(formname, str):
# first, try ID
for form in forms:
form_id = form.get('id')
if form_id and form_id == formname:
return form
# next, try regex with name
regex = re.compile(formname)
for form in forms:
name = form.get('name')
if name and regex.search(name):
return form
# last, try number
try:
formnum = int(formname) - 1
if not 0 <= formnum < len(forms):
raise IndexError
except (ValueError, IndexError):
return None
else:
return forms[formnum]
def _fix_forms(self):
"""Fix forms on the page for use with twill."""
# put all stray fields into a form
orphans = self.xpath('//input[not(ancestor::form)]')
if orphans:
form = [b'<form>']
for orphan in orphans:
form.append(html.tostring(orphan))
form.append(b'</form>')
form = b''.join(form)
self.forms = html.fromstring(form).forms
self.forms.extend(self.tree.forms)
else:
self.forms = self.tree.forms
# convert all submit button elements to input elements, since
# otherwise lxml will not recognize them as form input fields
for form in self.forms:
for button in form.xpath("//button[@type='submit']"):
button.tag = 'input'
def trunc(s, length):
"""Truncate a string to a given length.
    The string is truncated by keeping only the first (length-4) characters
    and appending ' ...' in place of the rest.
"""
if s and len(s) > length:
return s[:length - 4] + ' ...'
return s or ''
def print_form(form, n):
"""Pretty-print the given form, with the assigned number."""
info = log.info
name = form.get('name')
if name:
info('\nForm name=%s (#%d)', name, n + 1)
else:
info('\nForm #%d', n + 1)
if form.inputs is not None:
info('## __Name__________________'
' __Type___ __ID________ __Value__________________')
for n, field in enumerate(form.inputs, 1):
value = field.value
if hasattr(field, 'value_options'):
                items = ', '.join(
                    f"'{getattr(opt, 'name', opt)}'"
                    for opt in field.value_options)
value_displayed = f'{value} of {items}'
else:
value_displayed = f'{value}'
field_name = field.name
field_type = getattr(field, 'type', 'select')
field_id = field.get('id')
strings = (
f'{n:2}',
f'{trunc(field_name, 24):24}',
f'{trunc(field_type, 9):9}',
f'{trunc(field_id, 12):12}',
trunc(value_displayed, 40))
info(' '.join(strings))
info('')
def make_boolean(value):
"""Convert the input value into a boolean."""
value = str(value).lower().strip()
# true/false
if value in ('true', 'false'):
return value == 'true'
# 0/nonzero
try:
ival = int(value)
except ValueError:
pass
else:
return bool(ival)
# +/-
if value in ('+', '-'):
return value == '+'
# on/off
if value in ('on', 'off'):
return value == 'on'
raise TwillException(f"unable to convert '{value}' into true/false")
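# Illustrative sketch (added comment, not part of the original module) of the
# spellings make_boolean() accepts: 'true'/'false', any int-like string
# ('0' is False, '2' is True), '+'/'-' and 'on'/'off'; anything else raises
# TwillException.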
def make_int(value):
"""Convert the input value into an int."""
try:
ival = int(value)
except Exception:
pass
else:
return ival
raise TwillException(f"unable to convert '{value}' into an int")
def set_form_control_value(control, value):
"""Set the given control to the given value
The controls can be checkboxes, select elements etc.
"""
if isinstance(control, html.InputElement):
if control.checkable:
try:
value = make_boolean(value)
except TwillException:
# if there's more than one checkbox,
# it should be a html.CheckboxGroup, see below.
pass
else:
control.checked = value
elif control.type not in ('submit', 'image'):
control.value = value
elif isinstance(control, (html.TextareaElement, html.RadioGroup)):
control.value = value
elif isinstance(control, html.CheckboxGroup):
if value.startswith('-'):
value = value[1:]
try:
control.value.remove(value)
except KeyError:
pass
else:
if value.startswith('+'):
value = value[1:]
control.value.add(value)
elif isinstance(control, html.SelectElement):
# for ListControls we need to find the right *value*,
# and figure out if we want to *select* or *deselect*
if value.startswith('-'):
add = False
value = value[1:]
else:
add = True
if value.startswith('+'):
value = value[1:]
# now, select the value.
options = [opt.strip() for opt in control.value_options]
option_names = [(c.text or '').strip() for c in control.getchildren()]
full_options = dict(zip(option_names, options))
for name, opt in full_options.items():
if value not in (name, opt):
continue
if isinstance(control.value, html.MultipleSelectOptions):
if add:
control.value.add(opt)
elif opt in control.value:
control.value.remove(opt)
else:
if add:
control.value = opt
else:
control.value = None
break
else:
raise TwillException('Attempt to set an invalid value')
else:
raise TwillException('Attempt to set value on invalid control')
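# Illustrative sketch (added comments, not part of the original module) of the
# value conventions handled by set_form_control_value() above, where
# checkbox_group and select stand for hypothetical lxml form controls:
#   set_form_control_value(checkbox_group, '+red')   # add 'red' to the group
#   set_form_control_value(checkbox_group, '-red')   # remove 'red' again
#   set_form_control_value(select, '-Blue')          # deselect option 'Blue'
# Plain text inputs get the value assigned directly; checkable inputs also
# accept the make_boolean() spellings shown earlier.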
def _all_the_same_submit(matches):
"""Check if a list of controls all belong to the same control.
For use with checkboxes, hidden, and submit buttons.
"""
name = value = None
for match in matches:
if not isinstance(match, html.InputElement):
return False
if match.type not in ('submit', 'hidden'):
return False
if name is None:
name = match.name
value = match.value
elif match.name != name or match.value != value:
return False
return True
def _all_the_same_checkbox(matches):
"""Check if a list of controls all belong to the same checkbox.
Hidden controls can combine with checkboxes, to allow form
processors to ensure a False value is returned even if user
does not check the checkbox. Without the hidden control, no
value would be returned.
"""
name = None
for match in matches:
if not isinstance(match, html.InputElement):
return False
if match.type not in ('checkbox', 'hidden'):
return False
if name is None:
name = match.name
else:
if match.name != name:
return False
return True
def unique_match(matches):
"""Check whether a match is unique"""
return (len(matches) == 1 or
_all_the_same_checkbox(matches) or _all_the_same_submit(matches))
def run_tidy(html):
"""Run HTML Tidy on the given HTML string.
Return a 2-tuple (output, errors). (None, None) will be returned if
PyTidyLib (or the required shared library for tidy) isn't installed.
"""
from .commands import options
require_tidy = options.get('require_tidy')
if not tidylib:
if require_tidy:
raise TwillException(
'Option require_tidy is set, but PyTidyLib is not installed')
return None, None
opts = {key[5:].replace('_', '-'): value
for key, value in options.items() if key.startswith('tidy_')}
clean_html, errors = tidylib.tidy_document(html, opts)
return clean_html, errors
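# Illustrative sketch (added comment, not part of the original module): options
# whose names start with 'tidy_' are forwarded to HTML Tidy with that prefix
# stripped and underscores turned into dashes, e.g. a hypothetical
# 'tidy_drop_empty_elements' option would be passed to tidylib as
# 'drop-empty-elements'.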
def _equiv_refresh_interval():
"""Get smallest interval for which the browser should follow redirects.
Redirection happens if the given interval is smaller than this.
"""
from .commands import options
return options.get('equiv_refresh_interval')
def is_hidden_filename(filename):
"""Check if this is a hidden file (starting with a dot)."""
return filename not in (
'.', '..') and os.path.basename(filename).startswith('.')
def is_twill_filename(filename):
"""Check if the given filename has the twill file extension."""
return filename.endswith(twill_ext) and not is_hidden_filename(filename)
def make_twill_filename(name):
"""Add the twill extension to the name of a script if necessary."""
if name not in ('.', '..'):
twillname, ext = os.path.splitext(name)
if not ext:
twillname += twill_ext
if os.path.exists(twillname):
name = twillname
return name
def gather_filenames(arglist):
"""Collect script files from within directories."""
names = []
for arg in arglist:
name = make_twill_filename(arg)
if os.path.isdir(name):
for dirpath, dirnames, filenames in os.walk(arg):
dirnames[:] = [
d for d in dirnames if not is_hidden_filename(d)]
for filename in filenames:
if not is_twill_filename(filename):
continue
filename = os.path.join(dirpath, filename)
names.append(filename)
else:
names.append(name)
return names
|
"""
Various ugly utility functions for twill.
Apart from various simple utility functions, twill's robust parsing
code is implemented in the ConfigurableParsingFactory class.
"""
import os
import re
from collections import namedtuple
from lxml import html
try:
import tidylib
except (ImportError, OSError):
# ImportError can be raised when PyTidyLib package is not installed
# OSError can be raised when the HTML Tidy shared library is not installed
tidylib = None
from . import log, twill_ext
from .errors import TwillException
Link = namedtuple('Link', 'text, url')
class Singleton:
"""A mixin class to create singleton objects."""
def __new__(cls, *args, **kwargs):
it = cls.__dict__.get('__it__')
if it is not None:
return it
cls.__it__ = it = object.__new__(cls)
return it
@classmethod
def reset(cls):
cls.__it__ = None
class ResultWrapper:
"""Deal with request results, and present them in a unified form.
These objects are returned by browser._journey()-wrapped functions.
"""
def __init__(self, response):
self.response = response
self.encoding = response.encoding
try:
self.tree = html.fromstring(self.text)
except ValueError:
# may happen when there is an XML encoding declaration
self.tree = html.fromstring(self.content)
self.xpath = self.tree.xpath
self._fix_forms()
@property
    def url(self):
        """Get the url of the result page."""
return self.response.url
@property
def http_code(self):
"""Get the http status code of the result page."""
return self.response.status_code
@property
def text(self):
"""Get the text of the result page."""
return self.response.text
@property
def content(self):
"""Get the binary content of the result page."""
return self.response.content
@property
def headers(self):
"""Get the headers of the result page."""
return self.response.headers
@property
def title(self):
"""Get the title of the result page."""
try:
return self.xpath('//title[1]/text()')[0]
except IndexError:
return None
@property
def links(self):
"""Get all links in the result page."""
return [Link(a.text_content(), a.get('href'))
for a in self.xpath('//a[@href]')]
def find_link(self, pattern):
"""Find a link with a given pattern on the result page."""
regex = re.compile(pattern)
for link in self.links:
if regex.search(link.text) or regex.search(link.url):
return link
return None
def form(self, formname=1):
"""Get the form with the given name on the result page"""
forms = self.forms
if isinstance(formname, str):
# first, try ID
for form in forms:
form_id = form.get('id')
if form_id and form_id == formname:
return form
# next, try regex with name
regex = re.compile(formname)
for form in forms:
name = form.get('name')
if name and regex.search(name):
return form
# last, try number
try:
formnum = int(formname) - 1
if not 0 <= formnum < len(forms):
raise IndexError
except (ValueError, IndexError):
return None
else:
return forms[formnum]
def _fix_forms(self):
"""Fix forms on the page for use with twill."""
# put all stray fields into a form
orphans = self.xpath('//input[not(ancestor::form)]')
if orphans:
form = [b'<form>']
for orphan in orphans:
form.append(html.tostring(orphan))
form.append(b'</form>')
form = b''.join(form)
self.forms = html.fromstring(form).forms
self.forms.extend(self.tree.forms)
else:
self.forms = self.tree.forms
# convert all submit button elements to input elements, since
# otherwise lxml will not recognize them as form input fields
for form in self.forms:
for button in form.xpath("//button[@type='submit']"):
button.tag = 'input'
def trunc(s, length):
"""Truncate a string to a given length.
    The string is truncated by keeping only the first (length-4) characters
    and appending ' ...' in place of the rest.
"""
if s and len(s) > length:
return s[:length - 4] + ' ...'
return s or ''
def print_form(form, n):
"""Pretty-print the given form, with the assigned number."""
info = log.info
name = form.get('name')
if name:
info('\nForm name=%s (#%d)', name, n + 1)
else:
info('\nForm #%d', n + 1)
if form.inputs is not None:
info('## __Name__________________'
' __Type___ __ID________ __Value__________________')
for n, field in enumerate(form.inputs, 1):
value = field.value
if hasattr(field, 'value_options'):
items = ', '.join(
f"'{getattr(opt, 'name', opt)}'"
for opt in field.value_options)
value_displayed = f'{value} of {items}'
else:
value_displayed = f'{value}'
field_name = field.name
field_type = getattr(field, 'type', 'select')
field_id = field.get('id')
strings = (
f'{n:2}',
f'{trunc(field_name, 24):24}',
f'{trunc(field_type, 9):9}',
f'{trunc(field_id, 12):12}',
trunc(value_displayed, 40))
info(' '.join(strings))
info('')
def make_boolean(value):
"""Convert the input value into a boolean."""
value = str(value).lower().strip()
# true/false
if value in ('true', 'false'):
return value == 'true'
# 0/nonzero
try:
ival = int(value)
except ValueError:
pass
else:
return bool(ival)
# +/-
if value in ('+', '-'):
return value == '+'
# on/off
if value in ('on', 'off'):
return value == 'on'
raise TwillException(f"unable to convert '{value}' into true/false")
def make_int(value):
"""Convert the input value into an int."""
try:
ival = int(value)
except Exception:
pass
else:
return ival
raise TwillException(f"unable to convert '{value}' into an int")
def set_form_control_value(control, value):
"""Set the given control to the given value
The controls can be checkboxes, select elements etc.
"""
if isinstance(control, html.InputElement):
if control.checkable:
try:
value = make_boolean(value)
except TwillException:
# if there's more than one checkbox,
# it should be a html.CheckboxGroup, see below.
pass
else:
control.checked = value
elif control.type not in ('submit', 'image'):
control.value = value
elif isinstance(control, (html.TextareaElement, html.RadioGroup)):
control.value = value
elif isinstance(control, html.CheckboxGroup):
if value.startswith('-'):
value = value[1:]
try:
control.value.remove(value)
except KeyError:
pass
else:
if value.startswith('+'):
value = value[1:]
control.value.add(value)
elif isinstance(control, html.SelectElement):
# for ListControls we need to find the right *value*,
# and figure out if we want to *select* or *deselect*
if value.startswith('-'):
add = False
value = value[1:]
else:
add = True
if value.startswith('+'):
value = value[1:]
# now, select the value.
options = [opt.strip() for opt in control.value_options]
option_names = [(c.text or '').strip() for c in control.getchildren()]
full_options = dict(zip(option_names, options))
for name, opt in full_options.items():
if value not in (name, opt):
continue
if isinstance(control.value, html.MultipleSelectOptions):
if add:
control.value.add(opt)
elif opt in control.value:
control.value.remove(opt)
else:
if add:
control.value = opt
else:
control.value = None
break
else:
raise TwillException('Attempt to set an invalid value')
else:
raise TwillException('Attempt to set value on invalid control')
def _all_the_same_submit(matches):
"""Check if a list of controls all belong to the same control.
For use with checkboxes, hidden, and submit buttons.
"""
name = value = None
for match in matches:
if not isinstance(match, html.InputElement):
return False
if match.type not in ('submit', 'hidden'):
return False
if name is None:
name = match.name
value = match.value
elif match.name != name or match.value != value:
return False
return True
def _all_the_same_checkbox(matches):
"""Check if a list of controls all belong to the same checkbox.
Hidden controls can combine with checkboxes, to allow form
processors to ensure a False value is returned even if user
does not check the checkbox. Without the hidden control, no
value would be returned.
"""
name = None
for match in matches:
if not isinstance(match, html.InputElement):
return False
if match.type not in ('checkbox', 'hidden'):
return False
if name is None:
name = match.name
else:
if match.name != name:
return False
return True
def unique_match(matches):
"""Check whether a match is unique"""
return (len(matches) == 1 or
_all_the_same_checkbox(matches) or _all_the_same_submit(matches))
def run_tidy(html):
"""Run HTML Tidy on the given HTML string.
Return a 2-tuple (output, errors). (None, None) will be returned if
PyTidyLib (or the required shared library for tidy) isn't installed.
"""
from .commands import options
require_tidy = options.get('require_tidy')
if not tidylib:
if require_tidy:
raise TwillException(
'Option require_tidy is set, but PyTidyLib is not installed')
return None, None
opts = {key[5:].replace('_', '-'): value
for key, value in options.items() if key.startswith('tidy_')}
clean_html, errors = tidylib.tidy_document(html, opts)
return clean_html, errors
def _equiv_refresh_interval():
"""Get smallest interval for which the browser should follow redirects.
Redirection happens if the given interval is smaller than this.
"""
from .commands import options
return options.get('equiv_refresh_interval')
def is_hidden_filename(filename):
"""Check if this is a hidden file (starting with a dot)."""
return filename not in (
'.', '..') and os.path.basename(filename).startswith('.')
def is_twill_filename(filename):
"""Check if the given filename has the twill file extension."""
return filename.endswith(twill_ext) and not is_hidden_filename(filename)
def make_twill_filename(name):
"""Add the twill extension to the name of a script if necessary."""
if name not in ('.', '..'):
twillname, ext = os.path.splitext(name)
if not ext:
twillname += twill_ext
if os.path.exists(twillname):
name = twillname
return name
def gather_filenames(arglist):
"""Collect script files from within directories."""
names = []
for arg in arglist:
name = make_twill_filename(arg)
if os.path.isdir(name):
for dirpath, dirnames, filenames in os.walk(arg):
dirnames[:] = [
d for d in dirnames if not is_hidden_filename(d)]
for filename in filenames:
if not is_twill_filename(filename):
continue
filename = os.path.join(dirpath, filename)
names.append(filename)
else:
names.append(name)
return names
|
# Copyright 2020 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
from __future__ import annotations
import math
import inspect
import re
import abc
import random
import itertools
import collections.abc
import statistics
import concurrent.futures
import enum
import functools
import numbers
from typing import (Iterable, Union, Optional, Tuple, Any, Iterator, Type,
Sequence, Callable, Hashable, Mapping, TypeVar)
import dataclasses
import more_itertools
import numpy as np
from .utils import ImmutableDict
from . import utils
from . import exceptions
@dataclasses.dataclass(order=True, frozen=True)
class ActionObservation(utils.NiceDataclass):
action: Optional[Action]
observation: Observation
class _ActionType(abc.ABCMeta):# collections.abc.Sequence):
__iter__ = lambda cls: iter(cls.all_actions)
__len__ = lambda cls: len(cls.all_actions)
def __getitem__(cls, i: int):
if i >= len(cls):
raise IndexError
for j, item in enumerate(cls):
if j == i:
                return item
raise RuntimeError
@property
def n_neurons(cls) -> int:
try:
return cls._n_neurons
except AttributeError:
cls._n_neurons = len(cls)
return cls._n_neurons
_action_regex_head = re.compile(r'[A-Za-z0-9.]')
_action_regex_tail = re.compile(r'[A-Za-z0-9_.\-/>]*')
_action_regex = re.compile(f'^{_action_regex_head.pattern}'
f'{_action_regex_tail.pattern}$')
@functools.total_ordering
class Action(metaclass=_ActionType):
all_actions: Sequence[Action]
n_neurons: int
def __lt__(self, other):
return self.all_actions.index(self) < self.all_actions.index(other)
def slugify(self) -> str:
raw = str(self)
first_letter = raw[0]
prefix = '' if _action_regex_head.fullmatch(first_letter) else '0'
characters = ((c if _action_regex_tail.fullmatch(c) else '-') for c in raw)
        result = f'{prefix}{"".join(characters)}'
assert _action_regex.fullmatch(result)
return result
def to_neurons(self) -> np.ndarray:
# Implementation for simple discrete actions. Can override.
try:
return self._to_neurons
except AttributeError:
self._to_neurons = np.array([int(self == action) for action in type(self)],
dtype=np.float64)
return self._to_neurons
@classmethod
def from_neurons(cls, neurons: Iterable) -> Action:
# Implementation for simple discrete actions. Can override.
return cls[tuple(neurons).index(1)]
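# Illustrative sketch (added comment, not part of the original module): for a
# hypothetical Action subclass with all_actions = (UP, DOWN, LEFT, RIGHT),
# UP.to_neurons() yields array([1., 0., 0., 0.]) and from_neurons((0, 0, 1, 0))
# looks up the action at index 2 (LEFT); a plain one-hot encoding over the
# class's action sequence.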
class Observation(abc.ABC):
state: State
legal_actions: Tuple[Action, ...]
is_end: bool
reward: numbers.Real
n_neurons: int
@abc.abstractmethod
def to_neurons(self) -> np.ndarray:
'''Represent the observation as an array of numbers for a neural network.'''
raise NotImplementedError
PlayerId = TypeVar('PlayerId', bound=Hashable)
class State(abc.ABC):
Observation: Type[Observation]
Action: Type[Action]
is_end: bool
player_id_to_observation: ImmutableDict[PlayerId, Observation]
@abc.abstractmethod
def get_next_state_from_actions(self, player_id_to_action: Mapping[PlayerId, Action]) -> State:
raise NotImplementedError
@staticmethod
@abc.abstractmethod
def make_initial() -> State:
'''Create an initial world state that we can start playing with.'''
raise NotImplementedError
class _SinglePlayerStateType(abc.ABCMeta):
@property
def Observation(cls) -> _SinglePlayerStateType:
return cls
class SinglePlayerState(State, Observation, metaclass=_SinglePlayerStateType):
player_id_to_observation = property(lambda self: ImmutableDict({None: self}))
@abc.abstractmethod
def get_next_state_from_action(self, action: Action) -> SinglePlayerState:
raise NotImplementedError
def get_next_state_from_actions(self, player_id_to_action: Mapping[PlayerId, Action]) \
-> SinglePlayerState:
return self.get_next_state_from_action(more_itertools.one(player_id_to_action.values()))
from . import strategizing
|
# Copyright 2020 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
from __future__ import annotations
import math
import inspect
import re
import abc
import random
import itertools
import collections.abc
import statistics
import concurrent.futures
import enum
import functools
import numbers
from typing import (Iterable, Union, Optional, Tuple, Any, Iterator, Type,
Sequence, Callable, Hashable, Mapping, TypeVar)
import dataclasses
import more_itertools
import numpy as np
from .utils import ImmutableDict
from . import utils
from . import exceptions
@dataclasses.dataclass(order=True, frozen=True)
class ActionObservation(utils.NiceDataclass):
action: Optional[Action]
observation: Observation
class _ActionType(abc.ABCMeta):# collections.abc.Sequence):
__iter__ = lambda cls: iter(cls.all_actions)
__len__ = lambda cls: len(cls.all_actions)
def __getitem__(cls, i: int):
if i >= len(cls):
raise IndexError
for j, item in enumerate(cls):
if j == i:
                return item
raise RuntimeError
@property
def n_neurons(cls) -> int:
try:
return cls._n_neurons
except AttributeError:
cls._n_neurons = len(cls)
return cls._n_neurons
_action_regex_head = re.compile(r'[A-Za-z0-9.]')
_action_regex_tail = re.compile(r'[A-Za-z0-9_.\-/>]*')
_action_regex = re.compile(f'^{_action_regex_head.pattern}'
f'{_action_regex_tail.pattern}$')
@functools.total_ordering
class Action(metaclass=_ActionType):
all_actions: Sequence[Action]
n_neurons: int
def __lt__(self, other):
return self.all_actions.index(self) < self.all_actions.index(other)
def slugify(self) -> str:
raw = str(self)
first_letter = raw[0]
prefix = '' if _action_regex_head.fullmatch(first_letter) else '0'
characters = ((c if _action_regex_tail.fullmatch(c) else '-') for c in raw)
result = f'{prefix}{"".join(characters)}'
assert _action_regex.fullmatch(result)
return result
def to_neurons(self) -> np.ndarray:
# Implementation for simple discrete actions. Can override.
try:
return self._to_neurons
except AttributeError:
self._to_neurons = np.array([int(self == action) for action in type(self)],
dtype=np.float64)
return self._to_neurons
@classmethod
def from_neurons(cls, neurons: Iterable) -> Action:
# Implementation for simple discrete actions. Can override.
return cls[tuple(neurons).index(1)]
class Observation(abc.ABC):
state: State
legal_actions: Tuple[Action, ...]
is_end: bool
reward: numbers.Real
n_neurons: int
@abc.abstractmethod
def to_neurons(self) -> np.ndarray:
'''Represent the observation as an array of numbers for a neural network.'''
raise NotImplementedError
PlayerId = TypeVar('PlayerId', bound=Hashable)
class State(abc.ABC):
Observation: Type[Observation]
Action: Type[Action]
is_end: bool
player_id_to_observation: ImmutableDict[PlayerId, Observation]
@abc.abstractmethod
def get_next_state_from_actions(self, player_id_to_action: Mapping[PlayerId, Action]) -> State:
raise NotImplementedError
@staticmethod
@abc.abstractmethod
def make_initial() -> State:
'''Create an initial world state that we can start playing with.'''
raise NotImplementedError
class _SinglePlayerStateType(abc.ABCMeta):
@property
def Observation(cls) -> _SinglePlayerStateType:
return cls
class SinglePlayerState(State, Observation, metaclass=_SinglePlayerStateType):
player_id_to_observation = property(lambda self: ImmutableDict({None: self}))
@abc.abstractmethod
def get_next_state_from_action(self, action: Action) -> SinglePlayerState:
raise NotImplementedError
def get_next_state_from_actions(self, player_id_to_action: Mapping[PlayerId, Action]) \
-> SinglePlayerState:
return self.get_next_state_from_action(more_itertools.one(player_id_to_action.values()))
from . import strategizing
|
from typing import Dict, TextIO
from utils.file import write_enum
from utils.string import to_pascal_case
def read_template() -> str:
template_file: TextIO = open("./file_templates/csharp-enum.cs", "r")
return template_file.read()
def as_enum_row(key: object, json: object) -> str:
enum_name = to_pascal_case(key)
    hex_val = f"0x{json[key]['unicode']}"
return f" {enum_name} = {hex_val},\n"
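# Illustrative sketch (added comment, not part of the original module): given a
# hypothetical entry {"arrow-up": {"unicode": "f062"}}, as_enum_row("arrow-up", ...)
# returns '    ArrowUp = 0xf062,\n', assuming to_pascal_case maps 'arrow-up'
# to 'ArrowUp'.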
def as_csharp_enum(icon_json: Dict):
enum_template = read_template()
enum_rows = ""
for key in icon_json:
enum_rows += as_enum_row(key, icon_json)
updated_enum = enum_template.replace("<<Contents>>", enum_rows)
write_enum(updated_enum, "FontAwesomeCodes.cs")
|
from typing import Dict, TextIO
from utils.file import write_enum
from utils.string import to_pascal_case
def read_template() -> str:
template_file: TextIO = open("./file_templates/csharp-enum.cs", "r")
return template_file.read()
def as_enum_row(key: object, json: object) -> str:
enum_name = to_pascal_case(key)
hex_val = f"0x{json[key]['unicode']}"
return f" {enum_name} = {hex_val},\n"
def as_csharp_enum(icon_json: Dict):
enum_template = read_template()
enum_rows = ""
for key in icon_json:
enum_rows += as_enum_row(key, icon_json)
updated_enum = enum_template.replace("<<Contents>>", enum_rows)
write_enum(updated_enum, "FontAwesomeCodes.cs")
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Tuple, Dict, List, Any
import itertools
import configparser as CP
from datetime import timedelta, datetime
import dateutil.parser
from traitlets import TraitType
from traitlets.config.configurable import Configurable
from ._debug_utils import debug_print
from .dependencies import Dependencies
from .constants import Constants, Schema
from .my_utils import split_lex, adjust_path, is_env_var, get_env_var, is_collection, strip_if_quoted
from .engine import Engine
class Parser(object):
default_options:Dict[str,Any] = {}
traits_dict:Dict[str,TraitType] = {}
@classmethod
def initialize(cls, config:Configurable):
cls.traits_dict = config.traits()
config.observe(Parser.observe_config_changes)
cls.init_default_options(config)
@staticmethod
def observe_config_changes(change:Dict[str,str]):
if change.get('type') == 'change':
name = change.get('name')
obj = Parser._OPTIONS_TABLE.get(name.lower().replace("-", "").replace("_", ""))
if "init" not in obj:
Parser.default_options[change.get('name')] = change.get('new')
@classmethod
def init_default_options(cls, config:Configurable):
for obj in cls._OPTIONS_TABLE.values():
if "abbreviation" not in obj:
name = obj.get("flag")
if "init" in obj:
cls.default_options[name] = obj.get("init")
elif name in cls.traits_dict:
cls.default_options[name] = getattr(config, name)
elif "abbreviation" not in obj:
raise Exception(f"missing init in _OPTIONS_TABLE for option: {name}")
@classmethod
def parse(cls, _line:str, _cell:str, config:Configurable, engines:List[Engine], user_ns:Dict[str,Any])->List[Dict[str,Any]]:
is_cell = _cell is not None
        cell = f"{_line}\n{_cell or ''}"
cell = cell.strip()
code = cell
#
# split string to queries
#
suppress_all_results = False
# tuple:
sections:List[Dict[str,str]] = []
if is_cell:
magic_section_name = Constants.MAGIC_NAME
section_lines:List[str] = []
            previous_line = " "  # must start as whitespace so a magic on the first line is detected below
            # note: splitlines(True) keeps the trailing \n, so each line ends with \n
for line in code.splitlines(True):
lstripped_line = line.lstrip()
if (lstripped_line.startswith(Constants.IPYKERNEL_CELL_MAGIC_PREFIX)
and (previous_line.isspace() or (len(sections) > 0 and sections[-1].get("type") == "line_magic"))):
if len(section_lines) > 0:
sections.append({"type": "cell_magic", "name": magic_section_name, "body": "".join(section_lines)})
magic_word = lstripped_line.split(None, 1)[0]
magic_section_name = magic_word[len(Constants.IPYKERNEL_CELL_MAGIC_PREFIX):]
lstripped_line = lstripped_line[len(magic_word):].lstrip()
if magic_section_name == Constants.MAGIC_NAME:
section_lines = [] if lstripped_line.isspace() else [lstripped_line]
else:
section_lines = [lstripped_line]
elif magic_section_name != Constants.MAGIC_NAME:
section_lines.append(line)
elif (lstripped_line.startswith(Constants.IPYKERNEL_LINE_MAGIC_PREFIX)
and (previous_line.isspace() or (len(sections) > 0 and sections[-1].get("type") == "line_magic"))):
magic_word = lstripped_line.split(None, 1)[0]
magic_name = magic_word[len(Constants.IPYKERNEL_LINE_MAGIC_PREFIX):]
lstripped_line = lstripped_line[len(magic_word):].lstrip()
if magic_name == Constants.MAGIC_NAME:
if not lstripped_line.isspace():
sections.append({"type": "line_magic", "name": magic_name, "body": lstripped_line})
else:
sections.append({"type": "line_magic", "name": magic_name, "body": lstripped_line})
elif line.isspace():
if len(section_lines) > 0:
not_commented_lines = [1 for seg_line in section_lines if not seg_line.lstrip().startswith("//")]
if (len(not_commented_lines) > 0):
sections.append({"type": "cell_magic", "name": magic_section_name, "body": "".join(section_lines)})
section_lines = []
else:
section_lines.append(line)
previous_line = line
if len(section_lines) > 0:
if magic_section_name == Constants.MAGIC_NAME:
not_commented_lines = [1 for seg_line in section_lines if not seg_line.lstrip().startswith("//")]
if (len(not_commented_lines) > 0):
sections.append({"type": "cell_magic", "name": magic_section_name, "body": "".join(section_lines)})
else:
sections.append({"type": "cell_magic", "name": magic_section_name, "body": "".join(section_lines)})
if len(sections) > 0:
last_query = sections[-1].get("body").strip()
if last_query == ";":
suppress_all_results = True
sections = sections[:-1]
if len(sections) == 0:
sections.append({"type": "cell_magic", "name": Constants.MAGIC_NAME, "body": ""})
else:
sections.append({"type": "line_magic", "name": Constants.MAGIC_NAME, "body": code.strip()})
#
# parse code to kql and options
#
parsed_sections = []
last_connection_string = ""
for section in sections:
parsed_section = cls._parse_one_section(section, is_cell, config, engines, user_ns)
connection_string = parsed_section.get("connection_string")
if connection_string:
last_connection_string = connection_string
elif len(parsed_section.get("command")) == 0:
parsed_section["connection_string"] = last_connection_string
if suppress_all_results:
parsed_section.get("options")["suppress_results"] = True
parsed_sections.append(parsed_section)
if len(parsed_sections) > 0:
parsed_sections[-1]["last_query"] = True
return parsed_sections
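    # Illustrative sketch (added comment, not part of the original source): for a
    # hypothetical cell body of
    #
    #     myDatabase@myCluster
    #     StormEvents | take 10
    #
    #     StormEvents | count
    #
    # the blank line splits the code into two sections, and the second parsed
    # section inherits the connection string of the first because its own
    # "connection_string" comes back empty and is backfilled from
    # last_connection_string in the loop above.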
@classmethod
def _parse_one_section(cls, section:Dict[str,str], is_cell:bool, config:Configurable, engines:List[Engine], user_ns:Dict[str,Any])->Dict[str,Any]:
"""Separate input into (connection info, KQL statements, options)"""
cell, command = cls._parse_kql_command(section, user_ns)
command_name = command.get("command")
if command_name is not None and command_name != "submit":
cell_rest, options = cls._parse_kql_options(cell.strip(), is_cell, config, user_ns)
cell_rest = cls._update_kql_command_params(command, cell_rest, user_ns)
cls._validate_kql_command_params(command)
if cell_rest:
raise ValueError(f"command --{command_name} has too many parameters")
parsed_query = {"connection_string": "", "query": "", "options": options, "command": command}
return parsed_query
# split to max 2 parts. First part, parts[0], is the first string.
# parts = [part.strip() for part in cell.split(None, 1)]
parts = split_lex(cell)
# print(parts)
if not parts:
kql, options = cls._parse_kql_options("", is_cell, config, user_ns)
parsed_query = {"connection_string": "", "query": kql, "options": options, "command": {}}
return parsed_query
#
        # replace substrings of the form $name or ${name}, on Windows also %name%, if found in environment variables
#
connection_string = None
conn_str = parts[0].strip()
if not conn_str.startswith(('-', '+')):
_was_quoted, conn_str = strip_if_quoted(conn_str)
#
            # connection taken from a section in the dsn file (the file name has to be defined in config.dsn_filename or specified as a parameter)
#
if is_collection(conn_str, "["):
section = conn_str[1:-1].strip()
                # parse to get the flag, for the case where the file name is specified in the options
code = cell[len(parts[0]):]
kql, options = cls._parse_kql_options(code, is_cell, config, user_ns)
parser = CP.ConfigParser()
dsn_filename = adjust_path(options.get("dsn_filename", config.dsn_filename))
parser.read(dsn_filename)
cfg_dict = dict(parser.items(section))
cfg_dict_lower = {k.lower().replace("_", "").replace("-", ""): v for (k, v) in cfg_dict.items()}
for e in engines:
if e.get_mandatory_key() in cfg_dict_lower.keys():
all_keys = set(itertools.chain(*e.get_valid_keys_combinations()))
connection_kv = [f"{k}='{v}'" for k, v in cfg_dict_lower.items() if v and k in all_keys]
                        connection_string = f"{e.get_uri_schema_name()}://{';'.join(connection_kv)}"
break
#
# connection specified starting with one of the supported prefixes
#
elif "://" in conn_str:
sub_parts = conn_str.strip().split("://", 1)
if (len(sub_parts) == 2 and sub_parts[0].lower().replace("_", "").replace("-", "") in list(itertools.chain(*[e._ALT_URI_SCHEMA_NAMES for e in engines]))):
connection_string = conn_str
#
# connection specified as database@cluster
#
elif "@" in conn_str and "|" not in conn_str and "'" not in conn_str and '"' not in conn_str:
connection_string = conn_str
#
# connection not specified, override default
#
if connection_string is None:
connection_string = ""
code = cell
else:
code = cell[len(parts[0]):]
#
# parse code to kql and options
#
kql, options = cls._parse_kql_options(code.strip(), is_cell, config, user_ns)
kql = options.pop("query", None) or kql
connection_string = options.pop("conn", None) or connection_string
parsed_query = {"connection_string": connection_string.strip(), "query": kql, "options": options, "command": {}}
return parsed_query
@classmethod
def parse_old(cls, line:str, cell:str, config:Configurable, engines:List[Engine], user_ns:Dict[str,Any])->List[Dict[str,Any]]:
"""Separate input into (connection info, KQL statements, options)"""
is_cell = cell is not None
        cell = f"{line}\n{cell or ''}"
cell = cell.strip()
parsed_queries = []
cell, command = cls._parse_kql_command(cell, user_ns)
command_name = command.get("command")
if command_name is not None and command_name != "submit":
cell_rest, options = cls._parse_kql_options(cell.strip(), is_cell, config, user_ns)
cell_rest = cls._update_kql_command_params(command, cell_rest, user_ns)
cls._validate_kql_command_params(command)
if cell_rest:
raise ValueError(f"command --{command_name} has too many parameters")
parsed_queries.append({"connection_string": "", "query": "", "options": options, "command": command})
return parsed_queries
# split into at most 2 parts; the first part, parts[0], is the first token.
# parts = [part.strip() for part in cell.split(None, 1)]
parts = split_lex(cell)
# print(parts)
if not parts:
kql, options = cls._parse_kql_options("", is_cell, config, user_ns)
parsed_queries.append({"connection_string": "", "query": kql, "options": options, "command": {}})
return parsed_queries
#
# replace substrings of the form $name or ${name} (on Windows also %name%) if found in env variables
#
connection_string = None
conn_str = parts[0].strip()
if not conn_str.startswith(('-', '+')):
_was_quoted, conn_str = strip_if_quoted(conn_str)
#
# connection taken from a section in the dsn file (the file name has to be defined in config.dsn_filename or specified as a parameter)
#
if is_collection(conn_str, "["):
section = conn_str[1:-1].strip()
# parse to get the flag, for the case that the file name is specified in the options
code = cell[len(parts[0]):]
kql, options = cls._parse_kql_options(code, is_cell, config, user_ns)
parser = CP.ConfigParser()
dsn_filename = adjust_path(options.get("dsn_filename", config.dsn_filename))
parser.read(dsn_filename)
cfg_dict = dict(parser.items(section))
cfg_dict_lower = {k.lower().replace("_", "").replace("-", ""): v for (k, v) in cfg_dict.items()}
for e in engines:
if e.get_mandatory_key() in cfg_dict_lower.keys():
all_keys = set(itertools.chain(*e.get_valid_keys_combinations()))
connection_kv = [f"{k}='{v}'" for k, v in cfg_dict_lower.items() if v and k in all_keys]
connection_string = f"{e.get_uri_schema_name()}://{";".join(connection_kv)}"
break
#
# connection specified starting with one of the supported prefixes
#
elif "://" in conn_str:
sub_parts = conn_str.strip().split("://", 1)
if (len(sub_parts) == 2 and sub_parts[0].lower().replace("_", "").replace("-", "") in list(itertools.chain(*[e._ALT_URI_SCHEMA_NAMES for e in engines]))):
connection_string = conn_str
#
# connection specified as database@cluster
#
elif "@" in conn_str and "|" not in conn_str and "'" not in conn_str and '"' not in conn_str:
connection_string = conn_str
#
# connection not specified, override default
#
if connection_string is None:
connection_string = ""
code = cell
else:
code = cell[len(parts[0]):]
#
# split string to queries
#
suppress_all_results = False
queries:List[str] = []
if is_cell:
queryLines:List[str] = []
last_line:str = None
for last_line in code.splitlines(True):
# note: splitlines(True) doesn't remove the \n suffix; each line ends with \n
if last_line.isspace():
if len(queryLines) > 0:
queries.append("".join(queryLines))
queryLines = []
else:
queryLines.append(last_line)
if len(queryLines) > 0:
queries.append("".join(queryLines))
if len(queries) > 0:
last_query = queries[-1].strip()
if last_query == ";":
suppress_all_results = True
queries = queries[:-1]
if len(queries) == 0:
queries.append("")
else:
queries.append(code.strip())
#
# parse code to kql and options
#
for query in queries:
kql, options = cls._parse_kql_options(query.strip(), is_cell, config, user_ns)
kql = options.pop("query", None) or kql
connection_string = options.pop("conn", None) or connection_string
if suppress_all_results:
options["suppress_results"] = True
parsed_queries.append({"connection_string": connection_string.strip(), "query": kql, "options": options, "command": {}})
return parsed_queries
# Note: None as default is valid
# for commands that have no parameters, the "type" key should not exist or should be set to None
# default max_params is 1 if type is not None otherwise 0
# default min_params is 1 if type is not None otherwise 0
_COMMANDS_TABLE = {
"version": {"flag": "version", "type": None},
"banner": {"flag": "banner", "type": None},
"usage": {"flag": "usage", "type": None},
"submit": {"flag": "submit", "type": None}, # default
"help": {"flag": "help", "type": "str", "default": "help"},
"faq": {"flag": "faq", "type": None},
"palette": {"flag": "palette", "type": None},
"palettes": {"flag": "palettes", "type": None},
# "config": {"flag": "config", "type": "str", "default": None},
"config": {"flag": "config", "type": "not_quoted_str", "allow_none": True, "max_params": 1, "min_params": 0},
"bugreport": {"flag": "bug_report", "type": None},
"conn": {"flag": "conn", "type": "str", "default": None},
# should be per connection
"cache": {"flag": "cache", "type": "str", "allow_none": True},
"cachecreate": {"flag": "cache_create", "type": "str", "allow_none": True},
"cacheappend": {"flag": "cache_append", "type": "str", "allow_none": True},
"cachecreateorappend": {"flag": "cache_create_or_append", "type": "str", "allow_none": True},
"cacheremove": {"flag": "cache_remove", "type": "str", "allow_none": True},
"cachelist": {"flag": "cache_list", "type": None},
"cachestop": {"flag": "cache_stop", "type": None},
"usecache": {"flag": "use_cache", "type": "str", "allow_none": True},
"usecachestop": {"flag": "use_cache_stop", "type": "str", "allow_none": True},
"schema": {"flag": "schema", "type": "str", "default": None},
"clearssodb": {"flag": "clear_sso_db", "type": None},
"py": {"flag": "python", "type": "not_quoted_str", "allow_none": True, "max_params": 2, "min_params": 2},
"pyro": {"flag": "python", "type": "not_quoted_str", "allow_none": True, "max_params": 2, "min_params": 2},
"pyrw": {"flag": "python", "type": "not_quoted_str", "allow_none": True, "max_params": 2, "min_params": 2},
"activatekernel": {"flag": "activate_kernel", "type": None},
"deactivatekernel": {"flag": "deactivate_kernel", "type": None},
"linemagic": {"flag": "line_magic", "type": "not_quoted_str", "allow_none": True, "max_params": 2, "min_params": 2},
"cellmagic": {"flag": "cell_magic", "type": "not_quoted_str", "allow_none": True, "max_params": 3, "min_params": 3},
}
@classmethod
def _parse_kql_command(cls, section:Dict[str,str], user_ns:Dict[str,Any])->Tuple[str,Dict[str,Any]]:
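"""Extract a leading Kqlmagic --command (and its parameters) from a section.
Python and line/cell magic sections are mapped to their matching _COMMANDS_TABLE entries.
Returns a tuple of (remaining code, command dict); the dict is empty when no command is present."""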
code = section.get("body")
if section.get("name") != Constants.MAGIC_NAME:
name = section.get("name").replace("_", "").replace("-", "")
if name in ["py", "pyro", "pyrw"]:
obj = cls._COMMANDS_TABLE.get(name)
return ("", {"command": obj.get("flag"), "obj": obj, "params": [section.get("body"), name]})
else:
# line/cell magic
name = section.get("type")
obj = cls._COMMANDS_TABLE.get(name.replace("_", ""))
command_name = obj.get("flag")
params = []
body = section.get("body")
if command_name == "cell_magic":
first_line = body.split("\n",1)[0]
params.append(first_line.strip())
body = body[len(first_line) + 1:]
params.append(body)
params.append(section.get("name"))
return ("", {"command": command_name, "obj": obj, "params": params})
# kql section
lookup_key = None
trimmed_code = code
skip_words_count = 0
obj = None
params_type = None
command_name = None
params = []
words = code.split()
more_words_count = len(words)
for word in words:
more_words_count -= 1
if params_type == "not_quoted_str" and not word.startswith("-"):
# break
pass
if skip_words_count == 0:
_comment, skip_words_count = cls._parse_comment(word, trimmed_code)
if skip_words_count > 0:
skip_words_count -= 1
trimmed_code = trimmed_code[trimmed_code.find(word) + len(word):]
continue
# command
elif command_name is None:
if not word.strip().startswith("--"):
break
word = word[2:]
if word.startswith("-"):
raise ValueError(f"unknown command --{word}, commands' prefix should be a double hyphen-minus, not a triple hyphen-minus")
lookup_key = word.lower().replace("_", "").replace("-", "")
obj = cls._COMMANDS_TABLE.get(lookup_key)
if obj is None:
raise ValueError(f"unknown command --{word}")
command_name = obj.get("flag")
trimmed_code = trimmed_code[trimmed_code.find(word) + len(word):]
params_type = obj.get("type")
if params_type is None:
break
# option
elif word.startswith("-"):
break
# command's parameters
else:
command = {"command": command_name, "obj": obj, "params": params}
EXIT_ON_OPTION = True
trimmed_code, params = cls._parse_kql_command_params(command, trimmed_code, EXIT_ON_OPTION, user_ns)
break
if command_name is None:
return (code.strip(), {})
if command_name == "python":
params = params or [""]
params.append(lookup_key) # type of python "py", "pyro", "pyrw"
elif command_name in ["line_magic", "cell_magic"]:
body = params[0] if params else ""
magic_word = body.split(None, 1)[0]
body = body.lstrip()[len(magic_word):]
params = []
if command_name == "cell_magic":
first_line = body.split("\n", 1)[0]
params.append(first_line.strip())
body = body[len(first_line) + 1:]
else:
body = body.lstrip()
params.append(body)
ipykernel_prefix_len = 0
if command_name == "cell_magic" and magic_word.startswith(Constants.IPYKERNEL_CELL_MAGIC_PREFIX):
ipykernel_prefix_len = len(Constants.IPYKERNEL_CELL_MAGIC_PREFIX)
elif command_name == "line_magic" and magic_word.startswith(Constants.IPYKERNEL_LINE_MAGIC_PREFIX):
ipykernel_prefix_len = len(Constants.IPYKERNEL_LINE_MAGIC_PREFIX)
magic_name = magic_word[ipykernel_prefix_len:]
params.append(magic_name) # the magic name
if magic_name in ["py", "pyro", "pyrw"]:
obj = cls._COMMANDS_TABLE.get(magic_name)
body = params[0] if command_name == "line_magic" else f"{params[0]}\n{params[1]}"
return ("", {"command": obj.get("flag"), "obj": obj, "params": [body, magic_name]})
return (trimmed_code.strip(), {"command": command_name, "obj": obj, "params": params})
@classmethod
def _parse_kql_command_params(cls, command:dict, code:str, exit_on_options:bool, user_ns:Dict[str,Any])->Tuple[str,List[str]]:
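"""Collect the parameters that follow a command.
For 'not_quoted_str' commands the whole remaining code is taken as a single parameter; otherwise
words are consumed one by one (handling // comments and quoted strings), stopping at the first
option when exit_on_options is True. Returns (remaining code, params)."""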
trimmed_code = code.strip()
params = command.get("params")
obj = command.get("obj")
params_type = obj.get("type")
if params_type == "not_quoted_str":
if exit_on_options and trimmed_code.startswith("-"):
# DO NOTHING, EXIT
pass
else:
params.append(trimmed_code)
trimmed_code = ""
elif params_type is not None:
skip_words_count = 0
command_name = command.get("command")
words = code.split()
for word in words:
if skip_words_count == 0:
_comment, skip_words_count = cls._parse_comment(word, trimmed_code)
if skip_words_count > 0:
skip_words_count -= 1
trimmed_code = trimmed_code[trimmed_code.find(word) + len(word):]
continue
# option
if exit_on_options and word.startswith("-"):
break
# command's parameters
if params_type == "str" and word[0] in ["'",'"']:
quoted_string, skip_words_count = cls.parse_quote(trimmed_code)
param = quoted_string
skip_words_count -= 1
else:
param = word
params.append(cls._parse_value("command", obj, command_name, param, user_ns))
trimmed_code = trimmed_code[trimmed_code.find(word) + len(word):]
return trimmed_code.strip(), params
@classmethod
def _update_kql_command_params(cls, command:dict, cell_rest:str, user_ns:Dict[str,Any])->str:
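"""Assign parameters to a command that was parsed without any, taking them from the remaining
cell text or from the command's declared default. Returns the unconsumed cell text."""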
params = command.get("params")
if len(params) == 0:
obj = command.get("obj")
if obj.get("type") == "not_quoted_str":
params.append(cell_rest)
cell_rest = ""
elif len(cell_rest) > 0 and obj.get("type") is not None:
DONT_EXIT_ON_OPTION = False
cell_rest, params = cls._parse_kql_command_params(command, cell_rest, DONT_EXIT_ON_OPTION, user_ns)
cell_rest = ""
elif "default" in obj:
params.append(obj.get("default"))
command["params"] = params
return cell_rest
@classmethod
def _validate_kql_command_params(cls, command:dict):
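"""Validate the number of command parameters against the command's min_params/max_params
(defaulting to 1 when the command has a type, otherwise 0)."""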
params = command.get("params")
command_name = command.get("command")
obj = command.get("obj")
_type = obj.get("type")
max_params = obj.get("max_params") or (1 if _type is not None else 0)
if len(params) > max_params:
raise ValueError(f"command --{command_name} has too many parameters")
min_params = obj.get("min_params") or (1 if _type is not None else 0)
if len(params) < min_params:
raise ValueError(f"command --{command_name} is missing parameter")
@classmethod
def validate_query_properties(cls, schema:str, properties:Dict[str,Any])->None:
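"""Validate that all given query properties are supported by the schema of the current connection."""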
if type(properties) == dict:
unsupported_properties = []
for p in properties:
prop = cls._QUERY_PROPERTIES_TABLE[p]
prop_schema_list = prop.get("schema")
if type(prop_schema_list) == list and schema not in prop_schema_list and len(prop_schema_list) > 0 and schema is not None:
unsupported_properties.append(p)
if len(unsupported_properties) > 0:
raise ValueError(f"query properties {unsupported_properties} are not supported by the current connection")
_QUERY_PROPERTIES_TABLE = {
# NOT DOCUMENTED - (OptionBlockSplittingEnabled): Enables splitting of sequence blocks after aggregation operator. [Boolean]
"block_splitting_enabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionDatabasePattern): Database pattern overrides database name and picks the 1st database that matches the pattern.
# '*' means any database that user has access to. [String]
"database_pattern": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# NOT DOCUMENTED - If set, don't fuse projection into ExternalData operator. [bool]
"debug_query_externaldata_projection_fusion_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - The percentage of threads to fanout execution to for external data nodes. [int]
"debug_query_fanout_threads_percent_external_data": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionDeferPartialQueryFailures): If true, disables reporting partial query failures as part of the result set. [Boolean]
"deferpartialqueryfailures": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionMaterializedViewShuffleQuery): A hint to use shuffle strategy for materialized views that are referenced in the query. The property is an array of materialized views names and the shuffle keys to use. examples: 'dynamic([ { "Name": "V1", "Keys" : [ "K1", "K2" ] } ])' (shuffle view V1 by K1, K2) or 'dynamic([ { "Name": "V1" } ])' (shuffle view V1 by all keys) [dynamic]
"materialized_view_shuffle": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "dict"},
# (OptionMaxMemoryConsumptionPerQueryPerNode): Overrides the default maximum amount of memory a whole query may allocate per node. [UInt64]
"max_memory_consumption_per_query_per_node": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionMaxMemoryConsumptionPerIterator): Overrides the default maximum amount of memory a query operator may allocate. [UInt64]
"maxmemoryconsumptionperiterator": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionMaxOutputColumns): Overrides the default maximum number of columns a query is allowed to produce. [Long]
"maxoutputcolumns": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionNoRequestTimeout): Enables setting the request timeout to its maximum value. [Boolean]
"norequesttimeout": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionNoTruncation): Enables suppressing truncation of the query results returned to the caller. [Boolean]
"notruncation": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionPushSelectionThroughAggregation): If true, push simple selection through aggregation [Boolean]
"push_selection_through_aggregation": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionAdminSuperSlackerMode): If true, delegate execution of the query to another node [Boolean]
"query_admin_super_slacker_mode": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (QueryBinAutoAt): When evaluating the bin_auto() function, the start value to use. [LiteralExpression]
"query_bin_auto_at": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (QueryBinAutoSize): When evaluating the bin_auto() function, the bin size value to use. [LiteralExpression]
"query_bin_auto_size": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryCursorAfterDefault): The default parameter value of the cursor_after() function when called without parameters. [string]
"query_cursor_after_default": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# NOT DOCUMENTED - (OptionQueryCursorAllowReferencingStreamingIngestionTables): Enable usage of cursor functions over databases which have streaming ingestion enabled. [boolean]
"query_cursor_allow_referencing_streaming_ingestion_tables": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionQueryCursorBeforeOrAtDefault): The default parameter value of the cursor_before_or_at() function when called without parameters. [string]
"query_cursor_before_or_at_default": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryCursorCurrent): Overrides the cursor value returned by the cursor_current() or current_cursor() functions. [string]
"query_cursor_current": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryCursorDisabled): Disables usage of cursor functions in the context of the query. [boolean]
"query_cursor_disabled ": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionQueryCursorScopedTables): List of table names that should be scoped to cursor_after_default ..
# cursor_before_or_at_default (upper bound is optional). [dynamic]
"query_cursor_scoped_tables": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "dict"},
# (OptionQueryDataScope): Controls the query's datascope -- whether the query applies to all data or just part of it. ['default', 'all', or 'hotcache']
"query_datascope": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['default', 'all', 'hotcache']},
# (OptionQueryDateTimeScopeColumn): Controls the column name for the query's datetime scope (query_datetimescope_to / query_datetimescope_from). [String]
"query_datetimescope_column": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryDateTimeScopeFrom): Controls the query's datetime scope (earliest)
# used as auto-applied filter on query_datetimescope_column only (if defined). [DateTime]
"query_datetimescope_from": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryDateTimeScopeTo): Controls the query's datetime scope (latest)
# used as auto-applied filter on query_datetimescope_column only (if defined). [DateTime]
"query_datetimescope_to": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryDistributionNodesSpanSize): If set, controls the way sub-query merge behaves:
# the executing node will introduce an additional level in the query hierarchy for each sub-group of nodes; the size of the sub-group is set by this option. [Int]
"query_distribution_nodes_span": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "int"},
# (OptionQueryFanoutNodesPercent): The percentage of nodes to fanout execution to. [Int]
"query_fanout_nodes_percent": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryFanoutThreadsPercent): The percentage of threads to fanout execution to. [Int]
"query_fanout_threads_percent": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryForceRowLevelSecurity): If specified, forces Row Level Security rules, even if row_level_security policy is disabled [Boolean]
"query_force_row_level_security": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionQueryLanguage): Controls how the query text is to be interpreted. ['csl','kql' or 'sql']
"query_language": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['csl', 'kql', 'sql']},
# NOT DOCUMENTED - (RemoteMaterializeOperatorInCrossCluster): Enables remoting materialize operator in cross cluster query.
"query_materialize_remote_subquery": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionMaxEntitiesToUnion): Overrides the default maximum number of columns a query is allowed to produce. [Long]
"query_max_entities_in_union": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryNow): Overrides the datetime value returned by the now(0s) function. [DateTime]
# note: cannot be relative to now()
"query_now": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionDebugPython): If set, generate python debug query for the enumerated python node (default first). [Boolean or Int]
"query_python_debug": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryResultsApplyGetSchema): If set, retrieves the schema of each tabular data in the results of the query instead of the data itself. [Boolean]
"query_results_apply_getschema": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionQueryResultsCacheMaxAge): If positive, controls the maximum age of the cached query results that Kusto is allowed to return [TimeSpan]
"query_results_cache_max_age": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# NOT DOCUMENTED - (CostBasedOptimizerBroadcastJoinBuildMax): Max Rows count for build in broadcast join.
"query_optimization_broadcast_build_maxSize": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# NOT DOCUMENTED - (CostBasedOptimizerBroadcastJoinProbeMin): Min Rows count for probe in broadcast join.
"query_optimization_broadcast_probe_minSize": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# NOT DOCUMENTED - (CostBasedOptimizer): Enables automatic optimizations.
"query_optimization_costbased_enabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionOptimizeInOperator): Optimizes in operands serialization.
"query_optimization_in_operator": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (CostBasedOptimizerShufflingCardinalityThreshold): Shuffling Cardinality Threshold.
"query_optimization_shuffling_cardinality": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# NOT DOCUMENTED - (OptionQueryRemoteEntitiesDisabled): If set, queries cannot access remote databases / clusters. [Boolean]
"query_remote_entities_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (RemoteInOperandsInQuery): Enables remoting in operands.
"query_remote_in_operands": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionProgressiveQueryMinRowCountPerUpdate): Hint for Kusto as to how many records to send in each update
# (Takes effect only if OptionProgressiveQueryIsProgressive is set)
"query_results_progressive_row_count": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionProgressiveProgressReportPeriod): Hint for Kusto as to how often to send progress frames (Takes effect only if OptionProgressiveQueryIsProgressive is set)
"query_results_progressive_update_period": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionTakeMaxRecords): Enables limiting query results to this number of records. [Long]
"query_take_max_records": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryConsistency): Controls query consistency. ['strongconsistency' or 'normalconsistency' or 'weakconsistency']
"queryconsistency": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['strongconsistency', 'normalconsistency', 'weakconsistency']},
# (OptionRequestBlockRowLevelSecurity): If specified, blocks access to tables for which row_level_security policy is enabled [Boolean]
"request_block_row_level_security": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestCalloutDisabled): If set, callouts to external services are blocked. [Boolean]
"request_callout_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestDescription): Arbitrary text that the author of the request wants to include as the request description. [String]
"request_description": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionRequestExternalTableDisabled): If specified, indicates that the request cannot invoke code in the ExternalTable. [bool]
"request_external_table_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionDoNotImpersonate): If specified, indicates that the service shouldn't impersonate the caller's identity. [bool]
"request_impersonation_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestReadOnly): If specified, indicates that the request must not be able to write anything. [Boolean]
"request_readonly": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestEntitiesDisabled): If specified, indicates that the request cannot access remote databases and clusters. [bool]
"request_remote_entities_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestSandboxedExecutionDisabled): If specified, indicates that the request cannot invoke code in the sandbox. [bool]
"request_sandboxed_execution_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionResponseDynamicSerialization): Controls the serialization of 'dynamic' values in result sets. ['string', 'json']
"response_dynamic_serialization": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['string', 'json']},
# NOT DOCUMENTED - (OptionResponseDynamicSerialization_2): Controls the serialization of 'dynamic' string and null values in result sets. ['legacy', 'current']
"response_dynamic_serialization_2": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['legacy', 'current']},
# (OptionResultsProgressiveEnabled): If set, enables the progressive query stream
"results_progressive_enabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionSandboxedExecutionDisabled): If set, using sandboxes as part of query execution is disabled. [Boolean]
"sandboxed_execution_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionServerTimeout): Overrides the default request timeout. [TimeSpan]
# is capped by 1hour
"servertimeout": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionTruncationMaxRecords): Overrides the default maximum number of records a query is allowed to return to the caller (truncation). [Long]
"truncationmaxrecords": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionTruncationMaxSize): Overrides the default maximum data size a query is allowed to return to the caller (truncation). [Long]
"truncationmaxsize": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionValidatePermissions): Validates user's permissions to perform the query and doesn't run the query itself. [Boolean]
"validate_permissions": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# For either implicit or explicit cross-application queries, specify resources you will be accessing
# see https://dev.loganalytics.io/documentation/Using-the-API/Cross-Resource-Queries
"workspaces": {"schema": [Schema.LOG_ANALYTICS], "type": "list"},
# For either implicit or explicit cross-application queries, specify resources you will be accessing
# see: https://dev.applicationinsights.io/documentation/Using-the-API/Cross-Resource-Queries
"applications": {"schema": [Schema.APPLICATION_INSIGHTS, Schema.AIMON], "type": "list"},
# The timespan over which to query data. This is an ISO8601 time period value. This timespan is applied in addition to any that are specified in the query expression.
# see: https://docs.microsoft.com/en-us/rest/api/application-insights/query/get
"timespan": {"schema": [Schema.APPLICATION_INSIGHTS, Schema.AIMON, Schema.LOG_ANALYTICS], "type": "iso8601_duration"},
}
# all lookup keys in the table must be without spaces, underscores and hyphen-minus characters, because the parser ignores them
_OPTIONS_TABLE:Dict[str,Dict[str,Any]] = {
"ad": {"abbreviation": "autodataframe"},
"autodataframe": {"flag": "auto_dataframe", "type": "bool"},
"se": {"abbreviation": "shorterrors"},
"shorterrors": {"flag": "short_errors", "type": "bool"},
"f": {"abbreviation": "feedback"},
"feedback": {"flag": "feedback", "type": "bool"},
"sci": {"abbreviation": "showconninfo"},
"showconninfo": {"flag": "show_conn_info", "type": "str", "allow_none": True},
"c2lv": {"abbreviation": "columnstolocalvars"},
"columnstolocalvars": {"flag": "columns_to_local_vars", "type": "bool"},
"sqt": {"abbreviation": "showquerytime"},
"showquerytime": {"flag": "show_query_time", "type": "bool"},
"sq": {"abbreviation": "showquery"},
"showquery": {"flag": "show_query", "type": "bool"},
"sql": {"abbreviation": "showquerylink"},
"showquerylink": {"flag": "show_query_link", "type": "bool"},
"qld": {"abbreviation": "querylinkdestination"},
"querylinkdestination": {"flag": "query_link_destination", "type": "str"},
"esr": {"abbreviation": "enablesuppressresult"},
"enablesuppressresult": {"flag": "enable_suppress_result", "type": "bool"},
"pfi": {"abbreviation": "plotlyfsincludejs"},
"plotlyfsincludejs": {"flag": "plotly_fs_includejs", "type": "bool"},
"pw": {"abbreviation": "popupwindow"},
"popupwindow": {"flag": "popup_window", "type": "bool", "init": False},
"al": {"abbreviation": "autolimit"},
"autolimit": {"flag": "auto_limit", "type": "int", "allow_none": True},
"dl": {"abbreviation": "displaylimit"},
"displaylimit": {"flag": "display_limit", "type": "int", "allow_none": True},
"wait": {"abbreviation": "timeout"},
"to": {"abbreviation": "timeout"},
"timeout": {"flag": "timeout", "type": "int", "allow_none": True},
"ptst": {"abbreviation": "prettytablestyle"},
"prettytablestyle": {"flag": "prettytable_style", "type": "str"},
"var": {"abbreviation": "lastrawresultvar"},
"lastrawresultvar": {"flag": "last_raw_result_var", "type": "str"},
"tp": {"abbreviation": "tablepackage"},
"tablepackage": {"flag": "table_package", "type": "str"},
"pp": {"abbreviation": "plotpackage"},
"plotpackage": {"flag": "plot_package", "type": "str"},
"df": {"abbreviation": "dsnfilename"},
"dsnfilename": {"flag": "dsn_filename", "type": "str", "allow_none": True},
"vc": {"abbreviation": "validateconnectionstring"},
"validateconnectionstring": {"flag": "validate_connection_string", "type": "bool"},
"aps": {"abbreviation": "autopopupschema"},
"autopopupschema": {"flag": "auto_popup_schema", "type": "bool"},
"jd": {"abbreviation": "jsondisplay"},
"jsondisplay": {"flag": "json_display", "type": "str"},
"sjd": {"abbreviation": "schemajsondisplay"},
"schemajsondisplay": {"flag": "schema_json_display", "type": "str"},
"pd": {"abbreviation": "palettedesaturation"},
"palettedesaturation": {"flag": "palette_desaturation", "type": "float"},
"pn": {"abbreviation": "palettename"},
"paramsdict": {"flag": "params_dict", "type": "dict", "init": None},
"palettename": {"flag": "palette_name", "type": "str"},
"cache": {"flag": "cache", "type": "str", "allow_none": True},
"usecache": {"flag": "use_cache", "type": "str", "allow_none": True},
"tempfoldername": {"flag": "temp_folder_name", "type": "str"},
"cachefoldername": {"flag": "cache_folder_name", "type": "str"},
"exportfoldername": {"flag": "export_folder_name", "type": "str"},
"addkqlreftohelp": {"flag": "add_kql_ref_to_help", "type": "bool"},
"addschematohelp": {"flag": "add_schema_to_help", "type": "bool"},
"notebookapp": {"flag": "notebook_app", "type": "str"},
"debug": {"flag": "debug", "type": "bool"},
"checkmagicversion": {"flag": "check_magic_version", "type": "bool"},
"showwhatnew": {"flag": "show_what_new", "type": "bool"},
"showinitbanner": {"flag": "show_init_banner", "type": "bool"},
"warnmissingdependencies": {"flag": "warn_missing_dependencies", "type": "bool"},
"warnmissingenvvariables": {"flag": "warn_missing_env_variables", "type": "bool"},
"allowsinglelinecell": {"flag": "allow_single_line_cell", "type": "bool"},
"allowpycommentsbeforecell": {"flag": "allow_py_comments_before_cell", "type": "bool"},
"kqlmagickernel": {"flag": "kqlmagic_kernel", "type": "bool"},
"extrasrequire": {"flag": "extras_require", "type": "str"},
"testnotebookapp": {"flag": "test_notebook_app", "type": "str"},
"cloud": {"flag": "cloud", "type": "str"},
"enablesso": {"flag": "enable_sso", "type": "bool"},
"ssodbgcinterval": {"flag": "sso_db_gc_interval", "type": "int"},
"authusehttpclient": {"flag": "auth_use_http_client", "type": "bool"},
"tryazclilogin": {"flag": "try_azcli_login", "type": "bool"},
"tryazcliloginbyprofile": {"flag": "try_azcli_login_by_profile", "type": "bool"},
"tryvscodelogin": {"flag": "try_vscode_login", "type": "bool"},
"tryazcliloginsubscription": {"flag": "try_azcli_login_subscription", "type": "str", "allow_none": True},
"trytoken": {"flag": "try_token", "type": "dict", "allow_none": True},
"trymsi": {"flag": "try_msi", "type": "dict", "allow_none": True},
"idtag": {"abbreviation": "requestidtag"},
"requestidtag": {"flag": "request_id_tag", "type": "str", "allow_none": True},
"apptag": {"abbreviation": "requestapptag"},
"requestapptag": {"flag": "request_app_tag", "type": "str", "allow_none": True},
"usertag": {"abbreviation": "requestusertag"},
"requestusertag": {"flag": "request_user_tag", "type": "str", "allow_none": True},
"maxage": {"abbreviation": "requestcachemaxage"},
"requestcachemaxage": {"flag": "request_cache_max_age", "type": "int", "allow_none": True},
"dcln": {"abbreviation": "devicecodeloginnotification"},
"devicecodeloginnotification": {"flag": "device_code_login_notification", "type": "str"},
"dcne": {"abbreviation": "devicecodenotificationemail"},
"devicecodenotificationemail": {"flag": "device_code_notification_email", "type": "str"},
"saveas": {"flag": "save_as", "type": "str", "init": None},
"saveto": {"flag": "save_to", "type": "str", "init": None},
"query": {"flag": "query", "type": "str", "init": None},
"conn": {"flag": "conn", "type": "str", "init": None},
"queryproperties": {"flag": "query_properties", "type": "dict", "init": None},
"pc": {"abbreviation": "palettecolors"},
"palettecolors": {"flag": "palette_colors", "type": "int"},
"pr": {"abbreviation": "palettereverse"},
"palettereverse": {"flag": "palette_reverse", "type": "bool", "init": False},
"ps": {"abbreviation": "popupschema"},
"popupschema": {"flag": "popup_schema", "type": "bool", "init": False},
"did": {"abbreviation": "displayid"},
"displayid": {"flag": "display_id", "type": "bool", "init": False},
"displayhandlers": {"flag": "display_handlers", "type": "dict", "init": {}},
"pi": {"abbreviation": "popupinteraction"},
"popupinteraction": {"flag": "popup_interaction", "type": "str"},
"tempfilesserver": {"flag": "temp_files_server", "type": "str"},
"tempfilesserveraddress": {"flag": "temp_files_server_address", "type": "str", "allow_none": True},
"kernellocation": {"flag": "kernel_location", "type": "str"},
"kernelid": {"flag": "kernel_id", "type": "str", "allow_none": True},
"notebookserviceaddress": {"flag": "notebook_service_address", "type": "str", "allow_none": True},
"dtd": {"abbreviation": "dynamictodataframe"},
"dynamictodataframe": {"flag": "dynamic_to_dataframe", "type": "str"},
"tempfolderlocation": {"flag": "temp_folder_location", "type": "str"},
"pl": {"abbreviation": "plotlylayout"},
"plotlylayout": {"flag": "plotly_layout", "type": "dict", "allow_none": True},
"atw": {"abbreviation": "authtokenwarnings"},
"authtokenwarnings": {"flag": "auth_token_warnings", "type": "bool"},
"ecbp": {"abbreviation": "enablecurlybracketsparams"},
"enablecurlybracketsparams": {"flag": "enable_curly_brackets_params", "type": "bool"},
"nop": {"flag": "nop", "type": "bool", "init": False}, # does nothing, useful to indicate option part when no options are required
"av": {"abbreviation": "assignvar"},
"assignvar": {"flag": "assign_var", "type": "str", "allow_none": True},
"cv": {"abbreviation": "cursorvar"},
"cursorvar": {"flag": "cursor_var", "type": "str", "allow_none": True},
"ismagic": {"flag": "is_magic", "type": "bool"},
"caim": {"abbreviation": "codeauthinteractivemode"},
"codeauthinteractivemode": {"flag": "code_auth_interactive_mode", "type": "str", "allow_none": True},
}
@classmethod
def validate_override(cls, name:str, config:Configurable, **override_options)->Dict[str,Any]:
"""validate the provided option are valid"""
options = {}
for key, value in override_options.items():
obj = cls._get_obj(key, allow_abbr=True)
if obj.get("flag") in config.read_only_trait_names:
raise ValueError(f"option '{key}' in {name} is readony, cannot be set")
cls._convert(name, obj, key, value)
cls._validate_config_trait(name, obj, key, value, config)
options[obj.get("flag")] = value
return options
@classmethod
def parse_option(cls, dict_name:str, key:str, value:str, config:Configurable=None, lookup:Dict[str,Dict[str,Any]]=None, user_ns:Dict[str,Any]=None, allow_abbr:bool=None, force:bool=False):
"""validate the provided option are valid
return normalized key and value"""
obj = cls._get_obj(key, lookup=lookup, allow_abbr=allow_abbr)
value = cls._parse_value(dict_name, obj, key, value, user_ns=user_ns)
cls._validate_config_trait(dict_name, obj, key, value, config)
key_name = obj.get("flag", key)
if config.is_read_only(key_name) and value != getattr(config, key_name):
# done to raise the proper error
setattr(config, key_name, value)
return key_name, value
@classmethod
def _parse_value(cls, dict_name:str, obj:Dict[str,Any], key:str, string:str, user_ns:Dict[str,Any])->Any:
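"""Resolve an option/property value string: '$name' references are taken from environment variables,
otherwise the string is evaluated against user_ns (or kept as-is when parsing environment variables),
and the result is converted to the declared type of the option."""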
_type = obj.get("type")
if string == "" and _type == "str":
return string
# if we allow taking the value from python, we also allow taking it from env variables
# when we parse an environment variable as an option we don't use user_ns
if string.startswith('$') and user_ns:
env_var_name = string[1:]
if not is_env_var(env_var_name):
raise ValueError(f"failed to parse referred value, due environment variable {env_var_name} not set")
string = get_env_var(env_var_name)
_was_quoted, value = strip_if_quoted(string)
else:
try:
value = eval(string, None, user_ns)
except:
# if there is no user_ns, the parse is for an environment variable, and the value may just be an unquoted object
if user_ns:
raise
value = string
# check value is of the right type
try:
return cls._convert(dict_name, obj, key, value)
except:
raise
@classmethod
def parse_config_key(cls, key:str, config:Configurable, allow_abbr:bool=None)->Tuple[str,str,Any]:
"""validate the provided option key is valid
return normalized key"""
obj = cls._get_obj(key, allow_abbr=allow_abbr)
name = obj.get("flag")
if "init" in obj:
value = obj.get("init")
elif name in cls.traits_dict:
value = getattr(config, name)
else:
raise f"internal error '{key}' has no init value and not defined as Kqlmagic traitlet"
return name, value
@classmethod
def _get_obj(cls, key:str, lookup:Dict[str,Dict[str,Any]]=None, allow_abbr:bool=None)->Dict[str,Any]:
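"""Look up an option descriptor by its normalized key (lowercase, ignoring '-' and '_'),
resolving abbreviations when allowed."""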
lookup_key = key.lower().replace("-", "").replace("_", "")
lookup_table = lookup or cls._OPTIONS_TABLE
obj = lookup_table.get(lookup_key)
if obj is None:
raise ValueError(f"unknown option '{key}'")
if obj.get("abbreviation"):
obj = lookup_table.get(obj.get("abbreviation"))
if allow_abbr is not True:
raise ValueError(f"unknown option '{key}' (found option abbreviation '{key}' for '{obj.get('flag')}')")
return obj
@classmethod
def _parse_kql_options(cls, code:str, is_cell:bool, config:Configurable, user_ns:Dict[str,Any])->Tuple[str,Dict[str,Any]]:
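"""Split code into the KQL text and its '-option' / '+query property' settings.
Also handles the optional 'result_var <<' prefix, '//' comments and a trailing ';' that suppresses results.
Returns (kql, options)."""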
trimmed_kql = code
trimmed_kql = trimmed_kql.strip()
suppress_results = False
if trimmed_kql.endswith(";"):
suppress_results = not is_cell
if is_cell:
lines = trimmed_kql.splitlines(True)
if lines[-1].strip() == ";":
suppress_results = True
if suppress_results:
trimmed_kql = trimmed_kql[:-1].strip()
words = trimmed_kql.split()
properties = {}
table = options = cls.default_options.copy()
if not words:
return ("", options)
num_words = len(words)
first_word = 0
if num_words - first_word >= 2 and words[first_word + 1] == "<<":
options["result_var"] = words[first_word]
trimmed_kql = trimmed_kql[trimmed_kql.find("<<") + 2:]
first_word += 2
obj = None
key = None
opt_key = None
key_state = True
option_type = None
is_option = True
is_property = False
skip_words_count = 0
for word in words[first_word:]:
if key_state:
if skip_words_count == 0:
_comment, skip_words_count = cls._parse_comment(word, trimmed_kql)
if skip_words_count > 0:
skip_words_count -= 1
trimmed_kql = trimmed_kql[trimmed_kql.find(word) + len(word):]
continue
is_option = word.startswith("-")
is_property = word.startswith("+")
option_type = "option" if is_option else "query property"
if not is_option and not is_property:
break
# validate it is not a command
if is_option and word.startswith("--"):
raise ValueError(f"invalid {option_type} '{word}', cannot start with a bouble hyphen-minus")
trimmed_kql = trimmed_kql[trimmed_kql.find(word) + len(word):]
word = word[1:]
bool_value = True
if word[0].startswith("!"):
bool_value = False
word = word[1:]
if "=" in word:
parts = word.split("=", 1)
key = parts[0]
value = parts[1]
else:
key = word
value = None
if is_option:
lookup_key = key.lower().replace("-", "").replace("_", "")
obj = cls._OPTIONS_TABLE.get(lookup_key)
table = options
else:
lookup_key = key.lower()
obj = cls._QUERY_PROPERTIES_TABLE.get(lookup_key)
table = properties
if obj is not None:
if obj.get("abbreviation") is not None:
obj = cls._OPTIONS_TABLE.get(obj.get("abbreviation"))
if obj.get("flag") in config.read_only_trait_names:
raise ValueError(f"{option_type} {key} is readony, cannot be set")
_type = obj.get("type")
opt_key = obj.get("flag") or lookup_key
if _type == "bool" and value is None:
table[opt_key] = bool_value
else:
if not bool_value:
raise ValueError(f"{option_type} {key} cannot be negated")
if value is not None:
table[opt_key] = cls._parse_value("options" if is_option else "query properties", obj, key, value, user_ns)
else:
key_state = False
else:
raise ValueError(f"unknown {option_type} '{key}'")
else:
trimmed_kql = trimmed_kql[trimmed_kql.find(word) + len(word):]
table[opt_key] = cls._parse_value("options", obj, key, word, user_ns)
key_state = True
first_word += 1
# validate using config traits
if key_state and is_option:
cls._validate_config_trait("options", obj, key, options.get(opt_key), config)
if not key_state:
raise ValueError(f"{option_type} '{opt_key}' must have a value")
if options.get("query_properties"):
properties.update(options["query_properties"])
options["query_properties"] = properties
if suppress_results:
options["suppress_results"] = True
return (trimmed_kql.strip(), options)
@classmethod
def _parse_comment(cls, word:str, _str:str)->Tuple[str,int]:
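"""If word starts a '//' comment, return the comment text (up to the end of the line) and the
number of words it spans; otherwise return (None, 0)."""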
comment = None
skip_words_count = 0
if word.startswith("//"):
idx_start = _str.find(word)
idx_end = _str[idx_start:].find("\n")
if idx_end > 0:
idx_end = idx_start + idx_end
comment = _str[idx_start:idx_end]
else:
comment = _str[idx_start:]
comment_words = comment.split()
skip_words_count = len(comment_words)
return comment, skip_words_count
@classmethod
def parse_and_get_kv_string(cls, conn_str:str, user_ns:Dict[str,Any], keep_original_key:bool=None)->Dict[str,Any]:
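"""Parse a key/value connection string, either in 'key1(value1).key2(value2)' or
'key1=value1;key2=value2' form, into a dict keyed by the (optionally normalized) key names."""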
rest = conn_str
rest = rest.strip()
_was_quoted, rest = strip_if_quoted(rest)
matched_kv = {}
delimiter_required = False
lp_idx = rest.find("(")
eq_idx = rest.find("=")
sc_idx = rest.find(";")
l_char = "(" if eq_idx < 0 and sc_idx < 0 else "=" if lp_idx < 0 else "(" if lp_idx < eq_idx and lp_idx < sc_idx else "="
r_char = ")" if l_char == "(" else ";"
extra_delimiter = None if r_char == ";" else "."
while len(rest) > 0:
l_idx = rest.find(l_char)
r_idx = rest.find(r_char)
if l_idx < 0:
if l_char == "(":
# string ends with delimiter
if extra_delimiter is not None and extra_delimiter == rest:
break
else:
raise ValueError("invalid key/value string, missing left parethesis.")
# key only at end of string
elif r_idx < 0:
key = rest
val = ""
rest = ""
# key only
else:
key = rest[:r_idx].strip()
val = ""
rest = rest[r_idx + 1:].strip()
# key only
elif r_idx >= 0 and r_idx < l_idx:
if l_char == "(":
raise ValueError("invalid key/value string, missing left parethesis.")
else:
key = rest[:r_idx].strip()
val = ""
rest = rest[r_idx + 1:].strip()
# key and value
else:
key = rest[:l_idx].strip()
rest = rest[l_idx + 1:].strip()
r_idx = rest.find(r_char)
if r_idx < 0:
if l_char == "(":
raise ValueError("invalid key/value string, missing right parethesis.")
else:
val = rest
rest = ""
else:
val = rest[:r_idx].strip()
rest = rest[r_idx + 1:].strip()
if extra_delimiter is not None:
if key.startswith(extra_delimiter):
key = key[1:].strip()
elif delimiter_required:
raise ValueError("invalid key/value string, missing delimiter.")
delimiter_required = True
# key exist
if len(key) > 0:
if keep_original_key is True:
lookup_key = key
else:
val = cls._parse_value("key/value", {"type": "str"}, key, val, user_ns)
lookup_key = key.lower().replace("-", "").replace("_", "")
matched_kv[lookup_key] = val
# no key but value exist
elif len(val) > 0:
raise ValueError("invalid key/value string, missing key.")
# no key, no value in parenthesis mode
elif l_char == "(":
raise ValueError("invalid key/value string, missing key.")
return matched_kv
@classmethod
def parse_quote(cls, string:str):
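"""Extract a leading quoted string (single, double or triple quoted, honoring backslash escapes).
Returns the quoted string including its delimiters and the number of words it spans."""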
string = string.strip()
delimiter = string[0]
delimiter_len = 1
triple_quote = len(string) > 2 and string[1] == delimiter and string[2] == delimiter
if triple_quote:
delimiter_len = 3
delimiter = delimiter * 3
quoted_string_len = string[3:].find(delimiter)
else:
escape = False
quoted_string_len = -1
count = 0
for c in string[1:]:
if c == "\\":
escape = not escape
elif escape:
pass
elif c == delimiter:
quoted_string_len = count
break
count += 1
if quoted_string_len >= 0:
trimmed_string = string[quoted_string_len + 2 * delimiter_len:]
if len(trimmed_string) > 0 and not trimmed_string[0].isspace():
raise SyntaxError("invalid syntax after quoted string, should be followed by whitespace only")
quoted_string = string[delimiter_len:quoted_string_len + delimiter_len]
quoted_words = len(quoted_string.split())
if len(quoted_string) > 0:
if quoted_string[-1].isspace():
quoted_words += 1
if quoted_string[0].isspace():
quoted_words += 1
else:
quoted_words = 1
return delimiter + quoted_string + delimiter, quoted_words
else:
raise SyntaxError("EOL while scanning quoted string")
@classmethod
def _convert(cls, name:str, obj:Dict[str,Any], key:str, value:Any)->Any:
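"""Convert value to the type declared for the option/property (int, uint, float, bool, dict, list,
enum, iso8601_duration or str); raise ValueError with a descriptive message on failure."""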
if value is None:
if obj.get("allow_none"):
return None
else:
raise ValueError(f"option '{key}' doesn't allow None value.")
_type = None
try:
_type = obj.get("type")
if _type == "int":
if float(value) != int(value):
raise ValueError
return int(value)
elif _type == "uint":
if float(value) != int(value) or int(value) < 0:
raise ValueError
return int(value)
elif _type == "float":
return float(value)
elif _type == "bool":
if type(value) == str:
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
raise ValueError
elif bool(value) != int(value):
raise ValueError
return bool(value)
elif _type == "dict":
return dict(value)
elif _type == "list":
if type(value) == str:
value = [value]
return list(value)
elif _type == "enum":
enum_values = obj.get("values", [])
if enum_values.index(value) >= 0:
return value
else:
raise ValueError
elif _type == "iso8601_duration":
# There are four ways to express a time interval:
# Start and end, such as "2007-03-01T13:00:00Z/2008-05-11T15:30:00Z"
# Start and duration, such as "2007-03-01T13:00:00Z/P1Y2M10DT2H30M"
# Duration and end, such as "P1Y2M10DT2H30M/2008-05-11T15:30:00Z"
# Duration only, such as "P1Y2M10DT2H30M", with additional context information
value_list = [value] if type(value) != list else list(value)[:2]
if len(value_list) == 0:
raise ValueError
elif len(value_list) == 1:
value = value_list[0]
if isinstance(value, timedelta):
isodate = Dependencies.get_module("isodate", message="timedelta convertion to iso8601 duration format is not supported without isodate module, use instead a datetime range format, or already converted string") # will throw if does not exist
value = isodate.duration_isoformat(value)
elif type(value) != str:
raise ValueError
return value
else:
start_value = value_list[0]
end_value = value_list[1]
if isinstance(start_value, timedelta):
isodate = Dependencies.get_module("isodate", dont_throw=True)
if isodate:
start_value = isodate.duration_isoformat(start_value)
else:
end_datetime = end_value if isinstance(end_value, datetime) else dateutil.parser.isoparse(end_value)
start_value = end_datetime - start_value
elif isinstance(end_value, timedelta):
isodate = Dependencies.get_module("isodate", dont_throw=True)
if isodate:
end_value = isodate.duration_isoformat(end_value)
else:
start_datetime = start_value if isinstance(start_value, datetime) else dateutil.parser.isoparse(start_value)
end_value = end_value + start_datetime
value_list = [v.strftime('%Y-%m-%dT%H:%M:%S%ZZ') if isinstance(v, datetime) else str(v) for v in [start_value, end_value]]
return "/".join(value_list)
else:
return str(value)
except Exception as e:
option_type = "property" if name == "query properties" else "option"
due_message = f"{e}" or f"invalid '{_type}' of value '{value}'"
raise ValueError(f"failed to set {option_type} '{key}' in {name}, due to {due_message}")
@classmethod
def _validate_config_trait(cls, dict_name:str, obj:Dict[str,Any], key:str, value:Any, config:Configurable)->None:
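"""If the option maps to a Kqlmagic traitlet, validate the converted value against the trait
and raise ValueError when it is rejected."""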
# validate using config traits
name = obj.get("flag")
if isinstance(config, Configurable) and name in cls.traits_dict:
#
# save current value
#
try:
new_value = cls._convert(dict_name, obj, key, value)
trait:TraitType = cls.traits_dict.get(name)
if hasattr(trait, "_validate"):
validated_value = trait._validate(config, new_value)
return
except Exception as error:
raise ValueError(f"failed to set option '{key}' in {dict_name}, due to invalid value '{value}'. Exception: {error}")
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Tuple, Dict, List, Any
import itertools
import configparser as CP
from datetime import timedelta, datetime
import dateutil.parser
from traitlets import TraitType
from traitlets.config.configurable import Configurable
from ._debug_utils import debug_print
from .dependencies import Dependencies
from .constants import Constants, Schema
from .my_utils import split_lex, adjust_path, is_env_var, get_env_var, is_collection, strip_if_quoted
from .engine import Engine
class Parser(object):
default_options:Dict[str,Any] = {}
traits_dict:Dict[str,TraitType] = {}
@classmethod
def initialize(cls, config:Configurable):
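"""Capture the config traits, register for config change notifications and build the default options."""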
cls.traits_dict = config.traits()
config.observe(Parser.observe_config_changes)
cls.init_default_options(config)
@staticmethod
def observe_config_changes(change:Dict[str,str]):
if change.get('type') == 'change':
name = change.get('name')
obj = Parser._OPTIONS_TABLE.get(name.lower().replace("-", "").replace("_", ""))
if "init" not in obj:
Parser.default_options[change.get('name')] = change.get('new')
@classmethod
def init_default_options(cls, config:Configurable):
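"""Populate default_options from each option's 'init' value in _OPTIONS_TABLE, or from the corresponding config trait."""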
for obj in cls._OPTIONS_TABLE.values():
if "abbreviation" not in obj:
name = obj.get("flag")
if "init" in obj:
cls.default_options[name] = obj.get("init")
elif name in cls.traits_dict:
cls.default_options[name] = getattr(config, name)
elif "abbreviation" not in obj:
raise Exception(f"missing init in _OPTIONS_TABLE for option: {name}")
@classmethod
def parse(cls, _line:str, _cell:str, config:Configurable, engines:List[Engine], user_ns:Dict[str,Any])->List[Dict[str,Any]]:
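"""Split the magic input into sections (cell magics, line magics and blank-line separated queries)
and parse each section into (connection info, KQL statements, options)."""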
is_cell = _cell is not None
cell = f"{_line}\n{_cell or ''}"
cell = cell.strip()
code = cell
#
# split string to queries
#
suppress_all_results = False
# tuple:
sections:List[Dict[str,str]] = []
if is_cell:
magic_section_name = Constants.MAGIC_NAME
section_lines:List[str] = []
previous_line = " " # should be init to space for the below to work
# note: splitlines(True) doesn't remove the \n suffix; each line ends with \n
for line in code.splitlines(True):
lstripped_line = line.lstrip()
if (lstripped_line.startswith(Constants.IPYKERNEL_CELL_MAGIC_PREFIX)
and (previous_line.isspace() or (len(sections) > 0 and sections[-1].get("type") == "line_magic"))):
if len(section_lines) > 0:
sections.append({"type": "cell_magic", "name": magic_section_name, "body": "".join(section_lines)})
magic_word = lstripped_line.split(None, 1)[0]
magic_section_name = magic_word[len(Constants.IPYKERNEL_CELL_MAGIC_PREFIX):]
lstripped_line = lstripped_line[len(magic_word):].lstrip()
if magic_section_name == Constants.MAGIC_NAME:
section_lines = [] if lstripped_line.isspace() else [lstripped_line]
else:
section_lines = [lstripped_line]
elif magic_section_name != Constants.MAGIC_NAME:
section_lines.append(line)
elif (lstripped_line.startswith(Constants.IPYKERNEL_LINE_MAGIC_PREFIX)
and (previous_line.isspace() or (len(sections) > 0 and sections[-1].get("type") == "line_magic"))):
magic_word = lstripped_line.split(None, 1)[0]
magic_name = magic_word[len(Constants.IPYKERNEL_LINE_MAGIC_PREFIX):]
lstripped_line = lstripped_line[len(magic_word):].lstrip()
if magic_name == Constants.MAGIC_NAME:
if not lstripped_line.isspace():
sections.append({"type": "line_magic", "name": magic_name, "body": lstripped_line})
else:
sections.append({"type": "line_magic", "name": magic_name, "body": lstripped_line})
elif line.isspace():
if len(section_lines) > 0:
not_commented_lines = [1 for seg_line in section_lines if not seg_line.lstrip().startswith("//")]
if (len(not_commented_lines) > 0):
sections.append({"type": "cell_magic", "name": magic_section_name, "body": "".join(section_lines)})
section_lines = []
else:
section_lines.append(line)
previous_line = line
if len(section_lines) > 0:
if magic_section_name == Constants.MAGIC_NAME:
not_commented_lines = [1 for seg_line in section_lines if not seg_line.lstrip().startswith("//")]
if (len(not_commented_lines) > 0):
sections.append({"type": "cell_magic", "name": magic_section_name, "body": "".join(section_lines)})
else:
sections.append({"type": "cell_magic", "name": magic_section_name, "body": "".join(section_lines)})
if len(sections) > 0:
last_query = sections[-1].get("body").strip()
if last_query == ";":
suppress_all_results = True
sections = sections[:-1]
if len(sections) == 0:
sections.append({"type": "cell_magic", "name": Constants.MAGIC_NAME, "body": ""})
else:
sections.append({"type": "line_magic", "name": Constants.MAGIC_NAME, "body": code.strip()})
#
# parse code to kql and options
#
parsed_sections = []
last_connection_string = ""
for section in sections:
parsed_section = cls._parse_one_section(section, is_cell, config, engines, user_ns)
connection_string = parsed_section.get("connection_string")
if connection_string:
last_connection_string = connection_string
elif len(parsed_section.get("command")) == 0:
parsed_section["connection_string"] = last_connection_string
if suppress_all_results:
parsed_section.get("options")["suppress_results"] = True
parsed_sections.append(parsed_section)
if len(parsed_sections) > 0:
parsed_sections[-1]["last_query"] = True
return parsed_sections
@classmethod
def _parse_one_section(cls, section:Dict[str,str], is_cell:bool, config:Configurable, engines:List[Engine], user_ns:Dict[str,Any])->Dict[str,Any]:
"""Separate input into (connection info, KQL statements, options)"""
cell, command = cls._parse_kql_command(section, user_ns)
command_name = command.get("command")
if command_name is not None and command_name != "submit":
cell_rest, options = cls._parse_kql_options(cell.strip(), is_cell, config, user_ns)
cell_rest = cls._update_kql_command_params(command, cell_rest, user_ns)
cls._validate_kql_command_params(command)
if cell_rest:
raise ValueError(f"command --{command_name} has too many parameters")
parsed_query = {"connection_string": "", "query": "", "options": options, "command": command}
return parsed_query
# split into at most 2 parts; the first part, parts[0], is the first token.
# parts = [part.strip() for part in cell.split(None, 1)]
parts = split_lex(cell)
# print(parts)
if not parts:
kql, options = cls._parse_kql_options("", is_cell, config, user_ns)
parsed_query = {"connection_string": "", "query": kql, "options": options, "command": {}}
return parsed_query
#
# replace substrings of the form $name or ${name} (on Windows also %name%) if found in env variables
#
connection_string = None
conn_str = parts[0].strip()
if not conn_str.startswith(('-', '+')):
_was_quoted, conn_str = strip_if_quoted(conn_str)
#
# connection taken from a section in the dsn file (the file name has to be defined in config.dsn_filename or specified as a parameter)
#
if is_collection(conn_str, "["):
section = conn_str[1:-1].strip()
# parse to get the flag, for the case that the file name is specified in the options
code = cell[len(parts[0]):]
kql, options = cls._parse_kql_options(code, is_cell, config, user_ns)
parser = CP.ConfigParser()
dsn_filename = adjust_path(options.get("dsn_filename", config.dsn_filename))
parser.read(dsn_filename)
cfg_dict = dict(parser.items(section))
cfg_dict_lower = {k.lower().replace("_", "").replace("-", ""): v for (k, v) in cfg_dict.items()}
for e in engines:
if e.get_mandatory_key() in cfg_dict_lower.keys():
all_keys = set(itertools.chain(*e.get_valid_keys_combinations()))
connection_kv = [f"{k}='{v}'" for k, v in cfg_dict_lower.items() if v and k in all_keys]
connection_string = f"{e.get_uri_schema_name()}://{';'.join(connection_kv)}"
break
#
# connection specified starting with one of the supported prefixes
#
elif "://" in conn_str:
sub_parts = conn_str.strip().split("://", 1)
if (len(sub_parts) == 2 and sub_parts[0].lower().replace("_", "").replace("-", "") in list(itertools.chain(*[e._ALT_URI_SCHEMA_NAMES for e in engines]))):
connection_string = conn_str
#
# connection specified as database@cluster
#
elif "@" in conn_str and "|" not in conn_str and "'" not in conn_str and '"' not in conn_str:
connection_string = conn_str
#
# connection not specified, override default
#
if connection_string is None:
connection_string = ""
code = cell
else:
code = cell[len(parts[0]):]
#
# parse code to kql and options
#
kql, options = cls._parse_kql_options(code.strip(), is_cell, config, user_ns)
kql = options.pop("query", None) or kql
connection_string = options.pop("conn", None) or connection_string
parsed_query = {"connection_string": connection_string.strip(), "query": kql, "options": options, "command": {}}
return parsed_query
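# older variant of parse(): splits the cell into queries by blank lines itself instead of working on pre-split magic sections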
@classmethod
def parse_old(cls, line:str, cell:str, config:Configurable, engines:List[Engine], user_ns:Dict[str,Any])->List[Dict[str,Any]]:
"""Separate input into (connection info, KQL statements, options)"""
is_cell = cell is not None
cell = f"{line}\n{cell or ''}"
cell = cell.strip()
parsed_queries = []
cell, command = cls._parse_kql_command(cell, user_ns)
command_name = command.get("command")
if command_name is not None and command_name != "submit":
cell_rest, options = cls._parse_kql_options(cell.strip(), is_cell, config, user_ns)
cell_rest = cls._update_kql_command_params(command, cell_rest, user_ns)
cls._validate_kql_command_params(command)
if cell_rest:
raise ValueError(f"command --{command_name} has too many parameters")
parsed_queries.append({"connection_string": "", "query": "", "options": options, "command": command})
return parsed_queries
# split to max 2 parts. First part, parts[0], is the first string.
# parts = [part.strip() for part in cell.split(None, 1)]
parts = split_lex(cell)
# print(parts)
if not parts:
kql, options = cls._parse_kql_options("", is_cell, config, user_ns)
parsed_queries.append({"connection_string": "", "query": kql, "options": options, "command": {}})
return parsed_queries
#
# replace substrings of the form $name or ${name} (on Windows also %name%) if found in environment variables
#
connection_string = None
conn_str = parts[0].strip()
if not conn_str.startswith(('-', '+')):
_was_quoted, conn_str = strip_if_quoted(conn_str)
#
# connection taken from a section in the dsn file (the file name has to be defined in config.dsn_filename or specified as a parameter)
#
if is_collection(conn_str, "["):
section = conn_str[1:-1].strip()
# parse to get the flag, for the case that the file name is specified in the options
code = cell[len(parts[0]):]
kql, options = cls._parse_kql_options(code, is_cell, config, user_ns)
parser = CP.ConfigParser()
dsn_filename = adjust_path(options.get("dsn_filename", config.dsn_filename))
parser.read(dsn_filename)
cfg_dict = dict(parser.items(section))
cfg_dict_lower = {k.lower().replace("_", "").replace("-", ""): v for (k, v) in cfg_dict.items()}
for e in engines:
if e.get_mandatory_key() in cfg_dict_lower.keys():
all_keys = set(itertools.chain(*e.get_valid_keys_combinations()))
connection_kv = [f"{k}='{v}'" for k, v in cfg_dict_lower.items() if v and k in all_keys]
connection_string = f"{e.get_uri_schema_name()}://{';'.join(connection_kv)}"
break
#
# connection specified starting with one of the supported prefixes
#
elif "://" in conn_str:
sub_parts = conn_str.strip().split("://", 1)
if (len(sub_parts) == 2 and sub_parts[0].lower().replace("_", "").replace("-", "") in list(itertools.chain(*[e._ALT_URI_SCHEMA_NAMES for e in engines]))):
connection_string = conn_str
#
# connection specified as database@cluster
#
elif "@" in conn_str and "|" not in conn_str and "'" not in conn_str and '"' not in conn_str:
connection_string = conn_str
#
# connection not specified, override default
#
if connection_string is None:
connection_string = ""
code = cell
else:
code = cell[len(parts[0]):]
#
# split string to queries
#
suppress_all_results = False
queries:List[str] = []
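# in cell mode an empty line terminates the current query and starts a new one; in line mode the whole code is a single query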
if is_cell:
queryLines:List[str] = []
last_line:str = None
for last_line in code.splitlines(True):
# note: splitlines(True) doesn't remove the \n suffix; each line ends with \n
if last_line.isspace():
if len(queryLines) > 0:
queries.append("".join(queryLines))
queryLines = []
else:
queryLines.append(last_line)
if len(queryLines) > 0:
queries.append("".join(queryLines))
if len(queries) > 0:
last_query = queries[-1].strip()
if last_query == ";":
suppress_all_results = True
queries = queries[:-1]
if len(queries) == 0:
queries.append("")
else:
queries.append(code.strip())
#
# parse code to kql and options
#
for query in queries:
kql, options = cls._parse_kql_options(query.strip(), is_cell, config, user_ns)
kql = options.pop("query", None) or kql
connection_string = options.pop("conn", None) or connection_string
if suppress_all_results:
options["suppress_results"] = True
parsed_queries.append({"connection_string": connection_string.strip(), "query": kql, "options": options, "command": {}})
return parsed_queries
# Note: None as default is valid
# for commands that have no parameters, the "type" key should not exist or should be set to None
# default max_params is 1 if type is not None otherwise 0
# default min_params is 1 if type is not None otherwise 0
_COMMANDS_TABLE = {
"version": {"flag": "version", "type": None},
"banner": {"flag": "banner", "type": None},
"usage": {"flag": "usage", "type": None},
"submit": {"flag": "submit", "type": None}, # default
"help": {"flag": "help", "type": "str", "default": "help"},
"faq": {"flag": "faq", "type": None},
"palette": {"flag": "palette", "type": None},
"palettes": {"flag": "palettes", "type": None},
# "config": {"flag": "config", "type": "str", "default": None},
"config": {"flag": "config", "type": "not_quoted_str", "allow_none": True, "max_params": 1, "min_params": 0},
"bugreport": {"flag": "bug_report", "type": None},
"conn": {"flag": "conn", "type": "str", "default": None},
# should be per connection
"cache": {"flag": "cache", "type": "str", "allow_none": True},
"cachecreate": {"flag": "cache_create", "type": "str", "allow_none": True},
"cacheappend": {"flag": "cache_append", "type": "str", "allow_none": True},
"cachecreateorappend": {"flag": "cache_create_or_append", "type": "str", "allow_none": True},
"cacheremove": {"flag": "cache_remove", "type": "str", "allow_none": True},
"cachelist": {"flag": "cache_list", "type": None},
"cachestop": {"flag": "cache_stop", "type": None},
"usecache": {"flag": "use_cache", "type": "str", "allow_none": True},
"usecachestop": {"flag": "use_cache_stop", "type": "str", "allow_none": True},
"schema": {"flag": "schema", "type": "str", "default": None},
"clearssodb": {"flag": "clear_sso_db", "type": None},
"py": {"flag": "python", "type": "not_quoted_str", "allow_none": True, "max_params": 2, "min_params": 2},
"pyro": {"flag": "python", "type": "not_quoted_str", "allow_none": True, "max_params": 2, "min_params": 2},
"pyrw": {"flag": "python", "type": "not_quoted_str", "allow_none": True, "max_params": 2, "min_params": 2},
"activatekernel": {"flag": "activate_kernel", "type": None},
"deactivatekernel": {"flag": "deactivate_kernel", "type": None},
"linemagic": {"flag": "line_magic", "type": "not_quoted_str", "allow_none": True, "max_params": 2, "min_params": 2},
"cellmagic": {"flag": "cell_magic", "type": "not_quoted_str", "allow_none": True, "max_params": 3, "min_params": 3},
}
@classmethod
def _parse_kql_command(cls, section:Dict[str,str], user_ns:Dict[str,Any])->Tuple[str,Dict[str,Any]]:
code = section.get("body")
if section.get("name") != Constants.MAGIC_NAME:
name = section.get("name").replace("_", "").replace("-", "")
if name in ["py", "pyro", "pyrw"]:
obj = cls._COMMANDS_TABLE.get(name)
return ("", {"command": obj.get("flag"), "obj": obj, "params": [section.get("body"), name]})
else:
# line/cell magic
name = section.get("type")
obj = cls._COMMANDS_TABLE.get(name.replace("_", ""))
command_name = obj.get("flag")
params = []
body = section.get("body")
if command_name == "cell_magic":
first_line = body.split("\n",1)[0]
params.append(first_line.strip())
body = body[len(first_line) + 1:]
params.append(body)
params.append(section.get("name"))
return ("", {"command": command_name, "obj": obj, "params": params})
# kql section
lookup_key = None
trimmed_code = code
skip_words_count = 0
obj = None
params_type = None
command_name = None
params = []
words = code.split()
more_words_count = len(words)
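# scan the words: skip leading '//' comments; if the first real word is a '--command', collect its parameters until an option ('-...') is reached, otherwise treat the code as plain kql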
for word in words:
more_words_count -= 1
if params_type == "not_quoted_str" and not word.startswith("-"):
# break
pass
if skip_words_count == 0:
_comment, skip_words_count = cls._parse_comment(word, trimmed_code)
if skip_words_count > 0:
skip_words_count -= 1
trimmed_code = trimmed_code[trimmed_code.find(word) + len(word):]
continue
# command
elif command_name is None:
if not word.strip().startswith("--"):
break
word = word[2:]
if word.startswith("-"):
raise ValueError(f"unknown command --{word}, commands' prefix should be a double hyphen-minus, not a triple hyphen-minus")
lookup_key = word.lower().replace("_", "").replace("-", "")
obj = cls._COMMANDS_TABLE.get(lookup_key)
if obj is None:
raise ValueError(f"unknown command --{word}")
command_name = obj.get("flag")
trimmed_code = trimmed_code[trimmed_code.find(word) + len(word):]
params_type = obj.get("type")
if params_type is None:
break
# option
elif word.startswith("-"):
break
# command's parameters
else:
command = {"command": command_name, "obj": obj, "params": params}
EXIT_ON_OPTION = True
trimmed_code, params = cls._parse_kql_command_params(command, trimmed_code, EXIT_ON_OPTION, user_ns)
break
if command_name is None:
return (code.strip(), {})
if command_name == "python":
params = params or [""]
params.append(lookup_key) # type of python "py", "pyro", "pyrw"
elif command_name in ["line_magic", "cell_magic"]:
body = params[0] if params else ""
magic_word = body.split(None, 1)[0]
body = body.lstrip()[len(magic_word):]
params = []
if command_name == "cell_magic":
first_line = body.split("\n", 1)[0]
params.append(first_line.strip())
body = body[len(first_line) + 1:]
else:
body = body.lstrip()
params.append(body)
ipykernel_prefix_len = 0
if command_name == "cell_magic" and magic_word.startswith(Constants.IPYKERNEL_CELL_MAGIC_PREFIX):
ipykernel_prefix_len = len(Constants.IPYKERNEL_CELL_MAGIC_PREFIX)
elif command_name == "line_magic" and magic_word.startswith(Constants.IPYKERNEL_LINE_MAGIC_PREFIX):
ipykernel_prefix_len = len(Constants.IPYKERNEL_LINE_MAGIC_PREFIX)
magic_name = magic_word[ipykernel_prefix_len:]
params.append(magic_name) # the magic name
if magic_name in ["py", "pyro", "pyrw"]:
obj = cls._COMMANDS_TABLE.get(magic_name)
body = params[0] if command_name == "line_magic" else f"{params[0]}\n{params[1]}"
return ("", {"command": obj.get("flag"), "obj": obj, "params": [body, magic_name]})
return (trimmed_code.strip(), {"command": command_name, "obj": obj, "params": params})
@classmethod
def _parse_kql_command_params(cls, command:dict, code:str, exit_on_options:bool, user_ns:Dict[str,Any])->Tuple[str,List[str]]:
trimmed_code = code.strip()
params = command.get("params")
obj = command.get("obj")
params_type = obj.get("type")
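# a "not_quoted_str" parameter consumes the rest of the text as a single parameter; other typed parameters are parsed word by word, keeping quoted strings together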
if params_type == "not_quoted_str":
if exit_on_options and trimmed_code.startswith("-"):
# DO NOTHING, EXIT
pass
else:
params.append(trimmed_code)
trimmed_code = ""
elif params_type is not None:
skip_words_count = 0
command_name = command.get("command")
words = code.split()
for word in words:
if skip_words_count == 0:
_comment, skip_words_count = cls._parse_comment(word, trimmed_code)
if skip_words_count > 0:
skip_words_count -= 1
trimmed_code = trimmed_code[trimmed_code.find(word) + len(word):]
continue
# option
if exit_on_options and word.startswith("-"):
break
# command's parameters
if params_type == "str" and word[0] in ["'",'"']:
quoted_string, skip_words_count = cls.parse_quote(trimmed_code)
param = quoted_string
skip_words_count -= 1
else:
param = word
params.append(cls._parse_value("command", obj, command_name, param, user_ns))
trimmed_code = trimmed_code[trimmed_code.find(word) + len(word):]
return trimmed_code.strip(), params
@classmethod
def _update_kql_command_params(cls, command:dict, cell_rest:str, user_ns:Dict[str,Any])->str:
params = command.get("params")
if len(params) == 0:
obj = command.get("obj")
if obj.get("type") == "not_quoted_str":
params.append(cell_rest)
cell_rest = ""
elif len(cell_rest) > 0 and obj.get("type") is not None:
DONT_EXIT_ON_OPTION = False
cell_rest, params = cls._parse_kql_command_params(command, cell_rest, DONT_EXIT_ON_OPTION, user_ns)
cell_rest = ""
elif "default" in obj:
params.append(obj.get("default"))
command["params"] = params
return cell_rest
@classmethod
def _validate_kql_command_params(cls, command:dict):
params = command.get("params")
command_name = command.get("command")
obj = command.get("obj")
_type = obj.get("type")
max_params = obj.get("max_params") or (1 if _type is not None else 0)
if len(params) > max_params:
raise ValueError(f"command --{command_name} has too many parameters")
min_params = obj.get("min_params") or (1 if _type is not None else 0)
if len(params) < min_params:
raise ValueError(f"command --{command_name} is missing parameter")
@classmethod
def validate_query_properties(cls, schema:str, properties:Dict[str,Any])->None:
if type(properties) == dict:
unsupported_properties = []
for p in properties:
prop = cls._QUERY_PROPERTIES_TABLE[p]
prop_schema_list = prop.get("schema")
if type(prop_schema_list) == list and schema not in prop_schema_list and len(prop_schema_list) > 0 and schema is not None:
unsupported_properties.append(p)
if len(unsupported_properties) > 0:
raise ValueError(f"query properties {unsupported_properties} are not supported by current connection")
_QUERY_PROPERTIES_TABLE = {
# NOT DOCUMENTED - (OptionBlockSplittingEnabled): Enables splitting of sequence blocks after aggregation operator. [Boolean]
"block_splitting_enabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionDatabasePattern): Database pattern overrides database name and picks the 1st database that matches the pattern.
# '*' means any database that user has access to. [String]
"database_pattern": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# NOT DOCUMENTED - If set, don't fuse projection into ExternalData operator. [bool]
"debug_query_externaldata_projection_fusion_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - The percentage of threads to fanout execution to for external data nodes. [int]
"debug_query_fanout_threads_percent_external_data": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionDeferPartialQueryFailures): If true, disables reporting partial query failures as part of the result set. [Boolean]
"deferpartialqueryfailures": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionMaterializedViewShuffleQuery): A hint to use shuffle strategy for materialized views that are referenced in the query. The property is an array of materialized views names and the shuffle keys to use. examples: 'dynamic([ { "Name": "V1", "Keys" : [ "K1", "K2" ] } ])' (shuffle view V1 by K1, K2) or 'dynamic([ { "Name": "V1" } ])' (shuffle view V1 by all keys) [dynamic]
"materialized_view_shuffle": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "dict"},
# (OptionMaxMemoryConsumptionPerQueryPerNode): Overrides the default maximum amount of memory a whole query may allocate per node. [UInt64]
"max_memory_consumption_per_query_per_node": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionMaxMemoryConsumptionPerIterator): Overrides the default maximum amount of memory a query operator may allocate. [UInt64]
"maxmemoryconsumptionperiterator": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionMaxOutputColumns): Overrides the default maximum number of columns a query is allowed to produce. [Long]
"maxoutputcolumns": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionNoRequestTimeout): Enables setting the request timeout to its maximum value. [Boolean]
"norequesttimeout": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionNoTruncation): Enables suppressing truncation of the query results returned to the caller. [Boolean]
"notruncation": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionPushSelectionThroughAggregation): If true, push simple selection through aggregation [Boolean]
"push_selection_through_aggregation": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionAdminSuperSlackerMode): If true, delegate execution of the query to another node [Boolean]
"query_admin_super_slacker_mode": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (QueryBinAutoAt): When evaluating the bin_auto() function, the start value to use. [LiteralExpression]
"query_bin_auto_at": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (QueryBinAutoSize): When evaluating the bin_auto() function, the bin size value to use. [LiteralExpression]
"query_bin_auto_size": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryCursorAfterDefault): The default parameter value of the cursor_after() function when called without parameters. [string]
"query_cursor_after_default": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# NOT DOCUMENTED - (OptionQueryCursorAllowReferencingStreamingIngestionTables): Enable usage of cursor functions over databases which have streaming ingestion enabled. [boolean]
"query_cursor_allow_referencing_streaming_ingestion_tables": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionQueryCursorBeforeOrAtDefault): The default parameter value of the cursor_before_or_at() function when called without parameters. [string]
"query_cursor_before_or_at_default": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryCursorCurrent): Overrides the cursor value returned by the cursor_current() or current_cursor() functions. [string]
"query_cursor_current": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryCursorDisabled): Disables usage of cursor functions in the context of the query. [boolean]
"query_cursor_disabled ": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionQueryCursorScopedTables): List of table names that should be scoped to cursor_after_default ..
# cursor_before_or_at_default (upper bound is optional). [dynamic]
"query_cursor_scoped_tables": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "dict"},
# (OptionQueryDataScope): Controls the query's datascope -- whether the query applies to all data or just part of it. ['default', 'all', or 'hotcache']
"query_datascope": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['default', 'all', 'hotcache']},
# (OptionQueryDateTimeScopeColumn): Controls the column name for the query's datetime scope (query_datetimescope_to / query_datetimescope_from). [String]
"query_datetimescope_column": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryDateTimeScopeFrom): Controls the query's datetime scope (earliest)
# used as auto-applied filter on query_datetimescope_column only (if defined). [DateTime]
"query_datetimescope_from": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryDateTimeScopeTo): Controls the query's datetime scope (latest)
# used as auto-applied filter on query_datetimescope_column only (if defined). [DateTime]
"query_datetimescope_to": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionQueryDistributionNodesSpanSize): If set, controls the way sub-query merge behaves:
# the executing node will introduce an additional level in the query hierarchy for each sub-group of nodes; the size of the sub-group is set by this option. [Int]
"query_distribution_nodes_span": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "int"},
# (OptionQueryFanoutNodesPercent): The percentage of nodes to fanout execution to. [Int]
"query_fanout_nodes_percent": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryFanoutThreadsPercent): The percentage of threads to fanout execution to. [Int]
"query_fanout_threads_percent": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryForceRowLevelSecurity): If specified, forces Row Level Security rules, even if row_level_security policy is disabled [Boolean]
"query_force_row_level_security": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionQueryLanguage): Controls how the query text is to be interpreted. ['csl','kql' or 'sql']
"query_language": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['csl', 'kql', 'sql']},
# NOT DOCUMENTED - (RemoteMaterializeOperatorInCrossCluster): Enables remoting materialize operator in cross cluster query.
"query_materialize_remote_subquery": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionMaxEntitiesToUnion): Overrides the default maximum number of entities a query is allowed to union. [Long]
"query_max_entities_in_union": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryNow): Overrides the datetime value returned by the now(0s) function. [DateTime]
# note: cannot be relative to now()
"query_now": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionDebugPython): If set, generate python debug query for the enumerated python node (default first). [Boolean or Int]
"query_python_debug": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryResultsApplyGetSchema): If set, retrieves the schema of each tabular data in the results of the query instead of the data itself. [Boolean]
"query_results_apply_getschema": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionQueryResultsCacheMaxAge): If positive, controls the maximum age of the cached query results that Kusto is allowed to return [TimeSpan]
"query_results_cache_max_age": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# NOT DOCUMENTED - (CostBasedOptimizerBroadcastJoinBuildMax): Max Rows count for build in broadcast join.
"query_optimization_broadcast_build_maxSize": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# NOT DOCUMENTED - (CostBasedOptimizerBroadcastJoinProbeMin): Min Rows count for probe in broadcast join.
"query_optimization_broadcast_probe_minSize": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# NOT DOCUMENTED - (CostBasedOptimizer): Enables automatic optimizations.
"query_optimization_costbased_enabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionOptimizeInOperator): Optimizes in operands serialization.
"query_optimization_in_operator": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (CostBasedOptimizerShufflingCardinalityThreshold): Shuffling Cardinality Threshold.
"query_optimization_shuffling_cardinality": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# NOT DOCUMENTED - (OptionQueryRemoteEntitiesDisabled): If set, queries cannot access remote databases / clusters. [Boolean]
"query_remote_entities_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (RemoteInOperandsInQuery): Enables remoting in operands.
"query_remote_in_operands": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionProgressiveQueryMinRowCountPerUpdate): Hint for Kusto as to how many records to send in each update
# (Takes effect only if OptionProgressiveQueryIsProgressive is set)
"query_results_progressive_row_count": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionProgressiveProgressReportPeriod): Hint for Kusto as to how often to send progress frames (Takes effect only if OptionProgressiveQueryIsProgressive is set)
"query_results_progressive_update_period": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionTakeMaxRecords): Enables limiting query results to this number of records. [Long]
"query_take_max_records": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionQueryConsistency): Controls query consistency. ['strongconsistency' or 'normalconsistency' or 'weakconsistency']
"queryconsistency": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['strongconsistency', 'normalconsistency', 'weakconsistency']},
# (OptionRequestBlockRowLevelSecurity): If specified, blocks access to tables for which row_level_security policy is enabled [Boolean]
"request_block_row_level_security": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestCalloutDisabled): If set, callouts to external services are blocked. [Boolean]
"request_callout_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestDescription): Arbitrary text that the author of the request wants to include as the request description. [String]
"request_description": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionRequestExternalTableDisabled): If specified, indicates that the request cannot invoke code in the ExternalTable. [bool]
"request_external_table_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionDoNotImpersonate): If specified, indicates that the service shouldn't impersonate the caller's identity. [bool]
"request_impersonation_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestReadOnly): If specified, indicates that the request must not be able to write anything. [Boolean]
"request_readonly": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestEntitiesDisabled): If specified, indicates that the request cannot access remote databases and clusters. [bool]
"request_remote_entities_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionRequestSandboxedExecutionDisabled): If specified, indicates that the request cannot invoke code in the sandbox. [bool]
"request_sandboxed_execution_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionResponseDynamicSerialization): Controls the serialization of 'dynamic' values in result sets. ['string', 'json']
"response_dynamic_serialization": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['string', 'json']},
# NOT DOCUMENTED - (OptionResponseDynamicSerialization_2): Controls the serialization of 'dynamic' string and null values in result sets. ['legacy', 'current']
"response_dynamic_serialization_2": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "enum", "values": ['legacy', 'current']},
# (OptionResultsProgressiveEnabled): If set, enables the progressive query stream
"results_progressive_enabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# NOT DOCUMENTED - (OptionSandboxedExecutionDisabled): If set, using sandboxes as part of query execution is disabled. [Boolean]
"sandboxed_execution_disabled": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# (OptionServerTimeout): Overrides the default request timeout. [TimeSpan]
# is capped by 1hour
"servertimeout": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "str"},
# (OptionTruncationMaxRecords): Overrides the default maximum number of records a query is allowed to return to the caller (truncation). [Long]
"truncationmaxrecords": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionTruncationMaxSize): Overrides the default maximum data size a query is allowed to return to the caller (truncation). [Long]
"truncationmaxsize": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "uint"},
# (OptionValidatePermissions): Validates user's permissions to perform the query and doesn't run the query itself. [Boolean]
"validate_permissions": {"schema": [Schema.AZURE_DATA_EXPLORER], "type": "bool"},
# For either implicit or explicit cross-application queries, specify resources you will be accessing
# see https://dev.loganalytics.io/documentation/Using-the-API/Cross-Resource-Queries
"workspaces": {"schema": [Schema.LOG_ANALYTICS], "type": "list"},
# For either implicit or explicit cross-application queries, specify resources you will be accessing
# see: https://dev.applicationinsights.io/documentation/Using-the-API/Cross-Resource-Queries
"applications": {"schema": [Schema.APPLICATION_INSIGHTS, Schema.AIMON], "type": "list"},
# The timespan over which to query data. This is an ISO8601 time period value. This timespan is applied in addition to any that are specified in the query expression.
# see: https://docs.microsoft.com/en-us/rest/api/application-insights/query/get
"timespan": {"schema": [Schema.APPLICATION_INSIGHTS, Schema.AIMON, Schema.LOG_ANALYTICS], "type": "iso8601_duration"},
}
# all lookup keys in the table must be without spaces, underscores and hyphen-minus characters, because the parser ignores them
_OPTIONS_TABLE:Dict[str,Dict[str,Any]] = {
"ad": {"abbreviation": "autodataframe"},
"autodataframe": {"flag": "auto_dataframe", "type": "bool"},
"se": {"abbreviation": "shorterrors"},
"shorterrors": {"flag": "short_errors", "type": "bool"},
"f": {"abbreviation": "feedback"},
"feedback": {"flag": "feedback", "type": "bool"},
"sci": {"abbreviation": "showconninfo"},
"showconninfo": {"flag": "show_conn_info", "type": "str", "allow_none": True},
"c2lv": {"abbreviation": "columnstolocalvars"},
"columnstolocalvars": {"flag": "columns_to_local_vars", "type": "bool"},
"sqt": {"abbreviation": "showquerytime"},
"showquerytime": {"flag": "show_query_time", "type": "bool"},
"sq": {"abbreviation": "showquery"},
"showquery": {"flag": "show_query", "type": "bool"},
"sql": {"abbreviation": "showquerylink"},
"showquerylink": {"flag": "show_query_link", "type": "bool"},
"qld": {"abbreviation": "querylinkdestination"},
"querylinkdestination": {"flag": "query_link_destination", "type": "str"},
"esr": {"abbreviation": "enablesuppressresult"},
"enablesuppressresult": {"flag": "enable_suppress_result", "type": "bool"},
"pfi": {"abbreviation": "plotlyfsincludejs"},
"plotlyfsincludejs": {"flag": "plotly_fs_includejs", "type": "bool"},
"pw": {"abbreviation": "popupwindow"},
"popupwindow": {"flag": "popup_window", "type": "bool", "init": False},
"al": {"abbreviation": "autolimit"},
"autolimit": {"flag": "auto_limit", "type": "int", "allow_none": True},
"dl": {"abbreviation": "displaylimit"},
"displaylimit": {"flag": "display_limit", "type": "int", "allow_none": True},
"wait": {"abbreviation": "timeout"},
"to": {"abbreviation": "timeout"},
"timeout": {"flag": "timeout", "type": "int", "allow_none": True},
"ptst": {"abbreviation": "prettytablestyle"},
"prettytablestyle": {"flag": "prettytable_style", "type": "str"},
"var": {"abbreviation": "lastrawresultvar"},
"lastrawresultvar": {"flag": "last_raw_result_var", "type": "str"},
"tp": {"abbreviation": "tablepackage"},
"tablepackage": {"flag": "table_package", "type": "str"},
"pp": {"abbreviation": "plotpackage"},
"plotpackage": {"flag": "plot_package", "type": "str"},
"df": {"abbreviation": "dsnfilename"},
"dsnfilename": {"flag": "dsn_filename", "type": "str", "allow_none": True},
"vc": {"abbreviation": "validateconnectionstring"},
"validateconnectionstring": {"flag": "validate_connection_string", "type": "bool"},
"aps": {"abbreviation": "autopopupschema"},
"autopopupschema": {"flag": "auto_popup_schema", "type": "bool"},
"jd": {"abbreviation": "jsondisplay"},
"jsondisplay": {"flag": "json_display", "type": "str"},
"sjd": {"abbreviation": "schemajsondisplay"},
"schemajsondisplay": {"flag": "schema_json_display", "type": "str"},
"pd": {"abbreviation": "palettedesaturation"},
"palettedesaturation": {"flag": "palette_desaturation", "type": "float"},
"pn": {"abbreviation": "palettename"},
"paramsdict": {"flag": "params_dict", "type": "dict", "init": None},
"palettename": {"flag": "palette_name", "type": "str"},
"cache": {"flag": "cache", "type": "str", "allow_none": True},
"usecache": {"flag": "use_cache", "type": "str", "allow_none": True},
"tempfoldername": {"flag": "temp_folder_name", "type": "str"},
"cachefoldername": {"flag": "cache_folder_name", "type": "str"},
"exportfoldername": {"flag": "export_folder_name", "type": "str"},
"addkqlreftohelp": {"flag": "add_kql_ref_to_help", "type": "bool"},
"addschematohelp": {"flag": "add_schema_to_help", "type": "bool"},
"notebookapp": {"flag": "notebook_app", "type": "str"},
"debug": {"flag": "debug", "type": "bool"},
"checkmagicversion": {"flag": "check_magic_version", "type": "bool"},
"showwhatnew": {"flag": "show_what_new", "type": "bool"},
"showinitbanner": {"flag": "show_init_banner", "type": "bool"},
"warnmissingdependencies": {"flag": "warn_missing_dependencies", "type": "bool"},
"warnmissingenvvariables": {"flag": "warn_missing_env_variables", "type": "bool"},
"allowsinglelinecell": {"flag": "allow_single_line_cell", "type": "bool"},
"allowpycommentsbeforecell": {"flag": "allow_py_comments_before_cell", "type": "bool"},
"kqlmagickernel": {"flag": "kqlmagic_kernel", "type": "bool"},
"extrasrequire": {"flag": "extras_require", "type": "str"},
"testnotebookapp": {"flag": "test_notebook_app", "type": "str"},
"cloud": {"flag": "cloud", "type": "str"},
"enablesso": {"flag": "enable_sso", "type": "bool"},
"ssodbgcinterval": {"flag": "sso_db_gc_interval", "type": "int"},
"authusehttpclient": {"flag": "auth_use_http_client", "type": "bool"},
"tryazclilogin": {"flag": "try_azcli_login", "type": "bool"},
"tryazcliloginbyprofile": {"flag": "try_azcli_login_by_profile", "type": "bool"},
"tryvscodelogin": {"flag": "try_vscode_login", "type": "bool"},
"tryazcliloginsubscription": {"flag": "try_azcli_login_subscription", "type": "str", "allow_none": True},
"trytoken": {"flag": "try_token", "type": "dict", "allow_none": True},
"trymsi": {"flag": "try_msi", "type": "dict", "allow_none": True},
"idtag": {"abbreviation": "requestidtag"},
"requestidtag": {"flag": "request_id_tag", "type": "str", "allow_none": True},
"apptag": {"abbreviation": "requestapptag"},
"requestapptag": {"flag": "request_app_tag", "type": "str", "allow_none": True},
"usertag": {"abbreviation": "requestusertag"},
"requestusertag": {"flag": "request_user_tag", "type": "str", "allow_none": True},
"maxage": {"abbreviation": "requestcachemaxage"},
"requestcachemaxage": {"flag": "request_cache_max_age", "type": "int", "allow_none": True},
"dcln": {"abbreviation": "devicecodeloginnotification"},
"devicecodeloginnotification": {"flag": "device_code_login_notification", "type": "str"},
"dcne": {"abbreviation": "devicecodenotificationemail"},
"devicecodenotificationemail": {"flag": "device_code_notification_email", "type": "str"},
"saveas": {"flag": "save_as", "type": "str", "init": None},
"saveto": {"flag": "save_to", "type": "str", "init": None},
"query": {"flag": "query", "type": "str", "init": None},
"conn": {"flag": "conn", "type": "str", "init": None},
"queryproperties": {"flag": "query_properties", "type": "dict", "init": None},
"pc": {"abbreviation": "palettecolors"},
"palettecolors": {"flag": "palette_colors", "type": "int"},
"pr": {"abbreviation": "palettereverse"},
"palettereverse": {"flag": "palette_reverse", "type": "bool", "init": False},
"ps": {"abbreviation": "popupschema"},
"popupschema": {"flag": "popup_schema", "type": "bool", "init": False},
"did": {"abbreviation": "displayid"},
"displayid": {"flag": "display_id", "type": "bool", "init": False},
"displayhandlers": {"flag": "display_handlers", "type": "dict", "init": {}},
"pi": {"abbreviation": "popupinteraction"},
"popupinteraction": {"flag": "popup_interaction", "type": "str"},
"tempfilesserver": {"flag": "temp_files_server", "type": "str"},
"tempfilesserveraddress": {"flag": "temp_files_server_address", "type": "str", "allow_none": True},
"kernellocation": {"flag": "kernel_location", "type": "str"},
"kernelid": {"flag": "kernel_id", "type": "str", "allow_none": True},
"notebookserviceaddress": {"flag": "notebook_service_address", "type": "str", "allow_none": True},
"dtd": {"abbreviation": "dynamictodataframe"},
"dynamictodataframe": {"flag": "dynamic_to_dataframe", "type": "str"},
"tempfolderlocation": {"flag": "temp_folder_location", "type": "str"},
"pl": {"abbreviation": "plotlylayout"},
"plotlylayout": {"flag": "plotly_layout", "type": "dict", "allow_none": True},
"atw": {"abbreviation": "authtokenwarnings"},
"authtokenwarnings": {"flag": "auth_token_warnings", "type": "bool"},
"ecbp": {"abbreviation": "enablecurlybracketsparams"},
"enablecurlybracketsparams": {"flag": "enable_curly_brackets_params", "type": "bool"},
"nop": {"flag": "nop", "type": "bool", "init": False}, # does nothing, useful to indicate option part when no options are required
"av": {"abbreviation": "assignvar"},
"assignvar": {"flag": "assign_var", "type": "str", "allow_none": True},
"cv": {"abbreviation": "cursorvar"},
"cursorvar": {"flag": "cursor_var", "type": "str", "allow_none": True},
"ismagic": {"flag": "is_magic", "type": "bool"},
"caim": {"abbreviation": "codeauthinteractivemode"},
"codeauthinteractivemode": {"flag": "code_auth_interactive_mode", "type": "str", "allow_none": True},
}
@classmethod
def validate_override(cls, name:str, config:Configurable, **override_options)->Dict[str,Any]:
"""validate the provided option are valid"""
options = {}
for key, value in override_options.items():
obj = cls._get_obj(key, allow_abbr=True)
if obj.get("flag") in config.read_only_trait_names:
raise ValueError(f"option '{key}' in {name} is readony, cannot be set")
cls._convert(name, obj, key, value)
cls._validate_config_trait(name, obj, key, value, config)
options[obj.get("flag")] = value
return options
@classmethod
def parse_option(cls, dict_name:str, key:str, value:str, config:Configurable=None, lookup:Dict[str,Dict[str,Any]]=None, user_ns:Dict[str,Any]=None, allow_abbr:bool=None, force:bool=False):
"""validate the provided option are valid
return normalized key and value"""
obj = cls._get_obj(key, lookup=lookup, allow_abbr=allow_abbr)
value = cls._parse_value(dict_name, obj, key, value, user_ns=user_ns)
cls._validate_config_trait(dict_name, obj, key, value, config)
key_name = obj.get("flag", key)
if config.is_read_only(key_name) and value != getattr(config, key_name):
# done to raise the proper error
setattr(config, key_name, value)
return key_name, value
@classmethod
def _parse_value(cls, dict_name:str, obj:Dict[str,Any], key:str, string:str, user_ns:Dict[str,Any])->Any:
_type = obj.get("type")
if string == "" and _type == "str":
return string
# if we allow values to be taken from python, we also allow them from environment variables
# when parsing an option coming from an environment variable we don't use user_ns
if string.startswith('$') and user_ns:
env_var_name = string[1:]
if not is_env_var(env_var_name):
raise ValueError(f"failed to parse referred value, due environment variable {env_var_name} not set")
string = get_env_var(env_var_name)
_was_quoted, value = strip_if_quoted(string)
else:
try:
value = eval(string, None, user_ns)
except:
# if no user_ns it means parse is for environment var, and it just may be an unquoted object
if user_ns:
raise
value = string
# check value is of the right type
return cls._convert(dict_name, obj, key, value)
@classmethod
def parse_config_key(cls, key:str, config:Configurable, allow_abbr:bool=None)->Tuple[str,str,Any]:
"""validate the provided option key is valid
return normalized key"""
obj = cls._get_obj(key, allow_abbr=allow_abbr)
name = obj.get("flag")
if "init" in obj:
value = obj.get("init")
elif name in cls.traits_dict:
value = getattr(config, name)
else:
raise f"internal error '{key}' has no init value and not defined as Kqlmagic traitlet"
return name, value
@classmethod
def _get_obj(cls, key:str, lookup:Dict[str,Dict[str,Any]]=None, allow_abbr:bool=None)->Dict[str,Any]:
lookup_key = key.lower().replace("-", "").replace("_", "")
lookup_table = lookup or cls._OPTIONS_TABLE
obj = lookup_table.get(lookup_key)
if obj is None:
raise ValueError(f"unknown option '{key}'")
if obj.get("abbreviation"):
obj = lookup_table.get(obj.get("abbreviation"))
if allow_abbr is not True:
raise ValueError(f"unknown option '{key}'. (Found option abbreviation '{key}' for {obj.get('flag')})")
return obj
@classmethod
def _parse_kql_options(cls, code:str, is_cell:bool, config:Configurable, user_ns:Dict[str,Any])->Tuple[str,Dict[str,Any]]:
trimmed_kql = code
trimmed_kql = trimmed_kql.strip()
suppress_results = False
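# a trailing ';' suppresses display of this query's results; in cell mode the ';' must be alone on the last line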
if trimmed_kql.endswith(";"):
suppress_results = not is_cell
if is_cell:
lines = trimmed_kql.splitlines(True)
if lines[-1].strip() == ";":
suppress_results = True
if suppress_results:
trimmed_kql = trimmed_kql[:-1].strip()
words = trimmed_kql.split()
properties = {}
table = options = cls.default_options.copy()
if not words:
return ("", options)
num_words = len(words)
first_word = 0
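# 'varname << query' syntax requests that the result be assigned to the named variable (recorded as the result_var option)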
if num_words - first_word >= 2 and words[first_word + 1] == "<<":
options["result_var"] = words[first_word]
trimmed_kql = trimmed_kql[trimmed_kql.find("<<") + 2:]
first_word += 2
obj = None
key = None
opt_key = None
key_state = True
option_type = None
is_option = True
is_property = False
skip_words_count = 0
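# two-state scan: while key_state is True the next word should be '-option[=value]' or '+property[=value]' (anything else ends the options); otherwise the word is the value of the pending key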
for word in words[first_word:]:
if key_state:
if skip_words_count == 0:
_comment, skip_words_count = cls._parse_comment(word, trimmed_kql)
if skip_words_count > 0:
skip_words_count -= 1
trimmed_kql = trimmed_kql[trimmed_kql.find(word) + len(word):]
continue
is_option = word.startswith("-")
is_property = word.startswith("+")
option_type = "option" if is_option else "query property"
if not is_option and not is_property:
break
# validate it is not a command
if is_option and word.startswith("--"):
raise ValueError(f"invalid {option_type} '{word}', cannot start with a bouble hyphen-minus")
trimmed_kql = trimmed_kql[trimmed_kql.find(word) + len(word):]
word = word[1:]
bool_value = True
if word[0].startswith("!"):
bool_value = False
word = word[1:]
if "=" in word:
parts = word.split("=", 1)
key = parts[0]
value = parts[1]
else:
key = word
value = None
if is_option:
lookup_key = key.lower().replace("-", "").replace("_", "")
obj = cls._OPTIONS_TABLE.get(lookup_key)
table = options
else:
lookup_key = key.lower()
obj = cls._QUERY_PROPERTIES_TABLE.get(lookup_key)
table = properties
if obj is not None:
if obj.get("abbreviation") is not None:
obj = cls._OPTIONS_TABLE.get(obj.get("abbreviation"))
if obj.get("flag") in config.read_only_trait_names:
raise ValueError(f"{option_type} {key} is readony, cannot be set")
_type = obj.get("type")
opt_key = obj.get("flag") or lookup_key
if _type == "bool" and value is None:
table[opt_key] = bool_value
else:
if not bool_value:
raise ValueError(f"{option_type} {key} cannot be negated")
if value is not None:
table[opt_key] = cls._parse_value("options" if is_option else "query properties", obj, key, value, user_ns)
else:
key_state = False
else:
raise ValueError(f"unknown {option_type} '{key}'")
else:
trimmed_kql = trimmed_kql[trimmed_kql.find(word) + len(word):]
table[opt_key] = cls._parse_value("options", obj, key, word, user_ns)
key_state = True
first_word += 1
# validate using config traits
if key_state and is_option:
cls._validate_config_trait("options", obj, key, options.get(opt_key), config)
if not key_state:
raise ValueError(f"{option_type} '{opt_key}' must have a value")
if options.get("query_properties"):
properties.update(options["query_properties"])
options["query_properties"] = properties
if suppress_results:
options["suppress_results"] = True
return (trimmed_kql.strip(), options)
@classmethod
def _parse_comment(cls, word:str, _str:str)->Tuple[str,int]:
comment = None
skip_words_count = 0
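# a '//' comment runs to the end of the line; return the comment text and the number of whitespace-separated words it spans, so the caller can skip them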
if word.startswith("//"):
idx_start = _str.find(word)
idx_end = _str[idx_start:].find("\n")
if idx_end > 0:
idx_end = idx_start + idx_end
comment = _str[idx_start:idx_end]
else:
comment = _str[idx_start:]
comment_words = comment.split()
skip_words_count = len(comment_words)
return comment, skip_words_count
@classmethod
def parse_and_get_kv_string(cls, conn_str:str, user_ns:Dict[str,Any], keep_original_key:bool=None)->Dict[str,Any]:
rest = conn_str
rest = rest.strip()
_was_quoted, rest = strip_if_quoted(rest)
matched_kv = {}
delimiter_required = False
lp_idx = rest.find("(")
eq_idx = rest.find("=")
sc_idx = rest.find(";")
l_char = "(" if eq_idx < 0 and sc_idx < 0 else "=" if lp_idx < 0 else "(" if lp_idx < eq_idx and lp_idx < sc_idx else "="
r_char = ")" if l_char == "(" else ";"
extra_delimiter = None if r_char == ";" else "."
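# two key/value formats are supported: key1(value1).key2(value2) (parenthesis style) and key1=value1;key2=value2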
while len(rest) > 0:
l_idx = rest.find(l_char)
r_idx = rest.find(r_char)
if l_idx < 0:
if l_char == "(":
# string ends with delimiter
if extra_delimiter is not None and extra_delimiter == rest:
break
else:
raise ValueError("invalid key/value string, missing left parethesis.")
# key only at end of string
elif r_idx < 0:
key = rest
val = ""
rest = ""
# key only
else:
key = rest[:r_idx].strip()
val = ""
rest = rest[r_idx + 1:].strip()
# key only
elif r_idx >= 0 and r_idx < l_idx:
if l_char == "(":
raise ValueError("invalid key/value string, missing left parethesis.")
else:
key = rest[:r_idx].strip()
val = ""
rest = rest[r_idx + 1:].strip()
# key and value
else:
key = rest[:l_idx].strip()
rest = rest[l_idx + 1:].strip()
r_idx = rest.find(r_char)
if r_idx < 0:
if l_char == "(":
raise ValueError("invalid key/value string, missing right parethesis.")
else:
val = rest
rest = ""
else:
val = rest[:r_idx].strip()
rest = rest[r_idx + 1:].strip()
if extra_delimiter is not None:
if key.startswith(extra_delimiter):
key = key[1:].strip()
elif delimiter_required:
raise ValueError("invalid key/value string, missing delimiter.")
delimiter_required = True
# key exist
if len(key) > 0:
if keep_original_key is True:
lookup_key = key
else:
val = cls._parse_value("key/value", {"type": "str"}, key, val, user_ns)
lookup_key = key.lower().replace("-", "").replace("_", "")
matched_kv[lookup_key] = val
# no key but value exist
elif len(val) > 0:
raise ValueError("invalid key/value string, missing key.")
# no key, no value in parenthesis mode
elif l_char == "(":
raise ValueError("invalid key/value string, missing key.")
return matched_kv
@classmethod
def parse_quote(cls, string:str):
string = string.strip()
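# supports single, double and triple quoted strings; inside single/double quotes a backslash escapes the delimiter
# returns the quoted string (with its quotes) and the number of words it spans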
delimiter = string[0]
delimiter_len = 1
triple_quote = len(string) > 2 and string[1] == delimiter and string[2] == delimiter
if triple_quote:
delimiter_len = 3
delimiter = delimiter * 3
quoted_string_len = string[3:].find(delimiter)
else:
escape = False
quoted_string_len = -1
count = 0
for c in string[1:]:
if c == "\\":
escape = not escape
elif escape:
pass
elif c == delimiter:
quoted_string_len = count
break
count += 1
if quoted_string_len >= 0:
trimmed_string = string[quoted_string_len + 2 * delimiter_len:]
if len(trimmed_string) > 0 and not trimmed_string[0].isspace():
raise SyntaxError("invalid syntax after quoted string, should be followed by whitespace only")
quoted_string = string[delimiter_len:quoted_string_len + delimiter_len]
quoted_words = len(quoted_string.split())
if len(quoted_string)> 0:
if quoted_string[-1].isspace():
quoted_words += 1
if quoted_string[0].isspace():
quoted_words += 1
else:
quoted_words = 1
return delimiter + quoted_string + delimiter, quoted_words
else:
raise SyntaxError("EOL while scanning quoted string")
@classmethod
def _convert(cls, name:str, obj:Dict[str,Any], key:str, value:Any)->Any:
if value is None:
if obj.get("allow_none"):
return None
else:
raise ValueError(f"option '{key}' doesn't allow None value.")
_type = None
try:
_type = obj.get("type")
if _type == "int":
if float(value) != int(value):
raise ValueError
return int(value)
elif _type == "uint":
if float(value) != int(value) or int(value) < 0:
raise ValueError
return int(value)
elif _type == "float":
return float(value)
elif _type == "bool":
if type(value) == str:
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
raise ValueError
elif bool(value) != int(value):
raise ValueError
return bool(value)
elif _type == "dict":
return dict(value)
elif _type == "list":
if type(value) == str:
value = [value]
return list(value)
elif _type == "enum":
enum_values = obj.get("values", [])
if value in enum_values:
return value
else:
raise ValueError
elif _type == "iso8601_duration":
# There are four ways to express a time interval:
# Start and end, such as "2007-03-01T13:00:00Z/2008-05-11T15:30:00Z"
# Start and duration, such as "2007-03-01T13:00:00Z/P1Y2M10DT2H30M"
# Duration and end, such as "P1Y2M10DT2H30M/2008-05-11T15:30:00Z"
# Duration only, such as "P1Y2M10DT2H30M", with additional context information
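# a single timedelta value is converted to an ISO8601 duration (requires the isodate module); a [start, end] pair is rendered as 'start/end'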
value_list = [value] if type(value) != list else list(value)[:2]
if len(value_list) == 0:
raise ValueError
elif len(value_list) == 1:
value = value_list[0]
if isinstance(value, timedelta):
isodate = Dependencies.get_module("isodate", message="timedelta convertion to iso8601 duration format is not supported without isodate module, use instead a datetime range format, or already converted string") # will throw if does not exist
value = isodate.duration_isoformat(value)
elif type(value) != str:
raise ValueError
return value
else:
start_value = value_list[0]
end_value = value_list[1]
if isinstance(start_value, timedelta):
isodate = Dependencies.get_module("isodate", dont_throw=True)
if isodate:
start_value = isodate.duration_isoformat(start_value)
else:
end_datetime = end_value if isinstance(end_value, datetime) else dateutil.parser.isoparse(end_value)
start_value = end_datetime - start_value
elif isinstance(end_value, timedelta):
isodate = Dependencies.get_module("isodate", dont_throw=True)
if isodate:
end_value = isodate.duration_isoformat(end_value)
else:
start_datetime = start_value if isinstance(start_value, datetime) else dateutil.parser.isoparse(start_value)
end_value = end_value + start_datetime
value_list = [v.strftime('%Y-%m-%dT%H:%M:%S%ZZ') if isinstance(v, datetime) else str(v) for v in [start_value, end_value]]
return "/".join(value_list)
else:
return str(value)
except Exception as e:
option_type = "property" if name == "query properties" else "option"
due_message = f"{e}" or f"invalid '{_type}' of value '{value}'"
raise ValueError(f"failed to set {option_type} '{key}' in {name}, due to {due_message}")
@classmethod
def _validate_config_trait(cls, dict_name:str, obj:Dict[str,Any], key:str, value:Any, config:Configurable)->None:
# validate using config traits
name = obj.get("flag")
if isinstance(config, Configurable) and name in cls.traits_dict:
#
# convert the value and run it through the trait's validator, without changing the config
#
try:
new_value = cls._convert(dict_name, obj, key, value)
trait:TraitType = cls.traits_dict.get(name)
if hasattr(trait, "_validate"):
validated_value = trait._validate(config, new_value)
return
except Exception as error:
raise ValueError(f"failed to set option '{key}' in {dict_name}, due to invalid value '{value}'. Exception: {error}")
|
import sys
import click
import os
import datetime
from unittest import TestCase, main
from frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames, get_frame_shape
from frigate.util import DictFrameManager, EventsPerSecond, draw_box_with_label
from frigate.motion import MotionDetector
from frigate.edgetpu import LocalObjectDetector
from frigate.objects import ObjectTracker
import multiprocessing as mp
import numpy as np
import cv2
from frigate.object_processing import COLOR_MAP, CameraState
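# ProcessClip runs Frigate's frame capture, motion detection and object detection pipeline
# over a single clip and reports whether the tracked object was detected and its top score.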
class ProcessClip():
def __init__(self, clip_path, frame_shape, config):
self.clip_path = clip_path
self.frame_shape = frame_shape
self.camera_name = 'camera'
self.frame_manager = DictFrameManager()
self.frame_queue = mp.Queue()
self.detected_objects_queue = mp.Queue()
self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
def load_frames(self):
fps = EventsPerSecond()
skipped_fps = EventsPerSecond()
stop_event = mp.Event()
detection_frame = mp.Value('d', datetime.datetime.now().timestamp()+100000)
current_frame = mp.Value('d', 0.0)
ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:".split(" ")
ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.frame_shape[0]*self.frame_shape[1]*self.frame_shape[2])
capture_frames(ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue, 1, fps, skipped_fps, stop_event, detection_frame, current_frame)
ffmpeg_process.wait()
ffmpeg_process.communicate()
def process_frames(self, objects_to_track=['person'], object_filters={}):
mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
mask[:] = 255
motion_detector = MotionDetector(self.frame_shape, mask)
object_detector = LocalObjectDetector(labels='/labelmap.txt')
object_tracker = ObjectTracker(10)
process_fps = mp.Value('d', 0.0)
detection_fps = mp.Value('d', 0.0)
current_frame = mp.Value('d', 0.0)
stop_event = mp.Event()
process_frames(self.camera_name, self.frame_queue, self.frame_shape, self.frame_manager, motion_detector, object_detector, object_tracker, self.detected_objects_queue,
process_fps, detection_fps, current_frame, objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
def objects_found(self, debug_path=None):
obj_detected = False
top_computed_score = 0.0
def handle_event(name, obj):
nonlocal obj_detected
nonlocal top_computed_score
if obj['computed_score'] > top_computed_score:
top_computed_score = obj['computed_score']
if not obj['false_positive']:
obj_detected = True
self.camera_state.on('new', handle_event)
self.camera_state.on('update', handle_event)
while(not self.detected_objects_queue.empty()):
camera_name, frame_time, current_tracked_objects = self.detected_objects_queue.get()
if not debug_path is None:
self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
self.camera_state.update(frame_time, current_tracked_objects)
for obj in self.camera_state.tracked_objects.values():
print(f"{frame_time}: {obj["id"]} - {obj["computed_score"]} - {obj["score_history"]}")
return {
'object_detected': obj_detected,
'top_score': top_computed_score
}
def save_debug_frame(self, debug_path, frame_time, tracked_objects):
current_frame = self.frame_manager.get(f"{self.camera_name}{frame_time}")
# draw the bounding boxes on the frame
for obj in tracked_objects:
thickness = 2
color = (0,0,175)
if obj['frame_time'] != frame_time:
thickness = 1
color = (255,0,0)
else:
color = (255,255,0)
# draw the bounding boxes on the frame
box = obj['box']
draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
# draw the regions on the frame
region = obj['region']
draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
@click.command()
@click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
@click.option("-l", "--label", default='person', help="Label name to detect.")
@click.option("-t", "--threshold", default=0.85, help="Threshold value for objects.")
@click.option("--debug-path", default=None, help="Path to output frames for debugging.")
def process(path, label, threshold, debug_path):
clips = []
if os.path.isdir(path):
files = os.listdir(path)
files.sort()
clips = [os.path.join(path, file) for file in files]
elif os.path.isfile(path):
clips.append(path)
config = {
'snapshots': {
'show_timestamp': False,
'draw_zones': False
},
'zones': {},
'objects': {
'track': [label],
'filters': {
'person': {
'threshold': threshold
}
}
}
}
results = []
for c in clips:
frame_shape = get_frame_shape(c)
process_clip = ProcessClip(c, frame_shape, config)
process_clip.load_frames()
process_clip.process_frames(objects_to_track=config['objects']['track'])
results.append((c, process_clip.objects_found(debug_path)))
for result in results:
print(f"{result[0]}: {result[1]}")
positive_count = sum(1 for result in results if result[1]['object_detected'])
print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
if __name__ == '__main__':
process()
|
import sys
import click
import os
import datetime
from unittest import TestCase, main
from frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames, get_frame_shape
from frigate.util import DictFrameManager, EventsPerSecond, draw_box_with_label
from frigate.motion import MotionDetector
from frigate.edgetpu import LocalObjectDetector
from frigate.objects import ObjectTracker
import multiprocessing as mp
import numpy as np
import cv2
from frigate.object_processing import COLOR_MAP, CameraState
class ProcessClip():
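    """Run the Frigate detection pipeline over a single pre-recorded clip.
    Frames are decoded with ffmpeg, passed through motion detection, object
    detection and tracking, and the tracked objects are scored so the clip can
    be judged as a real detection or a false positive."""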
def __init__(self, clip_path, frame_shape, config):
self.clip_path = clip_path
self.frame_shape = frame_shape
self.camera_name = 'camera'
self.frame_manager = DictFrameManager()
self.frame_queue = mp.Queue()
self.detected_objects_queue = mp.Queue()
self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
def load_frames(self):
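        """Decode the clip with ffmpeg and push raw RGB frames into the frame queue."""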
fps = EventsPerSecond()
skipped_fps = EventsPerSecond()
stop_event = mp.Event()
detection_frame = mp.Value('d', datetime.datetime.now().timestamp()+100000)
current_frame = mp.Value('d', 0.0)
ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:".split(" ")
ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.frame_shape[0]*self.frame_shape[1]*self.frame_shape[2])
capture_frames(ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue, 1, fps, skipped_fps, stop_event, detection_frame, current_frame)
ffmpeg_process.wait()
ffmpeg_process.communicate()
def process_frames(self, objects_to_track=['person'], object_filters={}):
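        """Run motion detection, object detection and tracking over the queued frames."""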
mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
mask[:] = 255
motion_detector = MotionDetector(self.frame_shape, mask)
object_detector = LocalObjectDetector(labels='/labelmap.txt')
object_tracker = ObjectTracker(10)
process_fps = mp.Value('d', 0.0)
detection_fps = mp.Value('d', 0.0)
current_frame = mp.Value('d', 0.0)
stop_event = mp.Event()
process_frames(self.camera_name, self.frame_queue, self.frame_shape, self.frame_manager, motion_detector, object_detector, object_tracker, self.detected_objects_queue,
process_fps, detection_fps, current_frame, objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
def objects_found(self, debug_path=None):
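        """Replay detection results through CameraState and report whether any
        non-false-positive object was seen, along with the best computed score."""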
obj_detected = False
top_computed_score = 0.0
def handle_event(name, obj):
nonlocal obj_detected
nonlocal top_computed_score
if obj['computed_score'] > top_computed_score:
top_computed_score = obj['computed_score']
if not obj['false_positive']:
obj_detected = True
self.camera_state.on('new', handle_event)
self.camera_state.on('update', handle_event)
        while not self.detected_objects_queue.empty():
            camera_name, frame_time, current_tracked_objects = self.detected_objects_queue.get()
            if debug_path is not None:
self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
self.camera_state.update(frame_time, current_tracked_objects)
for obj in self.camera_state.tracked_objects.values():
print(f"{frame_time}: {obj['id']} - {obj['computed_score']} - {obj['score_history']}")
return {
'object_detected': obj_detected,
'top_score': top_computed_score
}
def save_debug_frame(self, debug_path, frame_time, tracked_objects):
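        """Write a copy of the frame annotated with boxes, labels and regions to debug_path."""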
current_frame = self.frame_manager.get(f"{self.camera_name}{frame_time}")
# draw the bounding boxes on the frame
for obj in tracked_objects:
thickness = 2
color = (0,0,175)
if obj['frame_time'] != frame_time:
thickness = 1
color = (255,0,0)
else:
color = (255,255,0)
# draw the bounding boxes on the frame
box = obj['box']
draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
# draw the regions on the frame
region = obj['region']
draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
@click.command()
@click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
@click.option("-l", "--label", default='person', help="Label name to detect.")
@click.option("-t", "--threshold", default=0.85, help="Threshold value for objects.")
@click.option("--debug-path", default=None, help="Path to output frames for debugging.")
def process(path, label, threshold, debug_path):
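    """Process a single clip, or every clip in a directory, and report per-clip detection results.
    Example invocation (illustrative; the script name and paths are assumptions):
        python process_clip.py -p /clips/front_door.mp4 -l person -t 0.85 --debug-path /tmp/debug
    """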
clips = []
if os.path.isdir(path):
files = os.listdir(path)
files.sort()
clips = [os.path.join(path, file) for file in files]
elif os.path.isfile(path):
clips.append(path)
config = {
'snapshots': {
'show_timestamp': False,
'draw_zones': False
},
'zones': {},
'objects': {
'track': [label],
'filters': {
'person': {
'threshold': threshold
}
}
}
}
results = []
for c in clips:
frame_shape = get_frame_shape(c)
process_clip = ProcessClip(c, frame_shape, config)
process_clip.load_frames()
process_clip.process_frames(objects_to_track=config['objects']['track'])
results.append((c, process_clip.objects_found(debug_path)))
for result in results:
print(f"{result[0]}: {result[1]}")
positive_count = sum(1 for result in results if result[1]['object_detected'])
print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
if __name__ == '__main__':
process()
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from abc import ABCMeta
from collections import defaultdict
from logging import FileHandler
import torch.nn as nn
from mmcv.runner.dist_utils import master_only
from mmcv.utils.logging import get_logger, logger_initialized, print_log
class BaseModule(nn.Module, metaclass=ABCMeta):
"""Base module for all modules in openmmlab.
``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
functionality of parameter initialization. Compared with
``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
- ``init_cfg``: the config to control the initialization.
- ``init_weights``: The function of parameter
initialization and recording initialization
information.
- ``_params_init_info``: Used to track the parameter
initialization information. This attribute only
exists during executing the ``init_weights``.
Args:
init_cfg (dict, optional): Initialization config dict.
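    Example (illustrative sketch; ``ToyConvNet`` is a hypothetical subclass
    used only to show how ``init_cfg`` flows into ``init_weights``):
        >>> import torch.nn as nn
        >>> class ToyConvNet(BaseModule):
        ...     def __init__(self, init_cfg=None):
        ...         super().__init__(init_cfg)
        ...         self.conv = nn.Conv2d(3, 8, 3)
        >>> model = ToyConvNet(init_cfg=dict(type='Kaiming', layer='Conv2d'))
        >>> model.init_weights()  # applies the configured init and records it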
"""
def __init__(self, init_cfg=None):
"""Initialize BaseModule, inherited from `torch.nn.Module`"""
# NOTE init_cfg can be defined in different levels, but init_cfg
# in low levels has a higher priority.
super(BaseModule, self).__init__()
# define default value of init_cfg instead of hard code
# in init_weights() function
self._is_init = False
self.init_cfg = copy.deepcopy(init_cfg)
# Backward compatibility in derived classes
# if pretrained is not None:
# warnings.warn('DeprecationWarning: pretrained is a deprecated \
# key, please consider using init_cfg')
# self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
@property
def is_init(self):
return self._is_init
def init_weights(self):
"""Initialize the weights."""
is_top_level_module = False
# check if it is top-level module
if not hasattr(self, '_params_init_info'):
# The `_params_init_info` is used to record the initialization
# information of the parameters
# the key should be the obj:`nn.Parameter` of model and the value
# should be a dict containing
# - init_info (str): The string that describes the initialization.
# - tmp_mean_value (FloatTensor): The mean of the parameter,
# which indicates whether the parameter has been modified.
            # this attribute would be deleted after all parameters
            # are initialized.
self._params_init_info = defaultdict(dict)
is_top_level_module = True
# Initialize the `_params_init_info`,
# When detecting the `tmp_mean_value` of
# the corresponding parameter is changed, update related
# initialization information
for name, param in self.named_parameters():
self._params_init_info[param][
'init_info'] = f'The value is the same before and ' \
f'after calling `init_weights` ' \
f'of {self.__class__.__name__} '
self._params_init_info[param][
'tmp_mean_value'] = param.data.mean()
# pass `params_init_info` to all submodules
# All submodules share the same `params_init_info`,
# so it will be updated when parameters are
# modified at any level of the model.
for sub_module in self.modules():
sub_module._params_init_info = self._params_init_info
# Get the initialized logger, if not exist,
# create a logger named `mmcv`
logger_names = list(logger_initialized.keys())
logger_name = logger_names[0] if logger_names else 'mmcv'
from ..cnn import initialize
from ..cnn.utils.weight_init import update_init_info
module_name = self.__class__.__name__
if not self._is_init:
if self.init_cfg:
print_log(
f'initialize {module_name} with init_cfg {self.init_cfg}',
logger=logger_name)
initialize(self, self.init_cfg)
if isinstance(self.init_cfg, dict):
# prevent the parameters of
# the pre-trained model
# from being overwritten by
# the `init_weights`
if self.init_cfg['type'] == 'Pretrained':
return
for m in self.children():
if hasattr(m, 'init_weights'):
m.init_weights()
# users may overload the `init_weights`
update_init_info(
m,
init_info=f'Initialized by '
f'user-defined `init_weights`'
f' in {m.__class__.__name__} ')
self._is_init = True
else:
warnings.warn(f'init_weights of {self.__class__.__name__} has '
f'been called more than once.')
if is_top_level_module:
self._dump_init_info(logger_name)
for sub_module in self.modules():
del sub_module._params_init_info
@master_only
def _dump_init_info(self, logger_name):
"""Dump the initialization information to a file named
`initialization.log.json` in workdir.
Args:
logger_name (str): The name of logger.
"""
logger = get_logger(logger_name)
with_file_handler = False
# dump the information to the logger file if there is a `FileHandler`
for handler in logger.handlers:
if isinstance(handler, FileHandler):
handler.stream.write(
'Name of parameter - Initialization information\n')
for name, param in self.named_parameters():
handler.stream.write(
f'\n{name} - {param.shape}: '
f"\n{self._params_init_info[param]['init_info']} \n")
handler.stream.flush()
with_file_handler = True
if not with_file_handler:
for name, param in self.named_parameters():
print_log(
f'\n{name} - {param.shape}: '
f"\n{self._params_init_info[param]['init_info']} \n ",
logger=logger_name)
def __repr__(self):
s = super().__repr__()
if self.init_cfg:
s += f'\ninit_cfg={self.init_cfg}'
return s
class Sequential(BaseModule, nn.Sequential):
"""Sequential module in openmmlab.
Args:
init_cfg (dict, optional): Initialization config dict.
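    Example (illustrative sketch; the ``init_cfg`` shown is only one possible choice):
        >>> import torch.nn as nn
        >>> seq = Sequential(
        ...     nn.Conv2d(3, 8, 3), nn.ReLU(),
        ...     init_cfg=dict(type='Kaiming', layer='Conv2d'))
        >>> seq.init_weights()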
"""
def __init__(self, *args, init_cfg=None):
BaseModule.__init__(self, init_cfg)
nn.Sequential.__init__(self, *args)
class ModuleList(BaseModule, nn.ModuleList):
"""ModuleList in openmmlab.
Args:
modules (iterable, optional): an iterable of modules to add.
init_cfg (dict, optional): Initialization config dict.
"""
def __init__(self, modules=None, init_cfg=None):
BaseModule.__init__(self, init_cfg)
nn.ModuleList.__init__(self, modules)
class ModuleDict(BaseModule, nn.ModuleDict):
"""ModuleDict in openmmlab.
Args:
modules (dict, optional): a mapping (dictionary) of (string: module)
or an iterable of key-value pairs of type (string, module).
init_cfg (dict, optional): Initialization config dict.
"""
def __init__(self, modules=None, init_cfg=None):
BaseModule.__init__(self, init_cfg)
nn.ModuleDict.__init__(self, modules)
|
"""create DOEs and execute design workflow
Caution:
This module requires fa_pytuils and delismm!
Please contatct the developers for these additional packages.
"""
import os
from collections import OrderedDict
import datetime
import numpy as np
import matplotlib.pyplot as plt
from delismm.model.doe import LatinizedCentroidalVoronoiTesselation, DOEfromFile
from delismm.model.samplecalculator import getY
from delismm.model.customsystemfunction import BoundsHandler, AbstractTargetFunction
from fa_pyutils.service.systemutils import getRunDir
from tankoh2.control_sf import createWindingDesign
from tankoh2 import programDir, log, pychain
from tankoh2.service import indent
dome = 'circle' # isotensoid circle
safetyFactor = 1 # 2.25
lb = OrderedDict([('r', 500.), ('lzylByR', 0.01), ('dp', 0.13 * safetyFactor)]) # [mm, - , MPa]
ub = OrderedDict([('r', 1600.), ('lzylByR', 12.), ('dp', 0.5 * safetyFactor)])
useFibreFailure = False
numberOfSamples = 201
class TankWinder(AbstractTargetFunction):
""""""
name = 'tank winder'
def __init__(self, lb, ub, runDir):
""""""
resultNames = ['frpMass', 'volume', 'area', 'lzylinder', 'numberOfLayers', 'angles', 'hoopLayerShifts']
AbstractTargetFunction.__init__(self, lb, ub, resultNames=resultNames)
self.doParallelization = []
self.runDir = runDir
self.allowFailedSample = True
def _call(self, parameters):
"""call function for the model"""
runDir = getRunDir(basePath=os.path.join(self.runDir), useMilliSeconds=True)
r, lzyl, burstPressure = parameters
result = createWindingDesign(dzyl=r * 2, lzylByR=lzyl, burstPressure=burstPressure,
minPolarOpening=r / 10, runDir=runDir,
domeType=pychain.winding.DOME_TYPES.ISOTENSOID if dome == 'isotensoid' else pychain.winding.DOME_TYPES.CIRCLE,
useFibreFailure = useFibreFailure)
return result
volumeFunc = lambda r, lzylByR: (4 / 3 * np.pi * r ** 3 + r * lzylByR * np.pi * r ** 2)
"""[m**3]"""
def plotGeometryRange(radii, lzylByRs, plotDir='', show=False, samples=None):
"""
:param radii: tuple with min and max radius [mm]
:param lzylByRs: tuple with min and max lzylByR [-]
:return: None
"""
radii = np.array(radii) / 1e3 # convert to m
if samples is not None:
samplesR, samplesLzylByR = samples[:2, :]
samplesR = samplesR / 1e3
fig = plt.figure(figsize=(15,6))
axes = [fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)]
axes[1].set_yscale("log")
for ax in axes:
ax.set_title("Parameter bounds")
ax.set_xlabel('Radius [m]')
ax.set_ylabel('Volume [m^3]')
color = 'tab:blue'
for lzylByR in lzylByRs:
x = np.linspace(*radii,11)
volumes = [volumeFunc(r, lzylByR) for r in x]
ax.plot(x, volumes, color=color, label=f'lzylByR={lzylByR}')
color = 'tab:orange'
ax.legend()
if samples is not None:
volumes = volumeFunc(samplesR, samplesLzylByR)
ax.scatter(samplesR, volumes, label=f'samples')
if plotDir:
plt.savefig(plotDir+'/geometryRange.png')
if show:
plt.show()
def main():
sampleFile = '' + 'C:/PycharmProjects/tankoh2/tmp/doe_circle_20210520_135237_cvt/sampleX.txt'
startTime = datetime.datetime.now()
names = list(lb.keys())
runDir = getRunDir(f'doe_{dome}_{'puckff' if useFibreFailure else 'puckiff'}',
basePath=os.path.join(programDir, 'tmp'))
winder = TankWinder(lb, ub, runDir)
if sampleFile:
lcvt = DOEfromFile(sampleFile)
else:
lcvt = LatinizedCentroidalVoronoiTesselation(numberOfSamples, len(names))
sampleX = BoundsHandler.scaleToBoundsStatic(lcvt.sampleXNormalized, list(lb.values()), list(ub.values()))
plotGeometryRange([lb['r'], ub['r']],[lb['lzylByR'], ub['lzylByR']], plotDir=runDir, samples=sampleX)
lcvt.xToFile(os.path.join(runDir, 'sampleX.txt'))
lcvt.xToFileStatic(os.path.join(runDir, 'sampleX_bounds.txt'), sampleX)
sampleY = getY(sampleX, winder, verbose=True, runDir=runDir)
# store samples
lcvt.yToFile(os.path.join(runDir, 'sampleY.txt'), winder, sampleY)
# lcvt.xyToFile(os.path.join(runDir, 'full_doe2.txt'), winder, sampleY, True)
allSamples = [names + winder.resultNames]
for inputSample, outputSample in zip(sampleX.T, sampleY):
if hasattr(outputSample, '__iter__'):
allSamples.append(list(inputSample) + list(outputSample))
else:
allSamples.append(list(inputSample) + list([outputSample]))
with open(os.path.join(runDir, 'full_doe.txt'), 'w') as f:
f.write(indent(allSamples, hasHeader=True))
duration = datetime.datetime.now() - startTime
log.info(f'runtime {duration.seconds} seconds')
if __name__ == '__main__':
if 1:
main()
else:
plotGeometryRange([lb['r'], ub['r']],[lb['lzylByR'], ub['lzylByR']], show=True)
|
"""create DOEs and execute design workflow
Caution:
This module requires fa_pytuils and delismm!
Please contatct the developers for these additional packages.
"""
import os
from collections import OrderedDict
import datetime
import numpy as np
import matplotlib.pyplot as plt
from delismm.model.doe import LatinizedCentroidalVoronoiTesselation, DOEfromFile
from delismm.model.samplecalculator import getY
from delismm.model.customsystemfunction import BoundsHandler, AbstractTargetFunction
from fa_pyutils.service.systemutils import getRunDir
from tankoh2.control_sf import createWindingDesign
from tankoh2 import programDir, log, pychain
from tankoh2.service import indent
dome = 'circle' # isotensoid circle
safetyFactor = 1 # 2.25
lb = OrderedDict([('r', 500.), ('lzylByR', 0.01), ('dp', 0.13 * safetyFactor)]) # [mm, - , MPa]
ub = OrderedDict([('r', 1600.), ('lzylByR', 12.), ('dp', 0.5 * safetyFactor)])
useFibreFailure = False
numberOfSamples = 201
class TankWinder(AbstractTargetFunction):
""""""
name = 'tank winder'
def __init__(self, lb, ub, runDir):
""""""
resultNames = ['frpMass', 'volume', 'area', 'lzylinder', 'numberOfLayers', 'angles', 'hoopLayerShifts']
AbstractTargetFunction.__init__(self, lb, ub, resultNames=resultNames)
self.doParallelization = []
self.runDir = runDir
self.allowFailedSample = True
def _call(self, parameters):
"""call function for the model"""
runDir = getRunDir(basePath=os.path.join(self.runDir), useMilliSeconds=True)
r, lzyl, burstPressure = parameters
result = createWindingDesign(dzyl=r * 2, lzylByR=lzyl, burstPressure=burstPressure,
minPolarOpening=r / 10, runDir=runDir,
domeType=pychain.winding.DOME_TYPES.ISOTENSOID if dome == 'isotensoid' else pychain.winding.DOME_TYPES.CIRCLE,
useFibreFailure = useFibreFailure)
return result
def volumeFunc(r, lzylByR):
    """Tank volume [m**3]: sphere of radius r plus a cylinder of length lzylByR * r."""
    return 4 / 3 * np.pi * r ** 3 + r * lzylByR * np.pi * r ** 2
def plotGeometryRange(radii, lzylByRs, plotDir='', show=False, samples=None):
"""
:param radii: tuple with min and max radius [mm]
:param lzylByRs: tuple with min and max lzylByR [-]
:return: None
"""
radii = np.array(radii) / 1e3 # convert to m
if samples is not None:
samplesR, samplesLzylByR = samples[:2, :]
samplesR = samplesR / 1e3
fig = plt.figure(figsize=(15,6))
axes = [fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)]
axes[1].set_yscale("log")
for ax in axes:
ax.set_title("Parameter bounds")
ax.set_xlabel('Radius [m]')
ax.set_ylabel('Volume [m^3]')
color = 'tab:blue'
for lzylByR in lzylByRs:
x = np.linspace(*radii,11)
volumes = [volumeFunc(r, lzylByR) for r in x]
ax.plot(x, volumes, color=color, label=f'lzylByR={lzylByR}')
color = 'tab:orange'
ax.legend()
if samples is not None:
volumes = volumeFunc(samplesR, samplesLzylByR)
ax.scatter(samplesR, volumes, label=f'samples')
if plotDir:
plt.savefig(plotDir+'/geometryRange.png')
if show:
plt.show()
def main():
sampleFile = '' + 'C:/PycharmProjects/tankoh2/tmp/doe_circle_20210520_135237_cvt/sampleX.txt'
startTime = datetime.datetime.now()
names = list(lb.keys())
runDir = getRunDir(f'doe_{dome}_{"puckff" if useFibreFailure else "puckiff"}',
basePath=os.path.join(programDir, 'tmp'))
winder = TankWinder(lb, ub, runDir)
if sampleFile:
lcvt = DOEfromFile(sampleFile)
else:
lcvt = LatinizedCentroidalVoronoiTesselation(numberOfSamples, len(names))
sampleX = BoundsHandler.scaleToBoundsStatic(lcvt.sampleXNormalized, list(lb.values()), list(ub.values()))
plotGeometryRange([lb['r'], ub['r']],[lb['lzylByR'], ub['lzylByR']], plotDir=runDir, samples=sampleX)
lcvt.xToFile(os.path.join(runDir, 'sampleX.txt'))
lcvt.xToFileStatic(os.path.join(runDir, 'sampleX_bounds.txt'), sampleX)
sampleY = getY(sampleX, winder, verbose=True, runDir=runDir)
# store samples
lcvt.yToFile(os.path.join(runDir, 'sampleY.txt'), winder, sampleY)
# lcvt.xyToFile(os.path.join(runDir, 'full_doe2.txt'), winder, sampleY, True)
allSamples = [names + winder.resultNames]
for inputSample, outputSample in zip(sampleX.T, sampleY):
if hasattr(outputSample, '__iter__'):
allSamples.append(list(inputSample) + list(outputSample))
else:
allSamples.append(list(inputSample) + list([outputSample]))
with open(os.path.join(runDir, 'full_doe.txt'), 'w') as f:
f.write(indent(allSamples, hasHeader=True))
duration = datetime.datetime.now() - startTime
log.info(f'runtime {duration.seconds} seconds')
if __name__ == '__main__':
if 1:
main()
else:
plotGeometryRange([lb['r'], ub['r']],[lb['lzylByR'], ub['lzylByR']], show=True)
|
"""
This module controls defines celery tasks and their applicable schedules. The celery beat server and workers will start
when invoked. Please add internal-only celery tasks to the celery_tasks plugin.
When ran in development mode (CONFIG_LOCATION=<location of development.yaml configuration file. To run both the celery
beat scheduler and a worker simultaneously, and to have jobs kick off starting at the next minute, run the following
command: celery -A consoleme.celery_tasks.celery_tasks worker --loglevel=info -l DEBUG -B
"""
from __future__ import absolute_import
import json # We use a separate SetEncoder here so we cannot use ujson
import os
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
cache_all_scps,
cache_org_structure,
get_enabled_regions_for_account,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
)
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
asynpool.PROC_ALIVE_TIMEOUT = config.get("celery.asynpool_proc_alive_timeout", 60.0)
default_retry_kwargs = {
"autoretry_for": (Exception,),
"retry_backoff": True,
"retry_kwargs": {"max_retries": config.get("celery.default_max_retries", 5)},
}
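# default_retry_kwargs is unpacked into task decorators below, e.g.
# @app.task(soft_time_limit=1800, **default_retry_kwargs), so transient
# failures are retried with exponential backoff up to the configured maximum.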
class Celery(celery.Celery):
def on_configure(self) -> None:
sentry_dsn = config.get("sentry.dsn")
if sentry_dsn:
sentry_sdk.init(
sentry_dsn,
integrations=[
TornadoIntegration(),
CeleryIntegration(),
AioHttpIntegration(),
RedisIntegration(),
],
)
app = Celery(
"tasks",
broker=config.get(
f"celery.broker.{config.region}",
config.get("celery.broker.global", "redis://127.0.0.1:6379/1"),
),
backend=config.get(
f"celery.backend.{config.region}",
config.get("celery.broker.global", "redis://127.0.0.1:6379/2"),
),
)
if config.get("redis.use_redislite"):
import tempfile
import redislite
redislite_db_path = os.path.join(
config.get("redis.redislite.db_path", tempfile.NamedTemporaryFile().name)
)
redislite_client = redislite.Redis(redislite_db_path)
redislite_socket_path = f"redis+socket://{redislite_client.socket_file}"
app = Celery(
"tasks",
        # select logical Redis DBs 1 and 2 on the redislite socket (kombu redis+socket syntax)
        broker=f"{redislite_socket_path}?virtual_host=1",
        backend=f"{redislite_socket_path}?virtual_host=2",
)
app.conf.result_expires = config.get("celery.result_expires", 60)
app.conf.worker_prefetch_multiplier = config.get("celery.worker_prefetch_multiplier", 4)
app.conf.task_acks_late = config.get("celery.task_acks_late", True)
if config.get("celery.purge") and not config.get("redis.use_redislite"):
# Useful to clear celery queue in development
with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
app.control.purge()
log = config.get_logger()
red = RedisHandler().redis_sync()
aws = get_plugin_by_name(config.get("plugins.aws", "default_aws"))
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
group_mapping = get_plugin_by_name(
config.get("plugins.group_mapping", "default_group_mapping")
)()
internal_celery_tasks = get_plugin_by_name(
config.get("plugins.internal_celery_tasks", "default_celery_tasks")
)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
internal_policies = get_plugin_by_name(
config.get("plugins.internal_policies", "default_policies")
)()
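# Batch size for HSCAN when iterating the cached IAM role hash
# (see _scan_redis_iam_cache / clear_old_redis_iam_cache).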
REDIS_IAM_COUNT = 1000
@app.task(soft_time_limit=20)
def report_celery_last_success_metrics() -> bool:
"""
    For each celery task, this will determine the number of seconds since it has last been successful.
    Celery tasks should be emitting redis stats with a deterministic key (in our case, `f"{task}.last_success"`).
    report_celery_last_success_metrics should be run periodically to emit metrics on when a task was last successful.
    We can then alert when tasks are not run when intended. We should also alert when no metrics are emitted
    from this function.
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {"function": function}
current_time = int(time.time())
global schedule
for _, t in schedule.items():
task = t.get("task")
last_success = int(red.get(f"{task}.last_success") or 0)
if last_success == 0:
log_data["message"] = "Last Success Value is 0"
log_data["task_last_success_key"] = f"{task}.last_success"
log.error(log_data)
stats.gauge(f"{task}.time_since_last_success", current_time - last_success)
red.set(f"{task}.time_since_last_success", current_time - last_success)
red.set(
f"{function}.last_success", int(time.time())
) # Alert if this metric is not seen
stats.count(f"{function}.success")
stats.timer("worker.healthy")
return True
def get_celery_request_tags(**kwargs):
request = kwargs.get("request")
sender_hostname = "unknown"
sender = kwargs.get("sender")
if sender:
try:
sender_hostname = sender.hostname
except AttributeError:
sender_hostname = vars(sender.request).get("origin", "unknown")
if request and not isinstance(
request, Context
): # unlike others, task_revoked sends a Context for `request`
task_name = request.name
task_id = request.id
receiver_hostname = request.hostname
else:
try:
task_name = sender.name
except AttributeError:
task_name = kwargs.pop("name", "")
try:
task_id = sender.request.id
except AttributeError:
task_id = kwargs.pop("id", "")
try:
receiver_hostname = sender.request.hostname
except AttributeError:
receiver_hostname = ""
tags = {
"task_name": task_name,
"task_id": task_id,
"sender_hostname": sender_hostname,
"receiver_hostname": receiver_hostname,
}
tags["expired"] = kwargs.get("expired", False)
exception = kwargs.get("exception")
if not exception:
exception = kwargs.get("exc")
if exception:
tags["error"] = repr(exception)
if isinstance(exception, SoftTimeLimitExceeded):
tags["timed_out"] = True
return tags
@task_prerun.connect
def refresh_dynamic_config_in_worker(**kwargs):
tags = get_celery_request_tags(**kwargs)
log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
dynamic_config = red.get("DYNAMIC_CONFIG_CACHE")
if not dynamic_config:
log.error({**log_data, "error": "Unable to retrieve Dynamic Config from Redis"})
return
dynamic_config_j = json.loads(dynamic_config)
if config.CONFIG.config.get("dynamic_config", {}) != dynamic_config_j:
log.debug(
{
**log_data,
**tags,
"message": "Refreshing dynamic configuration on Celery Worker",
}
)
config.CONFIG.config["dynamic_config"] = dynamic_config_j
@task_received.connect
def report_number_pending_tasks(**kwargs):
"""
Report the number of pending tasks to our metrics broker every time a task is published. This metric can be used
for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-received
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
tags = get_celery_request_tags(**kwargs)
tags.pop("task_id", None)
stats.timer("celery.new_pending_task", tags=tags)
@task_success.connect
def report_successful_task(**kwargs):
"""
    Report a generic success metric as tasks to our metrics broker every time a task finishes correctly.
This metric can be used for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-success
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
tags = get_celery_request_tags(**kwargs)
    red.set(f"{tags['task_name']}.last_success", int(time.time()))
tags.pop("error", None)
tags.pop("task_id", None)
stats.timer("celery.successful_task", tags=tags)
@task_retry.connect
def report_task_retry(**kwargs):
"""
    Report a generic retry metric as tasks to our metrics broker every time a task is retried.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-retry
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Retry",
}
# Add traceback if exception info is in the kwargs
einfo = kwargs.get("einfo")
if einfo:
log_data["traceback"] = einfo.traceback
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.retried_task", tags=error_tags)
@task_failure.connect
def report_failed_task(**kwargs):
"""
Report a generic failure metric as tasks to our metrics broker every time a task fails. This is also called when
a task has hit a SoftTimeLimit.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-failure
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Failure",
}
# Add traceback if exception info is in the kwargs
einfo = kwargs.get("einfo")
if einfo:
log_data["traceback"] = einfo.traceback
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.failed_task", tags=error_tags)
@task_unknown.connect
def report_unknown_task(**kwargs):
"""
Report a generic failure metric as tasks to our metrics broker every time a worker receives an unknown task.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-unknown
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Unknown",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.unknown_task", tags=error_tags)
@task_rejected.connect
def report_rejected_task(**kwargs):
"""
Report a generic failure metric as tasks to our metrics broker every time a task is rejected.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-rejected
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Rejected",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.rejected_task", tags=error_tags)
@task_revoked.connect
def report_revoked_task(**kwargs):
"""
Report a generic failure metric as tasks to our metrics broker every time a task is revoked.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-revoked
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Revoked",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.revoked_task", tags=error_tags)
@retry(
stop_max_attempt_number=4,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
)
def _add_role_to_redis(redis_key: str, role_entry: Dict) -> None:
"""
This function will add IAM role data to redis so that policy details can be quickly retrieved by the policies
endpoint.
IAM role data is stored in the `redis_key` redis key by the role's ARN.
Parameters
----------
redis_key : str
The redis key (hash)
role_entry : Dict
The role entry
Example: {'name': 'nameOfRole', 'accountId': '123456789012', 'arn': 'arn:aws:iam::123456789012:role/nameOfRole',
'templated': None, 'ttl': 1562510908, 'policy': '<json_formatted_policy>'}
"""
try:
red.hset(redis_key, role_entry["arn"], json.dumps(role_entry))
except Exception as e: # noqa
stats.count(
"cache_roles_for_account.error",
tags={"redis_key": redis_key, "error": str(e), "role_entry": role_entry},
)
log_data = {
"message": "Error syncing Account's IAM roles to Redis",
"account_id": role_entry["account_id"],
"arn": role_entry["arn"],
"role_entry": role_entry,
}
log.error(log_data, exc_info=True)
raise
@app.task(soft_time_limit=3600)
def cache_cloudtrail_errors_by_arn() -> Dict:
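    """Cache the per-ARN CloudTrail error counts reported by the internal policies
    plugin in Redis with a 24 hour expiry, for use by the policies table cache."""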
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data: Dict = {"function": function}
cloudtrail_errors: Dict = internal_policies.error_count_by_arn()
if not cloudtrail_errors:
cloudtrail_errors = {}
red.setex(
config.get(
"celery.cache_cloudtrail_errors_by_arn.redis_key",
"CLOUDTRAIL_ERRORS_BY_ARN",
),
86400,
json.dumps(cloudtrail_errors),
)
log_data["number_of_roles_with_errors"]: len(cloudtrail_errors.keys())
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800)
def cache_policies_table_details() -> bool:
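    """Aggregate IAM roles, S3 buckets, SNS topics, SQS queues and other AWS Config
    resources, together with their known error counts, and store the resulting
    policies-table entries in Redis (and in S3 when running in the active region)."""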
iam_role_redis_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
all_iam_roles = red.hgetall(iam_role_redis_key)
items = []
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
cloudtrail_errors = {}
cloudtrail_errors_j = red.get(
config.get(
"celery.cache_cloudtrail_errors_by_arn.redis_key",
"CLOUDTRAIL_ERRORS_BY_ARN",
)
)
if cloudtrail_errors_j:
cloudtrail_errors = json.loads(cloudtrail_errors_j)
s3_error_topic = config.get("redis.s3_errors", "S3_ERRORS")
all_s3_errors = red.get(s3_error_topic)
s3_errors = {}
if all_s3_errors:
s3_errors = json.loads(all_s3_errors)
for arn, role_details_j in all_iam_roles.items():
role_details = ujson.loads(role_details_j)
error_count = cloudtrail_errors.get(arn, 0)
s3_errors_for_arn = s3_errors.get(arn, [])
for error in s3_errors_for_arn:
error_count += int(error.get("count"))
account_id = arn.split(":")[4]
account_name = accounts_d.get(str(account_id), "Unknown")
resource_id = role_details.get("resourceId")
items.append(
{
"account_id": account_id,
"account_name": account_name,
"arn": arn,
"technology": "iam",
"templated": red.hget(
config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
arn.lower(),
),
"errors": error_count,
"config_history_url": async_to_sync(
get_aws_config_history_url_for_resource
)(account_id, resource_id, arn, "AWS::IAM::Role"),
}
)
s3_bucket_key: str = config.get("redis.s3_bucket_key", "S3_BUCKETS")
s3_accounts = red.hkeys(s3_bucket_key)
if s3_accounts:
for account in s3_accounts:
account_name = accounts_d.get(str(account), "Unknown")
buckets = json.loads(red.hget(s3_bucket_key, account))
for bucket in buckets:
bucket_arn = f"arn:aws:s3:::{bucket}"
s3_errors_for_arn = s3_errors.get(bucket_arn, [])
error_count = 0
for error in s3_errors_for_arn:
error_count += int(error.get("count"))
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": f"arn:aws:s3:::{bucket}",
"technology": "s3",
"templated": None,
"errors": error_count,
}
)
sns_topic_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
sns_accounts = red.hkeys(sns_topic_key)
if sns_accounts:
for account in sns_accounts:
account_name = accounts_d.get(str(account), "Unknown")
topics = json.loads(red.hget(sns_topic_key, account))
for topic in topics:
error_count = 0
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": topic,
"technology": "sns",
"templated": None,
"errors": error_count,
}
)
sqs_queue_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
sqs_accounts = red.hkeys(sqs_queue_key)
if sqs_accounts:
for account in sqs_accounts:
account_name = accounts_d.get(str(account), "Unknown")
queues = json.loads(red.hget(sqs_queue_key, account))
for queue in queues:
error_count = 0
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": queue,
"technology": "sqs",
"templated": None,
"errors": error_count,
}
)
resources_from_aws_config_redis_key: str = config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
)
resources_from_aws_config = red.hgetall(resources_from_aws_config_redis_key)
if resources_from_aws_config:
for arn, value in resources_from_aws_config.items():
resource = json.loads(value)
technology = resource["resourceType"]
# Skip technologies that we retrieve directly
if technology in [
"AWS::IAM::Role",
"AWS::SQS::Queue",
"AWS::SNS::Topic",
"AWS::S3::Bucket",
]:
continue
account_id = arn.split(":")[4]
account_name = accounts_d.get(account_id, "Unknown")
items.append(
{
"account_id": account_id,
"account_name": account_name,
"arn": arn,
"technology": technology,
"templated": None,
"errors": 0,
}
)
s3_bucket = None
s3_key = None
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("cache_policies_table_details.s3.bucket")
s3_key = config.get(
"cache_policies_table_details.s3.file",
"policies_table/cache_policies_table_details_v1.json.gz",
)
async_to_sync(store_json_results_in_redis_and_s3)(
items,
redis_key=config.get("policies.redis_policies_key", "ALL_POLICIES"),
s3_bucket=s3_bucket,
s3_key=s3_key,
)
stats.count(
"cache_policies_table_details.success",
tags={"num_roles": len(all_iam_roles.keys())},
)
return True
@app.task(soft_time_limit=2700, **default_retry_kwargs)
def cache_roles_for_account(account_id: str) -> bool:
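    """Cache all IAM roles for a single account. In the active region (or dev/test)
    this queries get_account_authorization_details, stores the raw response in S3,
    and syncs each role to DynamoDB and Redis, invoking the internal role handler."""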
# Get the DynamoDB handler:
dynamo = IAMRoleDynamoHandler()
cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
# Only query IAM and put data in Dynamo if we're in the active region
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
client = boto3_cached_conn(
"iam",
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
)
paginator = client.get_paginator("get_account_authorization_details")
response_iterator = paginator.paginate()
all_iam_resources = {}
for response in response_iterator:
if not all_iam_resources:
all_iam_resources = response
else:
all_iam_resources["UserDetailList"].extend(response["UserDetailList"])
all_iam_resources["GroupDetailList"].extend(response["GroupDetailList"])
all_iam_resources["RoleDetailList"].extend(response["RoleDetailList"])
all_iam_resources["Policies"].extend(response["Policies"])
for k in response.keys():
if k not in [
"UserDetailList",
"GroupDetailList",
"RoleDetailList",
"Policies",
"ResponseMetadata",
"Marker",
"IsTruncated",
]:
# Fail hard if we find something unexpected
raise RuntimeError("Unexpected key {0} in response".format(k))
# Store entire response in S3
async_to_sync(store_json_results_in_redis_and_s3)(
all_iam_resources,
s3_bucket=config.get("cache_iam_resources_for_account.s3.bucket"),
s3_key=config.get(
"cache_iam_resources_for_account.s3.file",
"get_account_authorization_details/get_account_authorization_details_{account_id}_v1.json.gz",
).format(account_id=account_id),
)
iam_roles = all_iam_resources["RoleDetailList"]
async_to_sync(store_json_results_in_redis_and_s3)(
iam_roles,
s3_bucket=config.get("cache_roles_for_account.s3.bucket"),
s3_key=config.get(
"cache_roles_for_account.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="iam_roles", account_id=account_id),
)
ttl: int = int((datetime.utcnow() + timedelta(hours=36)).timestamp())
# Save them:
for role in iam_roles:
role_entry = {
"arn": role.get("Arn"),
"name": role.get("RoleName"),
"resourceId": role.get("RoleId"),
"accountId": account_id,
"ttl": ttl,
"policy": dynamo.convert_role_to_json(role),
"templated": red.hget(
config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
role.get("Arn").lower(),
),
}
# DynamoDB:
dynamo.sync_iam_role_for_account(role_entry)
# Redis:
_add_role_to_redis(cache_key, role_entry)
# Run internal function on role. This can be used to inspect roles, add managed policies, or other actions
aws().handle_detected_role(role)
# Maybe store all resources in git
if config.get("cache_iam_resources_for_account.store_in_git.enabled"):
store_iam_resources_in_git(all_iam_resources, account_id)
stats.count("cache_roles_for_account.success", tags={"account_id": account_id})
return True
@app.task(soft_time_limit=3600)
def cache_roles_across_accounts() -> Dict:
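    """Fan out cache_roles_for_account over all known accounts in the active region;
    in other regions, sync role data from DynamoDB into Redis instead. Expired cache
    entries are evicted and the combined role list is stored in Redis and S3."""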
function = f"{__name__}.{sys._getframe().f_code.co_name}"
cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
log_data = {"function": function, "cache_key": cache_key}
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
tasks = []
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev"]:
# First, get list of accounts
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") in ["prod", "dev"]:
tasks.append(cache_roles_for_account.s(account_id))
else:
if account_id in config.get("celery.test_account_ids", []):
tasks.append(cache_roles_for_account.s(account_id))
results = group(*tasks).apply_async()
# results.join() forces function to wait until all tasks are complete
results.join()
else:
dynamo = IAMRoleDynamoHandler()
# In non-active regions, we just want to sync DDB data to Redis
roles = dynamo.fetch_all_roles()
for role_entry in roles:
_add_role_to_redis(cache_key, role_entry)
# Delete roles in Redis cache with expired TTL
all_roles = red.hgetall(cache_key)
roles_to_delete_from_cache = []
for arn, role_entry_j in all_roles.items():
role_entry = json.loads(role_entry_j)
if datetime.fromtimestamp(role_entry["ttl"]) < datetime.utcnow():
roles_to_delete_from_cache.append(arn)
if roles_to_delete_from_cache:
red.hdel(cache_key, *roles_to_delete_from_cache)
for arn in roles_to_delete_from_cache:
all_roles.pop(arn, None)
log_data["num_roles"] = len(all_roles)
# Store full list of roles in a single place. This list will be ~30 minutes out of date.
async_to_sync(store_json_results_in_redis_and_s3)(
all_roles,
redis_key=cache_key,
redis_data_type="hash",
s3_bucket=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.bucket"
),
s3_key=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.file",
"account_resource_cache/cache_all_roles_v1.json.gz",
),
)
stats.count(f"{function}.success")
log_data["num_accounts"] = len(accounts_d)
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_managed_policies_for_account(account_id: str) -> Dict[str, Union[str, int]]:
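    """Cache the ARNs of all managed policies in the given account in Redis
    (and in S3 when running in the active region)."""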
managed_policies: List[Dict] = get_all_managed_policies(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
)
all_policies: List = []
for policy in managed_policies:
all_policies.append(policy.get("Arn"))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_managed_policies": len(all_policies),
}
log.debug(log_data)
stats.count(
"cache_managed_policies_for_account",
tags={"account_id": account_id, "num_managed_policies": len(all_policies)},
)
policy_key = config.get("redis.iam_managed_policies_key", "IAM_MANAGED_POLICIES")
red.hset(policy_key, account_id, json.dumps(all_policies))
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get(
"account_resource_cache.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="managed_policies", account_id=account_id)
async_to_sync(store_json_results_in_redis_and_s3)(
all_policies, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=120)
def cache_managed_policies_across_accounts() -> bool:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_managed_policies_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_managed_policies_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_s3_buckets_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d: List = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_s3_buckets_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_s3_buckets_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_sqs_queues_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d: List = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_sqs_queues_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_sqs_queues_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_sns_topics_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d: List = async_to_sync(get_account_id_to_name_mapping)()
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_sns_topics_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_sns_topics_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_sqs_queues_for_account(account_id: str) -> Dict[str, Union[str, int]]:
all_queues: set = set()
enabled_regions = async_to_sync(get_enabled_regions_for_account)(account_id)
for region in enabled_regions:
client = boto3_cached_conn(
"sqs",
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=region,
read_only=True,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
)
paginator = client.get_paginator("list_queues")
response_iterator = paginator.paginate(PaginationConfig={"PageSize": 1000})
for res in response_iterator:
for queue in res.get("QueueUrls", []):
arn = f"arn:aws:sqs:{region}:{account_id}:{queue.split("/")[4]}"
all_queues.add(arn)
sqs_queue_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
red.hset(sqs_queue_key, account_id, json.dumps(list(all_queues)))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_sqs_queues": len(all_queues),
}
log.debug(log_data)
stats.count(
"cache_sqs_queues_for_account",
tags={"account_id": account_id, "number_sqs_queues": len(all_queues)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get(
"account_resource_cache.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="sqs_queues", account_id=account_id)
async_to_sync(store_json_results_in_redis_and_s3)(
all_queues, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_sns_topics_for_account(account_id: str) -> Dict[str, Union[str, int]]:
# Make sure it is regional
all_topics: set = set()
enabled_regions = async_to_sync(get_enabled_regions_for_account)(account_id)
for region in enabled_regions:
topics = list_topics(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=region,
read_only=True,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
)
for topic in topics:
all_topics.add(topic["TopicArn"])
sns_topic_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
red.hset(sns_topic_key, account_id, json.dumps(list(all_topics)))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_sns_topics": len(all_topics),
}
log.debug(log_data)
stats.count(
"cache_sns_topics_for_account",
tags={"account_id": account_id, "number_sns_topics": len(all_topics)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get(
"account_resource_cache.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="sns_topics", account_id=account_id)
async_to_sync(store_json_results_in_redis_and_s3)(
all_topics, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_s3_buckets_for_account(account_id: str) -> Dict[str, Union[str, int]]:
s3_buckets: List = list_buckets(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
read_only=True,
)
buckets: List = []
for bucket in s3_buckets["Buckets"]:
buckets.append(bucket["Name"])
s3_bucket_key: str = config.get("redis.s3_buckets_key", "S3_BUCKETS")
red.hset(s3_bucket_key, account_id, json.dumps(buckets))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_s3_buckets": len(buckets),
}
log.debug(log_data)
stats.count(
"cache_s3_buckets_for_account",
tags={"account_id": account_id, "number_s3_buckets": len(buckets)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get(
"account_resource_cache.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="s3_buckets", account_id=account_id)
async_to_sync(store_json_results_in_redis_and_s3)(
buckets, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@retry(
stop_max_attempt_number=4,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
)
def _scan_redis_iam_cache(
cache_key: str, index: int, count: int
) -> Tuple[int, Dict[str, str]]:
return red.hscan(cache_key, index, count=count)
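# Note on the retry settings above (my reading of the `retrying` library, stated as an
# assumption): stop_max_attempt_number=4 allows at most four attempts in total, and because
# wait_exponential_max equals wait_exponential_multiplier (1000 ms), every backoff wait is
# capped at roughly one second rather than growing unbounded.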
@app.task(soft_time_limit=1800)
def clear_old_redis_iam_cache() -> bool:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
# Do not run if this is not in the active region:
if config.region != config.get("celery.active_region", config.region):
return False
# Need to loop over all items in the set:
cache_key: str = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
index: int = 0
expire_ttl: int = int((datetime.utcnow() - timedelta(hours=6)).timestamp())
roles_to_expire = []
# We will loop over REDIS_IAM_COUNT items at a time:
try:
while True:
results = _scan_redis_iam_cache(cache_key, index, REDIS_IAM_COUNT)
index = results[0]
# Verify if the role is too old:
for arn, role in results[1].items():
role = json.loads(role)
if role["ttl"] <= expire_ttl:
roles_to_expire.append(arn)
# We will be complete if the next index is 0:
if not index:
break
except: # noqa
log_data = {
"function": function,
"message": "Error retrieving roles from Redis for cache cleanup.",
}
log.error(log_data, exc_info=True)
raise
# Delete all the roles that we need to delete:
try:
if roles_to_expire:
red.hdel(cache_key, *roles_to_expire)
except: # noqa
log_data = {
"function": function,
"message": "Error deleting roles from Redis for cache cleanup.",
}
log.error(log_data, exc_info=True)
raise
stats.count(f"{function}.success", tags={"expired_roles": len(roles_to_expire)})
return True
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_resources_from_aws_config_for_account(account_id) -> dict:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
s3_bucket = config.get("aws_config_cache.s3.bucket")
s3_key = config.get(
"aws_config_cache.s3.file", "aws_config_cache/cache_{account_id}_v1.json.gz"
).format(account_id=account_id)
dynamo = UserDynamoHandler()
# Only query in active region, otherwise get data from DDB
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
results = aws_config.query(
config.get(
"cache_all_resources_from_aws_config.aws_config.all_resources_query",
"select * where accountId = '{account_id}'",
).format(account_id=account_id),
use_aggregator=False,
account_id=account_id,
)
ttl: int = int((datetime.utcnow() + timedelta(hours=36)).timestamp())
redis_result_set = {}
for result in results:
result["ttl"] = ttl
if result.get("arn"):
if redis_result_set.get(result["arn"]):
continue
redis_result_set[result["arn"]] = json.dumps(result)
if redis_result_set:
async_to_sync(store_json_results_in_redis_and_s3)(
redis_result_set,
redis_key=config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
),
redis_data_type="hash",
s3_bucket=s3_bucket,
s3_key=s3_key,
)
dynamo.write_resource_cache_data(results)
else:
redis_result_set = async_to_sync(retrieve_json_data_from_redis_or_s3)(
s3_bucket=s3_bucket, s3_key=s3_key
)
async_to_sync(store_json_results_in_redis_and_s3)(
redis_result_set,
redis_key=config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
),
redis_data_type="hash",
)
log_data = {
"function": function,
"account_id": account_id,
"number_resources_synced": len(redis_result_set),
}
log.debug(log_data)
return log_data
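# Illustrative example of the rendered AWS Config query above (hypothetical account id):
#   select * where accountId = '123456789012'
# The default query can be overridden via the
# "cache_all_resources_from_aws_config.aws_config.all_resources_query" configuration key.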
@app.task(soft_time_limit=3600)
def cache_resources_from_aws_config_across_accounts() -> Dict[
str, Union[Union[str, int], Any]
]:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
resource_redis_cache_key = config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
)
log_data = {
"function": function,
"resource_redis_cache_key": resource_redis_cache_key,
}
tasks = []
# First, get list of accounts
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") in ["prod", "dev"]:
tasks.append(cache_resources_from_aws_config_for_account.s(account_id))
else:
if account_id in config.get("celery.test_account_ids", []):
tasks.append(cache_resources_from_aws_config_for_account.s(account_id))
if tasks:
results = group(*tasks).apply_async()
results.join()
# Delete roles in Redis cache with expired TTL
all_resources = red.hgetall(resource_redis_cache_key)
if all_resources:
expired_arns = []
for arn, resource_entry_j in all_resources.items():
resource_entry = ujson.loads(resource_entry_j)
if datetime.fromtimestamp(resource_entry["ttl"]) < datetime.utcnow():
expired_arns.append(arn)
if expired_arns:
for expired_arn in expired_arns:
all_resources.pop(expired_arn, None)
red.hdel(resource_redis_cache_key, *expired_arns)
log_data["number_of_resources"] = len(all_resources)
# Cache all resource ARNs into a single file. Note: This runs synchronously with this task. This task triggers
# resource collection on all accounts to happen asynchronously. That means when we store or delete data within
# this task, we're always going to be caching the results from the previous task.
if config.region == config.get(
"celery.active_region", config.region
) or config.get("environment") in ["dev"]:
# Refresh all resources after deletion of expired entries
all_resources = red.hgetall(resource_redis_cache_key)
s3_bucket = config.get("aws_config_cache_combined.s3.bucket")
s3_key = config.get(
"aws_config_cache_combined.s3.file",
"aws_config_cache_combined/aws_config_resource_cache_combined_v1.json.gz",
)
async_to_sync(store_json_results_in_redis_and_s3)(
all_resources, s3_bucket=s3_bucket, s3_key=s3_key
)
stats.count(f"{function}.success")
return log_data
@app.task(soft_time_limit=1800)
def get_iam_role_limit() -> dict:
"""
This function will gather the number of existing IAM Roles and IAM Role quota in all owned AWS accounts.
"""
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
num_accounts = 0
num_roles = 0
if not config.get("celery.get_iam_role_limit.enabled"):
return {}
success_message = "Not running - Inactive region"
if config.region == config.get(
"celery.active_region", config.region
) and config.get("environment") in ["prod", "dev"]:
@sts_conn("iam")
        def _get_delivery_channels(**kwargs) -> dict:
            """Gets the IAM account summary for the account/region -- calls are wrapped with CloudAux"""
return kwargs.pop("client").get_account_summary(**kwargs)
success_message = "Task successfully completed"
# First, get list of accounts
accounts_d: Dict = async_to_sync(get_account_id_to_name_mapping)()
num_accounts = len(accounts_d.keys())
for account_id, account_name in accounts_d.items():
try:
iam_summary = _get_delivery_channels(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
)
num_iam_roles = iam_summary["SummaryMap"]["Roles"]
iam_role_quota = iam_summary["SummaryMap"]["RolesQuota"]
iam_role_quota_ratio = num_iam_roles / iam_role_quota
num_roles += num_iam_roles
log_data = {
"function": function,
"message": "IAM role quota for account",
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"iam_role_quota_ratio": iam_role_quota_ratio,
"account_id": account_id,
"account_name": account_name,
}
stats.gauge(
f"{function}.quota_ratio_gauge",
iam_role_quota_ratio,
tags={
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"account_id": account_id,
"account_name": account_name,
},
)
log.debug(log_data)
except ClientError as e:
log_data = {
"function": function,
"message": "Error retrieving IAM quota",
"account_id": account_id,
"account_name": account_name,
"error": e,
}
stats.count(f"{function}.error", tags={"account_id": account_id})
log.error(log_data, exc_info=True)
sentry_sdk.capture_exception()
raise
log_data = {
"function": function,
"num_accounts": num_accounts,
"num_roles": num_roles,
"message": success_message,
}
log.debug(log_data)
return log_data
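# Worked example for the gauge emitted above (hypothetical numbers): an account with
# 350 IAM roles and a RolesQuota of 1000 yields iam_role_quota_ratio = 350 / 1000 = 0.35.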
@app.task(soft_time_limit=300)
def cache_policy_requests() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
requests = async_to_sync(cache_all_policy_requests)()
log_data = {
"function": function,
"num_requests": len(requests),
"message": "Successfully cached requests",
}
return log_data
@app.task(soft_time_limit=300)
def cache_cloud_account_mapping() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
account_mapping = async_to_sync(cache_cloud_accounts)()
log_data = {
"function": function,
"num_accounts": len(account_mapping.accounts),
"message": "Successfully cached cloud account mapping",
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_credential_authorization_mapping() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
authorization_mapping = async_to_sync(
generate_and_store_credential_authorization_mapping
)()
log_data = {
"function": function,
"message": "Successfully cached cloud credential authorization mapping",
"num_group_authorizations": len(authorization_mapping),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_scps_across_organizations() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
scps = async_to_sync(cache_all_scps)()
log_data = {
"function": function,
"message": "Successfully cached service control policies",
"num_organizations": len(scps),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_organization_structure() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
org_structure = async_to_sync(cache_org_structure)()
log_data = {
"function": function,
"message": "Successfully cached organization structure",
"num_organizations": len(org_structure),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_resource_templates_task() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
templated_file_array = async_to_sync(cache_resource_templates)()
log_data = {
"function": function,
"message": "Successfully cached resource templates",
"num_templated_files": len(templated_file_array.templated_resources),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_self_service_typeahead_task() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
self_service_typeahead = async_to_sync(cache_self_service_typeahead)()
log_data = {
"function": function,
"message": "Successfully cached roles and templates for self service typeahead",
"num_templated_files": len(self_service_typeahead.typeahead_entries),
}
log.debug(log_data)
return log_data
schedule_30_minute = timedelta(seconds=1800)
schedule_45_minute = timedelta(seconds=2700)
schedule_6_hours = timedelta(hours=6)
schedule_minute = timedelta(minutes=1)
schedule_5_minutes = timedelta(minutes=5)
schedule_24_hours = timedelta(hours=24)
schedule_1_hour = timedelta(hours=1)
if config.get("development", False):
    # In development mode, set up the schedule so jobs kick off at the next minute after startup
time_to_start = datetime.utcnow() + timedelta(minutes=1)
dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
schedule_30_minute = dev_schedule
schedule_45_minute = dev_schedule
schedule_1_hour = dev_schedule
schedule_6_hours = dev_schedule
schedule_5_minutes = dev_schedule
schedule = {
"cache_roles_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_roles_across_accounts",
"options": {"expires": 1000},
"schedule": schedule_45_minute,
},
"clear_old_redis_iam_cache": {
"task": "consoleme.celery_tasks.celery_tasks.clear_old_redis_iam_cache",
"options": {"expires": 180},
"schedule": schedule_6_hours,
},
"cache_policies_table_details": {
"task": "consoleme.celery_tasks.celery_tasks.cache_policies_table_details",
"options": {"expires": 1000},
"schedule": schedule_30_minute,
},
"report_celery_last_success_metrics": {
"task": "consoleme.celery_tasks.celery_tasks.report_celery_last_success_metrics",
"options": {"expires": 60},
"schedule": schedule_minute,
},
"cache_managed_policies_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_managed_policies_across_accounts",
"options": {"expires": 1000},
"schedule": schedule_45_minute,
},
"cache_s3_buckets_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_s3_buckets_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"cache_sqs_queues_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_sqs_queues_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"cache_sns_topics_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_sns_topics_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"get_iam_role_limit": {
"task": "consoleme.celery_tasks.celery_tasks.get_iam_role_limit",
"options": {"expires": 300},
"schedule": schedule_24_hours,
},
"cache_cloudtrail_errors_by_arn": {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
"options": {"expires": 300},
"schedule": schedule_1_hour,
},
"cache_resources_from_aws_config_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_resources_from_aws_config_across_accounts",
"options": {"expires": 300},
"schedule": schedule_1_hour,
},
"cache_policy_requests": {
"task": "consoleme.celery_tasks.celery_tasks.cache_policy_requests",
"options": {"expires": 1000},
"schedule": schedule_5_minutes,
},
"cache_cloud_account_mapping": {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloud_account_mapping",
"options": {"expires": 1000},
"schedule": schedule_1_hour,
},
"cache_credential_authorization_mapping": {
"task": "consoleme.celery_tasks.celery_tasks.cache_credential_authorization_mapping",
"options": {"expires": 1000},
"schedule": schedule_5_minutes,
},
"cache_scps_across_organizations": {
"task": "consoleme.celery_tasks.celery_tasks.cache_scps_across_organizations",
"options": {"expires": 1000},
"schedule": schedule_1_hour,
},
"cache_organization_structure": {
"task": "consoleme.celery_tasks.celery_tasks.cache_organization_structure",
"options": {"expires": 1000},
"schedule": schedule_1_hour,
},
"cache_resource_templates_task": {
"task": "consoleme.celery_tasks.celery_tasks.cache_resource_templates_task",
"options": {"expires": 1000},
"schedule": schedule_30_minute,
},
"cache_self_service_typeahead_task": {
"task": "consoleme.celery_tasks.celery_tasks.cache_self_service_typeahead_task",
"options": {"expires": 1000},
"schedule": schedule_30_minute,
},
}
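# Internal plugin tasks merged in below are expected to follow the same entry shape as the
# schedule above (hypothetical task path shown for illustration):
#   "cache_something_internal": {
#       "task": "consoleme_internal.celery_tasks.cache_something_internal",
#       "options": {"expires": 300},
#       "schedule": schedule_5_minutes,
#   },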
if internal_celery_tasks and isinstance(internal_celery_tasks, dict):
schedule = {**schedule, **internal_celery_tasks}
if config.get("celery.clear_tasks_for_development", False):
schedule = {}
app.conf.beat_schedule = schedule
app.conf.timezone = "UTC"
|
"""
This module defines celery tasks and their applicable schedules. The celery beat server and workers will start
when invoked. Please add internal-only celery tasks to the celery_tasks plugin.
When run in development mode (CONFIG_LOCATION=<location of development.yaml configuration file>), you can run both
the celery beat scheduler and a worker simultaneously, and have jobs kick off starting at the next minute, with the
following command: celery -A consoleme.celery_tasks.celery_tasks worker --loglevel=info -l DEBUG -B
"""
from __future__ import absolute_import
import json # We use a separate SetEncoder here so we cannot use ujson
import os
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
cache_all_scps,
cache_org_structure,
get_enabled_regions_for_account,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
)
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
asynpool.PROC_ALIVE_TIMEOUT = config.get("celery.asynpool_proc_alive_timeout", 60.0)
default_retry_kwargs = {
"autoretry_for": (Exception,),
"retry_backoff": True,
"retry_kwargs": {"max_retries": config.get("celery.default_max_retries", 5)},
}
class Celery(celery.Celery):
def on_configure(self) -> None:
sentry_dsn = config.get("sentry.dsn")
if sentry_dsn:
sentry_sdk.init(
sentry_dsn,
integrations=[
TornadoIntegration(),
CeleryIntegration(),
AioHttpIntegration(),
RedisIntegration(),
],
)
app = Celery(
"tasks",
broker=config.get(
f"celery.broker.{config.region}",
config.get("celery.broker.global", "redis://127.0.0.1:6379/1"),
),
backend=config.get(
f"celery.backend.{config.region}",
config.get("celery.broker.global", "redis://127.0.0.1:6379/2"),
),
)
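# Illustrative configuration for the broker/backend lookups above (region name and hosts
# are hypothetical; the region-specific keys are checked first, then the global fallback):
#   celery:
#     broker:
#       us-east-1: redis://celery-redis.example.com:6379/1
#       global: redis://127.0.0.1:6379/1
#     backend:
#       us-east-1: redis://celery-redis.example.com:6379/2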
if config.get("redis.use_redislite"):
import tempfile
import redislite
redislite_db_path = os.path.join(
config.get("redis.redislite.db_path", tempfile.NamedTemporaryFile().name)
)
redislite_client = redislite.Redis(redislite_db_path)
redislite_socket_path = f"redis+socket://{redislite_client.socket_file}"
app = Celery(
"tasks",
        # Point the broker/backend at the redislite unix socket; the database number is
        # selected via the redis+socket transport's virtual_host query parameter.
        broker=f"{redislite_socket_path}?virtual_host=1",
        backend=f"{redislite_socket_path}?virtual_host=2",
)
app.conf.result_expires = config.get("celery.result_expires", 60)
app.conf.worker_prefetch_multiplier = config.get("celery.worker_prefetch_multiplier", 4)
app.conf.task_acks_late = config.get("celery.task_acks_late", True)
if config.get("celery.purge") and not config.get("redis.use_redislite"):
# Useful to clear celery queue in development
with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
app.control.purge()
log = config.get_logger()
red = RedisHandler().redis_sync()
aws = get_plugin_by_name(config.get("plugins.aws", "default_aws"))
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
group_mapping = get_plugin_by_name(
config.get("plugins.group_mapping", "default_group_mapping")
)()
internal_celery_tasks = get_plugin_by_name(
config.get("plugins.internal_celery_tasks", "default_celery_tasks")
)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
internal_policies = get_plugin_by_name(
config.get("plugins.internal_policies", "default_policies")
)()
REDIS_IAM_COUNT = 1000
@app.task(soft_time_limit=20)
def report_celery_last_success_metrics() -> bool:
"""
For each celery task, this will determine the number of seconds since it has last been successful.
    Celery tasks should be emitting redis stats with a deterministic key (in our case, `f"{task}.last_success"`).
    report_celery_last_success_metrics should be run periodically to emit metrics on when a task was last successful.
    We can then alert when tasks are not run when intended. We should also alert when no metrics are emitted
from this function.
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {"function": function}
current_time = int(time.time())
global schedule
for _, t in schedule.items():
task = t.get("task")
last_success = int(red.get(f"{task}.last_success") or 0)
if last_success == 0:
log_data["message"] = "Last Success Value is 0"
log_data["task_last_success_key"] = f"{task}.last_success"
log.error(log_data)
stats.gauge(f"{task}.time_since_last_success", current_time - last_success)
red.set(f"{task}.time_since_last_success", current_time - last_success)
red.set(
f"{function}.last_success", int(time.time())
) # Alert if this metric is not seen
stats.count(f"{function}.success")
stats.timer("worker.healthy")
return True
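# The "{task}.last_success" keys consumed above are written by the report_successful_task
# signal handler further down in this module. A reasonable (assumed, not prescribed) alerting
# rule is to page when a task's time_since_last_success gauge exceeds roughly twice its
# schedule interval, e.g. > 5400 seconds for a task on schedule_45_minute.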
def get_celery_request_tags(**kwargs):
request = kwargs.get("request")
sender_hostname = "unknown"
sender = kwargs.get("sender")
if sender:
try:
sender_hostname = sender.hostname
except AttributeError:
sender_hostname = vars(sender.request).get("origin", "unknown")
if request and not isinstance(
request, Context
): # unlike others, task_revoked sends a Context for `request`
task_name = request.name
task_id = request.id
receiver_hostname = request.hostname
else:
try:
task_name = sender.name
except AttributeError:
task_name = kwargs.pop("name", "")
try:
task_id = sender.request.id
except AttributeError:
task_id = kwargs.pop("id", "")
try:
receiver_hostname = sender.request.hostname
except AttributeError:
receiver_hostname = ""
tags = {
"task_name": task_name,
"task_id": task_id,
"sender_hostname": sender_hostname,
"receiver_hostname": receiver_hostname,
}
tags["expired"] = kwargs.get("expired", False)
exception = kwargs.get("exception")
if not exception:
exception = kwargs.get("exc")
if exception:
tags["error"] = repr(exception)
if isinstance(exception, SoftTimeLimitExceeded):
tags["timed_out"] = True
return tags
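# Illustrative return value for a task_failure signal (all values hypothetical):
#   {
#       "task_name": "consoleme.celery_tasks.celery_tasks.cache_roles_for_account",
#       "task_id": "8c0f3a52-...",
#       "sender_hostname": "celery@worker-1",
#       "receiver_hostname": "celery@worker-1",
#       "expired": False,
#       "error": "SoftTimeLimitExceeded()",
#       "timed_out": True,
#   }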
@task_prerun.connect
def refresh_dynamic_config_in_worker(**kwargs):
tags = get_celery_request_tags(**kwargs)
log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
dynamic_config = red.get("DYNAMIC_CONFIG_CACHE")
if not dynamic_config:
log.error({**log_data, "error": "Unable to retrieve Dynamic Config from Redis"})
return
dynamic_config_j = json.loads(dynamic_config)
if config.CONFIG.config.get("dynamic_config", {}) != dynamic_config_j:
log.debug(
{
**log_data,
**tags,
"message": "Refreshing dynamic configuration on Celery Worker",
}
)
config.CONFIG.config["dynamic_config"] = dynamic_config_j
@task_received.connect
def report_number_pending_tasks(**kwargs):
"""
Report the number of pending tasks to our metrics broker every time a task is published. This metric can be used
for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-received
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
tags = get_celery_request_tags(**kwargs)
tags.pop("task_id", None)
stats.timer("celery.new_pending_task", tags=tags)
@task_success.connect
def report_successful_task(**kwargs):
"""
    Report a generic success metric to our metrics broker every time a task finishes correctly.
This metric can be used for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-success
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
tags = get_celery_request_tags(**kwargs)
red.set(f"{tags['task_name']}.last_success", int(time.time()))
tags.pop("error", None)
tags.pop("task_id", None)
stats.timer("celery.successful_task", tags=tags)
@task_retry.connect
def report_task_retry(**kwargs):
"""
    Report a generic retry metric to our metrics broker every time a task is retried.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-retry
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Retry",
}
# Add traceback if exception info is in the kwargs
einfo = kwargs.get("einfo")
if einfo:
log_data["traceback"] = einfo.traceback
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.retried_task", tags=error_tags)
@task_failure.connect
def report_failed_task(**kwargs):
"""
    Report a generic failure metric to our metrics broker every time a task fails. This is also called when
a task has hit a SoftTimeLimit.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-failure
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Failure",
}
# Add traceback if exception info is in the kwargs
einfo = kwargs.get("einfo")
if einfo:
log_data["traceback"] = einfo.traceback
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.failed_task", tags=error_tags)
@task_unknown.connect
def report_unknown_task(**kwargs):
"""
    Report a generic failure metric to our metrics broker every time a worker receives an unknown task.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-unknown
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Unknown",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.unknown_task", tags=error_tags)
@task_rejected.connect
def report_rejected_task(**kwargs):
"""
    Report a generic failure metric to our metrics broker every time a task is rejected.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-rejected
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Rejected",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.rejected_task", tags=error_tags)
@task_revoked.connect
def report_revoked_task(**kwargs):
"""
    Report a generic failure metric to our metrics broker every time a task is revoked.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-revoked
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Revoked",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.revoked_task", tags=error_tags)
@retry(
stop_max_attempt_number=4,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
)
def _add_role_to_redis(redis_key: str, role_entry: Dict) -> None:
"""
This function will add IAM role data to redis so that policy details can be quickly retrieved by the policies
endpoint.
IAM role data is stored in the `redis_key` redis key by the role's ARN.
Parameters
----------
redis_key : str
The redis key (hash)
role_entry : Dict
The role entry
Example: {'name': 'nameOfRole', 'accountId': '123456789012', 'arn': 'arn:aws:iam::123456789012:role/nameOfRole',
'templated': None, 'ttl': 1562510908, 'policy': '<json_formatted_policy>'}
"""
try:
red.hset(redis_key, role_entry["arn"], json.dumps(role_entry))
except Exception as e: # noqa
stats.count(
"cache_roles_for_account.error",
tags={"redis_key": redis_key, "error": str(e), "role_entry": role_entry},
)
log_data = {
"message": "Error syncing Account's IAM roles to Redis",
"account_id": role_entry["account_id"],
"arn": role_entry["arn"],
"role_entry": role_entry,
}
log.error(log_data, exc_info=True)
raise
@app.task(soft_time_limit=3600)
def cache_cloudtrail_errors_by_arn() -> Dict:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data: Dict = {"function": function}
cloudtrail_errors: Dict = internal_policies.error_count_by_arn()
if not cloudtrail_errors:
cloudtrail_errors = {}
red.setex(
config.get(
"celery.cache_cloudtrail_errors_by_arn.redis_key",
"CLOUDTRAIL_ERRORS_BY_ARN",
),
86400,
json.dumps(cloudtrail_errors),
)
log_data["number_of_roles_with_errors"]: len(cloudtrail_errors.keys())
log.debug(log_data)
return log_data
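# Note: the cached error counts above are written with a 24-hour (86400 second) expiry while
# the task itself is scheduled hourly, so consumers such as cache_policies_table_details still
# see data even if a few runs fail.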
@app.task(soft_time_limit=1800)
def cache_policies_table_details() -> bool:
iam_role_redis_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
all_iam_roles = red.hgetall(iam_role_redis_key)
items = []
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
cloudtrail_errors = {}
cloudtrail_errors_j = red.get(
config.get(
"celery.cache_cloudtrail_errors_by_arn.redis_key",
"CLOUDTRAIL_ERRORS_BY_ARN",
)
)
if cloudtrail_errors_j:
cloudtrail_errors = json.loads(cloudtrail_errors_j)
s3_error_topic = config.get("redis.s3_errors", "S3_ERRORS")
all_s3_errors = red.get(s3_error_topic)
s3_errors = {}
if all_s3_errors:
s3_errors = json.loads(all_s3_errors)
for arn, role_details_j in all_iam_roles.items():
role_details = ujson.loads(role_details_j)
error_count = cloudtrail_errors.get(arn, 0)
s3_errors_for_arn = s3_errors.get(arn, [])
for error in s3_errors_for_arn:
error_count += int(error.get("count"))
account_id = arn.split(":")[4]
account_name = accounts_d.get(str(account_id), "Unknown")
resource_id = role_details.get("resourceId")
items.append(
{
"account_id": account_id,
"account_name": account_name,
"arn": arn,
"technology": "iam",
"templated": red.hget(
config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
arn.lower(),
),
"errors": error_count,
"config_history_url": async_to_sync(
get_aws_config_history_url_for_resource
)(account_id, resource_id, arn, "AWS::IAM::Role"),
}
)
s3_bucket_key: str = config.get("redis.s3_bucket_key", "S3_BUCKETS")
s3_accounts = red.hkeys(s3_bucket_key)
if s3_accounts:
for account in s3_accounts:
account_name = accounts_d.get(str(account), "Unknown")
buckets = json.loads(red.hget(s3_bucket_key, account))
for bucket in buckets:
bucket_arn = f"arn:aws:s3:::{bucket}"
s3_errors_for_arn = s3_errors.get(bucket_arn, [])
error_count = 0
for error in s3_errors_for_arn:
error_count += int(error.get("count"))
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": f"arn:aws:s3:::{bucket}",
"technology": "s3",
"templated": None,
"errors": error_count,
}
)
sns_topic_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
sns_accounts = red.hkeys(sns_topic_key)
if sns_accounts:
for account in sns_accounts:
account_name = accounts_d.get(str(account), "Unknown")
topics = json.loads(red.hget(sns_topic_key, account))
for topic in topics:
error_count = 0
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": topic,
"technology": "sns",
"templated": None,
"errors": error_count,
}
)
sqs_queue_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
sqs_accounts = red.hkeys(sqs_queue_key)
if sqs_accounts:
for account in sqs_accounts:
account_name = accounts_d.get(str(account), "Unknown")
queues = json.loads(red.hget(sqs_queue_key, account))
for queue in queues:
error_count = 0
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": queue,
"technology": "sqs",
"templated": None,
"errors": error_count,
}
)
resources_from_aws_config_redis_key: str = config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
)
resources_from_aws_config = red.hgetall(resources_from_aws_config_redis_key)
if resources_from_aws_config:
for arn, value in resources_from_aws_config.items():
resource = json.loads(value)
technology = resource["resourceType"]
# Skip technologies that we retrieve directly
if technology in [
"AWS::IAM::Role",
"AWS::SQS::Queue",
"AWS::SNS::Topic",
"AWS::S3::Bucket",
]:
continue
account_id = arn.split(":")[4]
account_name = accounts_d.get(account_id, "Unknown")
items.append(
{
"account_id": account_id,
"account_name": account_name,
"arn": arn,
"technology": technology,
"templated": None,
"errors": 0,
}
)
s3_bucket = None
s3_key = None
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("cache_policies_table_details.s3.bucket")
s3_key = config.get(
"cache_policies_table_details.s3.file",
"policies_table/cache_policies_table_details_v1.json.gz",
)
async_to_sync(store_json_results_in_redis_and_s3)(
items,
redis_key=config.get("policies.redis_policies_key", "ALL_POLICIES"),
s3_bucket=s3_bucket,
s3_key=s3_key,
)
stats.count(
"cache_policies_table_details.success",
tags={"num_roles": len(all_iam_roles.keys())},
)
return True
@app.task(soft_time_limit=2700, **default_retry_kwargs)
def cache_roles_for_account(account_id: str) -> bool:
# Get the DynamoDB handler:
dynamo = IAMRoleDynamoHandler()
cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
# Only query IAM and put data in Dynamo if we're in the active region
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
client = boto3_cached_conn(
"iam",
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
)
paginator = client.get_paginator("get_account_authorization_details")
response_iterator = paginator.paginate()
all_iam_resources = {}
for response in response_iterator:
if not all_iam_resources:
all_iam_resources = response
else:
all_iam_resources["UserDetailList"].extend(response["UserDetailList"])
all_iam_resources["GroupDetailList"].extend(response["GroupDetailList"])
all_iam_resources["RoleDetailList"].extend(response["RoleDetailList"])
all_iam_resources["Policies"].extend(response["Policies"])
for k in response.keys():
if k not in [
"UserDetailList",
"GroupDetailList",
"RoleDetailList",
"Policies",
"ResponseMetadata",
"Marker",
"IsTruncated",
]:
# Fail hard if we find something unexpected
raise RuntimeError("Unexpected key {0} in response".format(k))
# Store entire response in S3
async_to_sync(store_json_results_in_redis_and_s3)(
all_iam_resources,
s3_bucket=config.get("cache_iam_resources_for_account.s3.bucket"),
s3_key=config.get(
"cache_iam_resources_for_account.s3.file",
"get_account_authorization_details/get_account_authorization_details_{account_id}_v1.json.gz",
).format(account_id=account_id),
)
iam_roles = all_iam_resources["RoleDetailList"]
async_to_sync(store_json_results_in_redis_and_s3)(
iam_roles,
s3_bucket=config.get("cache_roles_for_account.s3.bucket"),
s3_key=config.get(
"cache_roles_for_account.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="iam_roles", account_id=account_id),
)
ttl: int = int((datetime.utcnow() + timedelta(hours=36)).timestamp())
# Save them:
for role in iam_roles:
role_entry = {
"arn": role.get("Arn"),
"name": role.get("RoleName"),
"resourceId": role.get("RoleId"),
"accountId": account_id,
"ttl": ttl,
"policy": dynamo.convert_role_to_json(role),
"templated": red.hget(
config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
role.get("Arn").lower(),
),
}
# DynamoDB:
dynamo.sync_iam_role_for_account(role_entry)
# Redis:
_add_role_to_redis(cache_key, role_entry)
# Run internal function on role. This can be used to inspect roles, add managed policies, or other actions
aws().handle_detected_role(role)
# Maybe store all resources in git
if config.get("cache_iam_resources_for_account.store_in_git.enabled"):
store_iam_resources_in_git(all_iam_resources, account_id)
stats.count("cache_roles_for_account.success", tags={"account_id": account_id})
return True
@app.task(soft_time_limit=3600)
def cache_roles_across_accounts() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
log_data = {"function": function, "cache_key": cache_key}
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
tasks = []
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev"]:
# First, get list of accounts
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") in ["prod", "dev"]:
tasks.append(cache_roles_for_account.s(account_id))
else:
if account_id in config.get("celery.test_account_ids", []):
tasks.append(cache_roles_for_account.s(account_id))
results = group(*tasks).apply_async()
# results.join() forces function to wait until all tasks are complete
results.join()
else:
dynamo = IAMRoleDynamoHandler()
# In non-active regions, we just want to sync DDB data to Redis
roles = dynamo.fetch_all_roles()
for role_entry in roles:
_add_role_to_redis(cache_key, role_entry)
# Delete roles in Redis cache with expired TTL
all_roles = red.hgetall(cache_key)
roles_to_delete_from_cache = []
for arn, role_entry_j in all_roles.items():
role_entry = json.loads(role_entry_j)
if datetime.fromtimestamp(role_entry["ttl"]) < datetime.utcnow():
roles_to_delete_from_cache.append(arn)
if roles_to_delete_from_cache:
red.hdel(cache_key, *roles_to_delete_from_cache)
for arn in roles_to_delete_from_cache:
all_roles.pop(arn, None)
log_data["num_roles"] = len(all_roles)
# Store full list of roles in a single place. This list will be ~30 minutes out of date.
async_to_sync(store_json_results_in_redis_and_s3)(
all_roles,
redis_key=cache_key,
redis_data_type="hash",
s3_bucket=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.bucket"
),
s3_key=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.file",
"account_resource_cache/cache_all_roles_v1.json.gz",
),
)
stats.count(f"{function}.success")
log_data["num_accounts"] = len(accounts_d)
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_managed_policies_for_account(account_id: str) -> Dict[str, Union[str, int]]:
managed_policies: List[Dict] = get_all_managed_policies(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
)
all_policies: List = []
for policy in managed_policies:
all_policies.append(policy.get("Arn"))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_managed_policies": len(all_policies),
}
log.debug(log_data)
stats.count(
"cache_managed_policies_for_account",
tags={"account_id": account_id, "num_managed_policies": len(all_policies)},
)
policy_key = config.get("redis.iam_managed_policies_key", "IAM_MANAGED_POLICIES")
red.hset(policy_key, account_id, json.dumps(all_policies))
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get(
"account_resource_cache.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="managed_policies", account_id=account_id)
async_to_sync(store_json_results_in_redis_and_s3)(
all_policies, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=120)
def cache_managed_policies_across_accounts() -> bool:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_managed_policies_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_managed_policies_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_s3_buckets_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
    accounts_d: Dict = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_s3_buckets_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_s3_buckets_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_sqs_queues_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
    accounts_d: Dict = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_sqs_queues_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_sqs_queues_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_sns_topics_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
    accounts_d: Dict = async_to_sync(get_account_id_to_name_mapping)()
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_sns_topics_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_sns_topics_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_sqs_queues_for_account(account_id: str) -> Dict[str, Union[str, int]]:
all_queues: set = set()
enabled_regions = async_to_sync(get_enabled_regions_for_account)(account_id)
for region in enabled_regions:
client = boto3_cached_conn(
"sqs",
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=region,
read_only=True,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
)
paginator = client.get_paginator("list_queues")
response_iterator = paginator.paginate(PaginationConfig={"PageSize": 1000})
for res in response_iterator:
for queue in res.get("QueueUrls", []):
arn = f"arn:aws:sqs:{region}:{account_id}:{queue.split('/')[4]}"
all_queues.add(arn)
sqs_queue_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
red.hset(sqs_queue_key, account_id, json.dumps(list(all_queues)))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_sqs_queues": len(all_queues),
}
log.debug(log_data)
stats.count(
"cache_sqs_queues_for_account",
tags={"account_id": account_id, "number_sqs_queues": len(all_queues)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get(
"account_resource_cache.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="sqs_queues", account_id=account_id)
async_to_sync(store_json_results_in_redis_and_s3)(
all_queues, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_sns_topics_for_account(account_id: str) -> Dict[str, Union[str, int]]:
# Make sure it is regional
all_topics: set = set()
enabled_regions = async_to_sync(get_enabled_regions_for_account)(account_id)
for region in enabled_regions:
topics = list_topics(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=region,
read_only=True,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
)
for topic in topics:
all_topics.add(topic["TopicArn"])
sns_topic_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
red.hset(sns_topic_key, account_id, json.dumps(list(all_topics)))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_sns_topics": len(all_topics),
}
log.debug(log_data)
stats.count(
"cache_sns_topics_for_account",
tags={"account_id": account_id, "number_sns_topics": len(all_topics)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get(
"account_resource_cache.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="sns_topics", account_id=account_id)
async_to_sync(store_json_results_in_redis_and_s3)(
all_topics, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_s3_buckets_for_account(account_id: str) -> Dict[str, Union[str, int]]:
s3_buckets: List = list_buckets(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
read_only=True,
)
buckets: List = []
for bucket in s3_buckets["Buckets"]:
buckets.append(bucket["Name"])
s3_bucket_key: str = config.get("redis.s3_buckets_key", "S3_BUCKETS")
red.hset(s3_bucket_key, account_id, json.dumps(buckets))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_s3_buckets": len(buckets),
}
log.debug(log_data)
stats.count(
"cache_s3_buckets_for_account",
tags={"account_id": account_id, "number_s3_buckets": len(buckets)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get(
"account_resource_cache.s3.file",
"account_resource_cache/cache_{resource_type}_{account_id}_v1.json.gz",
).format(resource_type="s3_buckets", account_id=account_id)
async_to_sync(store_json_results_in_redis_and_s3)(
buckets, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@retry(
stop_max_attempt_number=4,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
)
def _scan_redis_iam_cache(
cache_key: str, index: int, count: int
) -> Tuple[int, Dict[str, str]]:
return red.hscan(cache_key, index, count=count)
@app.task(soft_time_limit=1800)
def clear_old_redis_iam_cache() -> bool:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
# Do not run if this is not in the active region:
if config.region != config.get("celery.active_region", config.region):
return False
# Need to loop over all items in the set:
cache_key: str = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
index: int = 0
expire_ttl: int = int((datetime.utcnow() - timedelta(hours=6)).timestamp())
roles_to_expire = []
# We will loop over REDIS_IAM_COUNT items at a time:
try:
while True:
results = _scan_redis_iam_cache(cache_key, index, REDIS_IAM_COUNT)
index = results[0]
# Verify if the role is too old:
for arn, role in results[1].items():
role = json.loads(role)
if role["ttl"] <= expire_ttl:
roles_to_expire.append(arn)
# We will be complete if the next index is 0:
if not index:
break
except: # noqa
log_data = {
"function": function,
"message": "Error retrieving roles from Redis for cache cleanup.",
}
log.error(log_data, exc_info=True)
raise
# Delete all the roles that we need to delete:
try:
if roles_to_expire:
red.hdel(cache_key, *roles_to_expire)
except: # noqa
log_data = {
"function": function,
"message": "Error deleting roles from Redis for cache cleanup.",
}
log.error(log_data, exc_info=True)
raise
stats.count(f"{function}.success", tags={"expired_roles": len(roles_to_expire)})
return True
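# Interplay with cache_roles_for_account (above): roles are cached with a TTL 36 hours in the
# future, and this cleanup expires entries whose TTL is more than 6 hours in the past, so a
# role that stops being refreshed is evicted roughly 42 hours after its last refresh.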
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_resources_from_aws_config_for_account(account_id) -> dict:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
s3_bucket = config.get("aws_config_cache.s3.bucket")
s3_key = config.get(
"aws_config_cache.s3.file", "aws_config_cache/cache_{account_id}_v1.json.gz"
).format(account_id=account_id)
dynamo = UserDynamoHandler()
# Only query in active region, otherwise get data from DDB
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
results = aws_config.query(
config.get(
"cache_all_resources_from_aws_config.aws_config.all_resources_query",
"select * where accountId = '{account_id}'",
).format(account_id=account_id),
use_aggregator=False,
account_id=account_id,
)
ttl: int = int((datetime.utcnow() + timedelta(hours=36)).timestamp())
redis_result_set = {}
for result in results:
result["ttl"] = ttl
if result.get("arn"):
if redis_result_set.get(result["arn"]):
continue
redis_result_set[result["arn"]] = json.dumps(result)
if redis_result_set:
async_to_sync(store_json_results_in_redis_and_s3)(
redis_result_set,
redis_key=config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
),
redis_data_type="hash",
s3_bucket=s3_bucket,
s3_key=s3_key,
)
dynamo.write_resource_cache_data(results)
else:
redis_result_set = async_to_sync(retrieve_json_data_from_redis_or_s3)(
s3_bucket=s3_bucket, s3_key=s3_key
)
async_to_sync(store_json_results_in_redis_and_s3)(
redis_result_set,
redis_key=config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
),
redis_data_type="hash",
)
log_data = {
"function": function,
"account_id": account_id,
"number_resources_synced": len(redis_result_set),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=3600)
def cache_resources_from_aws_config_across_accounts() -> Dict[
str, Union[Union[str, int], Any]
]:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
resource_redis_cache_key = config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
)
log_data = {
"function": function,
"resource_redis_cache_key": resource_redis_cache_key,
}
tasks = []
# First, get list of accounts
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") in ["prod", "dev"]:
tasks.append(cache_resources_from_aws_config_for_account.s(account_id))
else:
if account_id in config.get("celery.test_account_ids", []):
tasks.append(cache_resources_from_aws_config_for_account.s(account_id))
if tasks:
results = group(*tasks).apply_async()
results.join()
# Delete roles in Redis cache with expired TTL
all_resources = red.hgetall(resource_redis_cache_key)
if all_resources:
expired_arns = []
for arn, resource_entry_j in all_resources.items():
resource_entry = ujson.loads(resource_entry_j)
if datetime.fromtimestamp(resource_entry["ttl"]) < datetime.utcnow():
expired_arns.append(arn)
if expired_arns:
for expired_arn in expired_arns:
all_resources.pop(expired_arn, None)
red.hdel(resource_redis_cache_key, *expired_arns)
log_data["number_of_resources"] = len(all_resources)
# Cache all resource ARNs into a single file. Note: This runs synchronously with this task. This task triggers
# resource collection on all accounts to happen asynchronously. That means when we store or delete data within
# this task, we're always going to be caching the results from the previous task.
if config.region == config.get(
"celery.active_region", config.region
) or config.get("environment") in ["dev"]:
# Refresh all resources after deletion of expired entries
all_resources = red.hgetall(resource_redis_cache_key)
s3_bucket = config.get("aws_config_cache_combined.s3.bucket")
s3_key = config.get(
"aws_config_cache_combined.s3.file",
"aws_config_cache_combined/aws_config_resource_cache_combined_v1.json.gz",
)
async_to_sync(store_json_results_in_redis_and_s3)(
all_resources, s3_bucket=s3_bucket, s3_key=s3_key
)
stats.count(f"{function}.success")
return log_data
@app.task(soft_time_limit=1800)
def get_iam_role_limit() -> dict:
"""
This function will gather the number of existing IAM Roles and IAM Role quota in all owned AWS accounts.
"""
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
num_accounts = 0
num_roles = 0
if not config.get("celery.get_iam_role_limit.enabled"):
return {}
success_message = "Not running - Inactive region"
if config.region == config.get(
"celery.active_region", config.region
) and config.get("environment") in ["prod", "dev"]:
@sts_conn("iam")
        def _get_delivery_channels(**kwargs) -> dict:
            """Gets the IAM account summary for the account/region -- calls are wrapped with CloudAux"""
return kwargs.pop("client").get_account_summary(**kwargs)
success_message = "Task successfully completed"
# First, get list of accounts
accounts_d: Dict = async_to_sync(get_account_id_to_name_mapping)()
num_accounts = len(accounts_d.keys())
for account_id, account_name in accounts_d.items():
try:
iam_summary = _get_delivery_channels(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
)
num_iam_roles = iam_summary["SummaryMap"]["Roles"]
iam_role_quota = iam_summary["SummaryMap"]["RolesQuota"]
iam_role_quota_ratio = num_iam_roles / iam_role_quota
num_roles += num_iam_roles
log_data = {
"function": function,
"message": "IAM role quota for account",
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"iam_role_quota_ratio": iam_role_quota_ratio,
"account_id": account_id,
"account_name": account_name,
}
stats.gauge(
f"{function}.quota_ratio_gauge",
iam_role_quota_ratio,
tags={
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"account_id": account_id,
"account_name": account_name,
},
)
log.debug(log_data)
except ClientError as e:
log_data = {
"function": function,
"message": "Error retrieving IAM quota",
"account_id": account_id,
"account_name": account_name,
"error": e,
}
stats.count(f"{function}.error", tags={"account_id": account_id})
log.error(log_data, exc_info=True)
sentry_sdk.capture_exception()
raise
log_data = {
"function": function,
"num_accounts": num_accounts,
"num_roles": num_roles,
"message": success_message,
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=300)
def cache_policy_requests() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
requests = async_to_sync(cache_all_policy_requests)()
log_data = {
"function": function,
"num_requests": len(requests),
"message": "Successfully cached requests",
}
return log_data
@app.task(soft_time_limit=300)
def cache_cloud_account_mapping() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
account_mapping = async_to_sync(cache_cloud_accounts)()
log_data = {
"function": function,
"num_accounts": len(account_mapping.accounts),
"message": "Successfully cached cloud account mapping",
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_credential_authorization_mapping() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
authorization_mapping = async_to_sync(
generate_and_store_credential_authorization_mapping
)()
log_data = {
"function": function,
"message": "Successfully cached cloud credential authorization mapping",
"num_group_authorizations": len(authorization_mapping),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_scps_across_organizations() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
scps = async_to_sync(cache_all_scps)()
log_data = {
"function": function,
"message": "Successfully cached service control policies",
"num_organizations": len(scps),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_organization_structure() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
org_structure = async_to_sync(cache_org_structure)()
log_data = {
"function": function,
"message": "Successfully cached organization structure",
"num_organizations": len(org_structure),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_resource_templates_task() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
templated_file_array = async_to_sync(cache_resource_templates)()
log_data = {
"function": function,
"message": "Successfully cached resource templates",
"num_templated_files": len(templated_file_array.templated_resources),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_self_service_typeahead_task() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
self_service_typeahead = async_to_sync(cache_self_service_typeahead)()
log_data = {
"function": function,
"message": "Successfully cached roles and templates for self service typeahead",
"num_templated_files": len(self_service_typeahead.typeahead_entries),
}
log.debug(log_data)
return log_data
schedule_30_minute = timedelta(seconds=1800)
schedule_45_minute = timedelta(seconds=2700)
schedule_6_hours = timedelta(hours=6)
schedule_minute = timedelta(minutes=1)
schedule_5_minutes = timedelta(minutes=5)
schedule_24_hours = timedelta(hours=24)
schedule_1_hour = timedelta(hours=1)
if config.get("development", False):
    # In development mode, set up the schedule so every task runs one minute after startup
time_to_start = datetime.utcnow() + timedelta(minutes=1)
dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
schedule_30_minute = dev_schedule
schedule_45_minute = dev_schedule
schedule_1_hour = dev_schedule
schedule_6_hours = dev_schedule
schedule_5_minutes = dev_schedule
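    # Note: celery's crontab(hour=..., minute=...) fires once per day at that time, so in
    # development every task below fires together roughly one minute after startup.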
schedule = {
"cache_roles_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_roles_across_accounts",
"options": {"expires": 1000},
"schedule": schedule_45_minute,
},
"clear_old_redis_iam_cache": {
"task": "consoleme.celery_tasks.celery_tasks.clear_old_redis_iam_cache",
"options": {"expires": 180},
"schedule": schedule_6_hours,
},
"cache_policies_table_details": {
"task": "consoleme.celery_tasks.celery_tasks.cache_policies_table_details",
"options": {"expires": 1000},
"schedule": schedule_30_minute,
},
"report_celery_last_success_metrics": {
"task": "consoleme.celery_tasks.celery_tasks.report_celery_last_success_metrics",
"options": {"expires": 60},
"schedule": schedule_minute,
},
"cache_managed_policies_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_managed_policies_across_accounts",
"options": {"expires": 1000},
"schedule": schedule_45_minute,
},
"cache_s3_buckets_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_s3_buckets_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"cache_sqs_queues_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_sqs_queues_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"cache_sns_topics_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_sns_topics_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"get_iam_role_limit": {
"task": "consoleme.celery_tasks.celery_tasks.get_iam_role_limit",
"options": {"expires": 300},
"schedule": schedule_24_hours,
},
"cache_cloudtrail_errors_by_arn": {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
"options": {"expires": 300},
"schedule": schedule_1_hour,
},
"cache_resources_from_aws_config_across_accounts": {
"task": "consoleme.celery_tasks.celery_tasks.cache_resources_from_aws_config_across_accounts",
"options": {"expires": 300},
"schedule": schedule_1_hour,
},
"cache_policy_requests": {
"task": "consoleme.celery_tasks.celery_tasks.cache_policy_requests",
"options": {"expires": 1000},
"schedule": schedule_5_minutes,
},
"cache_cloud_account_mapping": {
"task": "consoleme.celery_tasks.celery_tasks.cache_cloud_account_mapping",
"options": {"expires": 1000},
"schedule": schedule_1_hour,
},
"cache_credential_authorization_mapping": {
"task": "consoleme.celery_tasks.celery_tasks.cache_credential_authorization_mapping",
"options": {"expires": 1000},
"schedule": schedule_5_minutes,
},
"cache_scps_across_organizations": {
"task": "consoleme.celery_tasks.celery_tasks.cache_scps_across_organizations",
"options": {"expires": 1000},
"schedule": schedule_1_hour,
},
"cache_organization_structure": {
"task": "consoleme.celery_tasks.celery_tasks.cache_organization_structure",
"options": {"expires": 1000},
"schedule": schedule_1_hour,
},
"cache_resource_templates_task": {
"task": "consoleme.celery_tasks.celery_tasks.cache_resource_templates_task",
"options": {"expires": 1000},
"schedule": schedule_30_minute,
},
"cache_self_service_typeahead_task": {
"task": "consoleme.celery_tasks.celery_tasks.cache_self_service_typeahead_task",
"options": {"expires": 1000},
"schedule": schedule_30_minute,
},
}
if internal_celery_tasks and isinstance(internal_celery_tasks, dict):
schedule = {**schedule, **internal_celery_tasks}
if config.get("celery.clear_tasks_for_development", False):
schedule = {}
app.conf.beat_schedule = schedule
app.conf.timezone = "UTC"
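# Deployment note (assumptions about how this app is run, not taken from this module):
# the beat schedule above only takes effect when a beat scheduler and at least one worker
# are started against this app, e.g. with the standard Celery CLI:
#
#     celery -A consoleme.celery_tasks.celery_tasks beat -l info
#     celery -A consoleme.celery_tasks.celery_tasks worker -l info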
|
import pathlib
import os
from typing import Optional
import vswhere
def find_cmake() -> Optional[pathlib.Path]:
# search in PATH
    for p in os.getenv('PATH', '').split(';'):
cmake = pathlib.Path(p) / 'cmake.exe'
if cmake.exists():
return cmake
# default path
cmake = pathlib.Path("C:/Program Files/CMake/bin/cmake.exe")
if cmake.exists():
return cmake
# visual studio
path = vswhere.get_latest_path()
if path:
vspath = pathlib.Path(path)
cmake = vspath / 'Common7/IDE/CommonExtensions/Microsoft/CMake/CMake/bin/cmake.exe'
if cmake.exists():
# add path to MSBuild
msbuild_path = vspath / 'MSBuild\\Current\\Bin'
            os.environ['PATH'] = f'{msbuild_path};{os.environ["PATH"]}'
return cmake
return None
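if __name__ == '__main__':
    # Minimal usage sketch (illustrative addition, not part of the original helper):
    # print the first CMake executable found, or a short message if none was located.
    found = find_cmake()
    print(found if found is not None else 'cmake.exe not found')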
|
import pathlib
import os
from typing import Optional
import vswhere
def find_cmake() -> Optional[pathlib.Path]:
# search in PATH
    for p in os.getenv('PATH', '').split(';'):
cmake = pathlib.Path(p) / 'cmake.exe'
if cmake.exists():
return cmake
# default path
cmake = pathlib.Path("C:/Program Files/CMake/bin/cmake.exe")
if cmake.exists():
return cmake
# visual studio
path = vswhere.get_latest_path()
if path:
vspath = pathlib.Path(path)
cmake = vspath / 'Common7/IDE/CommonExtensions/Microsoft/CMake/CMake/bin/cmake.exe'
if cmake.exists():
# add path to MSBuild
msbuild_path = vspath / 'MSBuild\\Current\\Bin'
os.environ['PATH'] = f'{msbuild_path};{os.environ["PATH"]}'
return cmake
return None
|
"""Script to check the configuration file."""
import argparse
import asyncio
from collections import OrderedDict
from collections.abc import Mapping, Sequence
from glob import glob
import logging
import os
from typing import Any, Callable, Dict, List, Tuple
from unittest.mock import patch
from homeassistant import bootstrap, core
from homeassistant.config import get_default_config_dir
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.check_config import async_check_ha_config_file
import homeassistant.util.yaml.loader as yaml_loader
# mypy: allow-untyped-calls, allow-untyped-defs
REQUIREMENTS = ("colorlog==4.7.2",)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
MOCKS: Dict[str, Tuple[str, Callable]] = {
"load": ("homeassistant.util.yaml.loader.load_yaml", yaml_loader.load_yaml),
"load*": ("homeassistant.config.load_yaml", yaml_loader.load_yaml),
"secrets": ("homeassistant.util.yaml.loader.secret_yaml", yaml_loader.secret_yaml),
}
SILENCE = ("homeassistant.scripts.check_config.yaml_loader.clear_secret_cache",)
PATCHES: Dict[str, Any] = {}
C_HEAD = "bold"
ERROR_STR = "General Errors"
def color(the_color, *args, reset=None):
"""Color helper."""
# pylint: disable=import-outside-toplevel
from colorlog.escape_codes import escape_codes, parse_colors
try:
if not args:
            assert reset is None, "You cannot reset if nothing is being printed"
return parse_colors(the_color)
return parse_colors(the_color) + " ".join(args) + escape_codes[reset or "reset"]
except KeyError as k:
raise ValueError(f"Invalid color {k!s} in {the_color}") from k
def run(script_args: List) -> int:
"""Handle check config commandline script."""
parser = argparse.ArgumentParser(description="Check Home Assistant configuration.")
parser.add_argument("--script", choices=["check_config"])
parser.add_argument(
"-c",
"--config",
default=get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"-i",
"--info",
nargs="?",
default=None,
const="all",
help="Show a portion of the config",
)
parser.add_argument(
"-f", "--files", action="store_true", help="Show used configuration files"
)
parser.add_argument(
"-s", "--secrets", action="store_true", help="Show secret information"
)
args, unknown = parser.parse_known_args()
if unknown:
print(color("red", "Unknown arguments:", ", ".join(unknown)))
config_dir = os.path.join(os.getcwd(), args.config)
print(color("bold", "Testing configuration at", config_dir))
res = check(config_dir, args.secrets)
domain_info: List[str] = []
if args.info:
domain_info = args.info.split(",")
if args.files:
print(color(C_HEAD, "yaml files"), "(used /", color("red", "not used") + ")")
deps = os.path.join(config_dir, "deps")
yaml_files = [
f
for f in glob(os.path.join(config_dir, "**/*.yaml"), recursive=True)
if not f.startswith(deps)
]
for yfn in sorted(yaml_files):
the_color = "" if yfn in res["yaml_files"] else "red"
print(color(the_color, "-", yfn))
if res["except"]:
print(color("bold_white", "Failed config"))
for domain, config in res["except"].items():
domain_info.append(domain)
print(" ", color("bold_red", domain + ":"), color("red", "", reset="red"))
dump_dict(config, reset="red")
print(color("reset"))
if domain_info:
if "all" in domain_info:
print(color("bold_white", "Successful config (all)"))
for domain, config in res["components"].items():
print(" ", color(C_HEAD, domain + ":"))
dump_dict(config)
else:
print(color("bold_white", "Successful config (partial)"))
for domain in domain_info:
if domain == ERROR_STR:
continue
print(" ", color(C_HEAD, domain + ":"))
dump_dict(res["components"].get(domain))
if args.secrets:
flatsecret: Dict[str, str] = {}
for sfn, sdict in res["secret_cache"].items():
sss = []
for skey in sdict:
if skey in flatsecret:
_LOGGER.error(
"Duplicated secrets in files %s and %s", flatsecret[skey], sfn
)
flatsecret[skey] = sfn
sss.append(color("green", skey) if skey in res["secrets"] else skey)
print(color(C_HEAD, "Secrets from", sfn + ":"), ", ".join(sss))
print(color(C_HEAD, "Used Secrets:"))
for skey, sval in res["secrets"].items():
if sval is None:
print(" -", skey + ":", color("red", "not found"))
continue
print(" -", skey + ":", sval)
return len(res["except"])
def check(config_dir, secrets=False):
"""Perform a check by mocking hass load functions."""
logging.getLogger("homeassistant.loader").setLevel(logging.CRITICAL)
res: Dict[str, Any] = {
"yaml_files": OrderedDict(), # yaml_files loaded
"secrets": OrderedDict(), # secret cache and secrets loaded
"except": OrderedDict(), # exceptions raised (with config)
#'components' is a HomeAssistantConfig # noqa: E265
"secret_cache": None,
}
# pylint: disable=possibly-unused-variable
def mock_load(filename):
"""Mock hass.util.load_yaml to save config file names."""
res["yaml_files"][filename] = True
return MOCKS["load"][1](filename)
# pylint: disable=possibly-unused-variable
def mock_secrets(ldr, node):
"""Mock _get_secrets."""
try:
val = MOCKS["secrets"][1](ldr, node)
except HomeAssistantError:
val = None
res["secrets"][node.value] = val
return val
# Patches to skip functions
for sil in SILENCE:
PATCHES[sil] = patch(sil)
# Patches with local mock functions
for key, val in MOCKS.items():
if not secrets and key == "secrets":
continue
# The * in the key is removed to find the mock_function (side_effect)
# This allows us to use one side_effect to patch multiple locations
        mock_function = locals()[f"mock_{key.replace('*', '')}"]
PATCHES[key] = patch(val[0], side_effect=mock_function)
# Start all patches
for pat in PATCHES.values():
pat.start()
if secrets:
# Ensure !secrets point to the patched function
yaml_loader.yaml.SafeLoader.add_constructor("!secret", yaml_loader.secret_yaml)
try:
res["components"] = asyncio.run(async_check_config(config_dir))
res["secret_cache"] = OrderedDict(yaml_loader.__SECRET_CACHE)
for err in res["components"].errors:
domain = err.domain or ERROR_STR
res["except"].setdefault(domain, []).append(err.message)
if err.config:
res["except"].setdefault(domain, []).append(err.config)
except Exception as err: # pylint: disable=broad-except
print(color("red", "Fatal error while loading config:"), str(err))
res["except"].setdefault(ERROR_STR, []).append(str(err))
finally:
# Stop all patches
for pat in PATCHES.values():
pat.stop()
if secrets:
# Ensure !secrets point to the original function
yaml_loader.yaml.SafeLoader.add_constructor(
"!secret", yaml_loader.secret_yaml
)
bootstrap.clear_secret_cache()
return res
async def async_check_config(config_dir):
"""Check the HA config."""
hass = core.HomeAssistant()
hass.config.config_dir = config_dir
components = await async_check_ha_config_file(hass)
await hass.async_stop(force=True)
return components
def line_info(obj, **kwargs):
"""Display line config source."""
if hasattr(obj, "__config_file__"):
return color(
"cyan", f"[source {obj.__config_file__}:{obj.__line__ or "?"}]", **kwargs
)
return "?"
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
"""Display a dict.
A friendly version of print yaml_loader.yaml.dump(config).
"""
def sort_dict_key(val):
"""Return the dict key for sorting."""
key = str(val[0]).lower()
return "0" if key == "platform" else key
indent_str = indent_count * " "
if listi or isinstance(layer, list):
indent_str = indent_str[:-1] + "-"
if isinstance(layer, Mapping):
for key, value in sorted(layer.items(), key=sort_dict_key):
if isinstance(value, (dict, list)):
print(indent_str, str(key) + ":", line_info(value, **kwargs))
dump_dict(value, indent_count + 2)
else:
print(indent_str, str(key) + ":", value)
indent_str = indent_count * " "
if isinstance(layer, Sequence):
for i in layer:
if isinstance(i, dict):
dump_dict(i, indent_count + 2, True)
else:
print(" ", indent_str, i)
|
"""Script to check the configuration file."""
import argparse
import asyncio
from collections import OrderedDict
from collections.abc import Mapping, Sequence
from glob import glob
import logging
import os
from typing import Any, Callable, Dict, List, Tuple
from unittest.mock import patch
from homeassistant import bootstrap, core
from homeassistant.config import get_default_config_dir
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.check_config import async_check_ha_config_file
import homeassistant.util.yaml.loader as yaml_loader
# mypy: allow-untyped-calls, allow-untyped-defs
REQUIREMENTS = ("colorlog==4.7.2",)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
MOCKS: Dict[str, Tuple[str, Callable]] = {
"load": ("homeassistant.util.yaml.loader.load_yaml", yaml_loader.load_yaml),
"load*": ("homeassistant.config.load_yaml", yaml_loader.load_yaml),
"secrets": ("homeassistant.util.yaml.loader.secret_yaml", yaml_loader.secret_yaml),
}
SILENCE = ("homeassistant.scripts.check_config.yaml_loader.clear_secret_cache",)
PATCHES: Dict[str, Any] = {}
C_HEAD = "bold"
ERROR_STR = "General Errors"
def color(the_color, *args, reset=None):
"""Color helper."""
# pylint: disable=import-outside-toplevel
from colorlog.escape_codes import escape_codes, parse_colors
try:
if not args:
            assert reset is None, "You cannot reset if nothing is being printed"
return parse_colors(the_color)
return parse_colors(the_color) + " ".join(args) + escape_codes[reset or "reset"]
except KeyError as k:
raise ValueError(f"Invalid color {k!s} in {the_color}") from k
def run(script_args: List) -> int:
"""Handle check config commandline script."""
parser = argparse.ArgumentParser(description="Check Home Assistant configuration.")
parser.add_argument("--script", choices=["check_config"])
parser.add_argument(
"-c",
"--config",
default=get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"-i",
"--info",
nargs="?",
default=None,
const="all",
help="Show a portion of the config",
)
parser.add_argument(
"-f", "--files", action="store_true", help="Show used configuration files"
)
parser.add_argument(
"-s", "--secrets", action="store_true", help="Show secret information"
)
args, unknown = parser.parse_known_args()
if unknown:
print(color("red", "Unknown arguments:", ", ".join(unknown)))
config_dir = os.path.join(os.getcwd(), args.config)
print(color("bold", "Testing configuration at", config_dir))
res = check(config_dir, args.secrets)
domain_info: List[str] = []
if args.info:
domain_info = args.info.split(",")
if args.files:
print(color(C_HEAD, "yaml files"), "(used /", color("red", "not used") + ")")
deps = os.path.join(config_dir, "deps")
yaml_files = [
f
for f in glob(os.path.join(config_dir, "**/*.yaml"), recursive=True)
if not f.startswith(deps)
]
for yfn in sorted(yaml_files):
the_color = "" if yfn in res["yaml_files"] else "red"
print(color(the_color, "-", yfn))
if res["except"]:
print(color("bold_white", "Failed config"))
for domain, config in res["except"].items():
domain_info.append(domain)
print(" ", color("bold_red", domain + ":"), color("red", "", reset="red"))
dump_dict(config, reset="red")
print(color("reset"))
if domain_info:
if "all" in domain_info:
print(color("bold_white", "Successful config (all)"))
for domain, config in res["components"].items():
print(" ", color(C_HEAD, domain + ":"))
dump_dict(config)
else:
print(color("bold_white", "Successful config (partial)"))
for domain in domain_info:
if domain == ERROR_STR:
continue
print(" ", color(C_HEAD, domain + ":"))
dump_dict(res["components"].get(domain))
if args.secrets:
flatsecret: Dict[str, str] = {}
for sfn, sdict in res["secret_cache"].items():
sss = []
for skey in sdict:
if skey in flatsecret:
_LOGGER.error(
"Duplicated secrets in files %s and %s", flatsecret[skey], sfn
)
flatsecret[skey] = sfn
sss.append(color("green", skey) if skey in res["secrets"] else skey)
print(color(C_HEAD, "Secrets from", sfn + ":"), ", ".join(sss))
print(color(C_HEAD, "Used Secrets:"))
for skey, sval in res["secrets"].items():
if sval is None:
print(" -", skey + ":", color("red", "not found"))
continue
print(" -", skey + ":", sval)
return len(res["except"])
def check(config_dir, secrets=False):
"""Perform a check by mocking hass load functions."""
logging.getLogger("homeassistant.loader").setLevel(logging.CRITICAL)
res: Dict[str, Any] = {
"yaml_files": OrderedDict(), # yaml_files loaded
"secrets": OrderedDict(), # secret cache and secrets loaded
"except": OrderedDict(), # exceptions raised (with config)
#'components' is a HomeAssistantConfig # noqa: E265
"secret_cache": None,
}
# pylint: disable=possibly-unused-variable
def mock_load(filename):
"""Mock hass.util.load_yaml to save config file names."""
res["yaml_files"][filename] = True
return MOCKS["load"][1](filename)
# pylint: disable=possibly-unused-variable
def mock_secrets(ldr, node):
"""Mock _get_secrets."""
try:
val = MOCKS["secrets"][1](ldr, node)
except HomeAssistantError:
val = None
res["secrets"][node.value] = val
return val
# Patches to skip functions
for sil in SILENCE:
PATCHES[sil] = patch(sil)
# Patches with local mock functions
for key, val in MOCKS.items():
if not secrets and key == "secrets":
continue
# The * in the key is removed to find the mock_function (side_effect)
# This allows us to use one side_effect to patch multiple locations
mock_function = locals()[f"mock_{key.replace('*', '')}"]
PATCHES[key] = patch(val[0], side_effect=mock_function)
# Start all patches
for pat in PATCHES.values():
pat.start()
if secrets:
# Ensure !secrets point to the patched function
yaml_loader.yaml.SafeLoader.add_constructor("!secret", yaml_loader.secret_yaml)
try:
res["components"] = asyncio.run(async_check_config(config_dir))
res["secret_cache"] = OrderedDict(yaml_loader.__SECRET_CACHE)
for err in res["components"].errors:
domain = err.domain or ERROR_STR
res["except"].setdefault(domain, []).append(err.message)
if err.config:
res["except"].setdefault(domain, []).append(err.config)
except Exception as err: # pylint: disable=broad-except
print(color("red", "Fatal error while loading config:"), str(err))
res["except"].setdefault(ERROR_STR, []).append(str(err))
finally:
# Stop all patches
for pat in PATCHES.values():
pat.stop()
if secrets:
# Ensure !secrets point to the original function
yaml_loader.yaml.SafeLoader.add_constructor(
"!secret", yaml_loader.secret_yaml
)
bootstrap.clear_secret_cache()
return res
async def async_check_config(config_dir):
"""Check the HA config."""
hass = core.HomeAssistant()
hass.config.config_dir = config_dir
components = await async_check_ha_config_file(hass)
await hass.async_stop(force=True)
return components
def line_info(obj, **kwargs):
"""Display line config source."""
if hasattr(obj, "__config_file__"):
return color(
"cyan", f"[source {obj.__config_file__}:{obj.__line__ or '?'}]", **kwargs
)
return "?"
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
"""Display a dict.
A friendly version of print yaml_loader.yaml.dump(config).
"""
def sort_dict_key(val):
"""Return the dict key for sorting."""
key = str(val[0]).lower()
return "0" if key == "platform" else key
indent_str = indent_count * " "
if listi or isinstance(layer, list):
indent_str = indent_str[:-1] + "-"
if isinstance(layer, Mapping):
for key, value in sorted(layer.items(), key=sort_dict_key):
if isinstance(value, (dict, list)):
print(indent_str, str(key) + ":", line_info(value, **kwargs))
dump_dict(value, indent_count + 2)
else:
print(indent_str, str(key) + ":", value)
indent_str = indent_count * " "
if isinstance(layer, Sequence):
for i in layer:
if isinstance(i, dict):
dump_dict(i, indent_count + 2, True)
else:
print(" ", indent_str, i)
|
"""
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
from contextlib import contextmanager
import numpy as np
import astropy.units as u
from astropy.coordinates import ConvertError, QuantityAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.representation import (
CartesianDifferential,
CartesianRepresentation,
CylindricalRepresentation,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.time import Time
from sunpy.sun.constants import radius as _RSUN
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from sunpy.util.exceptions import SunpyUserWarning
from .frameattributes import ObserverCoordinateAttribute, TimeFrameAttributeSunPy
_J2000 = Time('J2000.0', scale='tt')
__all__ = ['SunPyBaseCoordinateFrame', 'BaseHeliographic',
'HeliographicStonyhurst', 'HeliographicCarrington',
'Heliocentric', 'Helioprojective',
'HeliocentricEarthEcliptic', 'GeocentricSolarEcliptic',
'HeliocentricInertial', 'GeocentricEarthEquatorial']
def _frame_parameters():
"""
Returns formatting dictionary to use with add_common_docstring to populate frame docstrings
"""
ret = {}
# Each text block is missing the first indent because it already exists in the frame docstring
ret['data'] = ("data : `~astropy.coordinates.BaseRepresentation` or ``None``\n"
" A representation object or ``None`` to have no data\n"
" (or use the coordinate component arguments, see below).")
ret['common'] = (f"obstime : {_variables_for_parse_time_docstring()["parse_time_types"]}\n"
" The time of the observation. This is used to determine the\n"
" position of solar-system bodies (e.g., the Sun and the Earth) as\n"
" needed to define the origin and orientation of the frame.\n"
" representation_type : `~astropy.coordinates.BaseRepresentation`, str, optional\n"
" A representation class or string name of a representation class.\n"
" This may change the valid coordinate component arguments from the\n"
" defaults (see above). For example, passing\n"
" ``representation_type='cartesian'`` will make the frame expect\n"
" Cartesian coordinate component arguments (typically, ``x``, ``y``,\n"
" and ``z``).\n"
" copy : bool, optional\n"
" If `True` (default), make copies of the input coordinate arrays.")
ret['lonlat'] = ("lon : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
" The longitude coordinate for this object (``lat`` must also be\n"
" given and ``data`` must be ``None``).\n"
" Not needed if ``data`` is given.\n"
" lat : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
" The latitude coordinate for this object (``lon`` must also be\n"
" given and ``data`` must be ``None``).\n"
" Not needed if ``data`` is given.")
ret['radius'] = ("radius : `~astropy.units.Quantity`, optional\n"
" The radial distance coordinate from Sun center for this object.\n"
" Defaults to the radius of the Sun. Not needed if ``data`` is given.")
ret['distance_sun'] = ("distance : `~astropy.units.Quantity`, optional\n"
" The distance coordinate from Sun center for this object.\n"
" Not needed if ``data`` is given.")
ret['distance_earth'] = ("distance : `~astropy.units.Quantity`, optional\n"
" The distance coordinate from Earth center for this object.\n"
" Not needed if ``data`` is given.")
ret['xyz'] = ("x : `~astropy.units.Quantity`, optional\n"
" X-axis coordinate for this object. Not needed if ``data`` is given.\n"
" y : `~astropy.units.Quantity`, optional\n"
" Y-axis coordinate for this object. Not needed if ``data`` is given.\n"
" z : `~astropy.units.Quantity`, optional\n"
" Z-axis coordinate for this object. Not needed if ``data`` is given.")
ret['observer'] = ("observer : `~sunpy.coordinates.frames.HeliographicStonyhurst`, str\n"
" The location of the observer. If a string is provided,\n"
" it must be a solar system body that can be parsed by\n"
" `~sunpy.coordinates.ephemeris.get_body_heliographic_stonyhurst`\n"
" at the time ``obstime``. Defaults to Earth center.")
ret['rsun'] = ("rsun : `~astropy.units.Quantity`\n"
" The radius of the Sun in length units. Used to convert a 2D\n"
" coordinate (i.e., no ``radius`` component) to a 3D coordinate by\n"
" assuming that the coordinate is on the surface of the Sun. Defaults\n"
" to the photospheric radius as defined in `sunpy.sun.constants`.")
ret['equinox'] = (f"equinox : {_variables_for_parse_time_docstring()["parse_time_types"]}\n"
" The date for the mean vernal equinox.\n"
" Defaults to the J2000.0 equinox.")
return ret
class SunPyBaseCoordinateFrame(BaseCoordinateFrame):
"""
Base class for sunpy coordinate frames.
This class is not intended to be used directly and has no transformations defined.
* Defines the frame attribute ``obstime`` for observation time.
* Defines a default wrap angle of 180 degrees for longitude in spherical coordinates,
which can be overridden via the class variable ``_wrap_angle``.
    * Injects a nice way of representing the object which the coordinate represents.
"""
obstime = TimeFrameAttributeSunPy()
default_representation = SphericalRepresentation
default_differential = SphericalDifferential
frame_specific_representation_info = {
SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
}
_wrap_angle = 180*u.deg # for longitude in spherical coordinates
def __init__(self, *args, **kwargs):
self.object_name = None
# If wrap_longitude=False is passed in, do not impose a specific wrap angle for the frame
if not kwargs.pop('wrap_longitude', True):
self._wrap_angle = None
super().__init__(*args, **kwargs)
# If obstime is specified, treat the default observer (None) as explicitly set
if self.obstime is not None and self.is_frame_attr_default('observer'):
self._attr_names_with_defaults.remove('observer')
return
def represent_as(self, base, s='base', in_frame_units=False):
data = super().represent_as(base, s, in_frame_units=in_frame_units)
# If a frame wrap angle is set, use that wrap angle for any spherical representations.
if self._wrap_angle is not None and \
isinstance(data, (UnitSphericalRepresentation, SphericalRepresentation)):
data.lon.wrap_angle = self._wrap_angle
return data
def __str__(self):
# We override this here so that when you print a SkyCoord it shows the
# observer as the string and not the whole massive coordinate.
if getattr(self, "object_name", None):
return f"<{self.__class__.__name__} Coordinate for '{self.object_name}'>"
else:
return super().__str__()
@property
def _is_2d(self):
return (self._data is not None and self._data.norm().unit is u.one
and u.allclose(self._data.norm(), 1*u.one))
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005
cls._fix_property_docstrings()
@classmethod
def _fix_property_docstrings(cls):
# This class method adds docstrings to properties dynamically created by
# BaseCoordinateFrame.__init_subclass__(). Accordingly, this method needs to itself be
# called from SunPyBaseCoordinateFrame.__init_subclass__() to work for our subclasses.
property_docstrings = {
'default_representation': "Default representation for position data",
'default_differential': "Default representation for differential data",
'frame_specific_representation_info': "Mapping for frame-specific component names",
}
for prop, docstring in property_docstrings.items():
if getattr(cls, prop).__doc__ is None:
setattr(getattr(cls, prop), '__doc__', docstring)
# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005
SunPyBaseCoordinateFrame._fix_property_docstrings()
class BaseHeliographic(SunPyBaseCoordinateFrame):
"""
Base class for HeliographicCarrington (HGC) and HeliographicStonyhurst (HGS) frames.
This class is not intended to be used directly and has no transformations defined.
"""
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping('lon', 'lon', u.deg),
RepresentationMapping('lat', 'lat', u.deg),
RepresentationMapping('distance', 'radius', None)],
SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_radius', u.km/u.s)],
}
rsun = QuantityAttribute(default=_RSUN, unit=u.km)
def make_3d(self):
"""
Returns a fully 3D coordinate based on this coordinate.
If this coordinate is only 2D (i.e., no ``radius`` component) or is a
unit vector (i.e., the norm of the coordinate is unity), a new
coordinate is created that corresponds to the surface of the Sun.
That is, the 3D coordinate will retain the ``lon`` and ``lat``, and
``radius`` will be set to the frame's ``rsun`` frame attribute.
If this coordinate is already fully 3D, it is directly returned, even
if it does not lie on the surface of the Sun.
Returns
-------
frame : `~sunpy.coordinates.frames.BaseHeliographic`
The fully 3D coordinate
"""
if self._is_2d:
return self.realize_frame(self._data * self.rsun)
# The coordinate is already 3D
return self
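    # Illustrative example (doctest-style, mirroring the conventions used in the frame
    # docstrings below); the exact output formatting may vary between versions:
    #
    #     >>> import astropy.units as u
    #     >>> from sunpy.coordinates import HeliographicStonyhurst  # doctest: +SKIP
    #     >>> HeliographicStonyhurst(10*u.deg, 20*u.deg).make_3d().radius  # doctest: +SKIP
    #     <Distance 695700. km>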
@add_common_docstring(**_frame_parameters())
class HeliographicStonyhurst(BaseHeliographic):
"""
A coordinate or frame in the Stonyhurst Heliographic (HGS) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the projection of
the Sun-Earth line onto the Sun's equatorial plane.
    This system is also known as the Heliocentric Earth Equatorial (HEEQ) system when
represented using Cartesian components.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``representation_type`` must be
keyword arguments)::
HeliographicStonyhurst(lon, lat, obstime=obstime)
HeliographicStonyhurst(lon, lat, radius, obstime=obstime)
HeliographicStonyhurst(x, y, z, representation_type='cartesian', obstime=obstime)
Parameters
----------
{data}
{lonlat}
{radius}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km,
... frame="heliographic_stonyhurst",
... obstime="2010/01/01T00:00:45")
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(1., 1., 2.)>
>>> sc.frame
<HeliographicStonyhurst Coordinate (obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(1., 1., 2.)>
>>> sc = SkyCoord(HeliographicStonyhurst(-10*u.deg, 2*u.deg))
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=None, rsun=695700.0 km): (lon, lat) in deg
(-10., 2.)>
>>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50",
... frame="heliographic_stonyhurst")
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(90., 2.54480438, 45.04442252)>
"""
name = "heliographic_stonyhurst"
@add_common_docstring(**_frame_parameters())
class HeliographicCarrington(BaseHeliographic):
"""
A coordinate or frame in the Carrington Heliographic (HGC) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis and Y-axis rotate with a period of 25.38 days.
This system differs from Stonyhurst Heliographic (HGS) in its definition of longitude. This
longitude is an "apparent" longitude because it takes into account the time it takes for light
to travel from the Sun's surface to the observer. Thus, the observer needs to be specified to
be able to transform to any other coordinate frame.
A new instance can be created using the following signatures
    (note that if supplied, ``obstime`` and ``observer`` must be keyword arguments)::
HeliographicCarrington(lon, lat, obstime=obstime, observer=observer)
HeliographicCarrington(lon, lat, radius, obstime=obstime, observer=observer)
If you want to define the location in HGC such that the observer for the coordinate frame is
the same as that location (e.g., the location of an observatory in its corresponding HGC
frame), use ``observer='self'``::
HeliographicCarrington(lon, lat, radius, obstime=obstime, observer='self')
Parameters
----------
{data}
{lonlat}
{radius}
{observer}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km,
... frame="heliographic_carrington",
... observer="earth",
... obstime="2010/01/01T00:00:30")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:30.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (lon, lat, radius) in (deg, deg, km)
(1., 2., 3.)>
>>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,
... obstime="2010/01/01T00:00:45",
... observer="self",
... frame="heliographic_carrington")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km, observer=self): (lon, lat, radius) in (deg, deg, km)
[(1., 4., 5.), (2., 5., 6.), (3., 6., 7.)]>
>>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50",
... frame="heliographic_carrington")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=None): (lon, lat, radius) in (deg, deg, km)
(90., 2.54480438, 45.04442252)>
"""
name = "heliographic_carrington"
_wrap_angle = 360*u.deg
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(self.observer, BaseCoordinateFrame) and self.observer == 'self' and self._is_2d:
raise ValueError("Full 3D coordinate (including radius) must be specified "
"when observer='self'.")
@add_common_docstring(**_frame_parameters())
class Heliocentric(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric system, which is observer-based.
- The origin is the center of the Sun.
- The Z-axis is aligned with the Sun-observer line.
- The Y-axis is aligned with the component of the vector to the Sun's north pole that is
perpendicular to the Z-axis.
This frame defaults to a Cartesian component representation, which is known as Heliocentric
    Cartesian (HCC). This frame can also be represented using cylindrical components, where
    ``rho`` is the impact parameter and ``psi`` is the position angle.
``psi`` is measured relative to the west limb, rather than solar north, so is shifted
by 90 degrees compared to the convention of the Heliocentric Radial (HCR) system.
A new instance can be created using the following signatures
(note that if supplied, ``obstime``, ``observer``, and ``representation_type`` must be
keyword arguments)::
Heliocentric(x, y, z, obstime=obstime, observer=observer)
Heliocentric(rho, psi, z, representation_type='cylindrical', obstime=obstime, observer=observer)
Parameters
----------
{data}
{xyz}
{observer}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
(10., 1., 2.)>
>>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm,
... obstime="2011/01/01T00:00:54", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-01T00:00:54.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in (km, m, cm)
[(1., 3., 5.), (2., 4., 6.)]>
>>> sc = SkyCoord(CylindricalRepresentation(10*u.km, 60*u.deg, 10*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
(5., 8.66025404, 10.)>
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
frame_specific_representation_info = {
CylindricalRepresentation: [RepresentationMapping('phi', 'psi', u.deg)]
}
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
def represent_as(self, base, s='base', in_frame_units=False):
data = super().represent_as(base, s, in_frame_units=in_frame_units)
# For cylindrical representations, wrap the `psi` component (natively `phi`) at 360 deg
if isinstance(data, CylindricalRepresentation):
data.phi.wrap_at(360*u.deg, inplace=True)
return data
@add_common_docstring(**_frame_parameters())
class Helioprojective(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Helioprojective Cartesian (HPC) system, which is observer-based.
- The origin is the location of the observer.
- ``Tx`` (aka "theta_x") is the angle relative to the plane containing the Sun-observer line
and the Sun's rotation axis, with positive values in the direction of the Sun's west limb.
- ``Ty`` (aka "theta_y") is the angle relative to the Sun's equatorial plane, with positive
values in the direction of the Sun's north pole.
- ``distance`` is the Sun-observer distance.
This system is frequently used in a projective form without ``distance`` specified. For
observations looking very close to the center of the Sun, where the small-angle approximation
is appropriate, ``Tx`` and ``Ty`` can be approximated as Cartesian components.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``observer`` must be keyword arguments)::
Helioprojective(Tx, Ty, obstime=obstime, observer=observer)
Helioprojective(Tx, Ty, distance, obstime=obstime, observer=observer)
Parameters
----------
{data}
Tx : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
The theta_x coordinate for this object. Not needed if ``data`` is given.
Ty : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
The theta_y coordinate for this object. Not needed if ``data`` is given.
distance : `~astropy.units.Quantity`
The distance coordinate from the observer for this object.
Not needed if ``data`` is given.
{observer}
{rsun}
{common}
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km,
... obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, km)
(0., 0., 5.)>
>>> sc = SkyCoord(0*u.deg, 0*u.deg,
... obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty) in arcsec
(0., 0.)>
>>> sc = SkyCoord(CartesianRepresentation(1*u.AU, 1e5*u.km, -2e5*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
(137.87948623, -275.75878762, 1.00000112)>
"""
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec),
RepresentationMapping('distance', 'distance', None)],
SphericalDifferential: [RepresentationMapping('d_lon', 'd_Tx', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_Ty', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
UnitSphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec)],
}
rsun = QuantityAttribute(default=_RSUN, unit=u.km)
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
@property
def angular_radius(self):
"""
Angular radius of the Sun as seen by the observer.
The ``rsun`` frame attribute is the radius of the Sun in length units.
The tangent vector from the observer to the edge of the Sun forms a
right-angle triangle with the radius of the Sun as the far side and the
Sun-observer distance as the hypotenuse. Thus, the sine of the angular
        radius of the Sun is the ratio of these two distances.
"""
from sunpy.coordinates.sun import _angular_radius # avoiding a circular import
if not isinstance(self.observer, HeliographicStonyhurst):
if self.observer is None:
raise ValueError("The observer must be defined, not `None`.")
raise ValueError("The observer must be fully defined by specifying `obstime`.")
return _angular_radius(self.rsun, self.observer.radius)
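    # In formula form (see the docstring above): angular_radius = arcsin(rsun / d_observer).
    # For example, with rsun ~ 695700 km and an Earth observer at ~1 AU (~1.496e8 km),
    # arcsin(695700 / 1.496e8) ~ 0.00465 rad ~ 959 arcsec, the familiar ~16 arcmin solar radius.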
def make_3d(self):
"""
This method calculates the third coordinate of the Helioprojective
frame. It assumes that the coordinate point is on the surface of the Sun.
If a point in the frame is off limb then NaN will be returned.
Returns
-------
new_frame : `~sunpy.coordinates.frames.Helioprojective`
A new frame instance with all the attributes of the original but
now with a third coordinate.
"""
# Skip if we already are 3D
if not self._is_2d:
return self
if not isinstance(self.observer, BaseCoordinateFrame):
raise ConvertError("Cannot calculate distance to the Sun "
f"for observer '{self.observer}' "
"without `obstime` being specified.")
rep = self.represent_as(UnitSphericalRepresentation)
lat, lon = rep.lat, rep.lon
# Check for the use of floats with lower precision than the native Python float
if not set([lon.dtype.type, lat.dtype.type]).issubset([float, np.float64, np.longdouble]):
raise SunpyUserWarning("The Helioprojective component values appear to be lower "
"precision than the native Python float: "
f"Tx is {lon.dtype.name}, and Ty is {lat.dtype.name}. "
"To minimize precision loss, you may want to cast the values to "
"`float` or `numpy.float64` via the NumPy method `.astype()`.")
# Calculate the distance to the surface of the Sun using the law of cosines
cos_alpha = np.cos(lat) * np.cos(lon)
c = self.observer.radius**2 - self.rsun**2
b = -2 * self.observer.radius * cos_alpha
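        # i.e., solve d**2 + b*d + c = 0 for the line-of-sight distance d, where cos_alpha is
        # the cosine of the angle between the line of sight and the Sun-observer line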
# Ignore sqrt of NaNs
with np.errstate(invalid='ignore'):
d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2 # use the "near" solution
if self._spherical_screen:
sphere_center = self._spherical_screen['center'].transform_to(self).cartesian
c = sphere_center.norm()**2 - self._spherical_screen['radius']**2
b = -2 * sphere_center.dot(rep)
# Ignore sqrt of NaNs
with np.errstate(invalid='ignore'):
dd = ((-1*b) + np.sqrt(b**2 - 4*c)) / 2 # use the "far" solution
d = np.fmin(d, dd) if self._spherical_screen['only_off_disk'] else dd
return self.realize_frame(SphericalRepresentation(lon=lon,
lat=lat,
distance=d))
_spherical_screen = None
@classmethod
@contextmanager
def assume_spherical_screen(cls, center, only_off_disk=False):
"""
Context manager to interpret 2D coordinates as being on the inside of a spherical screen.
The radius of the screen is the distance between the specified ``center`` and Sun center.
This ``center`` does not have to be the same as the observer location for the coordinate
frame. If they are the same, then this context manager is equivalent to assuming that the
helioprojective "zeta" component is zero.
This replaces the default assumption where 2D coordinates are mapped onto the surface of the
Sun.
Parameters
----------
center : `~astropy.coordinates.SkyCoord`
The center of the spherical screen
only_off_disk : `bool`, optional
If `True`, apply this assumption only to off-disk coordinates, with on-disk coordinates
still mapped onto the surface of the Sun. Defaults to `False`.
Examples
--------
.. minigallery:: sunpy.coordinates.Helioprojective.assume_spherical_screen
>>> import astropy.units as u
>>> from sunpy.coordinates import Helioprojective
>>> h = Helioprojective(range(7)*u.arcsec*319, [0]*7*u.arcsec,
... observer='earth', obstime='2020-04-08')
>>> print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 0.99660825), ( 319., 0., 0.99687244),
( 638., 0., 0.99778472), ( 957., 0., 1.00103285),
(1276., 0., nan), (1595., 0., nan),
(1914., 0., nan)]>
>>> with Helioprojective.assume_spherical_screen(h.observer):
... print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 1.00125872), ( 319., 0., 1.00125872),
( 638., 0., 1.00125872), ( 957., 0., 1.00125872),
(1276., 0., 1.00125872), (1595., 0., 1.00125872),
(1914., 0., 1.00125872)]>
>>> with Helioprojective.assume_spherical_screen(h.observer, only_off_disk=True):
... print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 0.99660825), ( 319., 0., 0.99687244),
( 638., 0., 0.99778472), ( 957., 0., 1.00103285),
(1276., 0., 1.00125872), (1595., 0., 1.00125872),
(1914., 0., 1.00125872)]>
"""
try:
old_spherical_screen = cls._spherical_screen # nominally None
center_hgs = center.transform_to(HeliographicStonyhurst(obstime=center.obstime))
cls._spherical_screen = {
'center': center,
'radius': center_hgs.radius,
'only_off_disk': only_off_disk
}
yield
finally:
cls._spherical_screen = old_spherical_screen
@add_common_docstring(**_frame_parameters())
class HeliocentricEarthEcliptic(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric Earth Ecliptic (HEE) system.
- The origin is the center of the Sun.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Sun-Earth line.
- The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
of the mean ecliptic pole at the observation time.
Parameters
----------
{data}
{lonlat}
{distance_sun}
{common}
"""
@add_common_docstring(**_frame_parameters())
class GeocentricSolarEcliptic(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Geocentric Solar Ecliptic (GSE) system.
- The origin is the center of the Earth.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Earth-Sun line.
- The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
of the mean ecliptic pole at the observation time.
Parameters
----------
{data}
{lonlat}
{distance_earth}
{common}
Notes
-----
Aberration due to Earth motion is not included.
"""
@add_common_docstring(**_frame_parameters())
class HeliocentricInertial(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric Inertial (HCI) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the solar ascending
node on the ecliptic (mean J2000.0).
Parameters
----------
{data}
{lonlat}
{distance_sun}
{common}
Notes
-----
The solar ascending node on the ecliptic lies on the intersection of the solar equatorial
plane with the ecliptic plane, not on the intersection of the celestial equatorial plane with
the ecliptic plane.
"""
@add_common_docstring(**_frame_parameters())
class GeocentricEarthEquatorial(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Geocentric Earth Equatorial (GEI) system.
- The origin is the center of the Earth.
- The Z-axis (+90 degrees latitude) is aligned with the Earth's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the mean (not true)
vernal equinox.
Parameters
----------
{data}
{lonlat}
{distance_earth}
{equinox}
{common}
Notes
-----
Aberration due to Earth motion is not included.
"""
equinox = TimeFrameAttributeSunPy(default=_J2000)
|
"""
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
from contextlib import contextmanager
import numpy as np
import astropy.units as u
from astropy.coordinates import ConvertError, QuantityAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.representation import (
CartesianDifferential,
CartesianRepresentation,
CylindricalRepresentation,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.time import Time
from sunpy.sun.constants import radius as _RSUN
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from sunpy.util.exceptions import SunpyUserWarning
from .frameattributes import ObserverCoordinateAttribute, TimeFrameAttributeSunPy
_J2000 = Time('J2000.0', scale='tt')
__all__ = ['SunPyBaseCoordinateFrame', 'BaseHeliographic',
'HeliographicStonyhurst', 'HeliographicCarrington',
'Heliocentric', 'Helioprojective',
'HeliocentricEarthEcliptic', 'GeocentricSolarEcliptic',
'HeliocentricInertial', 'GeocentricEarthEquatorial']
def _frame_parameters():
"""
Returns formatting dictionary to use with add_common_docstring to populate frame docstrings
"""
ret = {}
# Each text block is missing the first indent because it already exists in the frame docstring
ret['data'] = ("data : `~astropy.coordinates.BaseRepresentation` or ``None``\n"
" A representation object or ``None`` to have no data\n"
" (or use the coordinate component arguments, see below).")
ret['common'] = (f"obstime : {_variables_for_parse_time_docstring()['parse_time_types']}\n"
" The time of the observation. This is used to determine the\n"
" position of solar-system bodies (e.g., the Sun and the Earth) as\n"
" needed to define the origin and orientation of the frame.\n"
" representation_type : `~astropy.coordinates.BaseRepresentation`, str, optional\n"
" A representation class or string name of a representation class.\n"
" This may change the valid coordinate component arguments from the\n"
" defaults (see above). For example, passing\n"
" ``representation_type='cartesian'`` will make the frame expect\n"
" Cartesian coordinate component arguments (typically, ``x``, ``y``,\n"
" and ``z``).\n"
" copy : bool, optional\n"
" If `True` (default), make copies of the input coordinate arrays.")
ret['lonlat'] = ("lon : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
" The longitude coordinate for this object (``lat`` must also be\n"
" given and ``data`` must be ``None``).\n"
" Not needed if ``data`` is given.\n"
" lat : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\n"
" The latitude coordinate for this object (``lon`` must also be\n"
" given and ``data`` must be ``None``).\n"
" Not needed if ``data`` is given.")
ret['radius'] = ("radius : `~astropy.units.Quantity`, optional\n"
" The radial distance coordinate from Sun center for this object.\n"
" Defaults to the radius of the Sun. Not needed if ``data`` is given.")
ret['distance_sun'] = ("distance : `~astropy.units.Quantity`, optional\n"
" The distance coordinate from Sun center for this object.\n"
" Not needed if ``data`` is given.")
ret['distance_earth'] = ("distance : `~astropy.units.Quantity`, optional\n"
" The distance coordinate from Earth center for this object.\n"
" Not needed if ``data`` is given.")
ret['xyz'] = ("x : `~astropy.units.Quantity`, optional\n"
" X-axis coordinate for this object. Not needed if ``data`` is given.\n"
" y : `~astropy.units.Quantity`, optional\n"
" Y-axis coordinate for this object. Not needed if ``data`` is given.\n"
" z : `~astropy.units.Quantity`, optional\n"
" Z-axis coordinate for this object. Not needed if ``data`` is given.")
ret['observer'] = ("observer : `~sunpy.coordinates.frames.HeliographicStonyhurst`, str\n"
" The location of the observer. If a string is provided,\n"
" it must be a solar system body that can be parsed by\n"
" `~sunpy.coordinates.ephemeris.get_body_heliographic_stonyhurst`\n"
" at the time ``obstime``. Defaults to Earth center.")
ret['rsun'] = ("rsun : `~astropy.units.Quantity`\n"
" The radius of the Sun in length units. Used to convert a 2D\n"
" coordinate (i.e., no ``radius`` component) to a 3D coordinate by\n"
" assuming that the coordinate is on the surface of the Sun. Defaults\n"
" to the photospheric radius as defined in `sunpy.sun.constants`.")
ret['equinox'] = (f"equinox : {_variables_for_parse_time_docstring()['parse_time_types']}\n"
" The date for the mean vernal equinox.\n"
" Defaults to the J2000.0 equinox.")
return ret
class SunPyBaseCoordinateFrame(BaseCoordinateFrame):
"""
Base class for sunpy coordinate frames.
This class is not intended to be used directly and has no transformations defined.
* Defines the frame attribute ``obstime`` for observation time.
* Defines a default wrap angle of 180 degrees for longitude in spherical coordinates,
which can be overridden via the class variable ``_wrap_angle``.
    * Injects a nice way of representing the object which the coordinate represents.
"""
obstime = TimeFrameAttributeSunPy()
default_representation = SphericalRepresentation
default_differential = SphericalDifferential
frame_specific_representation_info = {
SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
}
_wrap_angle = 180*u.deg # for longitude in spherical coordinates
def __init__(self, *args, **kwargs):
self.object_name = None
# If wrap_longitude=False is passed in, do not impose a specific wrap angle for the frame
if not kwargs.pop('wrap_longitude', True):
self._wrap_angle = None
super().__init__(*args, **kwargs)
# If obstime is specified, treat the default observer (None) as explicitly set
if self.obstime is not None and self.is_frame_attr_default('observer'):
self._attr_names_with_defaults.remove('observer')
return
def represent_as(self, base, s='base', in_frame_units=False):
data = super().represent_as(base, s, in_frame_units=in_frame_units)
# If a frame wrap angle is set, use that wrap angle for any spherical representations.
if self._wrap_angle is not None and \
isinstance(data, (UnitSphericalRepresentation, SphericalRepresentation)):
data.lon.wrap_angle = self._wrap_angle
return data
def __str__(self):
# We override this here so that when you print a SkyCoord it shows the
# observer as the string and not the whole massive coordinate.
if getattr(self, "object_name", None):
return f"<{self.__class__.__name__} Coordinate for '{self.object_name}'>"
else:
return super().__str__()
@property
def _is_2d(self):
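        # The coordinate counts as 2D when its data is a dimensionless unit vector
        # (e.g. a `UnitSphericalRepresentation`), i.e. it carries no physical distance.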
return (self._data is not None and self._data.norm().unit is u.one
and u.allclose(self._data.norm(), 1*u.one))
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005
cls._fix_property_docstrings()
@classmethod
def _fix_property_docstrings(cls):
# This class method adds docstrings to properties dynamically created by
# BaseCoordinateFrame.__init_subclass__(). Accordingly, this method needs to itself be
# called from SunPyBaseCoordinateFrame.__init_subclass__() to work for our subclasses.
property_docstrings = {
'default_representation': "Default representation for position data",
'default_differential': "Default representation for differential data",
'frame_specific_representation_info': "Mapping for frame-specific component names",
}
for prop, docstring in property_docstrings.items():
if getattr(cls, prop).__doc__ is None:
setattr(getattr(cls, prop), '__doc__', docstring)
# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005
SunPyBaseCoordinateFrame._fix_property_docstrings()
class BaseHeliographic(SunPyBaseCoordinateFrame):
"""
Base class for HeliographicCarrington (HGC) and HeliographicStonyhurst (HGS) frames.
This class is not intended to be used directly and has no transformations defined.
"""
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping('lon', 'lon', u.deg),
RepresentationMapping('lat', 'lat', u.deg),
RepresentationMapping('distance', 'radius', None)],
SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_radius', u.km/u.s)],
}
rsun = QuantityAttribute(default=_RSUN, unit=u.km)
def make_3d(self):
"""
Returns a fully 3D coordinate based on this coordinate.
If this coordinate is only 2D (i.e., no ``radius`` component) or is a
unit vector (i.e., the norm of the coordinate is unity), a new
coordinate is created that corresponds to the surface of the Sun.
That is, the 3D coordinate will retain the ``lon`` and ``lat``, and
``radius`` will be set to the frame's ``rsun`` frame attribute.
If this coordinate is already fully 3D, it is directly returned, even
if it does not lie on the surface of the Sun.
Returns
-------
frame : `~sunpy.coordinates.frames.BaseHeliographic`
The fully 3D coordinate
"""
if self._is_2d:
return self.realize_frame(self._data * self.rsun)
# The coordinate is already 3D
return self
@add_common_docstring(**_frame_parameters())
class HeliographicStonyhurst(BaseHeliographic):
"""
A coordinate or frame in the Stonyhurst Heliographic (HGS) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the projection of
the Sun-Earth line onto the Sun's equatorial plane.
    This system is also known as the Heliocentric Earth Equatorial (HEEQ) system when
represented using Cartesian components.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``representation_type`` must be
keyword arguments)::
HeliographicStonyhurst(lon, lat, obstime=obstime)
HeliographicStonyhurst(lon, lat, radius, obstime=obstime)
HeliographicStonyhurst(x, y, z, representation_type='cartesian', obstime=obstime)
Parameters
----------
{data}
{lonlat}
{radius}
{rsun}
{common}
Examples
--------
    >>> from astropy.coordinates import SkyCoord, CartesianRepresentation
    >>> from sunpy.coordinates import HeliographicStonyhurst
>>> import astropy.units as u
>>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km,
... frame="heliographic_stonyhurst",
... obstime="2010/01/01T00:00:45")
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(1., 1., 2.)>
>>> sc.frame
<HeliographicStonyhurst Coordinate (obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(1., 1., 2.)>
>>> sc = SkyCoord(HeliographicStonyhurst(-10*u.deg, 2*u.deg))
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=None, rsun=695700.0 km): (lon, lat) in deg
(-10., 2.)>
>>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50",
... frame="heliographic_stonyhurst")
>>> sc
<SkyCoord (HeliographicStonyhurst: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)
(90., 2.54480438, 45.04442252)>
"""
name = "heliographic_stonyhurst"
@add_common_docstring(**_frame_parameters())
class HeliographicCarrington(BaseHeliographic):
"""
A coordinate or frame in the Carrington Heliographic (HGC) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis and Y-axis rotate with a period of 25.38 days.
This system differs from Stonyhurst Heliographic (HGS) in its definition of longitude. This
longitude is an "apparent" longitude because it takes into account the time it takes for light
to travel from the Sun's surface to the observer. Thus, the observer needs to be specified to
be able to transform to any other coordinate frame.
A new instance can be created using the following signatures
    (note that if supplied, ``obstime`` and ``observer`` must be keyword arguments)::
HeliographicCarrington(lon, lat, obstime=obstime, observer=observer)
HeliographicCarrington(lon, lat, radius, obstime=obstime, observer=observer)
If you want to define the location in HGC such that the observer for the coordinate frame is
the same as that location (e.g., the location of an observatory in its corresponding HGC
frame), use ``observer='self'``::
HeliographicCarrington(lon, lat, radius, obstime=obstime, observer='self')
Parameters
----------
{data}
{lonlat}
{radius}
{observer}
{rsun}
{common}
Examples
--------
    >>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km,
... frame="heliographic_carrington",
... observer="earth",
... obstime="2010/01/01T00:00:30")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:30.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (lon, lat, radius) in (deg, deg, km)
(1., 2., 3.)>
>>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,
... obstime="2010/01/01T00:00:45",
... observer="self",
... frame="heliographic_carrington")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km, observer=self): (lon, lat, radius) in (deg, deg, km)
[(1., 4., 5.), (2., 5., 6.), (3., 6., 7.)]>
>>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50",
... frame="heliographic_carrington")
>>> sc
<SkyCoord (HeliographicCarrington: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=None): (lon, lat, radius) in (deg, deg, km)
(90., 2.54480438, 45.04442252)>
"""
name = "heliographic_carrington"
_wrap_angle = 360*u.deg
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
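        # With observer='self', the coordinate itself acts as the observer, so its full 3D
        # position (including the radial distance from Sun center) must be known; a 2D
        # coordinate is therefore rejected below.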
if not isinstance(self.observer, BaseCoordinateFrame) and self.observer == 'self' and self._is_2d:
raise ValueError("Full 3D coordinate (including radius) must be specified "
"when observer='self'.")
@add_common_docstring(**_frame_parameters())
class Heliocentric(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric system, which is observer-based.
- The origin is the center of the Sun.
- The Z-axis is aligned with the Sun-observer line.
- The Y-axis is aligned with the component of the vector to the Sun's north pole that is
perpendicular to the Z-axis.
This frame defaults to a Cartesian component representation, which is known as Heliocentric
    Cartesian (HCC). This frame can also be represented using cylindrical components, where
    ``rho`` is the impact parameter and ``psi`` is the position angle.
``psi`` is measured relative to the west limb, rather than solar north, so is shifted
by 90 degrees compared to the convention of the Heliocentric Radial (HCR) system.
A new instance can be created using the following signatures
(note that if supplied, ``obstime``, ``observer``, and ``representation_type`` must be
keyword arguments)::
Heliocentric(x, y, z, obstime=obstime, observer=observer)
Heliocentric(rho, psi, z, representation_type='cylindrical', obstime=obstime, observer=observer)
Parameters
----------
{data}
{xyz}
{observer}
{common}
Examples
--------
    >>> from astropy.coordinates import SkyCoord, CartesianRepresentation, CylindricalRepresentation
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
(10., 1., 2.)>
>>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm,
... obstime="2011/01/01T00:00:54", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-01T00:00:54.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in (km, m, cm)
[(1., 3., 5.), (2., 4., 6.)]>
>>> sc = SkyCoord(CylindricalRepresentation(10*u.km, 60*u.deg, 10*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="heliocentric")
>>> sc
<SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
(5., 8.66025404, 10.)>
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
frame_specific_representation_info = {
CylindricalRepresentation: [RepresentationMapping('phi', 'psi', u.deg)]
}
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
def represent_as(self, base, s='base', in_frame_units=False):
data = super().represent_as(base, s, in_frame_units=in_frame_units)
# For cylindrical representations, wrap the `psi` component (natively `phi`) at 360 deg
if isinstance(data, CylindricalRepresentation):
data.phi.wrap_at(360*u.deg, inplace=True)
return data
@add_common_docstring(**_frame_parameters())
class Helioprojective(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Helioprojective Cartesian (HPC) system, which is observer-based.
- The origin is the location of the observer.
- ``Tx`` (aka "theta_x") is the angle relative to the plane containing the Sun-observer line
and the Sun's rotation axis, with positive values in the direction of the Sun's west limb.
- ``Ty`` (aka "theta_y") is the angle relative to the Sun's equatorial plane, with positive
values in the direction of the Sun's north pole.
- ``distance`` is the Sun-observer distance.
This system is frequently used in a projective form without ``distance`` specified. For
observations looking very close to the center of the Sun, where the small-angle approximation
is appropriate, ``Tx`` and ``Ty`` can be approximated as Cartesian components.
A new instance can be created using the following signatures
(note that if supplied, ``obstime`` and ``observer`` must be keyword arguments)::
Helioprojective(Tx, Ty, obstime=obstime, observer=observer)
Helioprojective(Tx, Ty, distance, obstime=obstime, observer=observer)
Parameters
----------
{data}
Tx : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
The theta_x coordinate for this object. Not needed if ``data`` is given.
Ty : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
The theta_y coordinate for this object. Not needed if ``data`` is given.
distance : `~astropy.units.Quantity`
The distance coordinate from the observer for this object.
Not needed if ``data`` is given.
{observer}
{rsun}
{common}
Examples
--------
    >>> from astropy.coordinates import SkyCoord, CartesianRepresentation
>>> import sunpy.coordinates
>>> import astropy.units as u
>>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km,
... obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, km)
(0., 0., 5.)>
>>> sc = SkyCoord(0*u.deg, 0*u.deg,
... obstime="2010/01/01T00:00:00", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty) in arcsec
(0., 0.)>
>>> sc = SkyCoord(CartesianRepresentation(1*u.AU, 1e5*u.km, -2e5*u.km),
... obstime="2011/01/05T00:00:50", observer="earth", frame="helioprojective")
>>> sc
<SkyCoord (Helioprojective: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
(137.87948623, -275.75878762, 1.00000112)>
"""
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec),
RepresentationMapping('distance', 'distance', None)],
SphericalDifferential: [RepresentationMapping('d_lon', 'd_Tx', u.arcsec/u.s),
RepresentationMapping('d_lat', 'd_Ty', u.arcsec/u.s),
RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],
UnitSphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec)],
}
rsun = QuantityAttribute(default=_RSUN, unit=u.km)
observer = ObserverCoordinateAttribute(HeliographicStonyhurst)
@property
def angular_radius(self):
"""
Angular radius of the Sun as seen by the observer.
The ``rsun`` frame attribute is the radius of the Sun in length units.
The tangent vector from the observer to the edge of the Sun forms a
right-angle triangle with the radius of the Sun as the far side and the
Sun-observer distance as the hypotenuse. Thus, the sine of the angular
        radius of the Sun is the ratio of these two distances.
"""
from sunpy.coordinates.sun import _angular_radius # avoiding a circular import
if not isinstance(self.observer, HeliographicStonyhurst):
if self.observer is None:
raise ValueError("The observer must be defined, not `None`.")
raise ValueError("The observer must be fully defined by specifying `obstime`.")
return _angular_radius(self.rsun, self.observer.radius)
def make_3d(self):
"""
This method calculates the third coordinate of the Helioprojective
frame. It assumes that the coordinate point is on the surface of the Sun.
If a point in the frame is off limb then NaN will be returned.
Returns
-------
new_frame : `~sunpy.coordinates.frames.Helioprojective`
A new frame instance with all the attributes of the original but
now with a third coordinate.
"""
# Skip if we already are 3D
if not self._is_2d:
return self
if not isinstance(self.observer, BaseCoordinateFrame):
raise ConvertError("Cannot calculate distance to the Sun "
f"for observer '{self.observer}' "
"without `obstime` being specified.")
rep = self.represent_as(UnitSphericalRepresentation)
lat, lon = rep.lat, rep.lon
# Check for the use of floats with lower precision than the native Python float
if not set([lon.dtype.type, lat.dtype.type]).issubset([float, np.float64, np.longdouble]):
raise SunpyUserWarning("The Helioprojective component values appear to be lower "
"precision than the native Python float: "
f"Tx is {lon.dtype.name}, and Ty is {lat.dtype.name}. "
"To minimize precision loss, you may want to cast the values to "
"`float` or `numpy.float64` via the NumPy method `.astype()`.")
# Calculate the distance to the surface of the Sun using the law of cosines
cos_alpha = np.cos(lat) * np.cos(lon)
c = self.observer.radius**2 - self.rsun**2
b = -2 * self.observer.radius * cos_alpha
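        # These are the coefficients of d**2 + b*d + c = 0 (the law of cosines with a unit
        # leading coefficient); the smaller ("near") root taken below is the line-of-sight
        # intersection with the solar sphere that is closer to the observer.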
# Ignore sqrt of NaNs
with np.errstate(invalid='ignore'):
d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2 # use the "near" solution
if self._spherical_screen:
sphere_center = self._spherical_screen['center'].transform_to(self).cartesian
c = sphere_center.norm()**2 - self._spherical_screen['radius']**2
b = -2 * sphere_center.dot(rep)
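            # Same quadratic form, now intersecting the line of sight with the screen sphere;
            # the larger ("far") root is taken so that the intersection in front of the
            # observer is selected.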
# Ignore sqrt of NaNs
with np.errstate(invalid='ignore'):
dd = ((-1*b) + np.sqrt(b**2 - 4*c)) / 2 # use the "far" solution
d = np.fmin(d, dd) if self._spherical_screen['only_off_disk'] else dd
return self.realize_frame(SphericalRepresentation(lon=lon,
lat=lat,
distance=d))
_spherical_screen = None
@classmethod
@contextmanager
def assume_spherical_screen(cls, center, only_off_disk=False):
"""
Context manager to interpret 2D coordinates as being on the inside of a spherical screen.
The radius of the screen is the distance between the specified ``center`` and Sun center.
This ``center`` does not have to be the same as the observer location for the coordinate
frame. If they are the same, then this context manager is equivalent to assuming that the
helioprojective "zeta" component is zero.
This replaces the default assumption where 2D coordinates are mapped onto the surface of the
Sun.
Parameters
----------
center : `~astropy.coordinates.SkyCoord`
The center of the spherical screen
only_off_disk : `bool`, optional
If `True`, apply this assumption only to off-disk coordinates, with on-disk coordinates
still mapped onto the surface of the Sun. Defaults to `False`.
Examples
--------
.. minigallery:: sunpy.coordinates.Helioprojective.assume_spherical_screen
>>> import astropy.units as u
>>> from sunpy.coordinates import Helioprojective
>>> h = Helioprojective(range(7)*u.arcsec*319, [0]*7*u.arcsec,
... observer='earth', obstime='2020-04-08')
>>> print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 0.99660825), ( 319., 0., 0.99687244),
( 638., 0., 0.99778472), ( 957., 0., 1.00103285),
(1276., 0., nan), (1595., 0., nan),
(1914., 0., nan)]>
>>> with Helioprojective.assume_spherical_screen(h.observer):
... print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 1.00125872), ( 319., 0., 1.00125872),
( 638., 0., 1.00125872), ( 957., 0., 1.00125872),
(1276., 0., 1.00125872), (1595., 0., 1.00125872),
(1914., 0., 1.00125872)]>
>>> with Helioprojective.assume_spherical_screen(h.observer, only_off_disk=True):
... print(h.make_3d())
<Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)
[( 0., 0., 0.99660825), ( 319., 0., 0.99687244),
( 638., 0., 0.99778472), ( 957., 0., 1.00103285),
(1276., 0., 1.00125872), (1595., 0., 1.00125872),
(1914., 0., 1.00125872)]>
"""
try:
old_spherical_screen = cls._spherical_screen # nominally None
center_hgs = center.transform_to(HeliographicStonyhurst(obstime=center.obstime))
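            # The screen radius is the distance between ``center`` and Sun center, i.e. the
            # ``radius`` component of the center's Heliographic Stonyhurst representation.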
cls._spherical_screen = {
'center': center,
'radius': center_hgs.radius,
'only_off_disk': only_off_disk
}
yield
finally:
cls._spherical_screen = old_spherical_screen
@add_common_docstring(**_frame_parameters())
class HeliocentricEarthEcliptic(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric Earth Ecliptic (HEE) system.
- The origin is the center of the Sun.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Sun-Earth line.
- The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
of the mean ecliptic pole at the observation time.
Parameters
----------
{data}
{lonlat}
{distance_sun}
{common}
"""
@add_common_docstring(**_frame_parameters())
class GeocentricSolarEcliptic(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Geocentric Solar Ecliptic (GSE) system.
- The origin is the center of the Earth.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Earth-Sun line.
- The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis
of the mean ecliptic pole at the observation time.
Parameters
----------
{data}
{lonlat}
{distance_earth}
{common}
Notes
-----
Aberration due to Earth motion is not included.
"""
@add_common_docstring(**_frame_parameters())
class HeliocentricInertial(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric Inertial (HCI) system.
- The origin is the center of the Sun.
- The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the solar ascending
node on the ecliptic (mean J2000.0).
Parameters
----------
{data}
{lonlat}
{distance_sun}
{common}
Notes
-----
The solar ascending node on the ecliptic lies on the intersection of the solar equatorial
plane with the ecliptic plane, not on the intersection of the celestial equatorial plane with
the ecliptic plane.
"""
@add_common_docstring(**_frame_parameters())
class GeocentricEarthEquatorial(SunPyBaseCoordinateFrame):
"""
A coordinate or frame in the Geocentric Earth Equatorial (GEI) system.
- The origin is the center of the Earth.
- The Z-axis (+90 degrees latitude) is aligned with the Earth's north pole.
- The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the mean (not true)
vernal equinox.
Parameters
----------
{data}
{lonlat}
{distance_earth}
{equinox}
{common}
Notes
-----
Aberration due to Earth motion is not included.
"""
equinox = TimeFrameAttributeSunPy(default=_J2000)
|
import os.path as osp
from copy import deepcopy
from datetime import datetime
import ignite.distributed as idist
import mmcv
from functools import partial
from ignite.contrib.handlers import ProgressBar
from ignite.contrib.metrics import ROC_AUC, AveragePrecision
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine, param_scheduler
from ignite.metrics import Accuracy
from ignite.utils import manual_seed, setup_logger
from mmcv import Config
from mmcv.runner import build_optimizer
from torch.utils.data import WeightedRandomSampler
from ..classifiers import build_classifier
from ..datasets import build_dataset
from ..losses import build_loss
from .eval_hooks import MetricsTextLogger
from .step_fn import get_eval_step_fn, get_train_step_fn
from .train_hooks import TrainStatsTextLogger
from .utils import logits_transform, prob_transform
def train_classifier(local_rank: int, cfg: Config) -> None:
rank = idist.get_rank()
manual_seed(cfg.get('seed', 2022) + rank)
device = idist.device()
logger = setup_logger(
        'imba-explain', filepath=osp.join(cfg.work_dir, f"{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"))
env_info_dict = mmcv.collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
logger.info(f'Config:\n{cfg.pretty_text}')
train_set = build_dataset(cfg.data['train'])
val_set = build_dataset(cfg.data['val'])
logger.info(f'Training set size: {len(train_set)} samples. Validation set size: {len(val_set)} samples.')
data_loader_cfg = deepcopy(cfg.data['data_loader'])
use_weighted_sampler = data_loader_cfg.pop('weighted_sampler', False)
if use_weighted_sampler:
if not hasattr(train_set, 'imba_sampling_weights'):
raise ValueError('The training dataset class must implement the method imba_sampling_weights, '
'when weighted_sampler in data_loader config is True.')
weights = train_set.imba_sampling_weights()
sampler = WeightedRandomSampler(weights, num_samples=len(train_set))
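        # PyTorch's DataLoader does not allow a custom sampler together with shuffle=True,
        # so shuffling is disabled when the weighted sampler is used.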
data_loader_cfg.update({'sampler': sampler, 'shuffle': False})
train_loader = idist.auto_dataloader(train_set, **data_loader_cfg)
data_loader_cfg.update({'shuffle': False, 'sampler': None})
val_loader = idist.auto_dataloader(val_set, **data_loader_cfg)
epoch_length = len(train_loader)
classifier = build_classifier(cfg.classifier)
classifier.to(device)
classifier = idist.auto_model(classifier, sync_bn=cfg.get('sync_bn', False))
# build trainer
optimizer = build_optimizer(classifier, cfg.optimizer)
criterion = build_loss(cfg.loss)
# let the loss function receive the data distribution information
if hasattr(criterion, 'receive_data_dist_info'):
criterion.receive_data_dist_info(train_set.get_num_pos_neg())
criterion.to(device)
try:
has_parameter = next(criterion.parameters()) is not None
except StopIteration:
has_parameter = False
if has_parameter:
        # in the case where the loss function contains learnable parameters, wrap it with DDP as well
criterion = idist.auto_model(criterion)
# when the loss function has learnable parameters, add them to the optimizer's parameter group
optimizer.add_param_group({'params': criterion.parameters()})
optimizer = idist.auto_optim(optimizer)
trainer = Engine(get_train_step_fn(classifier, criterion, optimizer, device))
trainer.logger = logger
    # build evaluator
eval_step_fn = get_eval_step_fn(classifier, device)
evaluator = Engine(eval_step_fn)
evaluator.logger = logger
# evaluator handlers
pbar = ProgressBar(persist=True)
pbar.attach(evaluator)
# pred_select_inds is only used in cross-dataset test
_prob_transform = partial(prob_transform, pred_select_inds=None, target_select_inds=None)
_logits_transform = partial(logits_transform, pred_select_inds=None, target_select_inds=None)
val_metrics = {
'accuracy': Accuracy(output_transform=_prob_transform, device=device, **cfg.class_metrics['accuracy']),
'roc_auc': ROC_AUC(output_transform=_logits_transform, device=device, **cfg.class_metrics['roc_auc']),
'ap': AveragePrecision(output_transform=_logits_transform, device=device, **cfg.class_metrics['ap'])
}
for name, metric in val_metrics.items():
metric.attach(engine=evaluator, name=name)
metrics_logger = MetricsTextLogger(logger=logger)
metrics_logger.attach(evaluator, trainer)
# trainer handlers
def run_validation(engine_train: Engine, engine_val: Engine) -> None:
engine_val.run(val_loader)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=cfg.val_interval), run_validation, evaluator)
if cfg.cosine_annealing:
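        # Learning-rate schedule: a 1000-iteration linear warmup from 0.01 * lr up to lr,
        # followed by cosine annealing from lr back down to 0.01 * lr, with the cosine cycle
        # sized to match the full training run (max_epochs * iterations per epoch).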
cycle_size = cfg.max_epochs * epoch_length
lr = cfg.optimizer['lr']
lr_scheduler = param_scheduler.CosineAnnealingScheduler(
optimizer=optimizer, param_name='lr', start_value=lr, end_value=lr * 0.01, cycle_size=cycle_size)
lr_scheduler = param_scheduler.create_lr_scheduler_with_warmup(
lr_scheduler, warmup_start_value=0.01 * lr, warmup_duration=1000, warmup_end_value=lr)
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
to_save = {'classifier': classifier}
    save_handler = DiskSaver(osp.join(cfg.work_dir, 'ckpts'), require_empty=False)
score_fn = Checkpoint.get_default_score_fn('roc_auc')
ckpt_handler = Checkpoint(
to_save,
save_handler,
n_saved=cfg.get('n_saved', 1),
score_name='roc_auc',
score_function=score_fn,
global_step_transform=global_step_from_engine(trainer, Events.EPOCH_COMPLETED),
greater_or_equal=True)
evaluator.add_event_handler(Events.COMPLETED, ckpt_handler)
train_stats_logger = TrainStatsTextLogger(interval=cfg.log_interval, logger=logger)
train_stats_logger.attach(trainer, optimizer)
trainer.run(data=train_loader, max_epochs=cfg.max_epochs)
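# A minimal launch sketch (an assumption, not part of this module): the ``local_rank`` first
# argument matches the calling convention of ``ignite.distributed.Parallel.run``, so a launcher
# script could look roughly like the following (backend, process count, and config path are
# hypothetical):
#
#     import ignite.distributed as idist
#     from mmcv import Config
#
#     cfg = Config.fromfile('configs/classifier_config.py')
#     cfg.work_dir = 'work_dirs/classifier'
#     with idist.Parallel(backend='nccl', nproc_per_node=2) as parallel:
#         parallel.run(train_classifier, cfg)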
|
"""Test Home Assistant template helper methods."""
from datetime import datetime
import math
import random
import pytest
import pytz
from homeassistant.components import group
from homeassistant.const import (
LENGTH_METERS,
MASS_GRAMS,
MATCH_ALL,
PRESSURE_PA,
TEMP_CELSIUS,
VOLUME_LITERS,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import UnitSystem
from tests.async_mock import patch
def _set_up_units(hass):
"""Set up the tests."""
hass.config.units = UnitSystem(
"custom", TEMP_CELSIUS, LENGTH_METERS, VOLUME_LITERS, MASS_GRAMS, PRESSURE_PA
)
def render_to_info(hass, template_str, variables=None):
"""Create render info from template."""
tmp = template.Template(template_str, hass)
return tmp.async_render_to_info(variables)
def extract_entities(hass, template_str, variables=None):
"""Extract entities from a template."""
info = render_to_info(hass, template_str, variables)
# pylint: disable=protected-access
assert not hasattr(info, "_domains")
return info._entities
def assert_result_info(info, result, entities=None, domains=None, all_states=False):
"""Check result info."""
assert info.result == result
# pylint: disable=protected-access
assert info._all_states == all_states
assert info.filter_lifecycle("invalid_entity_name.somewhere") == all_states
if entities is not None:
assert info._entities == frozenset(entities)
assert all([info.filter(entity) for entity in entities])
assert not info.filter("invalid_entity_name.somewhere")
else:
assert not info._entities
if domains is not None:
assert info._domains == frozenset(domains)
assert all([info.filter_lifecycle(domain + ".entity") for domain in domains])
else:
assert not hasattr(info, "_domains")
def test_template_equality():
"""Test template comparison and hashing."""
template_one = template.Template("{{ template_one }}")
template_one_1 = template.Template("{{ template_one }}")
template_two = template.Template("{{ template_two }}")
assert template_one == template_one_1
assert template_one != template_two
assert hash(template_one) == hash(template_one_1)
assert hash(template_one) != hash(template_two)
assert str(template_one_1) == 'Template("{{ template_one }}")'
with pytest.raises(TypeError):
template.Template(["{{ template_one }}"])
def test_invalid_template(hass):
"""Invalid template raises error."""
tmpl = template.Template("{{", hass)
with pytest.raises(TemplateError):
tmpl.ensure_valid()
with pytest.raises(TemplateError):
tmpl.async_render()
info = tmpl.async_render_to_info()
with pytest.raises(TemplateError):
assert info.result == "impossible"
tmpl = template.Template("{{states(keyword)}}", hass)
tmpl.ensure_valid()
with pytest.raises(TemplateError):
tmpl.async_render()
def test_referring_states_by_entity_id(hass):
"""Test referring states by entity id."""
hass.states.async_set("test.object", "happy")
assert (
template.Template("{{ states.test.object.state }}", hass).async_render()
== "happy"
)
assert (
template.Template('{{ states["test.object"].state }}', hass).async_render()
== "happy"
)
assert (
template.Template('{{ states("test.object") }}', hass).async_render() == "happy"
)
def test_invalid_entity_id(hass):
"""Test referring states by entity id."""
with pytest.raises(TemplateError):
template.Template('{{ states["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states.test["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states["invalid/domain"] }}', hass).async_render()
def test_raise_exception_on_error(hass):
"""Test raising an exception on error."""
with pytest.raises(TemplateError):
template.Template("{{ invalid_syntax").ensure_valid()
def test_iterating_all_states(hass):
"""Test iterating all states."""
tmpl_str = "{% for state in states %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", all_states=True)
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info, "10happy", entities=["test.object", "sensor.temperature"], all_states=True
)
def test_iterating_domain_states(hass):
"""Test iterating domain states."""
tmpl_str = "{% for state in states.sensor %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.back_door", "open")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info,
"open10",
entities=["sensor.back_door", "sensor.temperature"],
domains=["sensor"],
)
def test_float(hass):
"""Test float."""
hass.states.async_set("sensor.temperature", "12")
assert (
template.Template(
"{{ float(states.sensor.temperature.state) }}", hass
).async_render()
== "12.0"
)
assert (
template.Template(
"{{ float(states.sensor.temperature.state) > 11 }}", hass
).async_render()
== "True"
)
assert (
template.Template("{{ float('forgiving') }}", hass).async_render()
== "forgiving"
)
def test_rounding_value(hass):
"""Test rounding value."""
hass.states.async_set("sensor.temperature", 12.78)
assert (
template.Template(
"{{ states.sensor.temperature.state | round(1) }}", hass
).async_render()
== "12.8"
)
assert (
template.Template(
"{{ states.sensor.temperature.state | multiply(10) | round }}", hass
).async_render()
== "128"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "floor") }}', hass
).async_render()
== "12.7"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "ceil") }}', hass
).async_render()
== "12.8"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "half") }}', hass
).async_render()
== "13.0"
)
def test_rounding_value_get_original_value_on_error(hass):
"""Test rounding value get original value on error."""
assert template.Template("{{ None | round }}", hass).async_render() == "None"
assert (
template.Template('{{ "no_number" | round }}', hass).async_render()
== "no_number"
)
def test_multiply(hass):
"""Test multiply."""
tests = {None: "None", 10: "100", '"abcd"': "abcd"}
for inp, out in tests.items():
assert (
template.Template(
"{{ %s | multiply(10) | round }}" % inp, hass
).async_render()
== out
)
def test_logarithm(hass):
"""Test logarithm."""
tests = [
(4, 2, "2.0"),
(1000, 10, "3.0"),
(math.e, "", "1.0"),
('"invalid"', "_", "invalid"),
(10, '"invalid"', "10.0"),
]
for value, base, expected in tests:
assert (
template.Template(
f"{{{{ {value} | log({base}) | round(1) }}}}", hass
).async_render()
== expected
)
assert (
template.Template(
f"{{{{ log({value}, {base}) | round(1) }}}}", hass
).async_render()
== expected
)
def test_sine(hass):
"""Test sine."""
tests = [
(0, "0.0"),
(math.pi / 2, "1.0"),
(math.pi, "0.0"),
(math.pi * 1.5, "-1.0"),
(math.pi / 10, "0.309"),
('"duck"', "duck"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sin | round(3) }}" % value, hass).async_render()
== expected
)
def test_cos(hass):
"""Test cosine."""
tests = [
(0, "1.0"),
(math.pi / 2, "0.0"),
(math.pi, "-1.0"),
(math.pi * 1.5, "-0.0"),
(math.pi / 10, "0.951"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | cos | round(3) }}" % value, hass).async_render()
== expected
)
def test_tan(hass):
"""Test tangent."""
tests = [
(0, "0.0"),
(math.pi, "-0.0"),
(math.pi / 180 * 45, "1.0"),
(math.pi / 180 * 90, "1.633123935319537e+16"),
(math.pi / 180 * 135, "-1.0"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | tan | round(3) }}" % value, hass).async_render()
== expected
)
def test_sqrt(hass):
"""Test square root."""
tests = [
(0, "0.0"),
(1, "1.0"),
(2, "1.414"),
(10, "3.162"),
(100, "10.0"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sqrt | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_sine(hass):
"""Test arcus sine."""
tests = [
(-2.0, "-2.0"), # value error
(-1.0, "-1.571"),
(-0.5, "-0.524"),
(0.0, "0.0"),
(0.5, "0.524"),
(1.0, "1.571"),
(2.0, "2.0"), # value error
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | asin | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_cos(hass):
"""Test arcus cosine."""
tests = [
(-2.0, "-2.0"), # value error
(-1.0, "3.142"),
(-0.5, "2.094"),
(0.0, "1.571"),
(0.5, "1.047"),
(1.0, "0.0"),
(2.0, "2.0"), # value error
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | acos | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_tan(hass):
"""Test arcus tangent."""
tests = [
(-10.0, "-1.471"),
(-2.0, "-1.107"),
(-1.0, "-0.785"),
(-0.5, "-0.464"),
(0.0, "0.0"),
(0.5, "0.464"),
(1.0, "0.785"),
(2.0, "1.107"),
(10.0, "1.471"),
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | atan | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_tan2(hass):
"""Test two parameter version of arcus tangent."""
tests = [
(-10.0, -10.0, "-2.356"),
(-10.0, 0.0, "-1.571"),
(-10.0, 10.0, "-0.785"),
(0.0, -10.0, "3.142"),
(0.0, 0.0, "0.0"),
(0.0, 10.0, "0.0"),
(10.0, -10.0, "2.356"),
(10.0, 0.0, "1.571"),
(10.0, 10.0, "0.785"),
(-4.0, 3.0, "-0.927"),
(-1.0, 2.0, "-0.464"),
(2.0, 1.0, "1.107"),
('"duck"', '"goose"', "('duck', 'goose')"),
]
for y, x, expected in tests:
assert (
template.Template(
f"{{{{ ({y}, {x}) | atan2 | round(3) }}}}", hass
).async_render()
== expected
)
assert (
template.Template(
f"{{{{ atan2({y}, {x}) | round(3) }}}}", hass
).async_render()
== expected
)
def test_strptime(hass):
"""Test the parse timestamp method."""
tests = [
("2016-10-19 15:22:05.588122 UTC", "%Y-%m-%d %H:%M:%S.%f %Z", None),
("2016-10-19 15:22:05.588122+0100", "%Y-%m-%d %H:%M:%S.%f%z", None),
("2016-10-19 15:22:05.588122", "%Y-%m-%d %H:%M:%S.%f", None),
("2016-10-19", "%Y-%m-%d", None),
("2016", "%Y", None),
("15:22:05", "%H:%M:%S", None),
("1469119144", "%Y", "1469119144"),
("invalid", "%Y", "invalid"),
]
for inp, fmt, expected in tests:
if expected is None:
expected = datetime.strptime(inp, fmt)
temp = f"{{{{ strptime("{inp}", "{fmt}') }}}}"
assert template.Template(temp, hass).async_render() == str(expected)
def test_timestamp_custom(hass):
"""Test the timestamps to custom filter."""
now = dt_util.utcnow()
tests = [
(None, None, None, "None"),
(1469119144, None, True, "2016-07-21 16:39:04"),
(1469119144, "%Y", True, "2016"),
(1469119144, "invalid", True, "invalid"),
(dt_util.as_timestamp(now), None, False, now.strftime("%Y-%m-%d %H:%M:%S")),
]
for inp, fmt, local, out in tests:
if fmt:
fil = f"timestamp_custom('{fmt}')"
elif fmt and local:
fil = f"timestamp_custom('{fmt}', {local})"
else:
fil = "timestamp_custom"
assert template.Template(f"{{{{ {inp} | {fil} }}}}", hass).async_render() == out
def test_timestamp_local(hass):
"""Test the timestamps to local filter."""
tests = {None: "None", 1469119144: "2016-07-21 16:39:04"}
for inp, out in tests.items():
assert (
template.Template("{{ %s | timestamp_local }}" % inp, hass).async_render()
== out
)
def test_to_json(hass):
"""Test the object to JSON string filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
expected_result = '{"Foo": "Bar"}'
actual_result = template.Template(
"{{ {'Foo': 'Bar'} | to_json }}", hass
).async_render()
assert actual_result == expected_result
def test_from_json(hass):
"""Test the JSON string to object filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
expected_result = "Bar"
actual_result = template.Template(
'{{ (\'{"Foo": "Bar"}\' | from_json).Foo }}', hass
).async_render()
assert actual_result == expected_result
def test_min(hass):
"""Test the min filter."""
assert template.Template("{{ [1, 2, 3] | min }}", hass).async_render() == "1"
def test_max(hass):
"""Test the max filter."""
assert template.Template("{{ [1, 2, 3] | max }}", hass).async_render() == "3"
def test_ord(hass):
"""Test the ord filter."""
assert template.Template('{{ "d" | ord }}', hass).async_render() == "100"
def test_base64_encode(hass):
"""Test the base64_encode filter."""
assert (
template.Template('{{ "homeassistant" | base64_encode }}', hass).async_render()
== "aG9tZWFzc2lzdGFudA=="
)
def test_base64_decode(hass):
"""Test the base64_decode filter."""
assert (
template.Template(
'{{ "aG9tZWFzc2lzdGFudA==" | base64_decode }}', hass
).async_render()
== "homeassistant"
)
def test_ordinal(hass):
"""Test the ordinal filter."""
tests = [
(1, "1st"),
(2, "2nd"),
(3, "3rd"),
(4, "4th"),
(5, "5th"),
(12, "12th"),
(100, "100th"),
(101, "101st"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | ordinal }}" % value, hass).async_render()
== expected
)
def test_timestamp_utc(hass):
"""Test the timestamps to local filter."""
now = dt_util.utcnow()
tests = {
None: "None",
1469119144: "2016-07-21 16:39:04",
dt_util.as_timestamp(now): now.strftime("%Y-%m-%d %H:%M:%S"),
}
for inp, out in tests.items():
assert (
template.Template("{{ %s | timestamp_utc }}" % inp, hass).async_render()
== out
)
def test_as_timestamp(hass):
"""Test the as_timestamp function."""
assert (
template.Template('{{ as_timestamp("invalid") }}', hass).async_render()
== "None"
)
hass.mock = None
assert (
template.Template("{{ as_timestamp(states.mock) }}", hass).async_render()
== "None"
)
tpl = (
'{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", '
'"%Y-%m-%dT%H:%M:%S%z")) }}'
)
assert template.Template(tpl, hass).async_render() == "1706951424.0"
@patch.object(random, "choice")
def test_random_every_time(test_choice, hass):
"""Ensure the random filter runs every time, not just once."""
tpl = template.Template("{{ [1,2] | random }}", hass)
test_choice.return_value = "foo"
assert tpl.async_render() == "foo"
test_choice.return_value = "bar"
assert tpl.async_render() == "bar"
def test_passing_vars_as_keywords(hass):
"""Test passing variables as keywords."""
assert template.Template("{{ hello }}", hass).async_render(hello=127) == "127"
def test_passing_vars_as_vars(hass):
"""Test passing variables as variables."""
assert template.Template("{{ hello }}", hass).async_render({"hello": 127}) == "127"
def test_passing_vars_as_list(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello }}", hass), {"hello": ["foo", "bar"]}
)
== "['foo', 'bar']"
)
def test_passing_vars_as_list_element(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello[1] }}", hass), {"hello": ["foo", "bar"]}
)
== "bar"
)
def test_passing_vars_as_dict_element(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello.foo }}", hass), {"hello": {"foo": "bar"}}
)
== "bar"
)
def test_passing_vars_as_dict(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello }}", hass), {"hello": {"foo": "bar"}}
)
== "{'foo': 'bar'}"
)
def test_render_with_possible_json_value_with_valid_json(hass):
"""Render with possible JSON value with valid JSON."""
tpl = template.Template("{{ value_json.hello }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_with_invalid_json(hass):
"""Render with possible JSON value with invalid JSON."""
tpl = template.Template("{{ value_json }}", hass)
assert tpl.async_render_with_possible_json_value("{ I AM NOT JSON }") == ""
def test_render_with_possible_json_value_with_template_error_value(hass):
"""Render with possible JSON value with template error value."""
tpl = template.Template("{{ non_existing.variable }}", hass)
assert tpl.async_render_with_possible_json_value("hello", "-") == "-"
def test_render_with_possible_json_value_with_missing_json_value(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.goodbye }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == ""
def test_render_with_possible_json_value_valid_with_is_defined(hass):
"""Render with possible JSON value with known JSON object."""
tpl = template.Template("{{ value_json.hello|is_defined }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_undefined_json(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
assert (
tpl.async_render_with_possible_json_value('{"hello": "world"}')
== '{"hello": "world"}'
)
def test_render_with_possible_json_value_undefined_json_error_value(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}', "") == ""
def test_render_with_possible_json_value_non_string_value(hass):
"""Render with possible JSON value with non-string value."""
tpl = template.Template(
"""
{{ strptime(value~'+0000', '%Y-%m-%d %H:%M:%S%z') }}
""",
hass,
)
value = datetime(2019, 1, 18, 12, 13, 14)
expected = str(pytz.utc.localize(value))
assert tpl.async_render_with_possible_json_value(value) == expected
def test_if_state_exists(hass):
"""Test if state exists works."""
hass.states.async_set("test.object", "available")
tpl = template.Template(
"{% if states.test.object %}exists{% else %}not exists{% endif %}", hass
)
assert tpl.async_render() == "exists"
def test_is_state(hass):
"""Test is_state method."""
hass.states.async_set("test.object", "available")
tpl = template.Template(
"""
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ is_state("test.noobject", "available") }}
""",
hass,
)
assert tpl.async_render() == "False"
def test_is_state_attr(hass):
"""Test is_state_attr method."""
hass.states.async_set("test.object", "available", {"mode": "on"})
tpl = template.Template(
"""
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ is_state_attr("test.noobject", "mode", "on") }}
""",
hass,
)
assert tpl.async_render() == "False"
def test_state_attr(hass):
"""Test state_attr method."""
hass.states.async_set("test.object", "available", {"mode": "on"})
tpl = template.Template(
"""
{% if state_attr("test.object", "mode") == "on" %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ state_attr("test.noobject", "mode") == None }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_states_function(hass):
"""Test using states as a function."""
hass.states.async_set("test.object", "available")
tpl = template.Template('{{ states("test.object") }}', hass)
assert tpl.async_render() == "available"
tpl2 = template.Template('{{ states("test.object2") }}', hass)
assert tpl2.async_render() == "unknown"
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_now(mock_is_safe, hass):
"""Test now method."""
now = dt_util.now()
with patch("homeassistant.util.dt.now", return_value=now):
assert (
now.isoformat()
== template.Template("{{ now().isoformat() }}", hass).async_render()
)
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_relative_time(mock_is_safe, hass):
"""Test relative_time method."""
now = datetime.strptime("2000-01-01 10:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z")
with patch("homeassistant.util.dt.now", return_value=now):
assert (
"1 hour"
== template.Template(
'{{relative_time(strptime("2000-01-01 09:00:00", "%Y-%m-%d %H:%M:%S"))}}',
hass,
).async_render()
)
assert (
"2 hours"
== template.Template(
'{{relative_time(strptime("2000-01-01 09:00:00 +01:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
)
assert (
"1 hour"
== template.Template(
'{{relative_time(strptime("2000-01-01 03:00:00 -06:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
)
assert (
str(template.strptime("2000-01-01 11:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z"))
== template.Template(
'{{relative_time(strptime("2000-01-01 11:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
)
assert (
"string"
== template.Template('{{relative_time("string")}}', hass,).async_render()
)
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_utcnow(mock_is_safe, hass):
"""Test utcnow method."""
now = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=now):
assert (
now.isoformat()
== template.Template("{{ utcnow().isoformat() }}", hass).async_render()
)
def test_regex_match(hass):
"""Test regex_match method."""
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Home Assistant test' | regex_match('home', True) }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Another Home Assistant test' | regex_match('Home') }}
""",
hass,
)
assert tpl.async_render() == "False"
tpl = template.Template(
"""
{{ ['Home Assistant test'] | regex_match('.*Assist') }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_regex_search(hass):
"""Test regex_search method."""
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Home Assistant test' | regex_search('home', True) }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Another Home Assistant test' | regex_search('Home') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ ['Home Assistant test'] | regex_search('Assist') }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_regex_replace(hass):
"""Test regex_replace method."""
tpl = template.Template(
r"""
{{ 'Hello World' | regex_replace('(Hello\\s)',) }}
""",
hass,
)
assert tpl.async_render() == "World"
tpl = template.Template(
"""
{{ ['Home hinderant test'] | regex_replace('hinder', 'Assist') }}
""",
hass,
)
assert tpl.async_render() == "['Home Assistant test']"
def test_regex_findall_index(hass):
"""Test regex_findall_index method."""
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 0) }}
""",
hass,
)
assert tpl.async_render() == "JFK"
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
tpl = template.Template(
"""
{{ ['JFK', 'LHR'] | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
def test_bitwise_and(hass):
"""Test bitwise_and method."""
tpl = template.Template(
"""
{{ 8 | bitwise_and(8) }}
""",
hass,
)
assert tpl.async_render() == str(8 & 8)
tpl = template.Template(
"""
{{ 10 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == str(10 & 2)
tpl = template.Template(
"""
{{ 8 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == str(8 & 2)
def test_bitwise_or(hass):
"""Test bitwise_or method."""
tpl = template.Template(
"""
{{ 8 | bitwise_or(8) }}
""",
hass,
)
assert tpl.async_render() == str(8 | 8)
tpl = template.Template(
"""
{{ 10 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == str(10 | 2)
tpl = template.Template(
"""
{{ 8 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == str(8 | 2)
def test_distance_function_with_1_state(hass):
"""Test distance function with 1 state."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
tpl = template.Template("{{ distance(states.test.object) | round }}", hass)
assert tpl.async_render() == "187"
def test_distance_function_with_2_states(hass):
"""Test distance function with 2 states."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
"{{ distance(states.test.object, states.test.object_2) | round }}", hass
)
assert tpl.async_render() == "187"
def test_distance_function_with_1_coord(hass):
"""Test distance function with 1 coord."""
_set_up_units(hass)
tpl = template.Template('{{ distance("32.87336", "-117.22943") | round }}', hass)
assert tpl.async_render() == "187"
def test_distance_function_with_2_coords(hass):
"""Test distance function with 2 coords."""
_set_up_units(hass)
assert (
template.Template(
'{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
% (hass.config.latitude, hass.config.longitude),
hass,
).async_render()
== "187"
)
def test_distance_function_with_1_state_1_coord(hass):
"""Test distance function with 1 state 1 coord."""
_set_up_units(hass)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("32.87336", "-117.22943", states.test.object_2) ' "| round }}",
hass,
)
assert tpl.async_render() == "187"
tpl2 = template.Template(
'{{ distance(states.test.object_2, "32.87336", "-117.22943") ' "| round }}",
hass,
)
assert tpl2.async_render() == "187"
def test_distance_function_return_none_if_invalid_state(hass):
"""Test distance function return None if invalid state."""
hass.states.async_set("test.object_2", "happy", {"latitude": 10})
tpl = template.Template("{{ distance(states.test.object_2) | round }}", hass)
assert tpl.async_render() == "None"
def test_distance_function_return_none_if_invalid_coord(hass):
"""Test distance function return None if invalid coord."""
assert (
template.Template('{{ distance("123", "abc") }}', hass).async_render() == "None"
)
assert template.Template('{{ distance("123") }}', hass).async_render() == "None"
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template('{{ distance("123", states.test_object_2) }}', hass)
assert tpl.async_render() == "None"
def test_distance_function_with_2_entity_ids(hass):
"""Test distance function with 2 entity ids."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "test.object_2") | round }}', hass
)
assert tpl.async_render() == "187"
def test_distance_function_with_1_entity_1_coord(hass):
"""Test distance function with 1 entity_id and 1 coord."""
_set_up_units(hass)
hass.states.async_set(
"test.object",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "32.87336", "-117.22943") | round }}', hass
)
assert tpl.async_render() == "187"
def test_closest_function_home_vs_domain(hass):
"""Test closest function home vs domain."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_test_domain.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
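    # Only entities in the requested domain are considered, even though another entity is closer to home.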
assert (
template.Template(
"{{ closest(states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.object"
)
assert (
template.Template(
"{{ (states.test_domain | closest).entity_id }}", hass
).async_render()
== "test_domain.object"
)
def test_closest_function_home_vs_all_states(hass):
"""Test closest function home vs all states."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain_2.and_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
assert (
template.Template("{{ (states | closest).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
async def test_closest_function_home_vs_group_entity_id(hass):
"""Test closest function home vs group entity id."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
async def test_closest_function_home_vs_group_state(hass):
"""Test closest function home vs group state."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
info = render_to_info(hass, "{{ closest(states.group.location_group).entity_id }}")
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
async def test_expand(hass):
"""Test expand function."""
info = render_to_info(hass, "{{ expand('test.object') }}")
assert_result_info(info, "[]", ["test.object"])
info = render_to_info(hass, "{{ expand(56) }}")
assert_result_info(info, "[]")
hass.states.async_set("test.object", "happy")
info = render_to_info(
hass, "{{ expand('test.object') | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", [])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "", ["group.new_group"])
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "", [], ["group"])
await group.Group.async_create_group(hass, "new group", ["test.object"])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", ["group.new_group"], ["group"])
info = render_to_info(
hass,
"{{ expand('group.new_group', 'test.object')"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
info = render_to_info(
hass,
"{{ ['group.new_group', 'test.object'] | expand"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
def test_closest_function_to_coord(hass):
"""Test closest function to coord."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
tpl = template.Template(
'{{ closest("%s", %s, states.test_domain).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
tpl = template.Template(
'{{ (states.test_domain | closest("%s", %s)).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
def test_closest_function_to_entity_id(hass):
"""Test closest function to entity id."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
info = render_to_info(
hass,
"{{ closest(zone, states.test_domain).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
info = render_to_info(
hass,
"{{ ([states.test_domain, 'test_domain.closest_zone'] "
"| closest(zone)).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
def test_closest_function_to_state(hass):
"""Test closest function to state."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
assert (
template.Template(
"{{ closest(states.zone.far_away, states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.closest_zone"
)
def test_closest_function_invalid_state(hass):
"""Test closest function invalid state."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
for state in ("states.zone.non_existing", '"zone.non_existing"'):
assert (
template.Template("{{ closest(%s, states) }}" % state, hass).async_render()
== "None"
)
def test_closest_function_state_with_invalid_location(hass):
"""Test closest function state with invalid location."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{"latitude": "invalid latitude", "longitude": hass.config.longitude + 0.1},
)
assert (
template.Template(
"{{ closest(states.test_domain.closest_home, states) }}", hass
).async_render()
== "None"
)
def test_closest_function_invalid_coordinates(hass):
"""Test closest function invalid coordinates."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
assert (
template.Template(
'{{ closest("invalid", "coord", states) }}', hass
).async_render()
== "None"
)
assert (
template.Template(
'{{ states | closest("invalid", "coord") }}', hass
).async_render()
== "None"
)
def test_closest_function_no_location_states(hass):
"""Test closest function without location states."""
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render() == ""
)
def test_extract_entities_none_exclude_stuff(hass):
"""Test extract entities function with none or exclude stuff."""
assert template.extract_entities(hass, None) == []
assert template.extract_entities(hass, "mdi:water") == []
assert (
template.extract_entities(
hass, "{{ closest(states.zone.far_away, states.test_domain).entity_id }}"
)
== MATCH_ALL
)
assert (
template.extract_entities(hass, '{{ distance("123", states.test_object_2) }}')
== MATCH_ALL
)
def test_extract_entities_no_match_entities(hass):
"""Test extract entities function with none entities stuff."""
assert (
template.extract_entities(
hass, "{{ value_json.tst | timestamp_custom('%Y' True) }}"
)
== MATCH_ALL
)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},d
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
def test_generate_filter_iterators(hass):
"""Test extract entities function with none entities stuff."""
info = render_to_info(
hass,
"""
{% for state in states %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", all_states=True)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
# Don't need the entity because the state is not accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "sensor.test_sensor", domains=["sensor"])
# But we do here because the state gets accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},
{% endfor %}
""",
)
assert_result_info(
info, "sensor.test_sensor=off,", ["sensor.test_sensor"], ["sensor"]
)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.attributes.attr }},
{% endfor %}
""",
)
assert_result_info(
info, "sensor.test_sensor=value,", ["sensor.test_sensor"], ["sensor"]
)
def test_generate_select(hass):
"""Test extract entities function with none entities stuff."""
template_str = """
{{ states.sensor|selectattr("state","equalto","off")
|join(",", attribute="entity_id") }}
"""
tmp = template.Template(template_str, hass)
info = tmp.async_render_to_info()
assert_result_info(info, "", [], ["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
hass.states.async_set("sensor.test_sensor_on", "on")
info = tmp.async_render_to_info()
assert_result_info(
info,
"sensor.test_sensor",
["sensor.test_sensor", "sensor.test_sensor_on"],
["sensor"],
)
async def test_extract_entities_match_entities(hass):
"""Test extract entities function with entities stuff."""
assert (
template.extract_entities(
hass,
"""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% else %}
Hercules is at {{ states('device_tracker.phone_1') }}.
{% endif %}
""",
)
== ["device_tracker.phone_1"]
)
assert (
template.extract_entities(
hass,
"""
{{ as_timestamp(states.binary_sensor.garage_door.last_changed) }}
""",
)
== ["binary_sensor.garage_door"]
)
assert (
template.extract_entities(
hass,
"""
{{ states("binary_sensor.garage_door") }}
""",
)
== ["binary_sensor.garage_door"]
)
hass.states.async_set("device_tracker.phone_2", "not_home", {"battery": 20})
assert (
template.extract_entities(
hass,
"""
{{ is_state_attr('device_tracker.phone_2', 'battery', 40) }}
""",
)
== ["device_tracker.phone_2"]
)
assert sorted(["device_tracker.phone_1", "device_tracker.phone_2"]) == sorted(
template.extract_entities(
hass,
"""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% elif states.device_tracker.phone_2.attributes.battery < 40 %}
Hercules you power goes done!.
{% endif %}
""",
)
)
assert sorted(["sensor.pick_humidity", "sensor.pick_temperature"]) == sorted(
template.extract_entities(
hass,
"""
{{
states.sensor.pick_temperature.state ~ „°C (“ ~
states.sensor.pick_humidity.state ~ „ %“
}}
""",
)
)
assert sorted(
["sensor.luftfeuchtigkeit_mean", "input_number.luftfeuchtigkeit"]
) == sorted(
template.extract_entities(
hass,
"{% if (states('sensor.luftfeuchtigkeit_mean') | int)"
" > (states('input_number.luftfeuchtigkeit') | int +1.5)"
" %}true{% endif %}",
)
)
await group.Group.async_create_group(hass, "empty group", [])
assert ["group.empty_group"] == template.extract_entities(
hass, "{{ expand('group.empty_group') | list | length }}"
)
hass.states.async_set("test_domain.object", "exists")
await group.Group.async_create_group(hass, "expand group", ["test_domain.object"])
assert sorted(["group.expand_group", "test_domain.object"]) == sorted(
template.extract_entities(
hass, "{{ expand('group.expand_group') | list | length }}"
)
)
assert ["test_domain.entity"] == template.Template(
'{{ is_state("test_domain.entity", "on") }}', hass
).extract_entities()
def test_extract_entities_with_variables(hass):
"""Test extract entities function with variables and entities stuff."""
hass.states.async_set("input_boolean.switch", "on")
assert ["input_boolean.switch"] == template.extract_entities(
hass, "{{ is_state('input_boolean.switch', 'off') }}", {}
)
assert ["input_boolean.switch"] == template.extract_entities(
hass,
"{{ is_state(trigger.entity_id, 'off') }}",
{"trigger": {"entity_id": "input_boolean.switch"}},
)
assert MATCH_ALL == template.extract_entities(
hass, "{{ is_state(data, 'off') }}", {"data": "no_state"}
)
assert ["input_boolean.switch"] == template.extract_entities(
hass, "{{ is_state(data, 'off') }}", {"data": "input_boolean.switch"}
)
assert ["input_boolean.switch"] == template.extract_entities(
hass,
"{{ is_state(trigger.entity_id, 'off') }}",
{"trigger": {"entity_id": "input_boolean.switch"}},
)
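    # Entity ids built dynamically inside the template are still tracked by render info.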
hass.states.async_set("media_player.livingroom", "off")
assert {"media_player.livingroom"} == extract_entities(
hass,
"{{ is_state('media_player.' ~ where , 'playing') }}",
{"where": "livingroom"},
)
def test_jinja_namespace(hass):
"""Test Jinja's namespace command can be used."""
test_template = template.Template(
(
"{% set ns = namespace(a_key='') %}"
"{% set ns.a_key = states.sensor.dummy.state %}"
"{{ ns.a_key }}"
),
hass,
)
hass.states.async_set("sensor.dummy", "a value")
assert test_template.async_render() == "a value"
hass.states.async_set("sensor.dummy", "another value")
assert test_template.async_render() == "another value"
def test_state_with_unit(hass):
"""Test the state_with_unit property helper."""
hass.states.async_set("sensor.test", "23", {"unit_of_measurement": "beers"})
hass.states.async_set("sensor.test2", "wow")
tpl = template.Template("{{ states.sensor.test.state_with_unit }}", hass)
assert tpl.async_render() == "23 beers"
tpl = template.Template("{{ states.sensor.test2.state_with_unit }}", hass)
assert tpl.async_render() == "wow"
tpl = template.Template(
"{% for state in states %}{{ state.state_with_unit }} {% endfor %}", hass
)
assert tpl.async_render() == "23 beers wow"
tpl = template.Template("{{ states.sensor.non_existing.state_with_unit }}", hass)
assert tpl.async_render() == ""
def test_length_of_states(hass):
"""Test fetching the length of states."""
hass.states.async_set("sensor.test", "23")
hass.states.async_set("sensor.test2", "wow")
hass.states.async_set("climate.test2", "cooling")
tpl = template.Template("{{ states | length }}", hass)
assert tpl.async_render() == "3"
tpl = template.Template("{{ states.sensor | length }}", hass)
assert tpl.async_render() == "2"
def test_render_complex_handling_non_template_values(hass):
"""Test that we can render non-template fields."""
assert template.render_complex(
{True: 1, False: template.Template("{{ hello }}", hass)}, {"hello": 2}
) == {True: 1, False: "2"}
def test_urlencode(hass):
"""Test the urlencode method."""
tpl = template.Template(
("{% set dict = {'foo': 'x&y', 'bar': 42} %}" "{{ dict | urlencode }}"), hass,
)
assert tpl.async_render() == "foo=x%26y&bar=42"
tpl = template.Template(
("{% set string = 'the quick brown fox = true' %}" "{{ string | urlencode }}"),
hass,
)
assert tpl.async_render() == "the%20quick%20brown%20fox%20%3D%20true"
"""Test Home Assistant template helper methods."""
from datetime import datetime
import math
import random
import pytest
import pytz
from homeassistant.components import group
from homeassistant.const import (
LENGTH_METERS,
MASS_GRAMS,
MATCH_ALL,
PRESSURE_PA,
TEMP_CELSIUS,
VOLUME_LITERS,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import UnitSystem
from tests.async_mock import patch
def _set_up_units(hass):
"""Set up the tests."""
hass.config.units = UnitSystem(
"custom", TEMP_CELSIUS, LENGTH_METERS, VOLUME_LITERS, MASS_GRAMS, PRESSURE_PA
)
def render_to_info(hass, template_str, variables=None):
"""Create render info from template."""
tmp = template.Template(template_str, hass)
return tmp.async_render_to_info(variables)
def extract_entities(hass, template_str, variables=None):
"""Extract entities from a template."""
info = render_to_info(hass, template_str, variables)
# pylint: disable=protected-access
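    # A template that resolves to specific entities must not fall back to domain tracking.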
assert not hasattr(info, "_domains")
return info._entities
def assert_result_info(info, result, entities=None, domains=None, all_states=False):
"""Check result info."""
assert info.result == result
# pylint: disable=protected-access
assert info._all_states == all_states
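    # An entity the template never referenced only passes the lifecycle filter when all states are tracked.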
assert info.filter_lifecycle("invalid_entity_name.somewhere") == all_states
if entities is not None:
assert info._entities == frozenset(entities)
assert all([info.filter(entity) for entity in entities])
assert not info.filter("invalid_entity_name.somewhere")
else:
assert not info._entities
if domains is not None:
assert info._domains == frozenset(domains)
assert all([info.filter_lifecycle(domain + ".entity") for domain in domains])
else:
assert not hasattr(info, "_domains")
def test_template_equality():
"""Test template comparison and hashing."""
template_one = template.Template("{{ template_one }}")
template_one_1 = template.Template("{{ template_one }}")
template_two = template.Template("{{ template_two }}")
assert template_one == template_one_1
assert template_one != template_two
assert hash(template_one) == hash(template_one_1)
assert hash(template_one) != hash(template_two)
assert str(template_one_1) == 'Template("{{ template_one }}")'
with pytest.raises(TypeError):
template.Template(["{{ template_one }}"])
def test_invalid_template(hass):
"""Invalid template raises error."""
tmpl = template.Template("{{", hass)
with pytest.raises(TemplateError):
tmpl.ensure_valid()
with pytest.raises(TemplateError):
tmpl.async_render()
info = tmpl.async_render_to_info()
with pytest.raises(TemplateError):
assert info.result == "impossible"
tmpl = template.Template("{{states(keyword)}}", hass)
tmpl.ensure_valid()
with pytest.raises(TemplateError):
tmpl.async_render()
def test_referring_states_by_entity_id(hass):
"""Test referring states by entity id."""
hass.states.async_set("test.object", "happy")
assert (
template.Template("{{ states.test.object.state }}", hass).async_render()
== "happy"
)
assert (
template.Template('{{ states["test.object"].state }}', hass).async_render()
== "happy"
)
assert (
template.Template('{{ states("test.object") }}', hass).async_render() == "happy"
)
def test_invalid_entity_id(hass):
"""Test referring states by entity id."""
with pytest.raises(TemplateError):
template.Template('{{ states["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states.test["big.fat..."] }}', hass).async_render()
with pytest.raises(TemplateError):
template.Template('{{ states["invalid/domain"] }}', hass).async_render()
def test_raise_exception_on_error(hass):
"""Test raising an exception on error."""
with pytest.raises(TemplateError):
template.Template("{{ invalid_syntax").ensure_valid()
def test_iterating_all_states(hass):
"""Test iterating all states."""
tmpl_str = "{% for state in states %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", all_states=True)
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info, "10happy", entities=["test.object", "sensor.temperature"], all_states=True
)
def test_iterating_domain_states(hass):
"""Test iterating domain states."""
tmpl_str = "{% for state in states.sensor %}{{ state.state }}{% endfor %}"
info = render_to_info(hass, tmpl_str)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("test.object", "happy")
hass.states.async_set("sensor.back_door", "open")
hass.states.async_set("sensor.temperature", 10)
info = render_to_info(hass, tmpl_str)
assert_result_info(
info,
"open10",
entities=["sensor.back_door", "sensor.temperature"],
domains=["sensor"],
)
def test_float(hass):
"""Test float."""
hass.states.async_set("sensor.temperature", "12")
assert (
template.Template(
"{{ float(states.sensor.temperature.state) }}", hass
).async_render()
== "12.0"
)
assert (
template.Template(
"{{ float(states.sensor.temperature.state) > 11 }}", hass
).async_render()
== "True"
)
assert (
template.Template("{{ float('forgiving') }}", hass).async_render()
== "forgiving"
)
def test_rounding_value(hass):
"""Test rounding value."""
hass.states.async_set("sensor.temperature", 12.78)
assert (
template.Template(
"{{ states.sensor.temperature.state | round(1) }}", hass
).async_render()
== "12.8"
)
assert (
template.Template(
"{{ states.sensor.temperature.state | multiply(10) | round }}", hass
).async_render()
== "128"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "floor") }}', hass
).async_render()
== "12.7"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "ceil") }}', hass
).async_render()
== "12.8"
)
assert (
template.Template(
'{{ states.sensor.temperature.state | round(1, "half") }}', hass
).async_render()
== "13.0"
)
def test_rounding_value_get_original_value_on_error(hass):
"""Test rounding value get original value on error."""
assert template.Template("{{ None | round }}", hass).async_render() == "None"
assert (
template.Template('{{ "no_number" | round }}', hass).async_render()
== "no_number"
)
def test_multiply(hass):
"""Test multiply."""
tests = {None: "None", 10: "100", '"abcd"': "abcd"}
for inp, out in tests.items():
assert (
template.Template(
"{{ %s | multiply(10) | round }}" % inp, hass
).async_render()
== out
)
def test_logarithm(hass):
"""Test logarithm."""
tests = [
(4, 2, "2.0"),
(1000, 10, "3.0"),
(math.e, "", "1.0"),
('"invalid"', "_", "invalid"),
(10, '"invalid"', "10.0"),
]
for value, base, expected in tests:
assert (
template.Template(
f"{{{{ {value} | log({base}) | round(1) }}}}", hass
).async_render()
== expected
)
assert (
template.Template(
f"{{{{ log({value}, {base}) | round(1) }}}}", hass
).async_render()
== expected
)
def test_sine(hass):
"""Test sine."""
tests = [
(0, "0.0"),
(math.pi / 2, "1.0"),
(math.pi, "0.0"),
(math.pi * 1.5, "-1.0"),
(math.pi / 10, "0.309"),
('"duck"', "duck"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sin | round(3) }}" % value, hass).async_render()
== expected
)
def test_cos(hass):
"""Test cosine."""
tests = [
(0, "1.0"),
(math.pi / 2, "0.0"),
(math.pi, "-1.0"),
(math.pi * 1.5, "-0.0"),
(math.pi / 10, "0.951"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | cos | round(3) }}" % value, hass).async_render()
== expected
)
def test_tan(hass):
"""Test tangent."""
tests = [
(0, "0.0"),
(math.pi, "-0.0"),
(math.pi / 180 * 45, "1.0"),
(math.pi / 180 * 90, "1.633123935319537e+16"),
(math.pi / 180 * 135, "-1.0"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | tan | round(3) }}" % value, hass).async_render()
== expected
)
def test_sqrt(hass):
"""Test square root."""
tests = [
(0, "0.0"),
(1, "1.0"),
(2, "1.414"),
(10, "3.162"),
(100, "10.0"),
("'error'", "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | sqrt | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_sine(hass):
"""Test arcus sine."""
tests = [
(-2.0, "-2.0"), # value error
(-1.0, "-1.571"),
(-0.5, "-0.524"),
(0.0, "0.0"),
(0.5, "0.524"),
(1.0, "1.571"),
(2.0, "2.0"), # value error
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | asin | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_cos(hass):
"""Test arcus cosine."""
tests = [
(-2.0, "-2.0"), # value error
(-1.0, "3.142"),
(-0.5, "2.094"),
(0.0, "1.571"),
(0.5, "1.047"),
(1.0, "0.0"),
(2.0, "2.0"), # value error
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | acos | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_tan(hass):
"""Test arcus tangent."""
tests = [
(-10.0, "-1.471"),
(-2.0, "-1.107"),
(-1.0, "-0.785"),
(-0.5, "-0.464"),
(0.0, "0.0"),
(0.5, "0.464"),
(1.0, "0.785"),
(2.0, "1.107"),
(10.0, "1.471"),
('"error"', "error"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | atan | round(3) }}" % value, hass).async_render()
== expected
)
def test_arc_tan2(hass):
"""Test two parameter version of arcus tangent."""
tests = [
(-10.0, -10.0, "-2.356"),
(-10.0, 0.0, "-1.571"),
(-10.0, 10.0, "-0.785"),
(0.0, -10.0, "3.142"),
(0.0, 0.0, "0.0"),
(0.0, 10.0, "0.0"),
(10.0, -10.0, "2.356"),
(10.0, 0.0, "1.571"),
(10.0, 10.0, "0.785"),
(-4.0, 3.0, "-0.927"),
(-1.0, 2.0, "-0.464"),
(2.0, 1.0, "1.107"),
('"duck"', '"goose"', "('duck', 'goose')"),
]
for y, x, expected in tests:
assert (
template.Template(
f"{{{{ ({y}, {x}) | atan2 | round(3) }}}}", hass
).async_render()
== expected
)
assert (
template.Template(
f"{{{{ atan2({y}, {x}) | round(3) }}}}", hass
).async_render()
== expected
)
def test_strptime(hass):
"""Test the parse timestamp method."""
tests = [
("2016-10-19 15:22:05.588122 UTC", "%Y-%m-%d %H:%M:%S.%f %Z", None),
("2016-10-19 15:22:05.588122+0100", "%Y-%m-%d %H:%M:%S.%f%z", None),
("2016-10-19 15:22:05.588122", "%Y-%m-%d %H:%M:%S.%f", None),
("2016-10-19", "%Y-%m-%d", None),
("2016", "%Y", None),
("15:22:05", "%H:%M:%S", None),
("1469119144", "%Y", "1469119144"),
("invalid", "%Y", "invalid"),
]
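    # A None expectation means the rendered output should equal datetime.strptime(inp, fmt).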
for inp, fmt, expected in tests:
if expected is None:
expected = datetime.strptime(inp, fmt)
temp = f"{{{{ strptime('{inp}', '{fmt}') }}}}"
assert template.Template(temp, hass).async_render() == str(expected)
def test_timestamp_custom(hass):
"""Test the timestamps to custom filter."""
now = dt_util.utcnow()
tests = [
(None, None, None, "None"),
(1469119144, None, True, "2016-07-21 16:39:04"),
(1469119144, "%Y", True, "2016"),
(1469119144, "invalid", True, "invalid"),
(dt_util.as_timestamp(now), None, False, now.strftime("%Y-%m-%d %H:%M:%S")),
]
for inp, fmt, local, out in tests:
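        # Build the filter expression, passing the optional format and local arguments when given.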
        if fmt and local:
            fil = f"timestamp_custom('{fmt}', {local})"
        elif fmt:
            fil = f"timestamp_custom('{fmt}')"
        else:
            fil = "timestamp_custom"
assert template.Template(f"{{{{ {inp} | {fil} }}}}", hass).async_render() == out
def test_timestamp_local(hass):
"""Test the timestamps to local filter."""
tests = {None: "None", 1469119144: "2016-07-21 16:39:04"}
for inp, out in tests.items():
assert (
template.Template("{{ %s | timestamp_local }}" % inp, hass).async_render()
== out
)
def test_to_json(hass):
"""Test the object to JSON string filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
expected_result = '{"Foo": "Bar"}'
actual_result = template.Template(
"{{ {'Foo': 'Bar'} | to_json }}", hass
).async_render()
assert actual_result == expected_result
def test_from_json(hass):
"""Test the JSON string to object filter."""
# Note that we're not testing the actual json.loads and json.dumps methods,
# only the filters, so we don't need to be exhaustive with our sample JSON.
expected_result = "Bar"
actual_result = template.Template(
'{{ (\'{"Foo": "Bar"}\' | from_json).Foo }}', hass
).async_render()
assert actual_result == expected_result
def test_min(hass):
"""Test the min filter."""
assert template.Template("{{ [1, 2, 3] | min }}", hass).async_render() == "1"
def test_max(hass):
"""Test the max filter."""
assert template.Template("{{ [1, 2, 3] | max }}", hass).async_render() == "3"
def test_ord(hass):
"""Test the ord filter."""
assert template.Template('{{ "d" | ord }}', hass).async_render() == "100"
def test_base64_encode(hass):
"""Test the base64_encode filter."""
assert (
template.Template('{{ "homeassistant" | base64_encode }}', hass).async_render()
== "aG9tZWFzc2lzdGFudA=="
)
def test_base64_decode(hass):
"""Test the base64_decode filter."""
assert (
template.Template(
'{{ "aG9tZWFzc2lzdGFudA==" | base64_decode }}', hass
).async_render()
== "homeassistant"
)
def test_ordinal(hass):
"""Test the ordinal filter."""
tests = [
(1, "1st"),
(2, "2nd"),
(3, "3rd"),
(4, "4th"),
(5, "5th"),
(12, "12th"),
(100, "100th"),
(101, "101st"),
]
for value, expected in tests:
assert (
template.Template("{{ %s | ordinal }}" % value, hass).async_render()
== expected
)
def test_timestamp_utc(hass):
"""Test the timestamps to local filter."""
now = dt_util.utcnow()
tests = {
None: "None",
1469119144: "2016-07-21 16:39:04",
dt_util.as_timestamp(now): now.strftime("%Y-%m-%d %H:%M:%S"),
}
for inp, out in tests.items():
assert (
template.Template("{{ %s | timestamp_utc }}" % inp, hass).async_render()
== out
)
def test_as_timestamp(hass):
"""Test the as_timestamp function."""
assert (
template.Template('{{ as_timestamp("invalid") }}', hass).async_render()
== "None"
)
hass.mock = None
assert (
template.Template("{{ as_timestamp(states.mock) }}", hass).async_render()
== "None"
)
tpl = (
'{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", '
'"%Y-%m-%dT%H:%M:%S%z")) }}'
)
assert template.Template(tpl, hass).async_render() == "1706951424.0"
@patch.object(random, "choice")
def test_random_every_time(test_choice, hass):
"""Ensure the random filter runs every time, not just once."""
tpl = template.Template("{{ [1,2] | random }}", hass)
test_choice.return_value = "foo"
assert tpl.async_render() == "foo"
test_choice.return_value = "bar"
assert tpl.async_render() == "bar"
def test_passing_vars_as_keywords(hass):
"""Test passing variables as keywords."""
assert template.Template("{{ hello }}", hass).async_render(hello=127) == "127"
def test_passing_vars_as_vars(hass):
"""Test passing variables as variables."""
assert template.Template("{{ hello }}", hass).async_render({"hello": 127}) == "127"
def test_passing_vars_as_list(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello }}", hass), {"hello": ["foo", "bar"]}
)
== "['foo', 'bar']"
)
def test_passing_vars_as_list_element(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello[1] }}", hass), {"hello": ["foo", "bar"]}
)
== "bar"
)
def test_passing_vars_as_dict_element(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello.foo }}", hass), {"hello": {"foo": "bar"}}
)
== "bar"
)
def test_passing_vars_as_dict(hass):
"""Test passing variables as list."""
assert (
template.render_complex(
template.Template("{{ hello }}", hass), {"hello": {"foo": "bar"}}
)
== "{'foo': 'bar'}"
)
def test_render_with_possible_json_value_with_valid_json(hass):
"""Render with possible JSON value with valid JSON."""
tpl = template.Template("{{ value_json.hello }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_with_invalid_json(hass):
"""Render with possible JSON value with invalid JSON."""
tpl = template.Template("{{ value_json }}", hass)
assert tpl.async_render_with_possible_json_value("{ I AM NOT JSON }") == ""
def test_render_with_possible_json_value_with_template_error_value(hass):
"""Render with possible JSON value with template error value."""
tpl = template.Template("{{ non_existing.variable }}", hass)
assert tpl.async_render_with_possible_json_value("hello", "-") == "-"
def test_render_with_possible_json_value_with_missing_json_value(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.goodbye }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == ""
def test_render_with_possible_json_value_valid_with_is_defined(hass):
"""Render with possible JSON value with known JSON object."""
tpl = template.Template("{{ value_json.hello|is_defined }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}') == "world"
def test_render_with_possible_json_value_undefined_json(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
assert (
tpl.async_render_with_possible_json_value('{"hello": "world"}')
== '{"hello": "world"}'
)
def test_render_with_possible_json_value_undefined_json_error_value(hass):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template("{{ value_json.bye|is_defined }}", hass)
assert tpl.async_render_with_possible_json_value('{"hello": "world"}', "") == ""
def test_render_with_possible_json_value_non_string_value(hass):
"""Render with possible JSON value with non-string value."""
tpl = template.Template(
"""
{{ strptime(value~'+0000', '%Y-%m-%d %H:%M:%S%z') }}
""",
hass,
)
value = datetime(2019, 1, 18, 12, 13, 14)
expected = str(pytz.utc.localize(value))
assert tpl.async_render_with_possible_json_value(value) == expected
def test_if_state_exists(hass):
"""Test if state exists works."""
hass.states.async_set("test.object", "available")
tpl = template.Template(
"{% if states.test.object %}exists{% else %}not exists{% endif %}", hass
)
assert tpl.async_render() == "exists"
def test_is_state(hass):
"""Test is_state method."""
hass.states.async_set("test.object", "available")
tpl = template.Template(
"""
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ is_state("test.noobject", "available") }}
""",
hass,
)
assert tpl.async_render() == "False"
def test_is_state_attr(hass):
"""Test is_state_attr method."""
hass.states.async_set("test.object", "available", {"mode": "on"})
tpl = template.Template(
"""
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ is_state_attr("test.noobject", "mode", "on") }}
""",
hass,
)
assert tpl.async_render() == "False"
def test_state_attr(hass):
"""Test state_attr method."""
hass.states.async_set("test.object", "available", {"mode": "on"})
tpl = template.Template(
"""
{% if state_attr("test.object", "mode") == "on" %}yes{% else %}no{% endif %}
""",
hass,
)
assert tpl.async_render() == "yes"
tpl = template.Template(
"""
{{ state_attr("test.noobject", "mode") == None }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_states_function(hass):
"""Test using states as a function."""
hass.states.async_set("test.object", "available")
tpl = template.Template('{{ states("test.object") }}', hass)
assert tpl.async_render() == "available"
tpl2 = template.Template('{{ states("test.object2") }}', hass)
assert tpl2.async_render() == "unknown"
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_now(mock_is_safe, hass):
"""Test now method."""
now = dt_util.now()
with patch("homeassistant.util.dt.now", return_value=now):
assert (
now.isoformat()
== template.Template("{{ now().isoformat() }}", hass).async_render()
)
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_relative_time(mock_is_safe, hass):
"""Test relative_time method."""
now = datetime.strptime("2000-01-01 10:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z")
with patch("homeassistant.util.dt.now", return_value=now):
assert (
"1 hour"
== template.Template(
'{{relative_time(strptime("2000-01-01 09:00:00", "%Y-%m-%d %H:%M:%S"))}}',
hass,
).async_render()
)
assert (
"2 hours"
== template.Template(
'{{relative_time(strptime("2000-01-01 09:00:00 +01:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
)
assert (
"1 hour"
== template.Template(
'{{relative_time(strptime("2000-01-01 03:00:00 -06:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
)
assert (
str(template.strptime("2000-01-01 11:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z"))
== template.Template(
'{{relative_time(strptime("2000-01-01 11:00:00 +00:00", "%Y-%m-%d %H:%M:%S %z"))}}',
hass,
).async_render()
)
assert (
"string"
== template.Template('{{relative_time("string")}}', hass,).async_render()
)
@patch(
"homeassistant.helpers.template.TemplateEnvironment.is_safe_callable",
return_value=True,
)
def test_utcnow(mock_is_safe, hass):
"""Test utcnow method."""
now = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=now):
assert (
now.isoformat()
== template.Template("{{ utcnow().isoformat() }}", hass).async_render()
)
def test_regex_match(hass):
"""Test regex_match method."""
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Home Assistant test' | regex_match('home', True) }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Another Home Assistant test' | regex_match('Home') }}
""",
hass,
)
assert tpl.async_render() == "False"
tpl = template.Template(
"""
{{ ['Home Assistant test'] | regex_match('.*Assist') }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_regex_search(hass):
"""Test regex_search method."""
tpl = template.Template(
r"""
{{ '123-456-7890' | regex_search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Home Assistant test' | regex_search('home', True) }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ 'Another Home Assistant test' | regex_search('Home') }}
""",
hass,
)
assert tpl.async_render() == "True"
tpl = template.Template(
"""
{{ ['Home Assistant test'] | regex_search('Assist') }}
""",
hass,
)
assert tpl.async_render() == "True"
def test_regex_replace(hass):
"""Test regex_replace method."""
tpl = template.Template(
r"""
{{ 'Hello World' | regex_replace('(Hello\\s)',) }}
""",
hass,
)
assert tpl.async_render() == "World"
tpl = template.Template(
"""
{{ ['Home hinderant test'] | regex_replace('hinder', 'Assist') }}
""",
hass,
)
assert tpl.async_render() == "['Home Assistant test']"
def test_regex_findall_index(hass):
"""Test regex_findall_index method."""
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 0) }}
""",
hass,
)
assert tpl.async_render() == "JFK"
tpl = template.Template(
"""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
tpl = template.Template(
"""
{{ ['JFK', 'LHR'] | regex_findall_index('([A-Z]{3})', 1) }}
""",
hass,
)
assert tpl.async_render() == "LHR"
def test_bitwise_and(hass):
"""Test bitwise_and method."""
tpl = template.Template(
"""
{{ 8 | bitwise_and(8) }}
""",
hass,
)
assert tpl.async_render() == str(8 & 8)
tpl = template.Template(
"""
{{ 10 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == str(10 & 2)
tpl = template.Template(
"""
{{ 8 | bitwise_and(2) }}
""",
hass,
)
assert tpl.async_render() == str(8 & 2)
def test_bitwise_or(hass):
"""Test bitwise_or method."""
tpl = template.Template(
"""
{{ 8 | bitwise_or(8) }}
""",
hass,
)
assert tpl.async_render() == str(8 | 8)
tpl = template.Template(
"""
{{ 10 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == str(10 | 2)
tpl = template.Template(
"""
{{ 8 | bitwise_or(2) }}
""",
hass,
)
assert tpl.async_render() == str(8 | 2)
def test_distance_function_with_1_state(hass):
"""Test distance function with 1 state."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
tpl = template.Template("{{ distance(states.test.object) | round }}", hass)
assert tpl.async_render() == "187"
def test_distance_function_with_2_states(hass):
"""Test distance function with 2 states."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
"{{ distance(states.test.object, states.test.object_2) | round }}", hass
)
assert tpl.async_render() == "187"
def test_distance_function_with_1_coord(hass):
"""Test distance function with 1 coord."""
_set_up_units(hass)
tpl = template.Template('{{ distance("32.87336", "-117.22943") | round }}', hass)
assert tpl.async_render() == "187"
def test_distance_function_with_2_coords(hass):
"""Test distance function with 2 coords."""
_set_up_units(hass)
assert (
template.Template(
'{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
% (hass.config.latitude, hass.config.longitude),
hass,
).async_render()
== "187"
)
def test_distance_function_with_1_state_1_coord(hass):
"""Test distance function with 1 state 1 coord."""
_set_up_units(hass)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("32.87336", "-117.22943", states.test.object_2) ' "| round }}",
hass,
)
assert tpl.async_render() == "187"
tpl2 = template.Template(
'{{ distance(states.test.object_2, "32.87336", "-117.22943") ' "| round }}",
hass,
)
assert tpl2.async_render() == "187"
def test_distance_function_return_none_if_invalid_state(hass):
"""Test distance function return None if invalid state."""
hass.states.async_set("test.object_2", "happy", {"latitude": 10})
tpl = template.Template("{{ distance(states.test.object_2) | round }}", hass)
assert tpl.async_render() == "None"
def test_distance_function_return_none_if_invalid_coord(hass):
"""Test distance function return None if invalid coord."""
assert (
template.Template('{{ distance("123", "abc") }}', hass).async_render() == "None"
)
assert template.Template('{{ distance("123") }}', hass).async_render() == "None"
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template('{{ distance("123", states.test_object_2) }}', hass)
assert tpl.async_render() == "None"
def test_distance_function_with_2_entity_ids(hass):
"""Test distance function with 2 entity ids."""
_set_up_units(hass)
hass.states.async_set(
"test.object", "happy", {"latitude": 32.87336, "longitude": -117.22943}
)
hass.states.async_set(
"test.object_2",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "test.object_2") | round }}', hass
)
assert tpl.async_render() == "187"
def test_distance_function_with_1_entity_1_coord(hass):
"""Test distance function with 1 entity_id and 1 coord."""
_set_up_units(hass)
hass.states.async_set(
"test.object",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
tpl = template.Template(
'{{ distance("test.object", "32.87336", "-117.22943") | round }}', hass
)
assert tpl.async_render() == "187"
def test_closest_function_home_vs_domain(hass):
"""Test closest function home vs domain."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_test_domain.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template(
"{{ closest(states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.object"
)
assert (
template.Template(
"{{ (states.test_domain | closest).entity_id }}", hass
).async_render()
== "test_domain.object"
)
def test_closest_function_home_vs_all_states(hass):
"""Test closest function home vs all states."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain_2.and_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
assert (
template.Template("{{ (states | closest).entity_id }}", hass).async_render()
== "test_domain_2.and_closer"
)
async def test_closest_function_home_vs_group_entity_id(hass):
"""Test closest function home vs group entity id."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
async def test_closest_function_home_vs_group_state(hass):
"""Test closest function home vs group state."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"not_in_group.but_closer",
"happy",
{"latitude": hass.config.latitude, "longitude": hass.config.longitude},
)
await group.Group.async_create_group(hass, "location group", ["test_domain.object"])
info = render_to_info(hass, '{{ closest("group.location_group").entity_id }}')
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
info = render_to_info(hass, "{{ closest(states.group.location_group).entity_id }}")
assert_result_info(
info, "test_domain.object", ["test_domain.object", "group.location_group"]
)
async def test_expand(hass):
"""Test expand function."""
info = render_to_info(hass, "{{ expand('test.object') }}")
assert_result_info(info, "[]", ["test.object"])
info = render_to_info(hass, "{{ expand(56) }}")
assert_result_info(info, "[]")
hass.states.async_set("test.object", "happy")
info = render_to_info(
hass, "{{ expand('test.object') | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", [])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "", ["group.new_group"])
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "", [], ["group"])
await group.Group.async_create_group(hass, "new group", ["test.object"])
info = render_to_info(
hass,
"{{ expand('group.new_group') | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
info = render_to_info(
hass, "{{ expand(states.group) | map(attribute='entity_id') | join(', ') }}"
)
assert_result_info(info, "test.object", ["group.new_group"], ["group"])
info = render_to_info(
hass,
"{{ expand('group.new_group', 'test.object')"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
info = render_to_info(
hass,
"{{ ['group.new_group', 'test.object'] | expand"
" | map(attribute='entity_id') | join(', ') }}",
)
assert_result_info(info, "test.object", ["group.new_group"])
def test_closest_function_to_coord(hass):
"""Test closest function to coord."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
tpl = template.Template(
'{{ closest("%s", %s, states.test_domain).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
tpl = template.Template(
'{{ (states.test_domain | closest("%s", %s)).entity_id }}'
% (hass.config.latitude + 0.3, hass.config.longitude + 0.3),
hass,
)
assert tpl.async_render() == "test_domain.closest_zone"
def test_closest_function_to_entity_id(hass):
"""Test closest function to entity id."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
info = render_to_info(
hass,
"{{ closest(zone, states.test_domain).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
info = render_to_info(
hass,
"{{ ([states.test_domain, 'test_domain.closest_zone'] "
"| closest(zone)).entity_id }}",
{"zone": "zone.far_away"},
)
assert_result_info(
info,
"test_domain.closest_zone",
["test_domain.closest_home", "test_domain.closest_zone", "zone.far_away"],
["test_domain"],
)
def test_closest_function_to_state(hass):
"""Test closest function to state."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
hass.states.async_set(
"test_domain.closest_zone",
"happy",
{
"latitude": hass.config.latitude + 0.2,
"longitude": hass.config.longitude + 0.2,
},
)
hass.states.async_set(
"zone.far_away",
"zoning",
{
"latitude": hass.config.latitude + 0.3,
"longitude": hass.config.longitude + 0.3,
},
)
assert (
template.Template(
"{{ closest(states.zone.far_away, states.test_domain).entity_id }}", hass
).async_render()
== "test_domain.closest_zone"
)
def test_closest_function_invalid_state(hass):
"""Test closest function invalid state."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
for state in ("states.zone.non_existing", '"zone.non_existing"'):
assert (
template.Template("{{ closest(%s, states) }}" % state, hass).async_render()
== "None"
)
def test_closest_function_state_with_invalid_location(hass):
"""Test closest function state with invalid location."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{"latitude": "invalid latitude", "longitude": hass.config.longitude + 0.1},
)
assert (
template.Template(
"{{ closest(states.test_domain.closest_home, states) }}", hass
).async_render()
== "None"
)
def test_closest_function_invalid_coordinates(hass):
"""Test closest function invalid coordinates."""
hass.states.async_set(
"test_domain.closest_home",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
assert (
template.Template(
'{{ closest("invalid", "coord", states) }}', hass
).async_render()
== "None"
)
assert (
template.Template(
'{{ states | closest("invalid", "coord") }}', hass
).async_render()
== "None"
)
def test_closest_function_no_location_states(hass):
"""Test closest function without location states."""
assert (
template.Template("{{ closest(states).entity_id }}", hass).async_render() == ""
)
def test_extract_entities_none_exclude_stuff(hass):
"""Test extract entities function with none or exclude stuff."""
assert template.extract_entities(hass, None) == []
assert template.extract_entities(hass, "mdi:water") == []
assert (
template.extract_entities(
hass, "{{ closest(states.zone.far_away, states.test_domain).entity_id }}"
)
== MATCH_ALL
)
assert (
template.extract_entities(hass, '{{ distance("123", states.test_object_2) }}')
== MATCH_ALL
)
def test_extract_entities_no_match_entities(hass):
"""Test extract entities function with no matching entities."""
assert (
template.extract_entities(
hass, "{{ value_json.tst | timestamp_custom('%Y' True) }}"
)
== MATCH_ALL
)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},d
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
def test_generate_filter_iterators(hass):
"""Test the generated filter iterators."""
info = render_to_info(
hass,
"""
{% for state in states %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", all_states=True)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "", domains=["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
# Don't need the entity because the state is not accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}
{% endfor %}
""",
)
assert_result_info(info, "sensor.test_sensor", domains=["sensor"])
# But we do here because the state gets accessed
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},
{% endfor %}
""",
)
assert_result_info(
info, "sensor.test_sensor=off,", ["sensor.test_sensor"], ["sensor"]
)
info = render_to_info(
hass,
"""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.attributes.attr }},
{% endfor %}
""",
)
assert_result_info(
info, "sensor.test_sensor=value,", ["sensor.test_sensor"], ["sensor"]
)
def test_generate_select(hass):
"""Test generating a select filter from states."""
template_str = """
{{ states.sensor|selectattr("state","equalto","off")
|join(",", attribute="entity_id") }}
"""
tmp = template.Template(template_str, hass)
info = tmp.async_render_to_info()
assert_result_info(info, "", [], ["sensor"])
hass.states.async_set("sensor.test_sensor", "off", {"attr": "value"})
hass.states.async_set("sensor.test_sensor_on", "on")
info = tmp.async_render_to_info()
assert_result_info(
info,
"sensor.test_sensor",
["sensor.test_sensor", "sensor.test_sensor_on"],
["sensor"],
)
async def test_extract_entities_match_entities(hass):
"""Test extract entities function with entities stuff."""
assert (
template.extract_entities(
hass,
"""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% else %}
Hercules is at {{ states('device_tracker.phone_1') }}.
{% endif %}
""",
)
== ["device_tracker.phone_1"]
)
assert (
template.extract_entities(
hass,
"""
{{ as_timestamp(states.binary_sensor.garage_door.last_changed) }}
""",
)
== ["binary_sensor.garage_door"]
)
assert (
template.extract_entities(
hass,
"""
{{ states("binary_sensor.garage_door") }}
""",
)
== ["binary_sensor.garage_door"]
)
hass.states.async_set("device_tracker.phone_2", "not_home", {"battery": 20})
assert (
template.extract_entities(
hass,
"""
{{ is_state_attr('device_tracker.phone_2', 'battery', 40) }}
""",
)
== ["device_tracker.phone_2"]
)
assert sorted(["device_tracker.phone_1", "device_tracker.phone_2"]) == sorted(
template.extract_entities(
hass,
"""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% elif states.device_tracker.phone_2.attributes.battery < 40 %}
Hercules you power goes done!.
{% endif %}
""",
)
)
assert sorted(["sensor.pick_humidity", "sensor.pick_temperature"]) == sorted(
template.extract_entities(
hass,
"""
{{
states.sensor.pick_temperature.state ~ „°C (“ ~
states.sensor.pick_humidity.state ~ „ %“
}}
""",
)
)
assert sorted(
["sensor.luftfeuchtigkeit_mean", "input_number.luftfeuchtigkeit"]
) == sorted(
template.extract_entities(
hass,
"{% if (states('sensor.luftfeuchtigkeit_mean') | int)"
" > (states('input_number.luftfeuchtigkeit') | int +1.5)"
" %}true{% endif %}",
)
)
await group.Group.async_create_group(hass, "empty group", [])
assert ["group.empty_group"] == template.extract_entities(
hass, "{{ expand('group.empty_group') | list | length }}"
)
hass.states.async_set("test_domain.object", "exists")
await group.Group.async_create_group(hass, "expand group", ["test_domain.object"])
assert sorted(["group.expand_group", "test_domain.object"]) == sorted(
template.extract_entities(
hass, "{{ expand('group.expand_group') | list | length }}"
)
)
assert ["test_domain.entity"] == template.Template(
'{{ is_state("test_domain.entity", "on") }}', hass
).extract_entities()
def test_extract_entities_with_variables(hass):
"""Test extract entities function with variables and entities stuff."""
hass.states.async_set("input_boolean.switch", "on")
assert ["input_boolean.switch"] == template.extract_entities(
hass, "{{ is_state('input_boolean.switch', 'off') }}", {}
)
assert ["input_boolean.switch"] == template.extract_entities(
hass,
"{{ is_state(trigger.entity_id, 'off') }}",
{"trigger": {"entity_id": "input_boolean.switch"}},
)
assert MATCH_ALL == template.extract_entities(
hass, "{{ is_state(data, 'off') }}", {"data": "no_state"}
)
assert ["input_boolean.switch"] == template.extract_entities(
hass, "{{ is_state(data, 'off') }}", {"data": "input_boolean.switch"}
)
assert ["input_boolean.switch"] == template.extract_entities(
hass,
"{{ is_state(trigger.entity_id, 'off') }}",
{"trigger": {"entity_id": "input_boolean.switch"}},
)
hass.states.async_set("media_player.livingroom", "off")
assert ["media_player.livingroom"] == template.extract_entities(
hass,
"{{ is_state('media_player.' ~ where , 'playing') }}",
{"where": "livingroom"},
)
def test_jinja_namespace(hass):
"""Test Jinja's namespace command can be used."""
test_template = template.Template(
(
"{% set ns = namespace(a_key='') %}"
"{% set ns.a_key = states.sensor.dummy.state %}"
"{{ ns.a_key }}"
),
hass,
)
hass.states.async_set("sensor.dummy", "a value")
assert test_template.async_render() == "a value"
hass.states.async_set("sensor.dummy", "another value")
assert test_template.async_render() == "another value"
def test_state_with_unit(hass):
"""Test the state_with_unit property helper."""
hass.states.async_set("sensor.test", "23", {"unit_of_measurement": "beers"})
hass.states.async_set("sensor.test2", "wow")
tpl = template.Template("{{ states.sensor.test.state_with_unit }}", hass)
assert tpl.async_render() == "23 beers"
tpl = template.Template("{{ states.sensor.test2.state_with_unit }}", hass)
assert tpl.async_render() == "wow"
tpl = template.Template(
"{% for state in states %}{{ state.state_with_unit }} {% endfor %}", hass
)
assert tpl.async_render() == "23 beers wow"
tpl = template.Template("{{ states.sensor.non_existing.state_with_unit }}", hass)
assert tpl.async_render() == ""
def test_length_of_states(hass):
"""Test fetching the length of states."""
hass.states.async_set("sensor.test", "23")
hass.states.async_set("sensor.test2", "wow")
hass.states.async_set("climate.test2", "cooling")
tpl = template.Template("{{ states | length }}", hass)
assert tpl.async_render() == "3"
tpl = template.Template("{{ states.sensor | length }}", hass)
assert tpl.async_render() == "2"
def test_render_complex_handling_non_template_values(hass):
"""Test that we can render non-template fields."""
assert template.render_complex(
{True: 1, False: template.Template("{{ hello }}", hass)}, {"hello": 2}
) == {True: 1, False: "2"}
def test_urlencode(hass):
"""Test the urlencode method."""
tpl = template.Template(
("{% set dict = {'foo': 'x&y', 'bar': 42} %}" "{{ dict | urlencode }}"), hass,
)
assert tpl.async_render() == "foo=x%26y&bar=42"
tpl = template.Template(
("{% set string = 'the quick brown fox = true' %}" "{{ string | urlencode }}"),
hass,
)
assert tpl.async_render() == "the%20quick%20brown%20fox%20%3D%20true"
|
#!/usr/bin/env python
# Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script helps synchronize contents from their respective sources of
# truth (usually GitHub repositories of each Tekton
# components, such as tektoncd/pipelines) to tektoncd/website.
import copy
import fnmatch
import json
import logging
import markdown
from multiprocessing import Pool
import os
import os.path
import re
import sys
from urllib.error import URLError
from urllib.parse import urlparse, urljoin, urlunparse
from bs4 import BeautifulSoup
import click
import git
from jinja2 import Environment
from jinja2 import FileSystemLoader
from ruamel.yaml import YAML
CONTENT_DIR = './content/en/docs'
VAULT_DIR = './content/en/vault'
JS_ASSET_DIR = './assets/js'
TEMPLATE_DIR = './templates'
BASE_FOLDER = os.path.dirname(os.path.abspath(__file__))
DEFAULT_CONFIG_FOLDER = os.path.join(BASE_FOLDER, 'config')
DEFAULT_CACHE_FOLDER = os.path.join(BASE_FOLDER, '.cache')
jinja_env = Environment(loader=FileSystemLoader(TEMPLATE_DIR))
FM_BOUNDARY = re.compile(r"^(?:<!--\n)?-{3,}\s*$(?:\n-->)?", re.MULTILINE)
YAML_SEPARATOR = "---\n"
FOLDER_INDEX = '_index.md'
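# Illustrative note (inputs below are assumed examples, not taken from the
# Tekton docs): FM_BOUNDARY recognises a front matter fence of three or more
# dashes, optionally wrapped in an HTML comment, e.g.
#   bool(FM_BOUNDARY.match("---\n"))           -> True   (plain fence)
#   bool(FM_BOUNDARY.match("<!--\n---\n-->"))  -> True   (comment-wrapped fence)
#   bool(FM_BOUNDARY.match("# A heading\n"))   -> False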
def doc_config(doc, folder_config, weight=None):
""" Return the target name, folder and header for doc based on folder_config
:param doc: the doc as a gitpython Blob
:param folder_config: a dict with the configuration of the folder the doc
was found in, as specified in the sync config file under `folders`
:param weight: optional weight of the doc. When specified it's set in the
returned header dict
:returns: a tuple (target_filename, target_folder, header), which describes
which files `doc` should be written to, in which folder, with which header
"""
index_file = folder_config.get('index', FOLDER_INDEX)
target_folder = folder_config.get('target', '')
# If the doc name is configured as index, rewrite it to FOLDER_INDEX
target_filename = FOLDER_INDEX if doc.name == index_file else doc.name
# If a header is specified, build it and return it
header_dict = None
if 'header' in folder_config:
header_dict = copy.deepcopy(folder_config['header'])
if weight is not None:
header_dict['weight'] = weight
return target_filename, target_folder, header_dict
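# Illustrative example (hypothetical folder_config, not from a real sync
# config): a doc whose name matches the configured index becomes Hugo's
# _index.md and inherits the folder header plus the given weight, e.g. for
# a doc named 'README.md':
#   doc_config(doc, {'index': 'README.md', 'target': 'pipelines',
#                    'header': {'title': 'Pipelines'}}, weight=3)
#   -> ('_index.md', 'pipelines', {'title': 'Pipelines', 'weight': 3})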
def docs_from_tree(tree, include=['*'], exclude=[]):
""" Get matching docs (git blobs) from a git tree
Filter all blobs directly under a tree based on include and
exclude lists. Filters are specified as a list of unix-style
filename patterns:
(https://docs.python.org/3/library/fnmatch.html) """
return filter(lambda b:
any(fnmatch.fnmatch(b.name, i) for i in include) and
not any(fnmatch.fnmatch(b.name, e) for e in exclude), tree.blobs)
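# Illustrative usage (assumed patterns): keep the markdown blobs directly
# under `tree`, except any API reference files, e.g.
#   docs_from_tree(tree, include=['*.md'], exclude=['api-*.md'])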
def transform_docs(git_repo, tag, folders, site_folder, base_path, base_url):
""" Transform all folders configured for a tag
:param git_repo: a gitpython Repo object, that points to the source git repo
:param tag: a string that represent the git tag to be used
:param folders: a list of folder names with a dict config each, loaded from
sync config file
:param site_folder: the root folder on disk where files shall be written to
:param base_path: used to rewrite relative links to sync'ed files
:param base_url: used to rewrite relative links to unknown files
"""
# Get the root tree for the requested version from the repo
try:
tag = next(x for x in git_repo.tags if x.name == tag)
except StopIteration:
# When no tag is found try to match a branch (remote heads)
try:
tag = next(x for x in git_repo.remote().refs if x.remote_head == tag)
except StopIteration:
logging.error(f'No tag or branch {tag} found in {git_repo}')
sys.exit(1)
# List all relevant blobs based on the folder config
files = []
for folder, folder_config in folders.items():
if folder == '.':
root = tag.commit.tree
else:
root = tag.commit.tree.join(folder)
docs = docs_from_tree(
tree=root, include=folder_config.get('include', ['*']),
exclude=folder_config.get('exclude', []))
# zip doc, folder, target and header so we can process them in parallel later
files.extend([(doc, folder, *doc_config(doc, folder_config, idx))
for idx, doc in enumerate(docs)])
# Build a dict of all valid local links
# This is used by `transform_link` to identify local links
local_files = {doc.path: (target, target_folder) for
doc, _, target, target_folder, _ in files}
# Build a list of tuples of `transform_doc` parameters
transform_args = [
(*f, local_files, base_path, base_url, site_folder) for f in files]
with Pool() as pool:
results = pool.starmap(transform_doc, transform_args)
# Return the list of files transformed
return results
def safe_makedirs(path):
try:
os.makedirs(path, exist_ok=True)
except FileExistsError:
pass
def transform_doc(doc, source_folder, target, target_folder, header,
local_files, base_path, base_url, site_folder):
""" Transform a single doc to the target file
Read a doc (git blob), transform links in it
and writes the results in to a target file
:param doc: The source doc as gitpython Blob
:param source_folder: the name of the folder in the source repo where
the file comes from
:param target: the name of the file the transformed doc shall be written to
:param target_folder: the folder within `site_folder` where the transformed
doc shall be written to
:param header: a dict with the content of a header (if any) to be prepended
in the transformed doc
:param local_files: a dict source file -> target used to rewrite
relative links to sync'ed files
:param base_path: used to rewrite relative links to sync'ed files
:param base_url: used to rewrite relative links to unknown files
:param site_folder: the root folder on disk where files shall be written to
"""
site_target_folder = os.path.normpath(os.path.join(site_folder, target_folder))
safe_makedirs(site_target_folder)
target = os.path.join(site_target_folder, target)
# Look for markdown files.
# Some machines seem to use text/plain (e.g. running on a mac) and some use
# text/markdown (e.g. running in a fresh ubuntu container)
if doc.mime_type == 'text/plain' or doc.mime_type == 'text/markdown':
with open(target, 'w+') as target_doc:
# If there is a header configured, write it (in YAML)
doc_all = decode(doc.data_stream.read())
doc_markdown, fm = read_front_matter(doc_all)
# Update the doc front matter with the configured one and write it
write_front_matter(target_doc, fm, header)
doc_markdown = transform_links_doc(
doc_markdown, source_folder, local_files, base_path, base_url)
target_doc.write(doc_markdown)
return target
# Pass-through for other mime types
with open(target, 'bw+') as target_doc:
logging.info(f'Pass-through {doc.mime_type} file {doc.path}')
target_doc.write(doc.data_stream.read())
return target
def decode(s, encodings=('utf8', 'latin1', 'ascii')):
for encoding in encodings:
try:
return s.decode(encoding)
except UnicodeDecodeError:
pass
return s.decode('ascii', 'ignore')
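# Illustrative behaviour (assumed byte strings): utf8 is tried first, then
# latin1, then ascii, finally falling back to ascii with undecodable bytes
# dropped, e.g.
#   decode(b'caf\xc3\xa9')  -> 'café'  (valid utf8)
#   decode(b'caf\xe9')      -> 'café'  (utf8 fails, latin1 fallback)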
def read_front_matter(text):
""" returns a tuple text, frontmatter (as dict) """
if FM_BOUNDARY.match(text):
try:
_, fm, content = FM_BOUNDARY.split(text, 2)
except ValueError:
# Not enough values to unpack, boundary was matched once
return text, None
if content.startswith('\n'):
content = content[1:]
return content, YAML().load(fm)
else:
return text, None
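# Illustrative example (assumed document): a doc with YAML front matter is
# split into its body and the parsed front matter, e.g.
#   read_front_matter("---\ntitle: Tasks\n---\nBody text\n")
#   -> ('Body text\n', {'title': 'Tasks'})
# Documents without a leading boundary are returned unchanged with None.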
def write_front_matter(target_doc, fm_doc, fm_config):
fm_doc = fm_doc or {}
fm_config = fm_config or {}
fm_doc.update(fm_config)
if fm_doc:
target_doc.write(YAML_SEPARATOR)
YAML().dump(fm_doc, target_doc)
target_doc.write(YAML_SEPARATOR)
def transform_links_doc(text, base_path, local_files, rewrite_path, rewrite_url):
""" transform all the links in the text """
links = get_links(text)
# Rewrite map, only use links with an href
rewrite_map = {x.get("href"): transform_link(x.get("href"), base_path, local_files, rewrite_path, rewrite_url)
for x in links if x.get("href")}
for source, target in rewrite_map.items():
text = text.replace(f'({source})', f'({target})')
return text
def get_links(md):
""" return a list of all the links in a string formatted in markdown """
md = markdown.markdown(md)
soup = BeautifulSoup(md, 'html.parser')
return soup.find_all("a")
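# Illustrative example (assumed snippet): links are found by rendering the
# markdown to HTML and collecting the anchor tags, e.g.
#   get_links('See the [install guide](./install.md).')
#   -> a single <a> tag whose href is './install.md'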
def transform_link(link, base_path, local_files, rewrite_path, rewrite_url):
""" Transform hrefs to be valid URLs on the web-site
Relative URLs are rewritten to `rewrite_path` when `link`
points to a sync'ed file. Else they're rewritten to `rewrite_url`.
Absolute URLs are not changed (they may be external)
Fragments are relative to the page and do not need changes,
except for lower() on local files because hugo generated
anchors are always lower case.
:param link: the link to be re-written
:param base_path: the folder where the source document that contains
the link lives
:param local_files: a dict source file -> (target file, folder) that
maps sync'ed files from their fully qualified source name into their
filename in the site folder
:param rewrite_path: the file local (sync'ed) files are rewritten to
:param rewrite_url: the URL remote files are rewritten to
:note: urlparse treats URLs without scheme like path only URLs,
so 'github.com' will be rewritten to 'rewrite_url/github.com'
"""
# ignore empty links
if not link:
return link
# urlparse returns a named tuple
parsed = urlparse(link)
if is_absolute_url(parsed):
return link
if is_fragment(parsed):
# A fragment only link points to an .md file
return urlunparse(parsed._replace(fragment=parsed.fragment.lower()))
path = os.path.normpath(parsed.path)
# The local_files dict includes paths relative to the root of the git
# repo, so we need to join base_path and normalize to fq_path to find the
# link in the list of local files
fq_path = os.path.normpath(os.path.join(base_path, parsed.path))
if fq_path in local_files:
target_file = local_files[fq_path][0]
target_folder = local_files[fq_path][1]
is_index = (target_file == FOLDER_INDEX)
filename, ext = os.path.splitext(target_file)
# Special handling for md files
if ext == '.md':
# Links to the index file are rendered as base_path/
if is_index:
target_file = ''
# links to other md files are rendered as .../[md filename]/
else:
target_file = filename + '/'
# for .md files, lower the case of fragments to match hugo's behaviour
parsed = parsed._replace(fragment=parsed.fragment.lower())
if target_folder:
new_path = [rewrite_path, target_folder, target_file]
else:
new_path = [rewrite_path, target_file]
return parsed._replace(path="/".join(new_path)).geturl()
# when not found on disk, append to the base_url
return urljoin(rewrite_url, parsed._replace(path=fq_path).geturl())
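# Illustrative examples (hypothetical paths, not from a real sync run), where
# `rw_url` stands for the rewrite_url argument:
#   transform_link('https://tekton.dev', 'docs', {}, '/docs/pipelines', rw_url)
#     -> 'https://tekton.dev'            (absolute URLs are left untouched)
#   transform_link('#usage', 'docs', {}, '/docs/pipelines', rw_url)
#     -> '#usage'                        (fragment-only, lower-cased)
#   transform_link('./install.md', 'docs',
#                  {'docs/install.md': ('install.md', '')},
#                  '/docs/pipelines', rw_url)
#     -> '/docs/pipelines/install/'      (sync'ed file, rewritten locally)
# Relative links that are not in local_files are joined onto rewrite_url.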
def is_absolute_url(parsed_url):
""" check if it is an absolute url """
return all([parsed_url.scheme, parsed_url.netloc])
def is_fragment(parsed_url):
""" determine if the url is a fragment-only (anchor) link """
return len(parsed_url.fragment) > 0 and not any(parsed_url[:-1])
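# Illustrative behaviour (assumed URLs):
#   is_absolute_url(urlparse('https://github.com/tektoncd/pipeline')) -> True
#   is_absolute_url(urlparse('github.com'))          -> False (no scheme/netloc)
#   is_fragment(urlparse('#install'))                -> True
#   is_fragment(urlparse('./tasks.md#install'))      -> False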
def download_resources_to_project(yaml_list, clones):
""" download the files from local clones based on a spec.
The YAML sync spec can be found in sync/config/README.md """
for entry in yaml_list:
component = entry['component']
repository = entry['repository']
local_clone = clones.get(repository)
if not local_clone:
logging.error(f'No git clone found for {repository} in {clones}')
sys.exit(1)
for index, tag in enumerate(entry['tags']):
logging.info(f'Syncing {component}@{tag["name"]}')
link_base_url = f'{repository}/tree/{tag["name"]}/'
if index == 0:
# the first tag's docs belong on the docs home page
base_path = f'/docs/{component}'.lower()
site_dir = f'{CONTENT_DIR}/{component}'
os.makedirs(site_dir, exist_ok=True)
else:
# the other tags' docs belong in the older versions, a.k.a. the vault
base_path = f'/vault/{component}-{tag["displayName"]}'
site_dir = f'{VAULT_DIR}/{component}-{tag["displayName"]}'
os.makedirs(site_dir, exist_ok=True)
results = transform_docs(
git_repo=local_clone,
tag=tag['name'],
folders=tag['folders'],
site_folder=site_dir,
base_path=base_path,
base_url=link_base_url)
logging.debug(f'Finished syncing {component}@{tag["name"]}: ')
logging.debug(f'{results}')
def get_files_in_path(path, file_type):
""" return a list of all the files in path that match the file_type """
file_list = []
# walk through every file in directory and its sub directories
for root, dirs, files in os.walk(path):
for file in files:
# append the file name to the list if it is the correct type
if file.endswith(file_type):
file_list.append(os.path.join(root, file))
return file_list
def load_config(files):
""" return a list of dicts with each config filename and its parsed YAML content """
yaml = YAML()
dic_list = []
for file in files:
with open(file, 'r') as text:
# get the paths from the config file
dic_list.append({
"filename": file,
"content": yaml.load(text)
})
return dic_list
def save_config(config):
""" save config files back to yaml """
yaml = YAML()
for c in config:
with open(c['filename'], 'w') as out:
yaml.dump(c['content'], out)
def get_tags(sync_config):
""" return a list of tags with their name and displayName """
tags = []
for tag in sync_config['tags']:
tags.append({'name': tag['name'], 'displayName': tag['displayName']})
return tags
def get_versions(sync_configs):
""" return the list of all the components with their name, tags and archive """
component_versions = []
for sync_config in sync_configs:
component_versions.append({
'name': sync_config['component'],
'tags': get_tags(sync_config),
'archive': sync_config['archive']
})
return component_versions
def create_resource(dest_prefix, file, versions):
""" create site resource based on the version and file """
resource_template = jinja_env.get_template(f'{file}.template')
if file.endswith(".js"):
serialize = json.dumps(versions)
resource = resource_template.render(component_versions_json=serialize)
elif file.endswith(".md"):
resource = resource_template.render(component_versions=versions)
else:
logging.warning(f'Cannot create resource for {file}. Only .js and .md supported')
return
with open(f'{dest_prefix}/{file}', 'w') as f:
f.write(resource)
def clone_repo(repo, update):
project = repo.split('/')[-1]
clone_dir = os.path.join(DEFAULT_CACHE_FOLDER, project)
if os.path.isdir(clone_dir):
if not update:
print(f'{project}: Cache folder {clone_dir} found, skipping clone.')
return repo, git.Repo(clone_dir)
# Cleanup and update via fetch --all
print(f'{project}: updating started')
cloned_repo = git.Repo(clone_dir)
cloned_repo.git.reset('--hard')
cloned_repo.git.clean('-xdf')
cloned_repo.git.fetch('--all')
print(f'{project}: updating completed')
return repo, cloned_repo
# Clone the repo
print(f'{project}: cloning started')
cloned_repo = git.Repo.clone_from(repo, clone_dir)
print(f'{project}: cloning completed')
return repo, cloned_repo
def clone_repos(sync_configs, update):
# Make sure the cache folder exists
safe_makedirs(DEFAULT_CACHE_FOLDER)
with Pool() as pool:
results = pool.starmap(clone_repo, [(x['repository'], update) for x in sync_configs])
return {x: y for x, y in results}
@click.command()
@click.option('--config-folder', default=DEFAULT_CONFIG_FOLDER,
help='the folder that contains the config files')
@click.option('--update-cache/--no-update-cache', default=False,
help='update clone caches. !! This will force cleanup caches !!')
def sync(config_folder, update_cache):
""" fetch all the files and sync them to the website """
# get the paths of the config files needed
config_files = get_files_in_path(config_folder, ".yaml")
config = [x["content"] for x in load_config(config_files)]
# clone all relevant repos
clones = clone_repos(config, update_cache)
# download resources from the clone cache
download_resources_to_project(config, clones)
versions = get_versions(config)
# create version switcher script
create_resource(JS_ASSET_DIR, "version-switcher.js", versions)
# create index for vault
create_resource(VAULT_DIR, FOLDER_INDEX, versions)
if __name__ == '__main__':
sync()
|
import json
import os
from .config import API_KEY
import requests
from datetime import date
from w3lib.html import remove_tags
def save_data_to_json(file_name, data):
"""Save data to a json file creating it if it does not already exist
:parameter: file_name -> 'example' do not add the '.json'
:parameter: data -> json data with the following structure [{},{},...]"""
# Save Data
if not os.path.exists(file_name + '.json'):
with open(file_name+'.json', 'w', encoding='utf-8') as json_file:
# json.dump(data, json_file, indent=0, ensure_ascii=False)
for entry in data:
json.dump(entry, json_file)
json_file.write('\n')
json_file.close()
else:
with open(file_name+'.json', 'a+', encoding='utf-8') as json_file:
# json.dump(data, json_file, indent=0, ensure_ascii=False)
for entry in data:
json.dump(entry, json_file, ensure_ascii=False)
json_file.write('\n')
json_file.close()
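# Illustrative usage (hypothetical file name and record): each entry is
# written as one JSON object per line (JSON Lines), and later calls append
# to the same file, e.g.
#   save_data_to_json('itjobs_sample', [{'job_title': 'Data Engineer'}])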
def call_api():
#json query
params = {
'api_key': API_KEY,
'limit': 3890
}
response = requests.get(url="https://api.itjobs.pt/job/list.json", params=params)
# print(response)
# Save results to json
results = response.json()
jobs = results['results']
return jobs
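# Note: main() below assumes each job dict returned by the API carries at
# least 'title', 'body', 'publishedAt', 'company', 'locations', 'wage', 'id'
# and 'slug'. A trimmed, purely hypothetical record would look like:
#   {'id': 1, 'slug': 'data-engineer', 'title': 'Data Engineer',
#    'body': '<p>...</p>', 'publishedAt': '2020-01-01 09:00:00',
#    'company': {'name': 'ACME'}, 'locations': [{'name': 'Lisboa'}],
#    'wage': None}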
def main(filename):
jobs = call_api()
job_offers = []
for j in jobs:
title = j['title']
description = remove_tags(j['body'])
post_date = j['publishedAt']
company = j['company']['name']
try:
job_location = [i['name'] for i in j['locations']]
job_location = ', '.join(job_location)
except KeyError:
job_location = ''
try:
salary = j['wage']
except KeyError:
salary = ''
job_offer = {
'job_title': title,
'job_description': description,
'post_date': post_date,
'scrape_date': date.today().strftime("%d/%m/%Y"),
'company': company,
'job_location': job_location,
'job_category': '',
'job_ref': f"https://www.itjobs.pt/oferta/{j['id']}/{j['slug']}",
'salary': salary,
}
job_offers.append(job_offer)
save_data_to_json(f"C:/Users/gilnr/OneDrive - NOVASBE/Work Project/Code/Data/{filename}", job_offers)
if __name__ == '__main__':
main('itjobs_jobs')
print('Jobs retrieved successfully')
|
# AutoTransform
# Large scale, component based code modification library
#
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2022-present Nathan Rockenbach <http://github.com/nathro>
# @black_format
"""The settings command is used to update AutoTransform settings, such as scheduler.json files,
manager.json files, configs and imported components."""
import json
import os
from argparse import ArgumentParser, Namespace
from typing import Dict, List
from autotransform.config.config import Config
from autotransform.config.default import DefaultConfigFetcher
from autotransform.schema.schema import AutoTransformSchema
from autotransform.util.component import ComponentFactory, ComponentImport
from autotransform.util.console import choose_options_from_list, error, get_str, info
from autotransform.util.manager import Manager
from autotransform.util.package import get_config_dir
from autotransform.util.scheduler import Scheduler
def add_args(parser: ArgumentParser) -> None:
"""Adds the args to a subparser that are required to update/view settings.
Args:
parser (ArgumentParser): The parser for the command.
"""
setting_type_group = parser.add_mutually_exclusive_group(required=True)
setting_type_group.add_argument(
"--user-config",
dest="setting_type",
action="store_const",
const="user_config",
help="Update or view the user configuration for AutoTransform",
)
setting_type_group.add_argument(
"--repo-config",
dest="setting_type",
action="store_const",
const="repo_config",
help="Update or view the repo configuration for AutoTransform",
)
setting_type_group.add_argument(
"--cwd-config",
dest="setting_type",
action="store_const",
const="cwd_config",
help="Update or view the current working directory configuration for AutoTransform",
)
setting_type_group.add_argument(
"--custom-components",
dest="setting_type",
action="store_const",
const="custom_components",
help="Update or view custom components",
)
setting_type_group.add_argument(
"--manager",
dest="setting_type",
action="store_const",
const="manager",
help="Update or view manager settings",
)
setting_type_group.add_argument(
"--schema",
type=str,
help="The path to an existing or to be created JSON encoded schema.",
)
parser.add_argument(
"--update",
dest="update_settings",
action="store_true",
help="Used to indicate updates are to be made to the settings.",
)
parser.set_defaults(func=settings_command_main, update_settings=False)
def settings_command_main(args: Namespace) -> None:
"""The main method for the settings command, handles the actual execution of updating
and viewing settings.
Args:
args (Namespace): The arguments supplied to the settings command.
"""
if args.setting_type == "user_config":
path = f"{get_config_dir()}/{DefaultConfigFetcher.FILE_NAME}"
handle_config(path, "User", args.update_settings)
elif args.setting_type == "repo_config":
path = f"{DefaultConfigFetcher.get_repo_config_dir()}/{DefaultConfigFetcher.FILE_NAME}"
handle_config(path, "Repo", args.update_settings)
elif args.setting_type == "cwd_config":
path = f"{DefaultConfigFetcher.get_cwd_config_dir()}/{DefaultConfigFetcher.FILE_NAME}"
handle_config(path, "CWD", args.update_settings)
elif args.setting_type == "custom_components":
handle_custom_components(args.update_settings)
elif args.setting_type == "manager":
handle_manager(args.update_settings)
else:
handle_schema(args.update_settings, args.schema)
def handle_config(path: str, config_type: str, update: bool) -> None:
"""Handles updating a config file.
Args:
path (str): The path to the file.
config_type (str): The type of config being updated (i.e. user).
update (bool): Whether to update the config.
"""
config = Config.read(path)
info(f"Current {config_type} Config\n{config!r}")
if not update:
return
config.from_console(config, user_config=config_type == "User")[0].write(path)
def handle_custom_components(update: bool) -> None:
"""Handle updating/viewing custom components.
Args:
update (bool): Whether to apply updates to the custom components.
"""
component_file_name = choose_options_from_list(
"Select a component type",
[
("input.json", "Inputs"),
("filter.json", "Filters"),
("batcher.json", "Batchers"),
("transformer.json", "Transformers"),
("validator.json", "Validators"),
("repo.json", "Repos"),
("schema_builder.json", "Schema Builders"),
("runner.json", "Runners"),
("item.json", "Items"),
("change.json", "Changes"),
("step.json", "Steps"),
("condition.json", "Conditions"),
],
)[0]
component_dict = ComponentFactory.get_custom_components_dict(component_file_name, strict=False)
if component_dict:
info("Custom components:")
for name, component_import in component_dict.items():
info(f"\t{name.removeprefix('custom/')}: {component_import!r}")
else:
info("No existing custom components")
if not update:
return
# Remove components
components_to_remove = get_components_to_remove(component_dict)
changed = bool(components_to_remove)
for component in components_to_remove:
del component_dict[component]
# Add components
components_to_add = get_components_to_add(component_dict)
changed = changed or bool(components_to_add)
component_dict = component_dict | components_to_add
if changed:
file_path = ComponentFactory.get_custom_components_path(component_file_name)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w+", encoding="UTF-8") as component_file:
component_file.write(
json.dumps(
{k.removeprefix("custom/"): v.bundle() for k, v in component_dict.items()},
indent=4,
)
)
component_file.flush()
def get_components_to_remove(component_dict: Dict[str, ComponentImport]) -> List[str]:
"""Gets a list of components to remove from the dictionary using console input.
Args:
component_dict (Dict[str, ComponentImport]): The custom component dictionary.
Returns:
List[str]: The keys to remove from the dictionary.
"""
components = []
if component_dict:
name = get_str("Enter a component name to remove(blank to skip): ")
else:
name = ""
while name != "":
if name.startswith("custom/"):
name = name.removeprefix("custom/")
if f"custom/{name}" not in component_dict:
error(f"No component import with name: {name}")
elif f"custom/{name}" in components:
error(f"Already removing component import with name: {name}")
else:
components.append(f"custom/{name}")
if len(component_dict) <= len(components):
break
name = get_str("Enter a component name to remove(blank to skip): ")
return components
def get_components_to_add(component_dict: Dict[str, ComponentImport]) -> Dict[str, ComponentImport]:
"""Gets a dictionary of new components to add to the custom component imports.
Args:
component_dict (Dict[str, ComponentImport]): The existing custom components.
Returns:
Dict[str, ComponentImport]: The components to add to the dictionary.
"""
components_to_add = {}
name = get_str("Enter component name to add(blank to skip): ")
while name != "":
if name.startswith("custom/"):
name = name.removeprefix("custom/")
if f"custom/{name}" in component_dict:
error(f"Component already exists with name: {name}")
elif f"custom/{name}" in components_to_add:
error(f"Already adding component with name: {name}")
else:
class_name = get_str("Enter the class representing this component: ")
module = get_str("Enter the fully qualified name of the module for the class: ")
components_to_add[f"custom/{name}"] = ComponentImport(
class_name=class_name, module=module
)
name = get_str("Enter component name to add(blank to skip): ")
return components_to_add
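# Illustrative result (hypothetical component): entering the name 'my_input',
# class 'MyInput' and module 'my_pkg.my_input' at the prompts would return
#   {'custom/my_input': ComponentImport(class_name='MyInput',
#                                       module='my_pkg.my_input')}
# which handle_custom_components() merges into the dict and writes back to
# the custom components JSON file.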
def handle_manager(update: bool) -> None:
"""Handle updating/viewing the Manager.
Args:
update (bool): Whether to apply updates to the Manager.
"""
path = f"{DefaultConfigFetcher.get_repo_config_dir()}/manager.json"
manager = None
try:
manager = Manager.read(path)
info(f"Current Manager\n{manager!r}")
except FileNotFoundError:
error("No Manager file found")
except json.JSONDecodeError as err:
error(f"Failed to decode Manager JSON\n{err}")
except ValueError as err:
error(f"Invalid Manager value\n{err}")
except TypeError as err:
error(f"Invalid Manager type\n{err}")
if not update:
return
Manager.from_console(manager).write(path)
def handle_scheduler(update: bool) -> None:
"""Handle updating/viewing the Scheduler.
Args:
update (bool): Whether to apply updates to the Scheduler.
"""
path = f"{DefaultConfigFetcher.get_repo_config_dir()}/scheduler.json"
scheduler = None
try:
scheduler = Scheduler.read(path)
info(f"Current Scheduler\n{scheduler!r}")
except FileNotFoundError:
error("No Scheduler file found")
except json.JSONDecodeError as err:
error(f"Failed to decode Scheduler JSON\n{err}")
except ValueError as err:
error(f"Invalid Scheduler value\n{err}")
except TypeError as err:
error(f"Invalid Scheduler type\n{err}")
if not update:
return
Scheduler.from_console(scheduler).write(path)
def handle_schema(update: bool, file_path: str) -> None:
"""Handle updating/viewing a Schema.
Args:
update (bool): Whether to apply updates to the Schema.
file_path (str): The path to the Schema file.
"""
try:
with open(file_path, "r", encoding="UTF-8") as schema_file:
schema = AutoTransformSchema.from_data(json.loads(schema_file.read()))
info(f"Existing schema:\n{schema!r}")
except FileNotFoundError:
info(f"No schema found at path: {file_path}")
schema = None
if update:
schema = AutoTransformSchema.from_console(schema)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w+", encoding="UTF-8") as schema_file:
schema_file.write(json.dumps(schema.bundle(), indent=4))
schema_file.flush()
|
# AutoTransform
# Large scale, component based code modification library
#
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2022-present Nathan Rockenbach <http://github.com/nathro>
# @black_format
"""The settings command is used to update AutoTransform settings, such as scheduler.json files,
manager.json files, configs and imported components."""
import json
import os
from argparse import ArgumentParser, Namespace
from typing import Dict, List
from autotransform.config.config import Config
from autotransform.config.default import DefaultConfigFetcher
from autotransform.schema.schema import AutoTransformSchema
from autotransform.util.component import ComponentFactory, ComponentImport
from autotransform.util.console import choose_options_from_list, error, get_str, info
from autotransform.util.manager import Manager
from autotransform.util.package import get_config_dir
from autotransform.util.scheduler import Scheduler
def add_args(parser: ArgumentParser) -> None:
"""Adds the args to a subparser that are required to update/view settings.
Args:
parser (ArgumentParser): The parser for the command.
"""
setting_type_group = parser.add_mutually_exclusive_group(required=True)
setting_type_group.add_argument(
"--user-config",
dest="setting_type",
action="store_const",
const="user_config",
help="Update or view the user configuration for AutoTransform",
)
setting_type_group.add_argument(
"--repo-config",
dest="setting_type",
action="store_const",
const="repo_config",
help="Update or view the repo configuration for AutoTransform",
)
setting_type_group.add_argument(
"--cwd-config",
dest="setting_type",
action="store_const",
const="cwd_config",
help="Update or view the current working directory configuration for AutoTransform",
)
setting_type_group.add_argument(
"--custom-components",
dest="setting_type",
action="store_const",
const="custom_components",
help="Update or view custom components",
)
setting_type_group.add_argument(
"--manager",
dest="setting_type",
action="store_const",
const="manager",
help="Update or view manager settings",
)
setting_type_group.add_argument(
"--schema",
type=str,
help="The path to an existing or to be created JSON encoded schema.",
)
parser.add_argument(
"--update",
dest="update_settings",
action="store_true",
help="Used to indicate updates are to be made to the settings.",
)
parser.set_defaults(func=settings_command_main, update_settings=False)
def settings_command_main(args: Namespace) -> None:
"""The main method for the settings command, handles the actual execution of updating
and viewing settings.
Args:
args (Namespace): The arguments supplied to the settings command.
"""
if args.setting_type == "user_config":
path = f"{get_config_dir()}/{DefaultConfigFetcher.FILE_NAME}"
handle_config(path, "User", args.update_settings)
elif args.setting_type == "repo_config":
path = f"{DefaultConfigFetcher.get_repo_config_dir()}/{DefaultConfigFetcher.FILE_NAME}"
handle_config(path, "Repo", args.update_settings)
elif args.setting_type == "cwd_config":
path = f"{DefaultConfigFetcher.get_cwd_config_dir()}/{DefaultConfigFetcher.FILE_NAME}"
handle_config(path, "CWD", args.update_settings)
elif args.setting_type == "custom_components":
handle_custom_components(args.update_settings)
elif args.setting_type == "manager":
handle_manager(args.update_settings)
else:
handle_schema(args.update_settings, args.schema)
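# Hedged usage sketch: assuming this module is wired into a CLI under a "settings"
# subcommand (the executable name below is an assumption, not taken from this file),
# typical invocations would look roughly like:
#
#   autotransform settings --user-config                          # view the user config
#   autotransform settings --manager --update                     # interactively update manager.json
#   autotransform settings --schema path/to/schema.json --update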
def handle_config(path: str, config_type: str, update: bool) -> None:
"""Handles updating a config file.
Args:
path (str): The path to the file.
config_type (str): The type of config being updated (i.e. user).
update (bool): Whether to update the config.
"""
config = Config.read(path)
info(f"Current {config_type} Config\n{config!r}")
if not update:
return
config.from_console(config, user_config=config_type == "User")[0].write(path)
def handle_custom_components(update: bool) -> None:
"""Handle updating/viewing custom components.
Args:
update (bool): Whether to apply updates to the custom components.
"""
component_file_name = choose_options_from_list(
"Select a component type",
[
("input.json", "Inputs"),
("filter.json", "Filters"),
("batcher.json", "Batchers"),
("transformer.json", "Transformers"),
("validator.json", "Validators"),
("repo.json", "Repos"),
("schema_builder.json", "Schema Builders"),
("runner.json", "Runners"),
("item.json", "Items"),
("change.json", "Changes"),
("step.json", "Steps"),
("condition.json", "Conditions"),
],
)[0]
component_dict = ComponentFactory.get_custom_components_dict(component_file_name, strict=False)
if component_dict:
info("Custom components:")
for name, component_import in component_dict.items():
info(f"\t{name.removeprefix('custom/')}: {component_import!r}")
else:
info("No existing custom components")
if not update:
return
# Remove components
components_to_remove = get_components_to_remove(component_dict)
changed = bool(components_to_remove)
for component in components_to_remove:
del component_dict[component]
# Add components
components_to_add = get_components_to_add(component_dict)
changed = changed or bool(components_to_add)
component_dict = component_dict | components_to_add
if changed:
file_path = ComponentFactory.get_custom_components_path(component_file_name)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w+", encoding="UTF-8") as component_file:
component_file.write(
json.dumps(
{k.removeprefix("custom/"): v.bundle() for k, v in component_dict.items()},
indent=4,
)
)
component_file.flush()
def get_components_to_remove(component_dict: Dict[str, ComponentImport]) -> List[str]:
"""Gets a list of components to remove from the dictionary using console input.
Args:
component_dict (Dict[str, ComponentImport]): The custom component dictionary.
Returns:
List[str]: The keys to remove from the dictionary.
"""
components = []
if component_dict:
        name = get_str("Enter a component name to remove (blank to skip): ")
else:
name = ""
while name != "":
if name.startswith("custom/"):
name = name.removeprefix("custom/")
if f"custom/{name}" not in component_dict:
error(f"No component import with name: {name}")
elif f"custom/{name}" in components:
error(f"Already removing component import with name: {name}")
else:
components.append(f"custom/{name}")
if len(component_dict) <= len(components):
break
        name = get_str("Enter a component name to remove (blank to skip): ")
return components
def get_components_to_add(component_dict: Dict[str, ComponentImport]) -> Dict[str, ComponentImport]:
"""Gets a dictionary of new components to add to the custom component imports.
Args:
component_dict (Dict[str, ComponentImport]): The existing custom components.
Returns:
Dict[str, ComponentImport]: The components to add to the dictionary.
"""
components_to_add = {}
    name = get_str("Enter component name to add (blank to skip): ")
while name != "":
if name.startswith("custom/"):
name = name.removeprefix("custom/")
if f"custom/{name}" in component_dict:
error(f"Component already exists with name: {name}")
elif f"custom/{name}" in components_to_add:
error(f"Already adding component with name: {name}")
else:
class_name = get_str("Enter the class representing this component: ")
module = get_str("Enter the fully qualified name of the module for the class: ")
components_to_add[f"custom/{name}"] = ComponentImport(
class_name=class_name, module=module
)
        name = get_str("Enter component name to add (blank to skip): ")
return components_to_add
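# Illustrative sketch (an assumption, not taken from AutoTransform documentation): if a
# custom component named "my_input" maps to class MyInput in module my_package.inputs,
# the file written by handle_custom_components (e.g. input.json) would look roughly like:
#
#   {
#       "my_input": {
#           "class_name": "MyInput",
#           "module": "my_package.inputs"
#       }
#   }
#
# The "custom/" prefix used for the in-memory keys is stripped before writing.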
def handle_manager(update: bool) -> None:
"""Handle updating/viewing the Manager.
Args:
update (bool): Whether to apply updates to the Manager.
"""
path = f"{DefaultConfigFetcher.get_repo_config_dir()}/manager.json"
manager = None
try:
manager = Manager.read(path)
info(f"Current Manager\n{manager!r}")
except FileNotFoundError:
error("No Manager file found")
except json.JSONDecodeError as err:
error(f"Failed to decode Manager JSON\n{err}")
except ValueError as err:
error(f"Invalid Manager value\n{err}")
except TypeError as err:
error(f"Invalid Manager type\n{err}")
if not update:
return
Manager.from_console(manager).write(path)
def handle_scheduler(update: bool) -> None:
"""Handle updating/viewing the Scheduler.
Args:
update (bool): Whether to apply updates to the Scheduler.
"""
path = f"{DefaultConfigFetcher.get_repo_config_dir()}/scheduler.json"
scheduler = None
try:
scheduler = Scheduler.read(path)
info(f"Current Scheduler\n{scheduler!r}")
except FileNotFoundError:
        error("No Scheduler file found")
    except json.JSONDecodeError as err:
        error(f"Failed to decode Scheduler JSON\n{err}")
    except ValueError as err:
        error(f"Invalid Scheduler value\n{err}")
    except TypeError as err:
        error(f"Invalid Scheduler type\n{err}")
if not update:
return
Scheduler.from_console(scheduler).write(path)
def handle_schema(update: bool, file_path: str) -> None:
"""Handle updating/viewing a Schema.
Args:
update (bool): Whether to apply updates to the Schema.
file_path (str): The path to the Schema file.
"""
try:
with open(file_path, "r", encoding="UTF-8") as schema_file:
schema = AutoTransformSchema.from_data(json.loads(schema_file.read()))
info(f"Existing schema:\n{schema!r}")
except FileNotFoundError:
info(f"No schema found at path: {file_path}")
schema = None
if update:
schema = AutoTransformSchema.from_console(schema)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w+", encoding="UTF-8") as schema_file:
schema_file.write(json.dumps(schema.bundle(), indent=4))
schema_file.flush()
|
import boto3
from datetime import datetime, timedelta
from os import environ
from src.functions.csv_dump import send_sql
rds_client = boto3.client("rds-data")
ddb_resource = boto3.resource("dynamodb")
shub_table = ddb_resource.Table(environ.get("TABLE_SHUB"))
shub_index = environ.get("SHUB_INDEX")
db_name = environ.get("DB_NAME")
db_table = environ.get("DB_TABLE")
db_cluster_arn = environ.get("DB_CLUSTER_ARN")
db_secret_arn = environ.get("DB_SECRET_ARN")
pipeline_version = environ.get("PIPELINE_VERSION")
def handler(event, context):
days_ago = []
for day in range(1,8):
date = (datetime.now() - timedelta(day)).strftime("%Y-%m-%d")
days_ago.append(date)
print(f"Offloading fixtures from '{days_ago[-1]}' to '{days_ago[0]}'")
for day in days_ago:
fixtures = get_day_fixtures(day)
print(f"Updating '{len(fixtures)}' fixtures on '{day}' into aurora ...")
for fixture in fixtures:
update_sql = f"update {db_table} set gameFtScore = :fts where gameID = :id"
update_params = [
{"name": "id", "value":{"stringValue": fixture["fixture_id"]}},
{"name": "fts", "value":{"stringValue": fixture["ft_score"]}}
]
response = send_sql(db_name, db_secret_arn, db_cluster_arn, update_sql, update_params)
if response:
                print(f"Updated '{day}' fixture id: '{fixture['fixture_id']}'")
return {
"query_dates": days_ago,
"pipeline_version": pipeline_version
}
def get_day_fixtures(date):
day_fixtures = shub_table.query(
IndexName=shub_index,
KeyConditionExpression="play_date = :date and ft_score > :fts",
ExpressionAttributeValues={ ":date": date, ":fts": "-" },
ProjectionExpression="fixture_id, ft_score"
)
return day_fixtures["Items"]
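# The send_sql helper above is imported from src.functions.csv_dump and is not shown in
# this file. A minimal sketch of what such a helper might look like (an assumption, not
# the actual implementation), wrapping the RDS Data API's execute_statement call:
#
#   def send_sql(database, secret_arn, cluster_arn, sql, params):
#       return rds_client.execute_statement(
#           database=database,
#           secretArn=secret_arn,
#           resourceArn=cluster_arn,
#           sql=sql,
#           parameters=params,
#       )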
|
import boto3
from datetime import datetime, timedelta
from os import environ
from src.functions.csv_dump import send_sql
rds_client = boto3.client("rds-data")
ddb_resource = boto3.resource("dynamodb")
shub_table = ddb_resource.Table(environ.get("TABLE_SHUB"))
shub_index = environ.get("SHUB_INDEX")
db_name = environ.get("DB_NAME")
db_table = environ.get("DB_TABLE")
db_cluster_arn = environ.get("DB_CLUSTER_ARN")
db_secret_arn = environ.get("DB_SECRET_ARN")
pipeline_version = environ.get("PIPELINE_VERSION")
def handler(event, context):
days_ago = []
for day in range(1,8):
date = (datetime.now() - timedelta(day)).strftime("%Y-%m-%d")
days_ago.append(date)
print(f"Offloading fixtures from '{days_ago[-1]}' to '{days_ago[0]}'")
for day in days_ago:
fixtures = get_day_fixtures(day)
print(f"Updating '{len(fixtures)}' fixtures on '{day}' into aurora ...")
for fixture in fixtures:
update_sql = f"update {db_table} set gameFtScore = :fts where gameID = :id"
update_params = [
{"name": "id", "value":{"stringValue": fixture["fixture_id"]}},
{"name": "fts", "value":{"stringValue": fixture["ft_score"]}}
]
response = send_sql(db_name, db_secret_arn, db_cluster_arn, update_sql, update_params)
if response:
print(f"Updated '{day}' fixture id: '{fixture['fixture_id']}'")
return {
"query_dates": days_ago,
"pipeline_version": pipeline_version
}
def get_day_fixtures(date):
day_fixtures = shub_table.query(
IndexName=shub_index,
KeyConditionExpression="play_date = :date and ft_score > :fts",
ExpressionAttributeValues={ ":date": date, ":fts": "-" },
ProjectionExpression="fixture_id, ft_score"
)
return day_fixtures["Items"]
|
#!/usr/bin/env python3
"""
Common test run patterns
"""
from datetime import datetime
from clusters import NullCluster
from pre_tests import NullPreTest
from ci_tests import NullTest
from post_tests import NullPostTest
class ClusterTestSetsRunner:
"""A cluster test runner that runs multiple sets of pre, test & post steps
wrapped by a cluster provision and with similar semantics to
ClusterTestRunner. Each test set will attempt to run regardless of the outcome of
    prior sets. This can be overridden on a set-by-set basis with 'always_run'."""
def __init__(
self,
cluster=NullCluster(),
final_post=NullPostTest(),
sets=None,
):
self.cluster = cluster
self.final_post = final_post
if sets is None:
sets = []
self.sets = sets
def run(self):
hold = None
try:
self.log_event("About to provision")
self.cluster.provision()
self.log_event("provisioned")
except Exception as err:
self.log_event("ERROR: provision failed")
hold = err
if hold is None:
for idx, test_set in enumerate(self.sets):
test_set = {
**{
"name": f"set {idx + 1}",
"pre_test": NullPreTest(),
"test": NullTest(),
"post_test": NullPostTest(),
"always_run": True,
},
**test_set,
}
if hold is None or test_set["always_run"]:
try:
self.log_event("About to run", test_set)
self.run_test_set(test_set)
self.log_event("run completed", test_set)
except Exception as err:
self.log_event("ERROR: run failed", test_set)
if hold is None:
hold = err
try:
self.log_event("About to teardown")
self.cluster.teardown()
self.log_event("teardown completed")
except Exception as err:
self.log_event("ERROR: teardown failed")
if hold is None:
hold = err
try:
self.log_event("About to run final post")
self.final_post.run()
self.log_event("final post completed")
except Exception as err:
self.log_event("ERROR: final post failed")
if hold is None:
hold = err
if hold is not None:
raise hold
def run_test_set(self, test_set):
hold = None
try:
self.log_event("About to run pre test", test_set)
test_set["pre_test"].run()
self.log_event("pre test completed", test_set)
except Exception as err:
self.log_event("ERROR: pre test failed", test_set)
hold = err
if hold is None:
try:
self.log_event("About to run test", test_set)
test_set["test"].run()
self.log_event("test completed", test_set)
except Exception as err:
self.log_event("ERROR: test failed", test_set)
hold = err
try:
self.log_event("About to run post test", test_set)
test_set["post_test"].run(
test_output_dirs=test_set["test"].test_output_dirs
)
self.log_event("post test completed", test_set)
except Exception as err:
self.log_event("ERROR: post test failed", test_set)
if hold is None:
hold = err
if hold is not None:
raise hold
def log_event(self, msg, test_set=None):
now = datetime.now()
time = now.strftime("%H:%M:%S")
marker = "****"
if test_set is not None and test_set["name"] is not None:
            msg = f"{msg} [{test_set['name']}]"
print(marker)
print(f"{marker} {time}: {msg}")
print(marker)
# pylint: disable=too-many-arguments
class ClusterTestRunner(ClusterTestSetsRunner):
"""A simple cluster test runner that:
. provisions a cluster
. runs any pre_test (if provision was successful)
. runs the test (if provisioned and any pre_test was successful)
. runs post_test (if the test ran)
. tears down the cluster"""
def __init__(
self,
cluster=NullCluster(),
pre_test=NullPreTest(),
test=NullTest(),
post_test=NullPostTest(),
final_post=NullPostTest(),
):
super().__init__(
cluster=cluster,
final_post=final_post,
sets=[
{
"name": None,
"pre_test": pre_test,
"test": test,
"post_test": post_test,
}
],
)
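# Hedged usage sketch (GKECluster, QaTest and CollectLogs are hypothetical classes that
# follow the same provision/run interfaces as the Null* placeholders above): a two-set
# run where log collection always happens, even if the QA set fails, could look like:
#
#   ClusterTestSetsRunner(
#       cluster=GKECluster(),
#       sets=[
#           {"name": "qa", "test": QaTest()},
#           {"name": "collect logs", "post_test": CollectLogs(), "always_run": True},
#       ],
#   ).run()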
|
#!/usr/bin/env python3
"""
Common test run patterns
"""
from datetime import datetime
from clusters import NullCluster
from pre_tests import NullPreTest
from ci_tests import NullTest
from post_tests import NullPostTest
class ClusterTestSetsRunner:
"""A cluster test runner that runs multiple sets of pre, test & post steps
wrapped by a cluster provision and with similar semantics to
ClusterTestRunner. Each test set will attempt to run regardless of the outcome of
    prior sets. This can be overridden on a set-by-set basis with 'always_run'."""
def __init__(
self,
cluster=NullCluster(),
final_post=NullPostTest(),
sets=None,
):
self.cluster = cluster
self.final_post = final_post
if sets is None:
sets = []
self.sets = sets
def run(self):
hold = None
try:
self.log_event("About to provision")
self.cluster.provision()
self.log_event("provisioned")
except Exception as err:
self.log_event("ERROR: provision failed")
hold = err
if hold is None:
for idx, test_set in enumerate(self.sets):
test_set = {
**{
"name": f"set {idx + 1}",
"pre_test": NullPreTest(),
"test": NullTest(),
"post_test": NullPostTest(),
"always_run": True,
},
**test_set,
}
if hold is None or test_set["always_run"]:
try:
self.log_event("About to run", test_set)
self.run_test_set(test_set)
self.log_event("run completed", test_set)
except Exception as err:
self.log_event("ERROR: run failed", test_set)
if hold is None:
hold = err
try:
self.log_event("About to teardown")
self.cluster.teardown()
self.log_event("teardown completed")
except Exception as err:
self.log_event("ERROR: teardown failed")
if hold is None:
hold = err
try:
self.log_event("About to run final post")
self.final_post.run()
self.log_event("final post completed")
except Exception as err:
self.log_event("ERROR: final post failed")
if hold is None:
hold = err
if hold is not None:
raise hold
def run_test_set(self, test_set):
hold = None
try:
self.log_event("About to run pre test", test_set)
test_set["pre_test"].run()
self.log_event("pre test completed", test_set)
except Exception as err:
self.log_event("ERROR: pre test failed", test_set)
hold = err
if hold is None:
try:
self.log_event("About to run test", test_set)
test_set["test"].run()
self.log_event("test completed", test_set)
except Exception as err:
self.log_event("ERROR: test failed", test_set)
hold = err
try:
self.log_event("About to run post test", test_set)
test_set["post_test"].run(
test_output_dirs=test_set["test"].test_output_dirs
)
self.log_event("post test completed", test_set)
except Exception as err:
self.log_event("ERROR: post test failed", test_set)
if hold is None:
hold = err
if hold is not None:
raise hold
def log_event(self, msg, test_set=None):
now = datetime.now()
time = now.strftime("%H:%M:%S")
marker = "****"
if test_set is not None and test_set["name"] is not None:
msg = f"{msg} [{test_set['name']}]"
print(marker)
print(f"{marker} {time}: {msg}")
print(marker)
# pylint: disable=too-many-arguments
class ClusterTestRunner(ClusterTestSetsRunner):
"""A simple cluster test runner that:
. provisions a cluster
. runs any pre_test (if provision was successful)
. runs the test (if provisioned and any pre_test was successful)
. runs post_test (if the test ran)
. tears down the cluster"""
def __init__(
self,
cluster=NullCluster(),
pre_test=NullPreTest(),
test=NullTest(),
post_test=NullPostTest(),
final_post=NullPostTest(),
):
super().__init__(
cluster=cluster,
final_post=final_post,
sets=[
{
"name": None,
"pre_test": pre_test,
"test": test,
"post_test": post_test,
}
],
)
|
import collections
import inspect
import logging
from typing import (
Any,
Callable,
Dict,
Optional,
Tuple,
Union,
overload,
)
from fastapi import APIRouter, FastAPI
from starlette.requests import Request
from uvicorn.config import Config
from uvicorn.lifespan.on import LifespanOn
from ray.serve.common import DeploymentStatusInfo
from ray.serve.config import (
AutoscalingConfig,
DeploymentConfig,
HTTPOptions,
)
from ray.serve.constants import (
DEFAULT_CHECKPOINT_PATH,
HTTP_PROXY_TIMEOUT,
SERVE_CONTROLLER_NAME,
CONTROLLER_MAX_CONCURRENCY,
DEFAULT_HTTP_HOST,
DEFAULT_HTTP_PORT,
)
from ray.serve.controller import ServeController
from ray.serve.deployment import Deployment
from ray.serve.exceptions import RayServeException
from ray.experimental.dag import DAGNode
from ray.serve.handle import RayServeHandle
from ray.serve.http_util import ASGIHTTPSender, make_fastapi_class_based_view
from ray.serve.logging_utils import LoggingContext
from ray.serve.utils import (
ensure_serialization_context,
format_actor_name,
get_current_node_resource_key,
get_random_letters,
in_interactive_shell,
DEFAULT,
)
from ray.util.annotations import PublicAPI
import ray
from ray import cloudpickle
from ray.serve.deployment_graph import ClassNode, FunctionNode
from ray.serve.application import Application
from ray.serve.client import ServeControllerClient, get_controller_namespace
from ray.serve.context import (
set_global_client,
get_global_client,
get_internal_replica_context,
ReplicaContext,
)
from ray._private.usage import usage_lib
logger = logging.getLogger(__file__)
@PublicAPI(stability="beta")
def start(
detached: bool = False,
http_options: Optional[Union[dict, HTTPOptions]] = None,
dedicated_cpu: bool = False,
_checkpoint_path: str = DEFAULT_CHECKPOINT_PATH,
_override_controller_namespace: Optional[str] = None,
**kwargs,
) -> ServeControllerClient:
"""Initialize a serve instance.
By default, the instance will be scoped to the lifetime of the returned
Client object (or when the script exits). If detached is set to True, the
instance will instead persist until serve.shutdown() is called. This is
only relevant if connecting to a long-running Ray cluster (e.g., with
ray.init(address="auto") or ray.init("ray://<remote_addr>")).
Args:
        detached (bool): Whether or not the instance should be detached from this
script. If set, the instance will live on the Ray cluster until it is
explicitly stopped with serve.shutdown().
http_options (Optional[Dict, serve.HTTPOptions]): Configuration options
for HTTP proxy. You can pass in a dictionary or HTTPOptions object
with fields:
- host(str, None): Host for HTTP servers to listen on. Defaults to
"127.0.0.1". To expose Serve publicly, you probably want to set
this to "0.0.0.0".
- port(int): Port for HTTP server. Defaults to 8000.
- root_path(str): Root path to mount the serve application
(for example, "/serve"). All deployment routes will be prefixed
with this path. Defaults to "".
- middlewares(list): A list of Starlette middlewares that will be
applied to the HTTP servers in the cluster. Defaults to [].
- location(str, serve.config.DeploymentMode): The deployment
location of HTTP servers:
- "HeadOnly": start one HTTP server on the head node. Serve
assumes the head node is the node you executed serve.start
on. This is the default.
- "EveryNode": start one HTTP server per node.
- "NoServer" or None: disable HTTP server.
- num_cpus (int): The number of CPU cores to reserve for each
internal Serve HTTP proxy actor. Defaults to 0.
dedicated_cpu (bool): Whether to reserve a CPU core for the internal
Serve controller actor. Defaults to False.
"""
usage_lib.record_library_usage("serve")
http_deprecated_args = ["http_host", "http_port", "http_middlewares"]
for key in http_deprecated_args:
if key in kwargs:
raise ValueError(
f"{key} is deprecated, please use serve.start(http_options="
                f'{{"{key}": {kwargs[key]}}}) instead.'
)
# Initialize ray if needed.
ray.worker.global_worker.filter_logs_by_job = False
if not ray.is_initialized():
ray.init(namespace="serve")
controller_namespace = get_controller_namespace(
detached, _override_controller_namespace=_override_controller_namespace
)
try:
client = get_global_client(
_override_controller_namespace=_override_controller_namespace,
_health_check_controller=True,
)
logger.info(
"Connecting to existing Serve instance in namespace "
f"'{controller_namespace}'."
)
_check_http_and_checkpoint_options(client, http_options, _checkpoint_path)
return client
except RayServeException:
pass
if detached:
controller_name = SERVE_CONTROLLER_NAME
else:
controller_name = format_actor_name(get_random_letters(), SERVE_CONTROLLER_NAME)
if isinstance(http_options, dict):
http_options = HTTPOptions.parse_obj(http_options)
if http_options is None:
http_options = HTTPOptions()
controller = ServeController.options(
num_cpus=1 if dedicated_cpu else 0,
name=controller_name,
lifetime="detached" if detached else None,
max_restarts=-1,
max_task_retries=-1,
# Pin Serve controller on the head node.
resources={get_current_node_resource_key(): 0.01},
namespace=controller_namespace,
max_concurrency=CONTROLLER_MAX_CONCURRENCY,
).remote(
controller_name,
http_options,
_checkpoint_path,
detached=detached,
_override_controller_namespace=_override_controller_namespace,
)
proxy_handles = ray.get(controller.get_http_proxies.remote())
if len(proxy_handles) > 0:
try:
ray.get(
[handle.ready.remote() for handle in proxy_handles.values()],
timeout=HTTP_PROXY_TIMEOUT,
)
except ray.exceptions.GetTimeoutError:
raise TimeoutError(
f"HTTP proxies not available after {HTTP_PROXY_TIMEOUT}s."
)
client = ServeControllerClient(
controller,
controller_name,
detached=detached,
_override_controller_namespace=_override_controller_namespace,
)
set_global_client(client)
    logger.info(
        f"Started{' detached ' if detached else ' '}Serve instance in "
f"namespace '{controller_namespace}'."
)
return client
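# Hedged usage sketch (assumes a running Ray cluster is reachable; the option values are
# illustrative only, based on the http_options fields documented above):
#
#   from ray import serve
#   client = serve.start(
#       detached=True,
#       http_options={"host": "0.0.0.0", "port": 8000, "root_path": "/serve"},
#   )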
@PublicAPI
def shutdown() -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
instance.
"""
try:
client = get_global_client()
except RayServeException:
logger.info(
"Nothing to shut down. There's no Serve application "
"running on this Ray cluster."
)
return
client.shutdown()
set_global_client(None)
@PublicAPI
def get_replica_context() -> ReplicaContext:
"""If called from a deployment, returns the deployment and replica tag.
A replica tag uniquely identifies a single replica for a Ray Serve
deployment at runtime. Replica tags are of the form
`<deployment_name>#<random letters>`.
Raises:
RayServeException: if not called from within a Ray Serve deployment.
Example:
>>> from ray import serve
>>> # deployment_name
>>> serve.get_replica_context().deployment # doctest: +SKIP
>>> # deployment_name#krcwoa
>>> serve.get_replica_context().replica_tag # doctest: +SKIP
"""
internal_replica_context = get_internal_replica_context()
if internal_replica_context is None:
raise RayServeException(
"`serve.get_replica_context()` "
"may only be called from within a "
"Ray Serve deployment."
)
return internal_replica_context
@PublicAPI(stability="beta")
def ingress(app: Union["FastAPI", "APIRouter", Callable]):
"""Mark an ASGI application ingress for Serve.
Args:
        app (FastAPI, APIRouter, Starlette, etc.): the app or router object to serve
as ingress for this deployment. It can be any ASGI compatible
object.
Example:
>>> from fastapi import FastAPI
>>> from ray import serve
        >>> app = FastAPI() # doctest: +SKIP
>>> @serve.deployment # doctest: +SKIP
... @serve.ingress(app) # doctest: +SKIP
... class App: # doctest: +SKIP
... pass # doctest: +SKIP
>>> App.deploy() # doctest: +SKIP
"""
def decorator(cls):
if not inspect.isclass(cls):
raise ValueError("@serve.ingress must be used with a class.")
if issubclass(cls, collections.abc.Callable):
raise ValueError(
"Class passed to @serve.ingress may not have __call__ method."
)
# Sometimes there are decorators on the methods. We want to fix
# the fast api routes here.
if isinstance(app, (FastAPI, APIRouter)):
make_fastapi_class_based_view(app, cls)
# Free the state of the app so subsequent modification won't affect
# this ingress deployment. We don't use copy.copy here to avoid
# recursion issue.
ensure_serialization_context()
frozen_app = cloudpickle.loads(cloudpickle.dumps(app))
class ASGIAppWrapper(cls):
async def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._serve_app = frozen_app
# Use uvicorn's lifespan handling code to properly deal with
# startup and shutdown event.
self._serve_asgi_lifespan = LifespanOn(
Config(self._serve_app, lifespan="on")
)
# Replace uvicorn logger with our own.
self._serve_asgi_lifespan.logger = logger
# LifespanOn's logger logs in INFO level thus becomes spammy
# Within this block we temporarily uplevel for cleaner logging
with LoggingContext(
self._serve_asgi_lifespan.logger, level=logging.WARNING
):
await self._serve_asgi_lifespan.startup()
async def __call__(self, request: Request):
sender = ASGIHTTPSender()
await self._serve_app(
request.scope,
request.receive,
sender,
)
return sender.build_asgi_response()
# NOTE: __del__ must be async so that we can run asgi shutdown
# in the same event loop.
async def __del__(self):
# LifespanOn's logger logs in INFO level thus becomes spammy
# Within this block we temporarily uplevel for cleaner logging
with LoggingContext(
self._serve_asgi_lifespan.logger, level=logging.WARNING
):
await self._serve_asgi_lifespan.shutdown()
# Make sure to call user's del method as well.
super_cls = super()
if hasattr(super_cls, "__del__"):
super_cls.__del__()
ASGIAppWrapper.__name__ = cls.__name__
return ASGIAppWrapper
return decorator
@overload
def deployment(func_or_class: Callable) -> Deployment:
pass
@overload
def deployment(
name: Optional[str] = None,
version: Optional[str] = None,
prev_version: Optional[str] = None,
num_replicas: Optional[int] = None,
init_args: Optional[Tuple[Any]] = None,
init_kwargs: Optional[Dict[Any, Any]] = None,
route_prefix: Union[str, None, DEFAULT] = DEFAULT.VALUE,
ray_actor_options: Optional[Dict] = None,
user_config: Optional[Any] = None,
max_concurrent_queries: Optional[int] = None,
_autoscaling_config: Optional[Union[Dict, AutoscalingConfig]] = None,
_graceful_shutdown_wait_loop_s: Optional[float] = None,
_graceful_shutdown_timeout_s: Optional[float] = None,
_health_check_period_s: Optional[float] = None,
_health_check_timeout_s: Optional[float] = None,
) -> Callable[[Callable], Deployment]:
pass
@PublicAPI
def deployment(
_func_or_class: Optional[Callable] = None,
name: Optional[str] = None,
version: Optional[str] = None,
prev_version: Optional[str] = None,
num_replicas: Optional[int] = None,
init_args: Optional[Tuple[Any]] = None,
init_kwargs: Optional[Dict[Any, Any]] = None,
route_prefix: Union[str, None, DEFAULT] = DEFAULT.VALUE,
ray_actor_options: Optional[Dict] = None,
user_config: Optional[Any] = None,
max_concurrent_queries: Optional[int] = None,
_autoscaling_config: Optional[Union[Dict, AutoscalingConfig]] = None,
_graceful_shutdown_wait_loop_s: Optional[float] = None,
_graceful_shutdown_timeout_s: Optional[float] = None,
_health_check_period_s: Optional[float] = None,
_health_check_timeout_s: Optional[float] = None,
) -> Callable[[Callable], Deployment]:
"""Define a Serve deployment.
Args:
name (Optional[str]): Globally-unique name identifying this deployment.
If not provided, the name of the class or function will be used.
version (Optional[str]): Version of the deployment. This is used to
indicate a code change for the deployment; when it is re-deployed
with a version change, a rolling update of the replicas will be
performed. If not provided, every deployment will be treated as a
new version.
prev_version (Optional[str]): Version of the existing deployment which
is used as a precondition for the next deployment. If prev_version
does not match with the existing deployment's version, the
deployment will fail. If not provided, deployment procedure will
not check the existing deployment's version.
num_replicas (Optional[int]): The number of processes to start up that
will handle requests to this deployment. Defaults to 1.
init_args (Optional[Tuple]): Positional args to be passed to the class
constructor when starting up deployment replicas. These can also be
passed when you call `.deploy()` on the returned Deployment.
init_kwargs (Optional[Dict]): Keyword args to be passed to the class
constructor when starting up deployment replicas. These can also be
passed when you call `.deploy()` on the returned Deployment.
route_prefix (Optional[str]): Requests to paths under this HTTP path
prefix will be routed to this deployment. Defaults to '/{name}'.
When set to 'None', no HTTP endpoint will be created.
Routing is done based on longest-prefix match, so if you have
deployment A with a prefix of '/a' and deployment B with a prefix
of '/a/b', requests to '/a', '/a/', and '/a/c' go to A and requests
to '/a/b', '/a/b/', and '/a/b/c' go to B. Routes must not end with
a '/' unless they're the root (just '/'), which acts as a
catch-all.
ray_actor_options (dict): Options to be passed to the Ray actor
constructor such as resource requirements.
user_config (Optional[Any]): [experimental] Config to pass to the
reconfigure method of the deployment. This can be updated
dynamically without changing the version of the deployment and
restarting its replicas. The user_config needs to be hashable to
keep track of updates, so it must only contain hashable types, or
hashable types nested in lists and dictionaries.
max_concurrent_queries (Optional[int]): The maximum number of queries
that will be sent to a replica of this deployment without receiving
a response. Defaults to 100.
Example:
>>> from ray import serve
>>> @serve.deployment(name="deployment1", version="v1") # doctest: +SKIP
... class MyDeployment: # doctest: +SKIP
... pass # doctest: +SKIP
>>> MyDeployment.deploy(*init_args) # doctest: +SKIP
>>> MyDeployment.options( # doctest: +SKIP
... num_replicas=2, init_args=init_args).deploy()
Returns:
Deployment
"""
if num_replicas is not None and _autoscaling_config is not None:
raise ValueError(
"Manually setting num_replicas is not allowed when "
"_autoscaling_config is provided."
)
config = DeploymentConfig.from_default(
ignore_none=True,
num_replicas=num_replicas,
user_config=user_config,
max_concurrent_queries=max_concurrent_queries,
autoscaling_config=_autoscaling_config,
graceful_shutdown_wait_loop_s=_graceful_shutdown_wait_loop_s,
graceful_shutdown_timeout_s=_graceful_shutdown_timeout_s,
health_check_period_s=_health_check_period_s,
health_check_timeout_s=_health_check_timeout_s,
)
def decorator(_func_or_class):
return Deployment(
_func_or_class,
name if name is not None else _func_or_class.__name__,
config,
version=version,
prev_version=prev_version,
init_args=init_args,
init_kwargs=init_kwargs,
route_prefix=route_prefix,
ray_actor_options=ray_actor_options,
_internal=True,
)
# This handles both parametrized and non-parametrized usage of the
# decorator. See the @serve.batch code for more details.
return decorator(_func_or_class) if callable(_func_or_class) else decorator
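# Hedged sketch of the longest-prefix routing described in the docstring above (the
# deployment classes are hypothetical):
#
#   @serve.deployment(route_prefix="/a")
#   class A:
#       pass
#
#   @serve.deployment(route_prefix="/a/b")
#   class B:
#       pass
#
#   # Requests to /a, /a/, and /a/c go to A; requests to /a/b, /a/b/, and /a/b/c go to B.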
@PublicAPI
def get_deployment(name: str) -> Deployment:
"""Dynamically fetch a handle to a Deployment object.
This can be used to update and redeploy a deployment without access to
the original definition.
Example:
>>> from ray import serve
>>> MyDeployment = serve.get_deployment("name") # doctest: +SKIP
>>> MyDeployment.options(num_replicas=10).deploy() # doctest: +SKIP
Args:
name(str): name of the deployment. This must have already been
deployed.
Returns:
Deployment
"""
try:
(
deployment_info,
route_prefix,
) = get_global_client().get_deployment_info(name)
except KeyError:
raise KeyError(
f"Deployment {name} was not found. Did you call Deployment.deploy()?"
)
return Deployment(
cloudpickle.loads(deployment_info.replica_config.serialized_deployment_def),
name,
deployment_info.deployment_config,
version=deployment_info.version,
init_args=deployment_info.replica_config.init_args,
init_kwargs=deployment_info.replica_config.init_kwargs,
route_prefix=route_prefix,
ray_actor_options=deployment_info.replica_config.ray_actor_options,
_internal=True,
)
@PublicAPI
def list_deployments() -> Dict[str, Deployment]:
"""Returns a dictionary of all active deployments.
Dictionary maps deployment name to Deployment objects.
"""
infos = get_global_client().list_deployments()
deployments = {}
for name, (deployment_info, route_prefix) in infos.items():
deployments[name] = Deployment(
cloudpickle.loads(deployment_info.replica_config.serialized_deployment_def),
name,
deployment_info.deployment_config,
version=deployment_info.version,
init_args=deployment_info.replica_config.init_args,
init_kwargs=deployment_info.replica_config.init_kwargs,
route_prefix=route_prefix,
ray_actor_options=deployment_info.replica_config.ray_actor_options,
_internal=True,
)
return deployments
def get_deployment_statuses() -> Dict[str, DeploymentStatusInfo]:
"""Returns a dictionary of deployment statuses.
    A deployment's status is one of {UPDATING, UNHEALTHY, HEALTHY}.
Example:
>>> from ray.serve.api import get_deployment_statuses
>>> statuses = get_deployment_statuses() # doctest: +SKIP
>>> status_info = statuses["deployment_name"] # doctest: +SKIP
>>> status = status_info.status # doctest: +SKIP
>>> message = status_info.message # doctest: +SKIP
Returns:
Dict[str, DeploymentStatus]: This dictionary maps the running
deployment's name to a DeploymentStatus object containing its
status and a message explaining the status.
"""
return get_global_client().get_deployment_statuses()
@PublicAPI(stability="alpha")
def run(
target: Union[ClassNode, FunctionNode],
_blocking: bool = True,
*,
host: str = DEFAULT_HTTP_HOST,
port: int = DEFAULT_HTTP_PORT,
) -> Optional[RayServeHandle]:
"""Run a Serve application and return a ServeHandle to the ingress.
Either a ClassNode, FunctionNode, or a pre-built application
can be passed in. If a node is passed in, all of the deployments it depends
on will be deployed. If there is an ingress, its handle will be returned.
Args:
target (Union[ClassNode, FunctionNode, Application]):
            A user-built Serve Application or a ClassNode that acts as the
            root node of the DAG. By default, the ClassNode is the Driver
            deployment unless the user provides a customized one.
        host (str): The host passed into serve.start().
        port (int): The port passed into serve.start().
    Returns:
        RayServeHandle: A regular Ray Serve handle that can be called by the
        user to execute the Serve DAG.
"""
# TODO (jiaodong): Resolve circular reference in pipeline codebase and serve
from ray.serve.pipeline.api import build as pipeline_build
from ray.serve.pipeline.api import get_and_validate_ingress_deployment
client = start(detached=True, http_options={"host": host, "port": port})
if isinstance(target, Application):
deployments = list(target.deployments.values())
ingress = target.ingress
# Each DAG should always provide a valid Driver ClassNode
elif isinstance(target, ClassNode):
deployments = pipeline_build(target)
ingress = get_and_validate_ingress_deployment(deployments)
# Special case where user is doing single function serve.run(func.bind())
elif isinstance(target, FunctionNode):
deployments = pipeline_build(target)
ingress = get_and_validate_ingress_deployment(deployments)
if len(deployments) != 1:
            raise ValueError(
                "We only support a single function node in serve.run, e.g. "
                "serve.run(func.bind()). For more than one node in your DAG, "
                "please provide a driver class and bind it as the entrypoint "
                "to your Serve DAG."
)
elif isinstance(target, DAGNode):
        raise ValueError(
            "Invalid DAGNode type as entry to serve.run(), "
            f"type: {type(target)}, accepted: ClassNode or "
            "FunctionNode. Please provide a driver class and bind it "
            "as the entrypoint to your Serve DAG."
)
else:
raise TypeError(
"Expected a ClassNode, FunctionNode, or Application as target. "
f"Got unexpected type {type(target)} instead."
)
parameter_group = []
for deployment in deployments:
deployment_parameters = {
"name": deployment._name,
"func_or_class": deployment._func_or_class,
"init_args": deployment.init_args,
"init_kwargs": deployment.init_kwargs,
"ray_actor_options": deployment._ray_actor_options,
"config": deployment._config,
"version": deployment._version,
"prev_version": deployment._prev_version,
"route_prefix": deployment.route_prefix,
"url": deployment.url,
}
parameter_group.append(deployment_parameters)
client.deploy_group(
parameter_group, _blocking=_blocking, remove_past_deployments=True
)
if ingress is not None:
return ingress.get_handle()
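# Hedged sketch of the single-function case handled above (the function is hypothetical;
# it mirrors the serve.run(func.bind()) form referenced in the error message):
#
#   @serve.deployment
#   def hello(request):
#       return "hello"
#
#   handle = serve.run(hello.bind())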
def build(target: Union[ClassNode, FunctionNode]) -> Application:
"""Builds a Serve application into a static application.
Takes in a ClassNode or FunctionNode and converts it to a
Serve application consisting of one or more deployments. This is intended
to be used for production scenarios and deployed via the Serve REST API or
CLI, so there are some restrictions placed on the deployments:
1) All of the deployments must be importable. That is, they cannot be
        defined in __main__ or defined inline. The deployments will be
        imported in production using the same import path as here.
2) All arguments bound to the deployment must be JSON-serializable.
The returned Application object can be exported to a dictionary or YAML
config.
"""
# TODO (jiaodong): Resolve circular reference in pipeline codebase and serve
from ray.serve.pipeline.api import build as pipeline_build
if in_interactive_shell():
raise RuntimeError(
"build cannot be called from an interactive shell like "
"IPython or Jupyter because it requires all deployments to be "
"importable to run the app after building."
)
# TODO(edoakes): this should accept host and port, but we don't
# currently support them in the REST API.
return Application(pipeline_build(target))
def _check_http_and_checkpoint_options(
client: ServeControllerClient,
http_options: Union[dict, HTTPOptions],
checkpoint_path: str,
) -> None:
if checkpoint_path and checkpoint_path != client.checkpoint_path:
logger.warning(
f"The new client checkpoint path '{checkpoint_path}' "
f"is different from the existing one '{client.checkpoint_path}'. "
"The new checkpoint path is ignored."
)
if http_options:
client_http_options = client.http_config
new_http_options = (
http_options
if isinstance(http_options, HTTPOptions)
else HTTPOptions.parse_obj(http_options)
)
different_fields = []
all_http_option_fields = new_http_options.__dict__
for field in all_http_option_fields:
if getattr(new_http_options, field) != getattr(client_http_options, field):
different_fields.append(field)
if len(different_fields):
logger.warning(
"The new client HTTP config differs from the existing one "
f"in the following fields: {different_fields}. "
"The new HTTP config is ignored."
)
|
import collections
import inspect
import logging
from typing import (
Any,
Callable,
Dict,
Optional,
Tuple,
Union,
overload,
)
from fastapi import APIRouter, FastAPI
from starlette.requests import Request
from uvicorn.config import Config
from uvicorn.lifespan.on import LifespanOn
from ray.serve.common import DeploymentStatusInfo
from ray.serve.config import (
AutoscalingConfig,
DeploymentConfig,
HTTPOptions,
)
from ray.serve.constants import (
DEFAULT_CHECKPOINT_PATH,
HTTP_PROXY_TIMEOUT,
SERVE_CONTROLLER_NAME,
CONTROLLER_MAX_CONCURRENCY,
DEFAULT_HTTP_HOST,
DEFAULT_HTTP_PORT,
)
from ray.serve.controller import ServeController
from ray.serve.deployment import Deployment
from ray.serve.exceptions import RayServeException
from ray.experimental.dag import DAGNode
from ray.serve.handle import RayServeHandle
from ray.serve.http_util import ASGIHTTPSender, make_fastapi_class_based_view
from ray.serve.logging_utils import LoggingContext
from ray.serve.utils import (
ensure_serialization_context,
format_actor_name,
get_current_node_resource_key,
get_random_letters,
in_interactive_shell,
DEFAULT,
)
from ray.util.annotations import PublicAPI
import ray
from ray import cloudpickle
from ray.serve.deployment_graph import ClassNode, FunctionNode
from ray.serve.application import Application
from ray.serve.client import ServeControllerClient, get_controller_namespace
from ray.serve.context import (
set_global_client,
get_global_client,
get_internal_replica_context,
ReplicaContext,
)
from ray._private.usage import usage_lib
logger = logging.getLogger(__file__)
@PublicAPI(stability="beta")
def start(
detached: bool = False,
http_options: Optional[Union[dict, HTTPOptions]] = None,
dedicated_cpu: bool = False,
_checkpoint_path: str = DEFAULT_CHECKPOINT_PATH,
_override_controller_namespace: Optional[str] = None,
**kwargs,
) -> ServeControllerClient:
"""Initialize a serve instance.
By default, the instance will be scoped to the lifetime of the returned
Client object (or when the script exits). If detached is set to True, the
instance will instead persist until serve.shutdown() is called. This is
only relevant if connecting to a long-running Ray cluster (e.g., with
ray.init(address="auto") or ray.init("ray://<remote_addr>")).
Args:
        detached (bool): Whether or not the instance should be detached from this
script. If set, the instance will live on the Ray cluster until it is
explicitly stopped with serve.shutdown().
http_options (Optional[Dict, serve.HTTPOptions]): Configuration options
for HTTP proxy. You can pass in a dictionary or HTTPOptions object
with fields:
- host(str, None): Host for HTTP servers to listen on. Defaults to
"127.0.0.1". To expose Serve publicly, you probably want to set
this to "0.0.0.0".
- port(int): Port for HTTP server. Defaults to 8000.
- root_path(str): Root path to mount the serve application
(for example, "/serve"). All deployment routes will be prefixed
with this path. Defaults to "".
- middlewares(list): A list of Starlette middlewares that will be
applied to the HTTP servers in the cluster. Defaults to [].
- location(str, serve.config.DeploymentMode): The deployment
location of HTTP servers:
- "HeadOnly": start one HTTP server on the head node. Serve
assumes the head node is the node you executed serve.start
on. This is the default.
- "EveryNode": start one HTTP server per node.
- "NoServer" or None: disable HTTP server.
- num_cpus (int): The number of CPU cores to reserve for each
internal Serve HTTP proxy actor. Defaults to 0.
dedicated_cpu (bool): Whether to reserve a CPU core for the internal
Serve controller actor. Defaults to False.
"""
usage_lib.record_library_usage("serve")
http_deprecated_args = ["http_host", "http_port", "http_middlewares"]
for key in http_deprecated_args:
if key in kwargs:
raise ValueError(
f"{key} is deprecated, please use serve.start(http_options="
f'{{"{key}": {kwargs[key]}}}) instead.'
)
# Initialize ray if needed.
ray.worker.global_worker.filter_logs_by_job = False
if not ray.is_initialized():
ray.init(namespace="serve")
controller_namespace = get_controller_namespace(
detached, _override_controller_namespace=_override_controller_namespace
)
try:
client = get_global_client(
_override_controller_namespace=_override_controller_namespace,
_health_check_controller=True,
)
logger.info(
"Connecting to existing Serve instance in namespace "
f"'{controller_namespace}'."
)
_check_http_and_checkpoint_options(client, http_options, _checkpoint_path)
return client
except RayServeException:
pass
if detached:
controller_name = SERVE_CONTROLLER_NAME
else:
controller_name = format_actor_name(get_random_letters(), SERVE_CONTROLLER_NAME)
if isinstance(http_options, dict):
http_options = HTTPOptions.parse_obj(http_options)
if http_options is None:
http_options = HTTPOptions()
controller = ServeController.options(
num_cpus=1 if dedicated_cpu else 0,
name=controller_name,
lifetime="detached" if detached else None,
max_restarts=-1,
max_task_retries=-1,
# Pin Serve controller on the head node.
resources={get_current_node_resource_key(): 0.01},
namespace=controller_namespace,
max_concurrency=CONTROLLER_MAX_CONCURRENCY,
).remote(
controller_name,
http_options,
_checkpoint_path,
detached=detached,
_override_controller_namespace=_override_controller_namespace,
)
proxy_handles = ray.get(controller.get_http_proxies.remote())
if len(proxy_handles) > 0:
try:
ray.get(
[handle.ready.remote() for handle in proxy_handles.values()],
timeout=HTTP_PROXY_TIMEOUT,
)
except ray.exceptions.GetTimeoutError:
raise TimeoutError(
f"HTTP proxies not available after {HTTP_PROXY_TIMEOUT}s."
)
client = ServeControllerClient(
controller,
controller_name,
detached=detached,
_override_controller_namespace=_override_controller_namespace,
)
set_global_client(client)
logger.info(
f"Started{' detached ' if detached else ' '}Serve instance in "
f"namespace '{controller_namespace}'."
)
return client
@PublicAPI
def shutdown() -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
instance.
"""
try:
client = get_global_client()
except RayServeException:
logger.info(
"Nothing to shut down. There's no Serve application "
"running on this Ray cluster."
)
return
client.shutdown()
set_global_client(None)
@PublicAPI
def get_replica_context() -> ReplicaContext:
"""If called from a deployment, returns the deployment and replica tag.
A replica tag uniquely identifies a single replica for a Ray Serve
deployment at runtime. Replica tags are of the form
`<deployment_name>#<random letters>`.
Raises:
RayServeException: if not called from within a Ray Serve deployment.
Example:
>>> from ray import serve
>>> # deployment_name
>>> serve.get_replica_context().deployment # doctest: +SKIP
>>> # deployment_name#krcwoa
>>> serve.get_replica_context().replica_tag # doctest: +SKIP
"""
internal_replica_context = get_internal_replica_context()
if internal_replica_context is None:
raise RayServeException(
"`serve.get_replica_context()` "
"may only be called from within a "
"Ray Serve deployment."
)
return internal_replica_context
@PublicAPI(stability="beta")
def ingress(app: Union["FastAPI", "APIRouter", Callable]):
"""Mark an ASGI application ingress for Serve.
Args:
        app (FastAPI, APIRouter, Starlette, etc.): the app or router object to serve
as ingress for this deployment. It can be any ASGI compatible
object.
Example:
>>> from fastapi import FastAPI
>>> from ray import serve
        >>> app = FastAPI() # doctest: +SKIP
>>> @serve.deployment # doctest: +SKIP
... @serve.ingress(app) # doctest: +SKIP
... class App: # doctest: +SKIP
... pass # doctest: +SKIP
>>> App.deploy() # doctest: +SKIP
"""
def decorator(cls):
if not inspect.isclass(cls):
raise ValueError("@serve.ingress must be used with a class.")
if issubclass(cls, collections.abc.Callable):
raise ValueError(
"Class passed to @serve.ingress may not have __call__ method."
)
# Sometimes there are decorators on the methods. We want to fix
# the fast api routes here.
if isinstance(app, (FastAPI, APIRouter)):
make_fastapi_class_based_view(app, cls)
# Free the state of the app so subsequent modification won't affect
# this ingress deployment. We don't use copy.copy here to avoid
# recursion issue.
ensure_serialization_context()
frozen_app = cloudpickle.loads(cloudpickle.dumps(app))
class ASGIAppWrapper(cls):
async def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._serve_app = frozen_app
# Use uvicorn's lifespan handling code to properly deal with
# startup and shutdown event.
self._serve_asgi_lifespan = LifespanOn(
Config(self._serve_app, lifespan="on")
)
# Replace uvicorn logger with our own.
self._serve_asgi_lifespan.logger = logger
# LifespanOn's logger logs in INFO level thus becomes spammy
# Within this block we temporarily uplevel for cleaner logging
with LoggingContext(
self._serve_asgi_lifespan.logger, level=logging.WARNING
):
await self._serve_asgi_lifespan.startup()
async def __call__(self, request: Request):
sender = ASGIHTTPSender()
await self._serve_app(
request.scope,
request.receive,
sender,
)
return sender.build_asgi_response()
# NOTE: __del__ must be async so that we can run asgi shutdown
# in the same event loop.
async def __del__(self):
# LifespanOn's logger logs in INFO level thus becomes spammy
# Within this block we temporarily uplevel for cleaner logging
with LoggingContext(
self._serve_asgi_lifespan.logger, level=logging.WARNING
):
await self._serve_asgi_lifespan.shutdown()
# Make sure to call user's del method as well.
super_cls = super()
if hasattr(super_cls, "__del__"):
super_cls.__del__()
ASGIAppWrapper.__name__ = cls.__name__
return ASGIAppWrapper
return decorator
@overload
def deployment(func_or_class: Callable) -> Deployment:
pass
@overload
def deployment(
name: Optional[str] = None,
version: Optional[str] = None,
prev_version: Optional[str] = None,
num_replicas: Optional[int] = None,
init_args: Optional[Tuple[Any]] = None,
init_kwargs: Optional[Dict[Any, Any]] = None,
route_prefix: Union[str, None, DEFAULT] = DEFAULT.VALUE,
ray_actor_options: Optional[Dict] = None,
user_config: Optional[Any] = None,
max_concurrent_queries: Optional[int] = None,
_autoscaling_config: Optional[Union[Dict, AutoscalingConfig]] = None,
_graceful_shutdown_wait_loop_s: Optional[float] = None,
_graceful_shutdown_timeout_s: Optional[float] = None,
_health_check_period_s: Optional[float] = None,
_health_check_timeout_s: Optional[float] = None,
) -> Callable[[Callable], Deployment]:
pass
@PublicAPI
def deployment(
_func_or_class: Optional[Callable] = None,
name: Optional[str] = None,
version: Optional[str] = None,
prev_version: Optional[str] = None,
num_replicas: Optional[int] = None,
init_args: Optional[Tuple[Any]] = None,
init_kwargs: Optional[Dict[Any, Any]] = None,
route_prefix: Union[str, None, DEFAULT] = DEFAULT.VALUE,
ray_actor_options: Optional[Dict] = None,
user_config: Optional[Any] = None,
max_concurrent_queries: Optional[int] = None,
_autoscaling_config: Optional[Union[Dict, AutoscalingConfig]] = None,
_graceful_shutdown_wait_loop_s: Optional[float] = None,
_graceful_shutdown_timeout_s: Optional[float] = None,
_health_check_period_s: Optional[float] = None,
_health_check_timeout_s: Optional[float] = None,
) -> Callable[[Callable], Deployment]:
"""Define a Serve deployment.
Args:
name (Optional[str]): Globally-unique name identifying this deployment.
If not provided, the name of the class or function will be used.
version (Optional[str]): Version of the deployment. This is used to
indicate a code change for the deployment; when it is re-deployed
with a version change, a rolling update of the replicas will be
performed. If not provided, every deployment will be treated as a
new version.
prev_version (Optional[str]): Version of the existing deployment which
is used as a precondition for the next deployment. If prev_version
does not match with the existing deployment's version, the
deployment will fail. If not provided, deployment procedure will
not check the existing deployment's version.
num_replicas (Optional[int]): The number of processes to start up that
will handle requests to this deployment. Defaults to 1.
init_args (Optional[Tuple]): Positional args to be passed to the class
constructor when starting up deployment replicas. These can also be
passed when you call `.deploy()` on the returned Deployment.
init_kwargs (Optional[Dict]): Keyword args to be passed to the class
constructor when starting up deployment replicas. These can also be
passed when you call `.deploy()` on the returned Deployment.
route_prefix (Optional[str]): Requests to paths under this HTTP path
prefix will be routed to this deployment. Defaults to '/{name}'.
When set to 'None', no HTTP endpoint will be created.
Routing is done based on longest-prefix match, so if you have
deployment A with a prefix of '/a' and deployment B with a prefix
of '/a/b', requests to '/a', '/a/', and '/a/c' go to A and requests
to '/a/b', '/a/b/', and '/a/b/c' go to B. Routes must not end with
a '/' unless they're the root (just '/'), which acts as a
catch-all.
ray_actor_options (dict): Options to be passed to the Ray actor
constructor such as resource requirements.
user_config (Optional[Any]): [experimental] Config to pass to the
reconfigure method of the deployment. This can be updated
dynamically without changing the version of the deployment and
restarting its replicas. The user_config needs to be hashable to
keep track of updates, so it must only contain hashable types, or
hashable types nested in lists and dictionaries.
max_concurrent_queries (Optional[int]): The maximum number of queries
that will be sent to a replica of this deployment without receiving
a response. Defaults to 100.
Example:
>>> from ray import serve
>>> @serve.deployment(name="deployment1", version="v1") # doctest: +SKIP
... class MyDeployment: # doctest: +SKIP
... pass # doctest: +SKIP
>>> MyDeployment.deploy(*init_args) # doctest: +SKIP
>>> MyDeployment.options( # doctest: +SKIP
... num_replicas=2, init_args=init_args).deploy()
Returns:
Deployment
"""
if num_replicas is not None and _autoscaling_config is not None:
raise ValueError(
"Manually setting num_replicas is not allowed when "
"_autoscaling_config is provided."
)
config = DeploymentConfig.from_default(
ignore_none=True,
num_replicas=num_replicas,
user_config=user_config,
max_concurrent_queries=max_concurrent_queries,
autoscaling_config=_autoscaling_config,
graceful_shutdown_wait_loop_s=_graceful_shutdown_wait_loop_s,
graceful_shutdown_timeout_s=_graceful_shutdown_timeout_s,
health_check_period_s=_health_check_period_s,
health_check_timeout_s=_health_check_timeout_s,
)
def decorator(_func_or_class):
return Deployment(
_func_or_class,
name if name is not None else _func_or_class.__name__,
config,
version=version,
prev_version=prev_version,
init_args=init_args,
init_kwargs=init_kwargs,
route_prefix=route_prefix,
ray_actor_options=ray_actor_options,
_internal=True,
)
# This handles both parametrized and non-parametrized usage of the
# decorator. See the @serve.batch code for more details.
return decorator(_func_or_class) if callable(_func_or_class) else decorator
@PublicAPI
def get_deployment(name: str) -> Deployment:
"""Dynamically fetch a handle to a Deployment object.
This can be used to update and redeploy a deployment without access to
the original definition.
Example:
>>> from ray import serve
>>> MyDeployment = serve.get_deployment("name") # doctest: +SKIP
>>> MyDeployment.options(num_replicas=10).deploy() # doctest: +SKIP
Args:
name(str): name of the deployment. This must have already been
deployed.
Returns:
Deployment
"""
try:
(
deployment_info,
route_prefix,
) = get_global_client().get_deployment_info(name)
except KeyError:
raise KeyError(
f"Deployment {name} was not found. Did you call Deployment.deploy()?"
)
return Deployment(
cloudpickle.loads(deployment_info.replica_config.serialized_deployment_def),
name,
deployment_info.deployment_config,
version=deployment_info.version,
init_args=deployment_info.replica_config.init_args,
init_kwargs=deployment_info.replica_config.init_kwargs,
route_prefix=route_prefix,
ray_actor_options=deployment_info.replica_config.ray_actor_options,
_internal=True,
)
@PublicAPI
def list_deployments() -> Dict[str, Deployment]:
"""Returns a dictionary of all active deployments.
Dictionary maps deployment name to Deployment objects.
"""
infos = get_global_client().list_deployments()
deployments = {}
for name, (deployment_info, route_prefix) in infos.items():
deployments[name] = Deployment(
cloudpickle.loads(deployment_info.replica_config.serialized_deployment_def),
name,
deployment_info.deployment_config,
version=deployment_info.version,
init_args=deployment_info.replica_config.init_args,
init_kwargs=deployment_info.replica_config.init_kwargs,
route_prefix=route_prefix,
ray_actor_options=deployment_info.replica_config.ray_actor_options,
_internal=True,
)
return deployments
def get_deployment_statuses() -> Dict[str, DeploymentStatusInfo]:
"""Returns a dictionary of deployment statuses.
    A deployment's status is one of {UPDATING, UNHEALTHY, HEALTHY}.
Example:
>>> from ray.serve.api import get_deployment_statuses
>>> statuses = get_deployment_statuses() # doctest: +SKIP
>>> status_info = statuses["deployment_name"] # doctest: +SKIP
>>> status = status_info.status # doctest: +SKIP
>>> message = status_info.message # doctest: +SKIP
Returns:
Dict[str, DeploymentStatus]: This dictionary maps the running
deployment's name to a DeploymentStatus object containing its
status and a message explaining the status.
"""
return get_global_client().get_deployment_statuses()
@PublicAPI(stability="alpha")
def run(
target: Union[ClassNode, FunctionNode],
_blocking: bool = True,
*,
host: str = DEFAULT_HTTP_HOST,
port: int = DEFAULT_HTTP_PORT,
) -> Optional[RayServeHandle]:
"""Run a Serve application and return a ServeHandle to the ingress.
Either a ClassNode, FunctionNode, or a pre-built application
can be passed in. If a node is passed in, all of the deployments it depends
on will be deployed. If there is an ingress, its handle will be returned.
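    Example:
        >>> from ray import serve
        >>> # Assuming `func` is a function decorated with @serve.deployment:
        >>> handle = serve.run(func.bind()) # doctest: +SKIP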
Args:
target (Union[ClassNode, FunctionNode, Application]):
            A user-built Serve Application or a ClassNode that acts as the
            root node of the DAG. By default, the ClassNode is the Driver
            deployment unless the user provides a customized one.
host (str): The host passed into serve.start().
port (int): The port passed into serve.start().
Returns:
RayServeHandle: A regular ray serve handle that can be called by user
to execute the serve DAG.
"""
# TODO (jiaodong): Resolve circular reference in pipeline codebase and serve
from ray.serve.pipeline.api import build as pipeline_build
from ray.serve.pipeline.api import get_and_validate_ingress_deployment
client = start(detached=True, http_options={"host": host, "port": port})
if isinstance(target, Application):
deployments = list(target.deployments.values())
ingress = target.ingress
# Each DAG should always provide a valid Driver ClassNode
elif isinstance(target, ClassNode):
deployments = pipeline_build(target)
ingress = get_and_validate_ingress_deployment(deployments)
# Special case where user is doing single function serve.run(func.bind())
elif isinstance(target, FunctionNode):
deployments = pipeline_build(target)
ingress = get_and_validate_ingress_deployment(deployments)
if len(deployments) != 1:
raise ValueError(
"We only support single function node in serve.run, ex: "
"serve.run(func.bind()). For more than one nodes in your DAG, "
"Please provide a driver class and bind it as entrypoint to "
"your Serve DAG."
)
elif isinstance(target, DAGNode):
raise ValueError(
"Invalid DAGNode type as entry to serve.run(), "
f"type: {type(target)}, accepted: ClassNode, "
"FunctionNode please provide a driver class and bind it "
"as entrypoint to your Serve DAG."
)
else:
raise TypeError(
"Expected a ClassNode, FunctionNode, or Application as target. "
f"Got unexpected type {type(target)} instead."
)
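    # Collect each deployment's parameters into a single group so they can be
    # deployed together via client.deploy_group().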
parameter_group = []
for deployment in deployments:
deployment_parameters = {
"name": deployment._name,
"func_or_class": deployment._func_or_class,
"init_args": deployment.init_args,
"init_kwargs": deployment.init_kwargs,
"ray_actor_options": deployment._ray_actor_options,
"config": deployment._config,
"version": deployment._version,
"prev_version": deployment._prev_version,
"route_prefix": deployment.route_prefix,
"url": deployment.url,
}
parameter_group.append(deployment_parameters)
client.deploy_group(
parameter_group, _blocking=_blocking, remove_past_deployments=True
)
if ingress is not None:
return ingress.get_handle()
def build(target: Union[ClassNode, FunctionNode]) -> Application:
"""Builds a Serve application into a static application.
Takes in a ClassNode or FunctionNode and converts it to a
Serve application consisting of one or more deployments. This is intended
to be used for production scenarios and deployed via the Serve REST API or
CLI, so there are some restrictions placed on the deployments:
    1) All of the deployments must be importable. That is, they cannot be
       defined in __main__ or defined inline. In production, the deployments
       will be imported using the same import path they had here.
2) All arguments bound to the deployment must be JSON-serializable.
The returned Application object can be exported to a dictionary or YAML
config.
"""
# TODO (jiaodong): Resolve circular reference in pipeline codebase and serve
from ray.serve.pipeline.api import build as pipeline_build
if in_interactive_shell():
raise RuntimeError(
"build cannot be called from an interactive shell like "
"IPython or Jupyter because it requires all deployments to be "
"importable to run the app after building."
)
# TODO(edoakes): this should accept host and port, but we don't
# currently support them in the REST API.
return Application(pipeline_build(target))
def _check_http_and_checkpoint_options(
client: ServeControllerClient,
http_options: Union[dict, HTTPOptions],
checkpoint_path: str,
) -> None:
if checkpoint_path and checkpoint_path != client.checkpoint_path:
logger.warning(
f"The new client checkpoint path '{checkpoint_path}' "
f"is different from the existing one '{client.checkpoint_path}'. "
"The new checkpoint path is ignored."
)
if http_options:
client_http_options = client.http_config
new_http_options = (
http_options
if isinstance(http_options, HTTPOptions)
else HTTPOptions.parse_obj(http_options)
)
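        # Compare every field of the requested HTTP options against the
        # running client's config; the existing config always wins.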
different_fields = []
all_http_option_fields = new_http_options.__dict__
for field in all_http_option_fields:
if getattr(new_http_options, field) != getattr(client_http_options, field):
different_fields.append(field)
if len(different_fields):
logger.warning(
"The new client HTTP config differs from the existing one "
f"in the following fields: {different_fields}. "
"The new HTTP config is ignored."
)
|
import tempfile
import argparse
import logging
import datetime
import threading
import os
import re
from botocore.exceptions import ClientError
from ocs_ci.framework import config
from ocs_ci.ocs.constants import CLEANUP_YAML, TEMPLATE_CLEANUP_DIR
from ocs_ci.ocs.exceptions import CommandFailed
from ocs_ci.utility.utils import get_openshift_installer, destroy_cluster
from ocs_ci.utility import templating
from ocs_ci.utility.aws import (
AWS, delete_cluster_buckets, destroy_volumes, get_rhel_worker_instances,
StackStatusError, terminate_rhel_workers
)
from ocs_ci.cleanup.aws import defaults
FORMAT = (
'%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s'
)
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def cleanup(cluster_name, cluster_id, upi=False, failed_deletions=None):
"""
Cleanup existing cluster in AWS
Args:
cluster_name (str): Name of the cluster
cluster_id (str): Cluster id to cleanup
upi (bool): True for UPI cluster, False otherwise
failed_deletions (list): list of clusters we failed to delete, used
for reporting purposes
"""
data = {'cluster_name': cluster_name, 'cluster_id': cluster_id}
template = templating.Templating(base_path=TEMPLATE_CLEANUP_DIR)
cleanup_template = template.render_template(CLEANUP_YAML, data)
cleanup_path = tempfile.mkdtemp(prefix='cleanup_')
cleanup_file = os.path.join(cleanup_path, 'metadata.json')
with open(cleanup_file, "w") as temp:
temp.write(cleanup_template)
bin_dir = os.path.expanduser(config.RUN['bin_dir'])
oc_bin = os.path.join(bin_dir, "openshift-install")
if upi:
aws = AWS()
rhel_workers = get_rhel_worker_instances(cleanup_path)
logger.info(f"{cluster_name}'s RHEL workers: {rhel_workers}")
if rhel_workers:
terminate_rhel_workers(rhel_workers)
# Destroy extra volumes
destroy_volumes(cluster_name)
stack_names = list()
# Get master, bootstrap and security group stacks
for stack_type in ['ma', 'bs', 'sg']:
try:
stack_names.append(
aws.get_cloudformation_stacks(
pattern=f"{cluster_name}-{stack_type}"
)[0]['StackName']
)
except ClientError:
continue
# Get the worker stacks
worker_index = 0
worker_stack_exists = True
while worker_stack_exists:
try:
stack_names.append(
aws.get_cloudformation_stacks(
pattern=f"{cluster_name}-no{worker_index}"
)[0]['StackName']
)
worker_index += 1
except ClientError:
worker_stack_exists = False
logger.info(f"Deleting stacks: {stack_names}")
aws.delete_cloudformation_stacks(stack_names)
# Destroy the cluster
logger.info(f"cleaning up {cluster_id}")
destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
for stack_type in ['inf', 'vpc']:
try:
stack_names.append(
aws.get_cloudformation_stacks(
pattern=f"{cluster_name}-{stack_type}"
)[0]['StackName']
)
except ClientError:
continue
try:
aws.delete_cloudformation_stacks(stack_names)
except StackStatusError:
logger.error('Failed to fully destroy cluster %s', cluster_name)
if failed_deletions:
failed_deletions.append(cluster_name)
raise
else:
logger.info(f"cleaning up {cluster_id}")
try:
destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
except CommandFailed:
logger.error('Failed to fully destroy cluster %s', cluster_name)
if failed_deletions:
failed_deletions.append(cluster_name)
raise
delete_cluster_buckets(cluster_name)
def get_clusters(time_to_delete, region_name, prefixes_hours_to_spare):
"""
    Get all cluster names whose EC2 instances' running time is greater
    than the specified time to delete
Args:
time_to_delete (int): The maximum time in seconds that is allowed
for clusters to continue running
region_name (str): The name of the AWS region to delete the resources from
prefixes_hours_to_spare (dict): Dictionaries of the cluster prefixes to spare
along with the maximum time in hours that is allowed for spared
clusters to continue running
Returns:
        tuple: List of the cluster names (e.g. ebenahar-cluster-gqtd4) to be provided to the
ci-cleanup script, a list of VPCs that are part of cloudformation,
and a list of remaining clusters
"""
def determine_cluster_deletion(ec2_instances, cluster_name):
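        # Spare clusters whose prefix maps to 'never'; otherwise flag the
        # cluster for deletion once a running instance exceeds its allowed
        # running time.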
for instance in ec2_instances:
allowed_running_time = time_to_delete
do_not_delete = False
if instance.state["Name"] == "running":
for prefix, hours in prefixes_hours_to_spare.items():
# case insensitive 'startswith'
if bool(re.match(prefix, cluster_name, re.I)):
if hours == 'never':
do_not_delete = True
else:
allowed_running_time = int(hours) * 60 * 60
break
if do_not_delete:
logger.info(
"%s marked as 'do not delete' and will not be "
"destroyed", cluster_name
)
return False
else:
launch_time = instance.launch_time
current_time = datetime.datetime.now(launch_time.tzinfo)
running_time = current_time - launch_time
logger.info(
f"Instance {[tag["Value"] for tag in instance.tags if tag["Key"] == "Name"][0]} "
f"(id: {instance.id}) running time is {running_time} hours while the allowed"
f" running time for it is {allowed_running_time/3600} hours"
)
if running_time.total_seconds() > allowed_running_time:
return True
return False
aws = AWS(region_name=region_name)
clusters_to_delete = list()
remaining_clusters = list()
cloudformation_vpc_names = list()
vpcs = aws.ec2_client.describe_vpcs()['Vpcs']
vpc_ids = [vpc['VpcId'] for vpc in vpcs]
vpc_objs = [aws.ec2_resource.Vpc(vpc_id) for vpc_id in vpc_ids]
for vpc_obj in vpc_objs:
vpc_tags = vpc_obj.tags
if vpc_tags:
cloudformation_vpc_name = [
tag['Value'] for tag in vpc_tags
if tag['Key'] == defaults.AWS_CLOUDFORMATION_TAG
]
if cloudformation_vpc_name:
cloudformation_vpc_names.append(cloudformation_vpc_name[0])
continue
vpc_name = [
tag['Value'] for tag in vpc_tags if tag['Key'] == 'Name'
][0]
cluster_name = vpc_name.replace('-vpc', '')
vpc_instances = vpc_obj.instances.all()
if not vpc_instances:
clusters_to_delete.append(cluster_name)
continue
# Append to clusters_to_delete if cluster should be deleted
if determine_cluster_deletion(vpc_instances, cluster_name):
clusters_to_delete.append(cluster_name)
else:
remaining_clusters.append(cluster_name)
else:
logger.info("No tags found for VPC")
# Get all cloudformation based clusters to delete
cf_clusters_to_delete = list()
for vpc_name in cloudformation_vpc_names:
        instance_dicts = aws.get_instances_by_name_pattern(f"{vpc_name.replace('-vpc', '')}*")
ec2_instances = [aws.get_ec2_instance(instance_dict['id']) for instance_dict in instance_dicts]
if not ec2_instances:
continue
cluster_io_tag = None
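        # Derive the cluster name from the kubernetes.io/cluster tag on any of
        # the instances matched for this VPC.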
for instance in ec2_instances:
cluster_io_tag = [
tag['Key'] for tag in instance.tags
if 'kubernetes.io/cluster' in tag['Key']
]
if cluster_io_tag:
break
if not cluster_io_tag:
logger.warning(
"Unable to find valid cluster IO tag from ec2 instance tags "
"for VPC %s. This is probably not an OCS cluster VPC!",
vpc_name
)
continue
cluster_name = cluster_io_tag[0].replace('kubernetes.io/cluster/', '')
if determine_cluster_deletion(ec2_instances, cluster_name):
cf_clusters_to_delete.append(cluster_name)
else:
remaining_clusters.append(cluster_name)
return clusters_to_delete, cf_clusters_to_delete, remaining_clusters
def cluster_cleanup():
parser = argparse.ArgumentParser(description='Cleanup AWS Resource')
parser.add_argument(
'--cluster',
nargs=1,
action='append',
required=True,
help="Cluster name tag"
)
parser.add_argument(
'--upi',
action='store_true',
required=False,
help="For UPI cluster deletion"
)
logging.basicConfig(level=logging.DEBUG)
args = parser.parse_args()
procs = []
for id in args.cluster:
cluster_name = id[0].rsplit('-', 1)[0]
logger.info(f"cleaning up {id[0]}")
proc = threading.Thread(target=cleanup, args=(cluster_name, id[0], args.upi))
proc.start()
procs.append(proc)
for p in procs:
p.join()
def aws_cleanup():
parser = argparse.ArgumentParser(
description='AWS overall resources cleanup according to running time'
)
parser.add_argument(
'--hours',
type=hour_valid,
action='store',
required=True,
help="""
Maximum running time of the cluster (in hours).
Clusters older than this will be deleted.
The minimum is 10 hours
"""
)
parser.add_argument(
'--region',
action='store',
required=False,
help="The name of the AWS region to delete the resources from"
)
parser.add_argument(
'--prefix',
action='append',
required=False,
type=prefix_hour_mapping,
help="""
Additional prefix:hour combo to treat as a special rule.
Clusters starting with this prefix will only be cleaned up if
            their runtime exceeds the provided hour (this takes precedence
over the value provided to --hours). Note: if you want to skip
cleanup of a cluster entirely you can use 'never' for the hour.
Example: --prefix foo:24 --prefix bar:48 --prefix foobar:never
"""
)
parser.add_argument(
'--force',
action='store_true',
required=False,
help="""
Force cluster cleanup.
User will not be prompted for confirmation.
WARNING: this utility is destructive, only use this option if
you know what you are doing.
"""
)
args = parser.parse_args()
if not args.force:
confirmation = input(
'Careful! This action could be highly destructive. '
'Are you sure you want to proceed? '
)
assert confirmation == defaults.CONFIRMATION_ANSWER, (
"Wrong confirmation answer. Exiting"
)
prefixes_hours_to_spare = defaults.CLUSTER_PREFIXES_SPECIAL_RULES
if args.prefix:
for prefix, hours in args.prefix:
logger.info(
"Adding special rule for prefix '%s' with hours %s",
prefix, hours
)
prefixes_hours_to_spare.update({prefix: hours})
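    # Convert the allowed running time from hours to seconds.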
time_to_delete = args.hours * 60 * 60
region = defaults.AWS_REGION if not args.region else args.region
clusters_to_delete, cf_clusters_to_delete, remaining_clusters = (
get_clusters(
time_to_delete=time_to_delete, region_name=region,
prefixes_hours_to_spare=prefixes_hours_to_spare,
)
)
if not clusters_to_delete:
logger.info("No clusters to delete")
else:
logger.info("Deleting clusters: %s", clusters_to_delete)
get_openshift_installer()
procs = []
failed_deletions = []
for cluster in clusters_to_delete:
cluster_name = cluster.rsplit('-', 1)[0]
logger.info(f"Deleting cluster {cluster_name}")
proc = threading.Thread(
target=cleanup,
args=(cluster_name, cluster, False, failed_deletions)
)
proc.start()
procs.append(proc)
for p in procs:
p.join()
for cluster in cf_clusters_to_delete:
cluster_name = cluster.rsplit('-', 1)[0]
logger.info(f"Deleting UPI cluster {cluster_name}")
proc = threading.Thread(
target=cleanup,
args=(cluster_name, cluster, True, failed_deletions)
)
proc.start()
procs.append(proc)
for p in procs:
p.join()
logger.info("Remaining clusters: %s", remaining_clusters)
filename = 'failed_cluster_deletions.txt'
content = 'None\n'
if failed_deletions:
logger.error("Failed cluster deletions: %s", failed_deletions)
content = ""
for cluster in failed_deletions:
content += f"{cluster}\n"
with open(filename, 'w') as f:
f.write(content)
def prefix_hour_mapping(string):
"""
Validate that the string provided to --prefix is properly formatted
Args:
string (str): input provided to --prefix
Raises:
argparse.ArgumentTypeError: if the provided string is not
correctly formatted
Returns:
str, str: prefix, hours
"""
msg = (
f'{string} is not a properly formatted prefix:hour combination. '
f'See the --help for more information.'
)
try:
prefix, hours = string.split(':')
if not prefix or not hours:
raise argparse.ArgumentTypeError(msg)
# 'never' should be the only non-int value for hours
if hours != 'never':
int(hours)
except ValueError:
raise argparse.ArgumentTypeError(msg)
return prefix, hours
def hour_valid(string):
"""
Validate that the hour value provided is an int and not lower than the
minimum allowed running time
Args:
string: input provided to --hours
Raises:
argparse.ArgumentTypeError: if the provided hours value is not an int
or lower than the minimum allowed running time
Returns:
int: valid hour value
"""
try:
hours = int(string)
assert hours >= defaults.MINIMUM_CLUSTER_RUNNING_TIME
except ValueError:
msg = f'{string} is not an int, please provide an int value'
raise argparse.ArgumentTypeError(msg)
except AssertionError:
msg = (
f"Number of hours ({hours}) is lower than the required minimum "
f"({defaults.MINIMUM_CLUSTER_RUNNING_TIME})."
)
raise argparse.ArgumentTypeError(msg)
return hours
|
import tempfile
import argparse
import logging
import datetime
import threading
import os
import re
from botocore.exceptions import ClientError
from ocs_ci.framework import config
from ocs_ci.ocs.constants import CLEANUP_YAML, TEMPLATE_CLEANUP_DIR
from ocs_ci.ocs.exceptions import CommandFailed
from ocs_ci.utility.utils import get_openshift_installer, destroy_cluster
from ocs_ci.utility import templating
from ocs_ci.utility.aws import (
AWS, delete_cluster_buckets, destroy_volumes, get_rhel_worker_instances,
StackStatusError, terminate_rhel_workers
)
from ocs_ci.cleanup.aws import defaults
FORMAT = (
'%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s'
)
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def cleanup(cluster_name, cluster_id, upi=False, failed_deletions=None):
"""
Cleanup existing cluster in AWS
Args:
cluster_name (str): Name of the cluster
cluster_id (str): Cluster id to cleanup
upi (bool): True for UPI cluster, False otherwise
failed_deletions (list): list of clusters we failed to delete, used
for reporting purposes
"""
data = {'cluster_name': cluster_name, 'cluster_id': cluster_id}
template = templating.Templating(base_path=TEMPLATE_CLEANUP_DIR)
cleanup_template = template.render_template(CLEANUP_YAML, data)
cleanup_path = tempfile.mkdtemp(prefix='cleanup_')
cleanup_file = os.path.join(cleanup_path, 'metadata.json')
with open(cleanup_file, "w") as temp:
temp.write(cleanup_template)
bin_dir = os.path.expanduser(config.RUN['bin_dir'])
oc_bin = os.path.join(bin_dir, "openshift-install")
if upi:
aws = AWS()
rhel_workers = get_rhel_worker_instances(cleanup_path)
logger.info(f"{cluster_name}'s RHEL workers: {rhel_workers}")
if rhel_workers:
terminate_rhel_workers(rhel_workers)
# Destroy extra volumes
destroy_volumes(cluster_name)
stack_names = list()
# Get master, bootstrap and security group stacks
for stack_type in ['ma', 'bs', 'sg']:
try:
stack_names.append(
aws.get_cloudformation_stacks(
pattern=f"{cluster_name}-{stack_type}"
)[0]['StackName']
)
except ClientError:
continue
# Get the worker stacks
worker_index = 0
worker_stack_exists = True
while worker_stack_exists:
try:
stack_names.append(
aws.get_cloudformation_stacks(
pattern=f"{cluster_name}-no{worker_index}"
)[0]['StackName']
)
worker_index += 1
except ClientError:
worker_stack_exists = False
logger.info(f"Deleting stacks: {stack_names}")
aws.delete_cloudformation_stacks(stack_names)
# Destroy the cluster
logger.info(f"cleaning up {cluster_id}")
destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
for stack_type in ['inf', 'vpc']:
try:
stack_names.append(
aws.get_cloudformation_stacks(
pattern=f"{cluster_name}-{stack_type}"
)[0]['StackName']
)
except ClientError:
continue
try:
aws.delete_cloudformation_stacks(stack_names)
except StackStatusError:
logger.error('Failed to fully destroy cluster %s', cluster_name)
if failed_deletions:
failed_deletions.append(cluster_name)
raise
else:
logger.info(f"cleaning up {cluster_id}")
try:
destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
except CommandFailed:
logger.error('Failed to fully destroy cluster %s', cluster_name)
if failed_deletions:
failed_deletions.append(cluster_name)
raise
delete_cluster_buckets(cluster_name)
def get_clusters(time_to_delete, region_name, prefixes_hours_to_spare):
"""
    Get all cluster names whose EC2 instances' running time is greater
    than the specified time to delete
Args:
time_to_delete (int): The maximum time in seconds that is allowed
for clusters to continue running
region_name (str): The name of the AWS region to delete the resources from
prefixes_hours_to_spare (dict): Dictionaries of the cluster prefixes to spare
along with the maximum time in hours that is allowed for spared
clusters to continue running
Returns:
tuple: List of the cluster names (e.g ebenahar-cluster-gqtd4) to be provided to the
ci-cleanup script, a list of VPCs that are part of cloudformation,
and a list of remaining clusters
"""
def determine_cluster_deletion(ec2_instances, cluster_name):
for instance in ec2_instances:
allowed_running_time = time_to_delete
do_not_delete = False
if instance.state["Name"] == "running":
for prefix, hours in prefixes_hours_to_spare.items():
# case insensitive 'startswith'
if bool(re.match(prefix, cluster_name, re.I)):
if hours == 'never':
do_not_delete = True
else:
allowed_running_time = int(hours) * 60 * 60
break
if do_not_delete:
logger.info(
"%s marked as 'do not delete' and will not be "
"destroyed", cluster_name
)
return False
else:
launch_time = instance.launch_time
current_time = datetime.datetime.now(launch_time.tzinfo)
running_time = current_time - launch_time
logger.info(
f"Instance {[tag['Value'] for tag in instance.tags if tag['Key'] == 'Name'][0]} "
f"(id: {instance.id}) running time is {running_time} hours while the allowed"
f" running time for it is {allowed_running_time/3600} hours"
)
if running_time.total_seconds() > allowed_running_time:
return True
return False
aws = AWS(region_name=region_name)
clusters_to_delete = list()
remaining_clusters = list()
cloudformation_vpc_names = list()
vpcs = aws.ec2_client.describe_vpcs()['Vpcs']
vpc_ids = [vpc['VpcId'] for vpc in vpcs]
vpc_objs = [aws.ec2_resource.Vpc(vpc_id) for vpc_id in vpc_ids]
for vpc_obj in vpc_objs:
vpc_tags = vpc_obj.tags
if vpc_tags:
cloudformation_vpc_name = [
tag['Value'] for tag in vpc_tags
if tag['Key'] == defaults.AWS_CLOUDFORMATION_TAG
]
if cloudformation_vpc_name:
cloudformation_vpc_names.append(cloudformation_vpc_name[0])
continue
vpc_name = [
tag['Value'] for tag in vpc_tags if tag['Key'] == 'Name'
][0]
cluster_name = vpc_name.replace('-vpc', '')
vpc_instances = vpc_obj.instances.all()
if not vpc_instances:
clusters_to_delete.append(cluster_name)
continue
# Append to clusters_to_delete if cluster should be deleted
if determine_cluster_deletion(vpc_instances, cluster_name):
clusters_to_delete.append(cluster_name)
else:
remaining_clusters.append(cluster_name)
else:
logger.info("No tags found for VPC")
# Get all cloudformation based clusters to delete
cf_clusters_to_delete = list()
for vpc_name in cloudformation_vpc_names:
instance_dicts = aws.get_instances_by_name_pattern(f"{vpc_name.replace('-vpc', '')}*")
ec2_instances = [aws.get_ec2_instance(instance_dict['id']) for instance_dict in instance_dicts]
if not ec2_instances:
continue
cluster_io_tag = None
for instance in ec2_instances:
cluster_io_tag = [
tag['Key'] for tag in instance.tags
if 'kubernetes.io/cluster' in tag['Key']
]
if cluster_io_tag:
break
if not cluster_io_tag:
logger.warning(
"Unable to find valid cluster IO tag from ec2 instance tags "
"for VPC %s. This is probably not an OCS cluster VPC!",
vpc_name
)
continue
cluster_name = cluster_io_tag[0].replace('kubernetes.io/cluster/', '')
if determine_cluster_deletion(ec2_instances, cluster_name):
cf_clusters_to_delete.append(cluster_name)
else:
remaining_clusters.append(cluster_name)
return clusters_to_delete, cf_clusters_to_delete, remaining_clusters
def cluster_cleanup():
parser = argparse.ArgumentParser(description='Cleanup AWS Resource')
parser.add_argument(
'--cluster',
nargs=1,
action='append',
required=True,
help="Cluster name tag"
)
parser.add_argument(
'--upi',
action='store_true',
required=False,
help="For UPI cluster deletion"
)
logging.basicConfig(level=logging.DEBUG)
args = parser.parse_args()
procs = []
for id in args.cluster:
cluster_name = id[0].rsplit('-', 1)[0]
logger.info(f"cleaning up {id[0]}")
proc = threading.Thread(target=cleanup, args=(cluster_name, id[0], args.upi))
proc.start()
procs.append(proc)
for p in procs:
p.join()
def aws_cleanup():
parser = argparse.ArgumentParser(
description='AWS overall resources cleanup according to running time'
)
parser.add_argument(
'--hours',
type=hour_valid,
action='store',
required=True,
help="""
Maximum running time of the cluster (in hours).
Clusters older than this will be deleted.
The minimum is 10 hours
"""
)
parser.add_argument(
'--region',
action='store',
required=False,
help="The name of the AWS region to delete the resources from"
)
parser.add_argument(
'--prefix',
action='append',
required=False,
type=prefix_hour_mapping,
help="""
Additional prefix:hour combo to treat as a special rule.
Clusters starting with this prefix will only be cleaned up if
their runtime exceeds the provided hour(this takes precedence
over the value provided to --hours). Note: if you want to skip
cleanup of a cluster entirely you can use 'never' for the hour.
Example: --prefix foo:24 --prefix bar:48 --prefix foobar:never
"""
)
parser.add_argument(
'--force',
action='store_true',
required=False,
help="""
Force cluster cleanup.
User will not be prompted for confirmation.
WARNING: this utility is destructive, only use this option if
you know what you are doing.
"""
)
args = parser.parse_args()
if not args.force:
confirmation = input(
'Careful! This action could be highly destructive. '
'Are you sure you want to proceed? '
)
assert confirmation == defaults.CONFIRMATION_ANSWER, (
"Wrong confirmation answer. Exiting"
)
prefixes_hours_to_spare = defaults.CLUSTER_PREFIXES_SPECIAL_RULES
if args.prefix:
for prefix, hours in args.prefix:
logger.info(
"Adding special rule for prefix '%s' with hours %s",
prefix, hours
)
prefixes_hours_to_spare.update({prefix: hours})
time_to_delete = args.hours * 60 * 60
region = defaults.AWS_REGION if not args.region else args.region
clusters_to_delete, cf_clusters_to_delete, remaining_clusters = (
get_clusters(
time_to_delete=time_to_delete, region_name=region,
prefixes_hours_to_spare=prefixes_hours_to_spare,
)
)
if not clusters_to_delete:
logger.info("No clusters to delete")
else:
logger.info("Deleting clusters: %s", clusters_to_delete)
get_openshift_installer()
procs = []
failed_deletions = []
for cluster in clusters_to_delete:
cluster_name = cluster.rsplit('-', 1)[0]
logger.info(f"Deleting cluster {cluster_name}")
proc = threading.Thread(
target=cleanup,
args=(cluster_name, cluster, False, failed_deletions)
)
proc.start()
procs.append(proc)
for p in procs:
p.join()
for cluster in cf_clusters_to_delete:
cluster_name = cluster.rsplit('-', 1)[0]
logger.info(f"Deleting UPI cluster {cluster_name}")
proc = threading.Thread(
target=cleanup,
args=(cluster_name, cluster, True, failed_deletions)
)
proc.start()
procs.append(proc)
for p in procs:
p.join()
logger.info("Remaining clusters: %s", remaining_clusters)
filename = 'failed_cluster_deletions.txt'
content = 'None\n'
if failed_deletions:
logger.error("Failed cluster deletions: %s", failed_deletions)
content = ""
for cluster in failed_deletions:
content += f"{cluster}\n"
with open(filename, 'w') as f:
f.write(content)
def prefix_hour_mapping(string):
"""
Validate that the string provided to --prefix is properly formatted
Args:
string (str): input provided to --prefix
Raises:
argparse.ArgumentTypeError: if the provided string is not
correctly formatted
Returns:
str, str: prefix, hours
"""
msg = (
f'{string} is not a properly formatted prefix:hour combination. '
f'See the --help for more information.'
)
try:
prefix, hours = string.split(':')
if not prefix or not hours:
raise argparse.ArgumentTypeError(msg)
# 'never' should be the only non-int value for hours
if hours != 'never':
int(hours)
except ValueError:
raise argparse.ArgumentTypeError(msg)
return prefix, hours
def hour_valid(string):
"""
Validate that the hour value provided is an int and not lower than the
minimum allowed running time
Args:
string: input provided to --hours
Raises:
argparse.ArgumentTypeError: if the provided hours value is not an int
or lower than the minimum allowed running time
Returns:
int: valid hour value
"""
try:
hours = int(string)
assert hours >= defaults.MINIMUM_CLUSTER_RUNNING_TIME
except ValueError:
msg = f'{string} is not an int, please provide an int value'
raise argparse.ArgumentTypeError(msg)
except AssertionError:
msg = (
f"Number of hours ({hours}) is lower than the required minimum "
f"({defaults.MINIMUM_CLUSTER_RUNNING_TIME})."
)
raise argparse.ArgumentTypeError(msg)
return hours
|
from typing import List, Tuple
import logging
import pytest
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s test %(levelname)s: %(message)s",
datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger("ambassador")
from ambassador import Cache, IR
from ambassador.compile import Compile
def require_no_errors(ir: IR):
assert ir.aconf.errors == {}
def require_errors(ir: IR, errors: List[Tuple[str, str]]):
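    # Flatten the IR errors into "key: message" strings so both lists can be
    # compared order-independently.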
flattened_ir_errors: List[str] = []
for key in ir.aconf.errors.keys():
for error in ir.aconf.errors[key]:
            flattened_ir_errors.append(f"{key}: {error['error']}")
flattened_wanted_errors: List[str] = [
f"{key}: {error}" for key, error in errors
]
assert sorted(flattened_ir_errors) == sorted(flattened_wanted_errors)
def test_valid_forward_client_cert_details():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
forward_client_cert_details: SANITIZE_SET
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
require_no_errors(r1["ir"])
require_no_errors(r2["ir"])
def test_invalid_forward_client_cert_details():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
forward_client_cert_details: SANITIZE_INVALID
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
require_errors(r1["ir"], [
( "ambassador.default.1", "'forward_client_cert_details' may not be set to 'SANITIZE_INVALID'; it may only be set to one of: SANITIZE, FORWARD_ONLY, APPEND_FORWARD, SANITIZE_SET, ALWAYS_FORWARD_ONLY")
])
require_errors(r2["ir"], [
( "ambassador.default.1", "'forward_client_cert_details' may not be set to 'SANITIZE_INVALID'; it may only be set to one of: SANITIZE, FORWARD_ONLY, APPEND_FORWARD, SANITIZE_SET, ALWAYS_FORWARD_ONLY")
])
def test_valid_set_current_client_cert_details():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
set_current_client_cert_details:
subject: true
dns: true
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
require_no_errors(r1["ir"])
require_no_errors(r2["ir"])
def test_invalid_set_current_client_cert_details_key():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
set_current_client_cert_details:
invalid: true
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
logger.info("R1 IR: %s", r1["ir"].as_json())
require_errors(r1["ir"], [
( "ambassador.default.1", "'set_current_client_cert_details' may not contain key 'invalid'; it may only contain keys: subject, cert, chain, dns, uri")
])
require_errors(r2["ir"], [
( "ambassador.default.1", "'set_current_client_cert_details' may not contain key 'invalid'; it may only contain keys: subject, cert, chain, dns, uri")
])
def test_invalid_set_current_client_cert_details_value():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
set_current_client_cert_details:
subject: invalid
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
require_errors(r1["ir"], [
( "ambassador.default.1", "'set_current_client_cert_details' value for key 'subject' may only be 'true' or 'false', not 'invalid'")
])
require_errors(r2["ir"], [
( "ambassador.default.1", "'set_current_client_cert_details' value for key 'subject' may only be 'true' or 'false', not 'invalid'")
])
|
from typing import List, Tuple
import logging
import pytest
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s test %(levelname)s: %(message)s",
datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger("ambassador")
from ambassador import Cache, IR
from ambassador.compile import Compile
def require_no_errors(ir: IR):
assert ir.aconf.errors == {}
def require_errors(ir: IR, errors: List[Tuple[str, str]]):
flattened_ir_errors: List[str] = []
for key in ir.aconf.errors.keys():
for error in ir.aconf.errors[key]:
flattened_ir_errors.append(f"{key}: {error['error']}")
flattened_wanted_errors: List[str] = [
f"{key}: {error}" for key, error in errors
]
assert sorted(flattened_ir_errors) == sorted(flattened_wanted_errors)
def test_valid_forward_client_cert_details():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
forward_client_cert_details: SANITIZE_SET
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
require_no_errors(r1["ir"])
require_no_errors(r2["ir"])
def test_invalid_forward_client_cert_details():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
forward_client_cert_details: SANITIZE_INVALID
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
require_errors(r1["ir"], [
( "ambassador.default.1", "'forward_client_cert_details' may not be set to 'SANITIZE_INVALID'; it may only be set to one of: SANITIZE, FORWARD_ONLY, APPEND_FORWARD, SANITIZE_SET, ALWAYS_FORWARD_ONLY")
])
require_errors(r2["ir"], [
( "ambassador.default.1", "'forward_client_cert_details' may not be set to 'SANITIZE_INVALID'; it may only be set to one of: SANITIZE, FORWARD_ONLY, APPEND_FORWARD, SANITIZE_SET, ALWAYS_FORWARD_ONLY")
])
def test_valid_set_current_client_cert_details():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
set_current_client_cert_details:
subject: true
dns: true
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
require_no_errors(r1["ir"])
require_no_errors(r2["ir"])
def test_invalid_set_current_client_cert_details_key():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
set_current_client_cert_details:
invalid: true
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
logger.info("R1 IR: %s", r1["ir"].as_json())
require_errors(r1["ir"], [
( "ambassador.default.1", "'set_current_client_cert_details' may not contain key 'invalid'; it may only contain keys: subject, cert, chain, dns, uri")
])
require_errors(r2["ir"], [
( "ambassador.default.1", "'set_current_client_cert_details' may not contain key 'invalid'; it may only contain keys: subject, cert, chain, dns, uri")
])
def test_invalid_set_current_client_cert_details_value():
yaml = """
---
apiVersion: getambassador.io/v2
kind: Module
metadata:
name: ambassador
namespace: default
spec:
config:
set_current_client_cert_details:
subject: invalid
"""
cache = Cache(logger)
r1 = Compile(logger, yaml, k8s=True)
r2 = Compile(logger, yaml, k8s=True, cache=cache)
require_errors(r1["ir"], [
( "ambassador.default.1", "'set_current_client_cert_details' value for key 'subject' may only be 'true' or 'false', not 'invalid'")
])
require_errors(r2["ir"], [
( "ambassador.default.1", "'set_current_client_cert_details' value for key 'subject' may only be 'true' or 'false', not 'invalid'")
])
|
from os import environ
def get_program(environment_variable: str, backup: str) -> str:
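    # Fall back to the given default when the environment variable is unset.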
return environ.get(environment_variable, default=backup)
def get_terminal_program(program: str) -> str:
return f"{PROGRAMS["terminal"]} -e {program}"
def get_site(url: str) -> str:
    return f"{PROGRAMS['browser']} {url}"
# Set used programs and commands
PROGRAMS = dict(
editor=get_program("MY_EDITOR", "code"),
terminal=get_program("MY_TERMINAL", "xterm"),
browser=get_program("MY_BROWSER", "firefox"),
launcher=get_program("MY_LAUNCHER", "rofi -show run"),
file_explorer=get_program("MY_EXPLORER", "thunar"),
email_client=get_program("MY_EMAIL_CLIENT", "thunderbird"),
work_communication=get_program("MY_WORK_COMMUNICATION", "skypeforlinux"),
screenshot=get_program("MY_SCREENSHOT", "flameshot gui"),
volume_manager=get_program("MY_VOLUME_MANAGER", "pavucontrol"),
system_monitor="psensor",
volume_toggle="amixer set Master toggle",
wallpaper_manager="variety",
)
# Append terminal programs (requires PROGRAMS to be defined)
PROGRAMS.update(
dict(
tech_news=get_terminal_program("daily-hn"),
)
)
# Set commands used to open useful websites
WEBSITES = dict(
stack_overflow=get_site(
"https://stackoverflow.com/questions/tagged/python?sort=Newest&filters=NoAnswers"
"&uqlId=33538"
),
github=get_site("https://github.com/Rolv-Apneseth"),
youtube=get_site("https://www.youtube.com/"),
netflix=get_site("https://www.netflix.com/"),
)
if __name__ == "__main__":
# Print out programs, for debugging
from pprint import pprint
pprint(PROGRAMS)
pprint(WEBSITES)
|
from os import environ
def get_program(environment_variable: str, backup: str) -> str:
return environ.get(environment_variable, default=backup)
def get_terminal_program(program: str) -> str:
return f"{PROGRAMS['terminal']} -e {program}"
def get_site(url: str) -> str:
return f'{PROGRAMS["browser"]} {url}'
# Set used programs and commands
PROGRAMS = dict(
editor=get_program("MY_EDITOR", "code"),
terminal=get_program("MY_TERMINAL", "xterm"),
browser=get_program("MY_BROWSER", "firefox"),
launcher=get_program("MY_LAUNCHER", "rofi -show run"),
file_explorer=get_program("MY_EXPLORER", "thunar"),
email_client=get_program("MY_EMAIL_CLIENT", "thunderbird"),
work_communication=get_program("MY_WORK_COMMUNICATION", "skypeforlinux"),
screenshot=get_program("MY_SCREENSHOT", "flameshot gui"),
volume_manager=get_program("MY_VOLUME_MANAGER", "pavucontrol"),
system_monitor="psensor",
volume_toggle="amixer set Master toggle",
wallpaper_manager="variety",
)
# Append terminal programs (requires PROGRAMS to be defined)
PROGRAMS.update(
dict(
tech_news=get_terminal_program("daily-hn"),
)
)
# Set commands used to open useful websites
WEBSITES = dict(
stack_overflow=get_site(
"https://stackoverflow.com/questions/tagged/python?sort=Newest&filters=NoAnswers"
"&uqlId=33538"
),
github=get_site("https://github.com/Rolv-Apneseth"),
youtube=get_site("https://www.youtube.com/"),
netflix=get_site("https://www.netflix.com/"),
)
if __name__ == "__main__":
# Print out programs, for debugging
from pprint import pprint
pprint(PROGRAMS)
pprint(WEBSITES)
|
import sqlite3
from util.constants import DATABASE
class DBManager:
def __init__(self):
self.connection = None
self.cursor = None
def connect(self):
self.connection = sqlite3.connect(DATABASE["path"])
self.cursor = self.connection.cursor()
return self
def create_tables(self):
# Create table
self.cursor.execute("CREATE TABLE user (password int, service int)")
self.connection.commit()
def insert_user(self, user) -> int:
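        # NOTE: values are interpolated directly into the SQL string; sqlite3
        # parameter binding ("?" placeholders) would be safer.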
self.cursor.execute(f" INSERT INTO user VALUES ('{user["password"]}', '${user["service"]}');")
self.connection.commit()
return self.cursor.lastrowid
def delete_user(self, _id=1):
self.cursor.execute(f" delete from user where rowid={_id}")
self.connection.commit()
def get_user(self, _id=1):
return self.cursor.execute(f"SELECT * FROM user WHERE rowid={_id}").fetchone()
def get_all_users(self):
return self.cursor.execute('SELECT * FROM user').fetchall()
def close(self):
self.connection.close()
def drop_tables(self):
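        # Drop every table listed in sqlite_master.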
for table in self.cursor.execute("select name from sqlite_master where type = 'table'"):
self.cursor.execute(f"drop table {table[0]}")
self.connection.commit()
if __name__ == "__main__":
db = DBManager().connect()
db.drop_tables()
db.create_tables()
row_id = db.insert_user({"password": 123, "service": 1})
print("rowid =", row_id)
print(db.get_user(row_id))
db.delete_user(row_id)
print(db.get_all_users())
db.drop_tables()
db.close()
|
import sqlite3
from util.constants import DATABASE
class DBManager:
def __init__(self):
self.connection = None
self.cursor = None
def connect(self):
self.connection = sqlite3.connect(DATABASE["path"])
self.cursor = self.connection.cursor()
return self
def create_tables(self):
# Create table
self.cursor.execute("CREATE TABLE user (password int, service int)")
self.connection.commit()
def insert_user(self, user) -> int:
self.cursor.execute(f" INSERT INTO user VALUES ('{user['password']}', '${user['service']}');")
self.connection.commit()
return self.cursor.lastrowid
def delete_user(self, _id=1):
self.cursor.execute(f" delete from user where rowid={_id}")
self.connection.commit()
def get_user(self, _id=1):
return self.cursor.execute(f"SELECT * FROM user WHERE rowid={_id}").fetchone()
def get_all_users(self):
return self.cursor.execute('SELECT * FROM user').fetchall()
def close(self):
self.connection.close()
def drop_tables(self):
for table in self.cursor.execute("select name from sqlite_master where type = 'table'"):
self.cursor.execute(f"drop table {table[0]}")
self.connection.commit()
if __name__ == "__main__":
db = DBManager().connect()
db.drop_tables()
db.create_tables()
row_id = db.insert_user({"password": 123, "service": 1})
print("rowid =", row_id)
print(db.get_user(row_id))
db.delete_user(row_id)
print(db.get_all_users())
db.drop_tables()
db.close()
|
# -*- coding: utf-8 -*-
"""
Run method, and save results.
Run as:
python main.py --dataset <ds> --method <met>
where dataset name should be in UCI_Datasets folder
and method is piven, qd, deep-ens, mid or only-rmse.
"""
import argparse
import json
import datetime
import tensorflow as tf
# import tensorflow.compat.v1 as tf
import scipy.stats as stats
import itertools
import os
import random
import numpy as np
import data_loader
from DataGen import DataGenerator
from DeepNetPI import TfNetwork
from utils import *
from sklearn.model_selection import train_test_split
start_time = datetime.datetime.now()
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='flight_delay', metavar='',
help='dataset name, flight_delay')
parser.add_argument('--method', type=str, help='piven, qd, mid, only-rmse, deep-ens', required=True)
args = parser.parse_args()
method = args.method
############# added code start #############
print(args)
print(args.dataset)
original_data_path = '../flight_delay_data/' ## flight delay data
results_path = './Results/piven/'+args.dataset + '_PIVEN_UCI.txt'
seed = 12345
neurons = [100]
lambda_in = 15.0
sigma_in = 0.2
random.seed(seed)
np.random.seed(seed)
tf.compat.v1.random.set_random_seed(seed)
# tf.random.set_random_seed(seed)
h_size = neurons
# n_runs = params['n_runs'] # number of runs
n_runs = 1
n_epoch = 500 # number of epochs to train for
# h_size = params['h_size'] # number of hidden units in network: [50]=layer_1 of 50, [8,4]=layer_1 of 8, layer_2 of 4
l_rate = 0.01 # learning rate of optimizer
decay_rate = 0.99 # learning rate decay
soften = 160.0 # soften param in the loss
patience = -1 # patience
n_ensemble = 1 # number of individual NNs in ensemble # 5
alpha = 0.05 # data points captured = (1 - alpha)
train_prop = 0.9 # % of data to use as training
in_ddof = 1 if n_runs > 1 else 0 # this is for results over runs only
is_early_stop = patience != -1
if args.dataset == 'YearPredictionMSD':
n_batch = 1000 # batch size
out_biases = [5., -5.]
else:
n_batch = 100 # batch size
    out_biases = [2., -2.] # chosen biases for output layer (for deep_ens these are overwritten to 0, 1)
results_runs = []
run = 0
fail_times = 0
for run in range(0, n_runs):
''' ######### flight delay data loading ###############'''
xTrain, yTrain, test_data_list = data_loader.load_flight_delays(original_data_path)
# y_train = np.reshape(y_train, (-1, 1))
# y_test = np.reshape(y_test, (-1, 1))
'''choose the train/test dataset '''
x_train = xTrain
y_train = yTrain
y_train = y_train.reshape(-1, 1)
# y_scale = yTrain_scale
test_idx = 0 # [0, 1, 2, 3] for test 1,2,3,4
X_test = test_data_list[test_idx][0]
y_test = test_data_list[test_idx][1]
y_test = y_test.reshape(-1, 1)
X_val = X_test
y_val = y_test.reshape(-1, 1)
y_pred_all = []
y_pred_all_train = []
i = 0
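    # Train ensemble members one at a time; a run whose loss looks failed is
    # retried (subject to the fail_times limit) rather than counted.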
while i < n_ensemble:
is_failed_run = False
tf.reset_default_graph()
sess = tf.Session()
print(f'\nrun number {run+1} of {n_runs} -- ensemble number {i+1} of {n_ensemble}')
# create network
NN = TfNetwork(x_size=x_train.shape[1],
y_size=2,
h_size=h_size,
alpha=alpha,
soften=soften,
lambda_in=lambda_in,
sigma_in=sigma_in,
out_biases=out_biases,
method=method,
patience=patience,
dataset=args.dataset,
rnd_seed=seed)
# train
NN.train(sess, x_train, y_train, X_val, y_val,
n_epoch=n_epoch,
l_rate=l_rate,
decay_rate=decay_rate,
is_early_stop=is_early_stop,
n_batch=n_batch)
# predict
y_loss, y_pred = NN.predict(sess, X_test=X_test, y_test=y_test)
# prediction for training data
y_loss_train, y_pred_train = NN.predict(sess, X_test=x_train, y_test=y_train)
# check whether the run failed or not
if np.abs(y_loss) > 20. and fail_times < 1: # jump out of some endless failures
# if False:
is_failed_run = True
fail_times+=1
print('\n\n### one messed up! repeating ensemble ### failed {}/5 times!'.format(fail_times))
continue # without saving result
else:
i += 1 # continue to next
# save prediction
y_pred_all.append(y_pred)
y_pred_all_train.append(y_pred_train)
sess.close()
y_pred_all = np.array(y_pred_all)
y_pred_all_train = np.array(y_pred_all_train)
if method == 'deep-ens':
y_pred_gauss_mid_all = y_pred_all[:, :, 0]
# occasionally may get -ves for std dev so need to do max
y_pred_gauss_dev_all = np.sqrt(np.maximum(np.log(1. + np.exp(y_pred_all[:, :, 1])), 10e-6))
y_pred_gauss_mid, y_pred_gauss_dev, y_pred_U, \
y_pred_L = gauss_to_pi(y_pred_gauss_mid_all, y_pred_gauss_dev_all)
else:
### for test data
y_pred_gauss_mid, y_pred_gauss_dev, y_pred_U, y_pred_L, y_pred_v = pi_to_gauss(y_pred_all, method=method)
### for training data
y_pred_gauss_mid_train, y_pred_gauss_dev_train, y_pred_U_train, y_pred_L_train, y_pred_v_train = pi_to_gauss(y_pred_all_train, method=method)
### calculate the confidence scores
# for train
y_U_cap_train = y_pred_U_train > y_train.reshape(-1)
y_L_cap_train = y_pred_L_train < y_train.reshape(-1)
MPIW_array_train = y_pred_U_train - y_pred_L_train
MPIW_train = np.mean(MPIW_array_train)
MPIW_array_test = y_pred_U - y_pred_L
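    # Confidence score: ratio of the mean training interval width to each
    # interval's width, capped at 1.0 (wider intervals => lower confidence).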
confidence_arr_test = [min(MPIW_train/test_width, 1.0) for test_width in MPIW_array_test]
confidence_arr_train = [min(MPIW_train/train_width, 1.0) for train_width in MPIW_array_train]
print('----------- OOD analysis --- confidence scores ----------------')
print('--- Train conf_scores MEAN: {}, STD: {}'.format(np.mean(confidence_arr_train), np.std(confidence_arr_train)))
print('--- Test: {} rank: {} conf_scores MEAN: {}, STD: {}'.format(test_idx+1, test_idx+1, np.mean(confidence_arr_test), np.std(confidence_arr_test)))
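    # L2 norm of each input vector, saved alongside the confidence scores for
    # the OOD analysis.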
dist_arr_train = np.sqrt(np.sum(x_train ** 2.0, axis=1))
dist_arr_test = np.sqrt(np.sum(X_val ** 2.0, axis=1))
confidence_arr_train = np.array(confidence_arr_train)
confidence_arr_test = np.array(confidence_arr_test)
PIVEN_OOD_train_np = np.hstack((dist_arr_train.reshape(-1, 1), confidence_arr_train.reshape(-1, 1)))
PIVEN_OOD_test_np = np.hstack((dist_arr_test.reshape(-1, 1), confidence_arr_test.reshape(-1, 1)))
np.savetxt('PIVEN_OOD_flight_delay_'+ str(test_idx+1) +'_train_np.txt', PIVEN_OOD_train_np, delimiter=',')
np.savetxt('PIVEN_OOD_flight_delay_'+ str(test_idx+1) +'_test_np.txt', PIVEN_OOD_test_np, delimiter=',')
# # work out metrics
# y_U_cap = y_pred_U > y_test.reshape(-1)
# y_L_cap = y_pred_L < y_test.reshape(-1)
# y_all_cap = y_U_cap * y_L_cap
# PICP = np.sum(y_all_cap) / y_L_cap.shape[0]
# MPIW = np.mean(y_pred_U - y_pred_L)
# y_pred_mid = np.mean((y_pred_U, y_pred_L), axis=0)
# # MSE = np.mean(np.square(Gen.scale_c * (y_pred_mid - y_test[:, 0])))
# # RMSE = np.sqrt(MSE)
# if method == 'qd' or method == 'deep-ens':
# RMSE_ELI = 0.0 # RMSE_PIVEN
# else:
# if method == 'piven':
# y_piven = y_pred_v * y_pred_U + (1 - y_pred_v) * y_pred_L
# elif method == 'mid':
# y_piven = 0.5 * y_pred_U + 0.5 * y_pred_L
# elif method == 'only-rmse':
# y_piven = y_pred_v
# MSE_ELI = np.mean(np.square(Gen.scale_c * (y_piven - y_test[:, 0])))
# RMSE_ELI = np.sqrt(MSE_ELI) # RMSE_PIVEN
# CWC = np_QD_loss(y_test, y_pred_L, y_pred_U, alpha, lambda_in) # from qd paper.
# neg_log_like = gauss_neg_log_like(y_test, y_pred_gauss_mid, y_pred_gauss_dev, Gen.scale_c)
# residuals = y_pred_mid - y_test[:, 0]
# shapiro_W, shapiro_p = stats.shapiro(residuals[:])
# results_runs.append((PICP, MPIW, CWC, RMSE, RMSE_ELI, neg_log_like, shapiro_W, shapiro_p))
# # summarize results
# results_path = f"./Results/{method}/"
    # results_path += f"{params['dataset']}-{start_time.strftime('%d-%m-%H-%M')}-{method}.csv"
# results = np.array(results_runs)
# results_to_csv(results_path, results, params, n_runs, n_ensemble, in_ddof)
# timing info
end_time = datetime.datetime.now()
total_time = end_time - start_time
print('\n\nminutes taken:', round(total_time.total_seconds() / 60, 3),
'\nstart_time:', start_time.strftime('%H:%M:%S'),
'end_time:', end_time.strftime('%H:%M:%S'))
|
# -*- coding: utf-8 -*-
"""
Run method, and save results.
Run as:
python main.py --dataset <ds> --method <met>
where dataset name should be in UCI_Datasets folder
and method is piven, qd, deep-ens, mid or only-rmse.
"""
import argparse
import json
import datetime
import tensorflow as tf
# import tensorflow.compat.v1 as tf
import scipy.stats as stats
import itertools
import os
import random
import numpy as np
import data_loader
from DataGen import DataGenerator
from DeepNetPI import TfNetwork
from utils import *
from sklearn.model_selection import train_test_split
start_time = datetime.datetime.now()
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='flight_delay', metavar='',
help='dataset name, flight_delay')
parser.add_argument('--method', type=str, help='piven, qd, mid, only-rmse, deep-ens', required=True)
args = parser.parse_args()
method = args.method
############# added code start #############
print(args)
print(args.dataset)
original_data_path = '../flight_delay_data/' ## flight delay data
results_path = './Results/piven/'+args.dataset + '_PIVEN_UCI.txt'
seed = 12345
neurons = [100]
lambda_in = 15.0
sigma_in = 0.2
random.seed(seed)
np.random.seed(seed)
tf.compat.v1.random.set_random_seed(seed)
# tf.random.set_random_seed(seed)
h_size = neurons
# n_runs = params['n_runs'] # number of runs
n_runs = 1
n_epoch = 500 # number epochs to train for
# h_size = params['h_size'] # number of hidden units in network: [50]=layer_1 of 50, [8,4]=layer_1 of 8, layer_2 of 4
l_rate = 0.01 # learning rate of optimizer
decay_rate = 0.99 # learning rate decay
soften = 160.0 # soften param in the loss
patience = -1 # patience
n_ensemble = 1 # number of individual NNs in ensemble # 5
alpha = 0.05 # data points captured = (1 - alpha)
train_prop = 0.9 # % of data to use as training
in_ddof = 1 if n_runs > 1 else 0 # this is for results over runs only
is_early_stop = patience != -1
if args.dataset == 'YearPredictionMSD':
n_batch = 1000 # batch size
out_biases = [5., -5.]
else:
n_batch = 100 # batch size
out_biases = [2., -2.] # chose biases for output layer (for deep_ens is overwritten to 0,1)
results_runs = []
run = 0
fail_times = 0
for run in range(0, n_runs):
''' ######### flight delay data loading ###############'''
xTrain, yTrain, test_data_list = data_loader.load_flight_delays(original_data_path)
# y_train = np.reshape(y_train, (-1, 1))
# y_test = np.reshape(y_test, (-1, 1))
'''choose the train/test dataset '''
x_train = xTrain
y_train = yTrain
y_train = y_train.reshape(-1, 1)
# y_scale = yTrain_scale
test_idx = 0 # [0, 1, 2, 3] for test 1,2,3,4
X_test = test_data_list[test_idx][0]
y_test = test_data_list[test_idx][1]
y_test = y_test.reshape(-1, 1)
X_val = X_test
y_val = y_test.reshape(-1, 1)
y_pred_all = []
y_pred_all_train = []
i = 0
while i < n_ensemble:
is_failed_run = False
tf.reset_default_graph()
sess = tf.Session()
print(f'\nrun number {run+1} of {n_runs} -- ensemble number {i+1} of {n_ensemble}')
# create network
NN = TfNetwork(x_size=x_train.shape[1],
y_size=2,
h_size=h_size,
alpha=alpha,
soften=soften,
lambda_in=lambda_in,
sigma_in=sigma_in,
out_biases=out_biases,
method=method,
patience=patience,
dataset=args.dataset,
rnd_seed=seed)
# train
NN.train(sess, x_train, y_train, X_val, y_val,
n_epoch=n_epoch,
l_rate=l_rate,
decay_rate=decay_rate,
is_early_stop=is_early_stop,
n_batch=n_batch)
# predict
y_loss, y_pred = NN.predict(sess, X_test=X_test, y_test=y_test)
# prediction for training data
y_loss_train, y_pred_train = NN.predict(sess, X_test=x_train, y_test=y_train)
# check whether the run failed or not
if np.abs(y_loss) > 20. and fail_times < 1: # jump out of some endless failures
# if False:
is_failed_run = True
fail_times+=1
print('\n\n### one messed up! repeating ensemble ### failed {}/5 times!'.format(fail_times))
continue # without saving result
else:
i += 1 # continue to next
# save prediction
y_pred_all.append(y_pred)
y_pred_all_train.append(y_pred_train)
sess.close()
y_pred_all = np.array(y_pred_all)
y_pred_all_train = np.array(y_pred_all_train)
if method == 'deep-ens':
y_pred_gauss_mid_all = y_pred_all[:, :, 0]
# occasionally may get -ves for std dev so need to do max
y_pred_gauss_dev_all = np.sqrt(np.maximum(np.log(1. + np.exp(y_pred_all[:, :, 1])), 10e-6))
y_pred_gauss_mid, y_pred_gauss_dev, y_pred_U, \
y_pred_L = gauss_to_pi(y_pred_gauss_mid_all, y_pred_gauss_dev_all)
else:
### for test data
y_pred_gauss_mid, y_pred_gauss_dev, y_pred_U, y_pred_L, y_pred_v = pi_to_gauss(y_pred_all, method=method)
### for training data
y_pred_gauss_mid_train, y_pred_gauss_dev_train, y_pred_U_train, y_pred_L_train, y_pred_v_train = pi_to_gauss(y_pred_all_train, method=method)
### calculate the confidence scores
# for train
y_U_cap_train = y_pred_U_train > y_train.reshape(-1)
y_L_cap_train = y_pred_L_train < y_train.reshape(-1)
MPIW_array_train = y_pred_U_train - y_pred_L_train
MPIW_train = np.mean(MPIW_array_train)
MPIW_array_test = y_pred_U - y_pred_L
confidence_arr_test = [min(MPIW_train/test_width, 1.0) for test_width in MPIW_array_test]
confidence_arr_train = [min(MPIW_train/train_width, 1.0) for train_width in MPIW_array_train]
print('----------- OOD analysis --- confidence scores ----------------')
print('--- Train conf_scores MEAN: {}, STD: {}'.format(np.mean(confidence_arr_train), np.std(confidence_arr_train)))
print('--- Test: {} rank: {} conf_scores MEAN: {}, STD: {}'.format(test_idx+1, test_idx+1, np.mean(confidence_arr_test), np.std(confidence_arr_test)))
dist_arr_train = np.sqrt(np.sum(x_train ** 2.0, axis=1))
dist_arr_test = np.sqrt(np.sum(X_val ** 2.0, axis=1))
confidence_arr_train = np.array(confidence_arr_train)
confidence_arr_test = np.array(confidence_arr_test)
PIVEN_OOD_train_np = np.hstack((dist_arr_train.reshape(-1, 1), confidence_arr_train.reshape(-1, 1)))
PIVEN_OOD_test_np = np.hstack((dist_arr_test.reshape(-1, 1), confidence_arr_test.reshape(-1, 1)))
np.savetxt('PIVEN_OOD_flight_delay_'+ str(test_idx+1) +'_train_np.txt', PIVEN_OOD_train_np, delimiter=',')
np.savetxt('PIVEN_OOD_flight_delay_'+ str(test_idx+1) +'_test_np.txt', PIVEN_OOD_test_np, delimiter=',')
# # work out metrics
# y_U_cap = y_pred_U > y_test.reshape(-1)
# y_L_cap = y_pred_L < y_test.reshape(-1)
# y_all_cap = y_U_cap * y_L_cap
# PICP = np.sum(y_all_cap) / y_L_cap.shape[0]
# MPIW = np.mean(y_pred_U - y_pred_L)
# y_pred_mid = np.mean((y_pred_U, y_pred_L), axis=0)
# # MSE = np.mean(np.square(Gen.scale_c * (y_pred_mid - y_test[:, 0])))
# # RMSE = np.sqrt(MSE)
# if method == 'qd' or method == 'deep-ens':
# RMSE_ELI = 0.0 # RMSE_PIVEN
# else:
# if method == 'piven':
# y_piven = y_pred_v * y_pred_U + (1 - y_pred_v) * y_pred_L
# elif method == 'mid':
# y_piven = 0.5 * y_pred_U + 0.5 * y_pred_L
# elif method == 'only-rmse':
# y_piven = y_pred_v
# MSE_ELI = np.mean(np.square(Gen.scale_c * (y_piven - y_test[:, 0])))
# RMSE_ELI = np.sqrt(MSE_ELI) # RMSE_PIVEN
# CWC = np_QD_loss(y_test, y_pred_L, y_pred_U, alpha, lambda_in) # from qd paper.
# neg_log_like = gauss_neg_log_like(y_test, y_pred_gauss_mid, y_pred_gauss_dev, Gen.scale_c)
# residuals = y_pred_mid - y_test[:, 0]
# shapiro_W, shapiro_p = stats.shapiro(residuals[:])
# results_runs.append((PICP, MPIW, CWC, RMSE, RMSE_ELI, neg_log_like, shapiro_W, shapiro_p))
# # summarize results
# results_path = f"./Results/{method}/"
# results_path += f"{params['dataset']}-{start_time.strftime('%d-%m-%H-%M')}-{method}.csv"
# results = np.array(results_runs)
# results_to_csv(results_path, results, params, n_runs, n_ensemble, in_ddof)
# timing info
end_time = datetime.datetime.now()
total_time = end_time - start_time
print('\n\nminutes taken:', round(total_time.total_seconds() / 60, 3),
'\nstart_time:', start_time.strftime('%H:%M:%S'),
'end_time:', end_time.strftime('%H:%M:%S'))
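# --- hedged sketch (not part of the original script) -----------------------------------
# A minimal, self-contained restatement of the MPIW-based confidence score computed above:
# each prediction-interval width is compared against the mean training-set interval width
# (MPIW) and the ratio is clipped at 1. The function name is illustrative only and does
# not come from the original code base.
def mpiw_confidence_scores(y_pred_U, y_pred_L, mpiw_train):
    """Return per-sample confidence scores in (0, 1] from prediction-interval widths."""
    widths = np.asarray(y_pred_U) - np.asarray(y_pred_L)
    return np.minimum(mpiw_train / widths, 1.0)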
|
import os
import logging
import copy
from tqdm import trange
from datetime import datetime
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from utils import ema
from lib.dataset import DataLooper
from lib.sde import VPSDE
from lib.model.ddpm import DDPM
from lib.trainer import DiffusionTrainer
from lib.sampler import DiffusionSampler
def train(config, logdir, resume=True):
"""Running a training pipeline"""
# Dataset setup
datalooper = DataLooper(
config,
batch_size=config.train.batch_size,
)
# Model setup
if config.model.name.lower() == 'ddpm':
net_model = DDPM(
config.dataset.ch,
config.model.ch,
config.model.ch_mult,
config.model.attn,
config.model.num_res_blocks,
config.model.dropout,
)
else:
raise ValueError(f'Unsupported model: {config.model.name}')
ema_model = copy.deepcopy(net_model)
if config.parallel:
net_model = torch.nn.DataParallel(net_model)
ema_model = torch.nn.DataParallel(ema_model)
# SDE setup
if config.sde.name == 'VPSDE':
sde = VPSDE(
config.sde.beta_min,
config.sde.beta_max,
config.sde.N,
)
else:
raise ValueError(f'Unsupported SDE: {config.sde.name}')
# Trainer setup
trainer = DiffusionTrainer(
sde,
net_model,
config.model.pred_type,
).to(config.device)
trainer.train()
# Optimizer setup
optim = torch.optim.Adam(
net_model.parameters(),
lr=config.train.lr,
)
warmup = config.train.warmup
sched = torch.optim.lr_scheduler.LambdaLR(
optim,
lr_lambda=lambda step: min(step, warmup) / warmup,
)
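# Note: the LambdaLR above multiplies the base lr by min(step, warmup) / warmup, i.e. a linear
# warmup from 0 to config.train.lr over the first `warmup` steps and a constant multiplier of
# 1.0 afterwards (e.g. warmup=5000: step 0 -> 0.0, step 2500 -> 0.5, step >= 5000 -> 1.0).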
# Sampler setup
sampler = DiffusionSampler(
sde,
ema_model,
config.model.pred_type,
).to(config.device)
sampler.eval()
# Log setup
sample_dir = os.path.join(logdir, 'samples')
os.makedirs(sample_dir, exist_ok=True)
writer = SummaryWriter(logdir)
# Show model size
model_size = sum(p.numel() for p in net_model.parameters())
logging.info(f'Model Params : {model_size / 1024 / 1024:.2f}M')
# Load checkpoint (if exists)
try:
assert resume
ckpt = torch.load(os.path.join(logdir, 'ckpt_latest.pt'))
net_model.load_state_dict(ckpt['net_model'])
ema_model.load_state_dict(ckpt['ema_model'])
optim.load_state_dict(ckpt['optimizer'])
sched.load_state_dict(ckpt['scheduler'])
init_step = ckpt['step'] + 1
logging.info(f'Checkpoint loaded! Re-start from step {init_step}.')
except Exception:  # no usable checkpoint (missing file, resume=False, or unexpected contents)
init_step = 0
logging.info(f'No checkpoint found. Start from step {init_step}.')
# Start training
with trange(init_step, config.train.total_steps, dynamic_ncols=True) as pbar:
for step in pbar:
# Train
optim.zero_grad()
x_0 = next(datalooper)
x_0 = x_0.to(config.device)
loss = trainer(x_0)
loss = loss.mean()
loss.backward()
torch.nn.utils.clip_grad_norm_(
net_model.parameters(),
config.train.grad_clip,
)
optim.step()
sched.step()
ema(net_model, ema_model, config.train.ema_decay)
# Log
writer.add_scalar('loss', loss, step)
pbar.set_postfix(loss=f'{loss:.3f}')
# Sample
if config.train.sample_step > 0 and step % config.train.sample_step == 0:
xs = []
total_steps = config.eval.sample_size // config.eval.batch_size
for i in range(0, config.eval.sample_size, config.eval.batch_size):
x_T = torch.randn_like(x_0)
with torch.no_grad():
x = sampler(
x_T,
pbar,
corrector_n_steps=1,
corrector_langevin_snr=0.16,
)
xs.append((x.detach().cpu() + 1.) / 2)
pbar.set_postfix(option=f'({i // config.eval.batch_size + 1}/{total_steps})')  # report the batch index, not the sample offset
xs = torch.cat(xs, dim=0)
save_image(
xs[:64],
os.path.join(sample_dir, f'sample_{step}.png'),
nrow=8,
)
# Save
if config.train.save_step > 0 and step % config.train.save_step == 0:
ckpt = {
'net_model': net_model.state_dict(),
'ema_model': ema_model.state_dict(),
'optimizer': optim.state_dict(),
'scheduler': sched.state_dict(),
'step': step,
}
torch.save(ckpt, os.path.join(logdir, 'ckpt_latest.pt'))
# Archive
if config.train.archive_step > 0 and step % config.train.archive_step == 0:
ckpt = {
'net_model': net_model.state_dict(),
'ema_model': ema_model.state_dict(),
'optimizer': optim.state_dict(),
'scheduler': sched.state_dict(),
'step': step,
}
torch.save(ckpt, os.path.join(logdir, f'ckpt_{step}.pt'))
writer.close()
def eval(config, logdir):
"""Running an evaluation pipeline"""
# Datalooper setup
eval_datalooper = DataLooper(
config,
batch_size=config.eval.batch_size,
)
sample_size = config.eval.sample_size
batch_size = config.eval.batch_size
# Model setup
if config.model.name.lower() == 'ddpm':
model = DDPM(
config.dataset.ch,
config.model.ch,
config.model.ch_mult,
config.model.attn,
config.model.num_res_blocks,
config.model.dropout,
)
else:
raise ValueError(f'Unsupported model: {config.model.name}')
if config.parallel:
model = torch.nn.DataParallel(model)
# SDE setup
if config.sde.name == 'VPSDE':
sde = VPSDE(
config.sde.beta_min,
config.sde.beta_max,
config.sde.N,
)
else:
raise ValueError(f'Unsupported SDE: {config.sde.name}')
# Sampler setup
sampler = DiffusionSampler(
sde,
model,
config.model.pred_type,
).to(config.device)
sampler.eval()
# Show model size
model_size = sum(p.numel() for p in model.parameters())
logging.info(f'Model Params : {model_size / 1024 / 1024:.2f}M')
# Load checkpoint
ckpt = torch.load(
os.path.join(logdir, 'ckpt_latest.pt'),
map_location=config.device
)
logging.info(f'Checkpoint step : {ckpt["step"]}')
model.load_state_dict(ckpt['ema_model'])
# Directory setup
eval_dir = os.path.join(logdir, 'eval')
sample_dir = os.path.join(eval_dir, 'samples')
os.makedirs(eval_dir, exist_ok=True)
os.makedirs(sample_dir, exist_ok=True)
xs = []
x_0 = next(eval_datalooper).to(config.device)
with trange(0, sample_size, batch_size, dynamic_ncols=True) as pbar:
for _ in pbar:
x_T = torch.randn_like(x_0)
with torch.no_grad():
x = sampler(
x_T,
pbar,
corrector_n_steps=3,
corrector_langevin_snr=0.16,
)
xs.append((x.detach().cpu() + 1.) / 2)
xs = torch.cat(xs, dim=0)
now = datetime.now().strftime('%Y%m%d-%H%M%S')  # keep the sample file name free of spaces and colons
save_image(
xs[:64],
os.path.join(sample_dir, f'samples_{now}.png'),
nrow=8,
)
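# --- hedged sketch (not part of this file) ----------------------------------------------
# `ema` is imported from `utils` above but its source is not shown here. The training loop
# appears to rely on the standard behaviour of folding the online model into the EMA model
# as an exponential moving average. The helper below is an assumption about that utility,
# provided for illustration only.
def ema_sketch(source: torch.nn.Module, target: torch.nn.Module, decay: float) -> None:
    """Update `target` parameters in place as an EMA of `source` parameters."""
    with torch.no_grad():
        for src_p, tgt_p in zip(source.parameters(), target.parameters()):
            tgt_p.mul_(decay).add_(src_p, alpha=1.0 - decay)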
|
import json
import os
import sys
import argparse
import shutil
import uuid
import prettytable
import glob
import requests
import logging
from datetime import datetime
from zipfile import ZipFile
from typing import Any, Tuple, Union
from Tests.Marketplace.marketplace_services import init_storage_client, init_bigquery_client, Pack, PackStatus, \
GCPConfig, PACKS_FULL_PATH, IGNORED_FILES, PACKS_FOLDER, IGNORED_PATHS, Metadata, CONTENT_ROOT_PATH, \
get_packs_statistics_dataframe, BucketUploadFlow, load_json, get_content_git_client, get_recent_commits_data, \
store_successful_and_failed_packs_in_ci_artifacts
from demisto_sdk.commands.common.tools import run_command, str2bool
from Tests.scripts.utils.log_util import install_logging
METADATA_TO_REMOVE = {
'IAM/metadata-1.1.0.json',
'IAM/metadata-1.2.0.json',
'IAM/metadata-1.0.0.json',
'IAM/metadata-1.3.0.json',
'HelloWorldPremium/metadata-1.0.0.json',
'HelloWorldPremium/metadata-1.1.0.json',
'HelloWorldPremium/metadata-1.0.8.json',
'HelloWorldPremium/metadata-1.0.9.json',
}
def get_packs_names(target_packs: str, previous_commit_hash: str = "HEAD^") -> set:
"""Detects and returns packs names to upload.
In case that `Modified` is passed in target_packs input, checks the git difference between two commits,
current and previous and greps only ones with prefix Packs/.
By default this function will receive `All` as target_packs and will return all packs names from content repo.
Args:
target_packs (str): csv packs names or `All` for all available packs in content
or `Modified` for only modified packs (currently not in use).
previous_commit_hash (str): the previous commit to diff with.
Returns:
set: unique collection of packs names to upload.
"""
if target_packs.lower() == "all":
if os.path.exists(PACKS_FULL_PATH):
all_packs = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(all_packs)}")
# return all available packs names
return all_packs
else:
logging.error(f"Folder {PACKS_FOLDER} was not found at the following path: {PACKS_FULL_PATH}")
sys.exit(1)
elif target_packs.lower() == "modified":
cmd = f"git diff --name-only HEAD..{previous_commit_hash} | grep 'Packs/'"
modified_packs_path = run_command(cmd).splitlines()
modified_packs = {p.split('/')[1] for p in modified_packs_path if p not in IGNORED_PATHS}
logging.info(f"Number of modified packs is: {len(modified_packs)}")
# return only modified packs between two commits
return modified_packs
elif target_packs and isinstance(target_packs, str):
modified_packs = {p.strip() for p in target_packs.split(',') if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(modified_packs)}")
# return only packs from csv list
return modified_packs
else:
logging.critical("Not correct usage of flag -p. Please check help section of upload packs script.")
sys.exit(1)
def extract_packs_artifacts(packs_artifacts_path: str, extract_destination_path: str):
"""Extracts all packs from content pack artifact zip.
Args:
packs_artifacts_path (str): full path to content artifacts zip file.
extract_destination_path (str): full path to directory where to extract the packs.
"""
with ZipFile(packs_artifacts_path) as packs_artifacts:
packs_artifacts.extractall(extract_destination_path)
logging.info("Finished extracting packs artifacts")
def download_and_extract_index(storage_bucket: Any, extract_destination_path: str) -> Tuple[str, Any, int]:
"""Downloads and extracts index zip from cloud storage.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
extract_destination_path (str): the full path of extract folder.
Returns:
str: extracted index folder full path.
Blob: google cloud storage object that represents index.zip blob.
int: downloaded index generation.
"""
if storage_bucket.name == GCPConfig.PRODUCTION_PRIVATE_BUCKET:
index_storage_path = os.path.join(GCPConfig.PRIVATE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
else:
index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
download_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip")
index_blob = storage_bucket.blob(index_storage_path)
index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME)
index_generation = 0 # Setting to 0 makes the operation succeed only if there are no live versions of the blob
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
if not index_blob.exists():
os.mkdir(index_folder_path)
logging.error(f"{storage_bucket.name} index blob does not exists")
return index_folder_path, index_blob, index_generation
index_blob.reload()
index_generation = index_blob.generation
index_blob.download_to_filename(download_index_path, if_generation_match=index_generation)
if os.path.exists(download_index_path):
with ZipFile(download_index_path, 'r') as index_zip:
index_zip.extractall(extract_destination_path)
if not os.path.exists(index_folder_path):
logging.critical(f"Failed creating {GCPConfig.INDEX_NAME} folder with extracted data.")
sys.exit(1)
os.remove(download_index_path)
logging.success(f"Finished downloading and extracting {GCPConfig.INDEX_NAME} file to "
f"{extract_destination_path}")
return index_folder_path, index_blob, index_generation
else:
logging.critical(f"Failed to download {GCPConfig.INDEX_NAME}.zip file from cloud storage.")
sys.exit(1)
def update_index_folder(index_folder_path: str, pack_name: str, pack_path: str, pack_version: str = '',
hidden_pack: bool = False) -> bool:
"""
Copies pack folder into index folder.
Args:
index_folder_path (str): full path to index folder.
pack_name (str): pack folder name to copy.
pack_path (str): pack folder full path.
pack_version (str): pack latest version.
hidden_pack (bool): whether pack is hidden/internal or regular pack.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
try:
index_folder_subdirectories = [d for d in os.listdir(index_folder_path) if
os.path.isdir(os.path.join(index_folder_path, d))]
index_pack_path = os.path.join(index_folder_path, pack_name)
metadata_files_in_index = glob.glob(f"{index_pack_path}/metadata-*.json")
new_metadata_path = os.path.join(index_pack_path, f"metadata-{pack_version}.json")
if pack_version:
# Update the latest metadata
if new_metadata_path in metadata_files_in_index:
metadata_files_in_index.remove(new_metadata_path)
# Remove old files but keep metadata files
if pack_name in index_folder_subdirectories:
for d in os.scandir(index_pack_path):
if d.path not in metadata_files_in_index:
os.remove(d.path)
# skipping index update in case hidden is set to True
if hidden_pack:
if os.path.exists(index_pack_path):
shutil.rmtree(index_pack_path) # remove pack folder inside index in case that it exists
logging.warning(f"Skipping updating {pack_name} pack files to index")
task_status = True
return True
# Copy new files and add metadata for latest version
for d in os.scandir(pack_path):
if not os.path.exists(index_pack_path):
os.mkdir(index_pack_path)
logging.info(f"Created {pack_name} pack folder in {GCPConfig.INDEX_NAME}")
shutil.copy(d.path, index_pack_path)
if pack_version and Pack.METADATA == d.name:
shutil.copy(d.path, new_metadata_path)
task_status = True
except Exception:
logging.exception(f"Failed in updating index folder for {pack_name} pack.")
finally:
return task_status
def clean_non_existing_packs(index_folder_path: str, private_packs: list, storage_bucket: Any) -> bool:
""" Detects packs that are not part of content repo or from private packs bucket.
If such packs are detected, the problematic pack is deleted from the index and from the content/packs/{target_pack} path.
Args:
index_folder_path (str): full path to downloaded index folder.
private_packs (list): priced packs from private bucket.
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
Returns:
bool: whether cleanup was skipped or not.
"""
if ('CI' not in os.environ) or (
os.environ.get('CIRCLE_BRANCH') != 'master' and storage_bucket.name == GCPConfig.PRODUCTION_BUCKET) or (
os.environ.get('CIRCLE_BRANCH') == 'master' and storage_bucket.name not in
(GCPConfig.PRODUCTION_BUCKET, GCPConfig.CI_BUILD_BUCKET)):
logging.info("Skipping cleanup of packs in gcs.") # skipping execution of cleanup in gcs bucket
return True
public_packs_names = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
private_packs_names = {p.get('id', '') for p in private_packs}
valid_packs_names = public_packs_names.union(private_packs_names)
# search for invalid packs folder inside index
invalid_packs_names = {(entry.name, entry.path) for entry in os.scandir(index_folder_path) if
entry.name not in valid_packs_names and entry.is_dir()}
if invalid_packs_names:
try:
logging.warning(f"Detected {len(invalid_packs_names)} non existing pack inside index, starting cleanup.")
for invalid_pack in invalid_packs_names:
invalid_pack_name = invalid_pack[0]
invalid_pack_path = invalid_pack[1]
# remove pack from index
shutil.rmtree(invalid_pack_path)
logging.warning(f"Deleted {invalid_pack_name} pack from {GCPConfig.INDEX_NAME} folder")
# important to add trailing slash at the end of path in order to avoid packs with same prefix
invalid_pack_gcs_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, invalid_pack_name, "") # by design
for invalid_blob in [b for b in storage_bucket.list_blobs(prefix=invalid_pack_gcs_path)]:
logging.warning(f"Deleted invalid {invalid_pack_name} pack under url {invalid_blob.public_url}")
invalid_blob.delete() # delete invalid pack in gcs
except Exception:
logging.exception("Failed to cleanup non existing packs.")
else:
logging.info(f"No invalid packs detected inside {GCPConfig.INDEX_NAME} folder")
return False
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
build_number: str, private_packs: list, current_commit_hash: str,
index_generation: int, is_private: bool = False, force_upload: bool = False,
previous_commit_hash: str = None):
"""
Upload updated index zip to cloud storage.
:param index_folder_path: index folder full path.
:param extract_destination_path: extract folder full path.
:param index_blob: google cloud storage object that represents index.zip blob.
:param build_number: circleCI build number, used as an index revision.
:param private_packs: List of private packs and their price.
:param current_commit_hash: last commit hash of head.
:param index_generation: downloaded index generation.
:param is_private: Indicates if upload is private.
:param force_upload: Indicates if force upload or not.
:param previous_commit_hash: The previous commit hash to diff with.
:returns None.
"""
if force_upload:
# If we force upload we don't want to update the commit in the index.json file,
# this is to be able to identify all changed packs in the next upload
commit = previous_commit_hash
logging.info('Force upload flow - Index commit hash should not be changed')
else:
# Otherwise, update the index with the current commit hash (the commit of the upload)
commit = current_commit_hash
logging.info('Updating production index commit hash to master last commit hash')
logging.debug(f'commit hash is: {commit}')
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json"), "w+") as index_file:
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
'packs': private_packs,
'commit': commit
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(index_folder_path)
# REMOVE AFTER SUCCESSFUL RUN
logging.info("Starting to remove old meta files")
iam_metadata = glob.glob(f"{index_folder_path}/IAM/metadata-*.json")
for iam_meta in iam_metadata:
if any(x in iam_meta for x in METADATA_TO_REMOVE):
logging.info(f"Removing - {iam_meta}")
os.remove(iam_meta)
hwp_metadata = glob.glob(f"{index_folder_path}/HelloWorldPremium/metadata-*.json")
for hwp_meta in hwp_metadata:
if any(x in hwp_meta for x in METADATA_TO_REMOVE):
logging.info(f"Removing - {hwp_meta}")
os.remove(hwp_meta)
index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
index_blob.reload()
current_index_generation = index_blob.generation
index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
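# Optimistic concurrency check: upload only if the blob generation observed now still equals the
# generation recorded when the index was downloaded, i.e. no other build has modified index.zip
# in the meantime (private-bucket uploads skip this check).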
if is_private or current_index_generation == index_generation:
index_blob.upload_from_filename(index_zip_path)
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
logging.critical(f"Downloaded index generation: {index_generation}")
logging.critical(f"Current index generation: {current_index_generation}")
sys.exit(0)
except Exception:
logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
sys.exit(1)
finally:
shutil.rmtree(index_folder_path)
def upload_core_packs_config(storage_bucket: Any, build_number: str, index_folder_path: str):
"""Uploads corepacks.json file configuration to bucket. Corepacks file includes core packs for server installation.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
build_number (str): circleCI build number.
index_folder_path (str): The index folder path.
"""
core_packs_public_urls = []
found_core_packs = set()
for pack in os.scandir(index_folder_path):
if pack.is_dir() and pack.name in GCPConfig.CORE_PACKS_LIST:
pack_metadata_path = os.path.join(index_folder_path, pack.name, Pack.METADATA)
if not os.path.exists(pack_metadata_path):
logging.critical(f"{pack.name} pack {Pack.METADATA} is missing in {GCPConfig.INDEX_NAME}")
sys.exit(1)
with open(pack_metadata_path, 'r') as metadata_file:
metadata = json.load(metadata_file)
pack_current_version = metadata.get('currentVersion', Pack.PACK_INITIAL_VERSION)
core_pack_relative_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, pack.name,
pack_current_version, f"{pack.name}.zip")
core_pack_public_url = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name, core_pack_relative_path)
if not storage_bucket.blob(core_pack_relative_path).exists():
logging.critical(f"{pack.name} pack does not exist under {core_pack_relative_path} path")
sys.exit(1)
core_packs_public_urls.append(core_pack_public_url)
found_core_packs.add(pack.name)
if len(found_core_packs) != len(GCPConfig.CORE_PACKS_LIST):
missing_core_packs = set(GCPConfig.CORE_PACKS_LIST) ^ found_core_packs
logging.critical(f"Number of defined core packs are: {len(GCPConfig.CORE_PACKS_LIST)}")
logging.critical(f"Actual number of found core packs are: {len(found_core_packs)}")
logging.critical(f"Missing core packs are: {missing_core_packs}")
sys.exit(1)
# construct core pack data with public gcs urls
core_packs_data = {
'corePacks': core_packs_public_urls,
'buildNumber': build_number
}
# upload core pack json file to gcs
core_packs_config_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, GCPConfig.CORE_PACK_FILE_NAME)
blob = storage_bucket.blob(core_packs_config_path)
blob.upload_from_string(json.dumps(core_packs_data, indent=4))
logging.success(f"Finished uploading {GCPConfig.CORE_PACK_FILE_NAME} to storage.")
def upload_id_set(storage_bucket: Any, id_set_local_path: str = None):
"""
Uploads the id_set.json artifact to the bucket.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
id_set_local_path: path to the id_set.json file
"""
if not id_set_local_path:
logging.info("Skipping upload of id set to gcs.")
return
id_set_gcs_path = os.path.join(os.path.dirname(GCPConfig.STORAGE_BASE_PATH), 'id_set.json')
blob = storage_bucket.blob(id_set_gcs_path)
with open(id_set_local_path, mode='r') as f:
blob.upload_from_file(f)
logging.success("Finished uploading id_set.json to storage.")
def _build_summary_table(packs_input_list: list, include_pack_status: bool = False) -> Any:
"""Build summary table from pack list
Args:
packs_input_list (list): list of Packs
include_pack_status (bool): whether pack includes status
Returns:
PrettyTable: table with upload result of packs.
"""
table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Aggregated Pack Versions"]
if include_pack_status:
table_fields.append("Status")
table = prettytable.PrettyTable()
table.field_names = table_fields
for index, pack in enumerate(packs_input_list, start=1):
pack_status_message = PackStatus[pack.status].value
row = [index, pack.name, pack.display_name, pack.latest_version,
pack.aggregation_str if pack.aggregated and pack.aggregation_str else "False"]
if include_pack_status:
row.append(pack_status_message)
table.add_row(row)
return table
def build_summary_table_md(packs_input_list: list, include_pack_status: bool = False) -> str:
"""Build markdown summary table from pack list
Args:
packs_input_list (list): list of Packs
include_pack_status (bool): whether pack includes status
Returns:
Markdown table: table with upload result of packs.
"""
table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Status"] if include_pack_status \
else ["Index", "Pack ID", "Pack Display Name", "Latest Version"]
table = ['|', '|']
for key in table_fields:
table[0] = f'{table[0]} {key} |'
table[1] = f'{table[1]} :- |'
for index, pack in enumerate(packs_input_list):
pack_status_message = PackStatus[pack.status].value if include_pack_status else ''
row = [index, pack.name, pack.display_name, pack.latest_version, pack_status_message] if include_pack_status \
else [index, pack.name, pack.display_name, pack.latest_version]
row_hr = '|'
for _value in row:
row_hr = f'{row_hr} {_value}|'
table.append(row_hr)
return '\n'.join(table)
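# --- hedged usage sketch (not part of the original script) ------------------------------
# Illustrates the markdown emitted by build_summary_table_md(). `_FakePack` is a hypothetical
# stand-in exposing only the attributes the function reads; it is not the real Pack class,
# and this helper is never called by the module.
def _demo_build_summary_table_md() -> str:
    from collections import namedtuple
    _FakePack = namedtuple('_FakePack', ['name', 'display_name', 'latest_version', 'status'])
    packs = [_FakePack('HelloWorld', 'Hello World', '1.2.3', PackStatus.SUCCESS.name)]
    # Expected output (note the zero-based index used by the function):
    # | Index | Pack ID | Pack Display Name | Latest Version |
    # | :- | :- | :- | :- |
    # | 0| HelloWorld| Hello World| 1.2.3|
    return build_summary_table_md(packs)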
def update_index_with_priced_packs(private_storage_bucket: Any, extract_destination_path: str,
index_folder_path: str, pack_names: set) \
-> Tuple[Union[list, list], str, Any, list]:
""" Updates index with priced packs and returns list of priced packs data.
Args:
private_storage_bucket (google.cloud.storage.bucket.Bucket): google storage private bucket.
extract_destination_path (str): full path to extract directory.
index_folder_path (str): downloaded index folder directory path.
pack_names (set): Collection of pack names.
Returns:
list: priced packs from the private bucket.
str: path to the extracted private index.
Blob: google cloud storage object that represents the private index.zip blob.
list: ids of the private packs that were updated.
"""
private_index_path = ""
private_packs = []
updated_private_packs = []
try:
(private_index_path, private_index_blob, _) = \
download_and_extract_index(private_storage_bucket,
os.path.join(extract_destination_path,
'private'))
logging.info("get_private_packs")
private_packs = get_private_packs(private_index_path, pack_names,
extract_destination_path)
logging.info("get_updated_private_packs")
updated_private_packs = get_updated_private_packs(private_packs, index_folder_path)
logging.info("add_private_packs_to_index")
add_private_packs_to_index(index_folder_path, private_index_path)
logging.info("Finished updating index with priced packs")
except Exception:
logging.exception('Could not add private packs to the index.')
finally:
shutil.rmtree(os.path.dirname(private_index_path), ignore_errors=True)
return private_packs, private_index_path, private_index_blob, updated_private_packs
def get_updated_private_packs(private_packs, index_folder_path):
""" Checks for updated private packs by compering contentCommitHash between public index json and private pack
metadata files.
Args:
private_packs (list): List of dicts containing pack metadata information.
index_folder_path (str): The public index folder path.
Returns:
updated_private_packs (list): a list of the ids of all private packs that were updated.
"""
updated_private_packs = []
public_index_file_path = os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")
public_index_json = load_json(public_index_file_path)
private_packs_from_public_index = public_index_json.get("packs", {})
for pack in private_packs:
private_pack_id = pack.get('id')
private_commit_hash_from_metadata = pack.get('contentCommitHash', "")
private_commit_hash_from_content_repo = ""
for public_pack in private_packs_from_public_index:
if public_pack.get('id') == private_pack_id:
private_commit_hash_from_content_repo = public_pack.get('contentCommitHash', "")
private_pack_was_updated = private_commit_hash_from_metadata != private_commit_hash_from_content_repo
if private_pack_was_updated:
updated_private_packs.append(private_pack_id)
return updated_private_packs
def get_private_packs(private_index_path: str, pack_names: set = set(),
extract_destination_path: str = '') -> list:
"""
Gets a list of private packs.
:param private_index_path: Path to where the private index is located.
:param pack_names: Collection of pack names.
:param extract_destination_path: Path to where the files should be extracted to.
:return: List of dicts containing pack metadata information.
"""
try:
metadata_files = glob.glob(f"{private_index_path}/**/metadata.json")
except Exception:
logging.exception(f'Could not find metadata files in {private_index_path}.')
return []
if not metadata_files:
logging.warning(f'No metadata files found in [{private_index_path}]')
private_packs = []
for metadata_file_path in metadata_files:
try:
with open(metadata_file_path, "r") as metadata_file:
metadata = json.load(metadata_file)
pack_id = metadata.get('id')
is_changed_private_pack = pack_id in pack_names
if is_changed_private_pack: # Should take metadata from artifacts.
with open(os.path.join(extract_destination_path, pack_id, "pack_metadata.json"),
"r") as metadata_file:
metadata = json.load(metadata_file)
if metadata:
private_packs.append({
'id': metadata.get('id') if not is_changed_private_pack else metadata.get('name'),
'price': metadata.get('price'),
'vendorId': metadata.get('vendorId'),
'vendorName': metadata.get('vendorName'),
'contentCommitHash': metadata.get('contentCommitHash', "")
})
except ValueError:
logging.exception(f'Invalid JSON in the metadata file [{metadata_file_path}].')
return private_packs
def add_private_packs_to_index(index_folder_path: str, private_index_path: str):
""" Add the private packs to the index folder.
Args:
index_folder_path: The index folder path.
private_index_path: The path for the index of the private packs.
"""
for d in os.scandir(private_index_path):
if os.path.isdir(d.path):
update_index_folder(index_folder_path, d.name, d.path)
def check_if_index_is_updated(index_folder_path: str, content_repo: Any, current_commit_hash: str,
previous_commit_hash: str, storage_bucket: Any):
""" Checks stored at index.json commit hash and compares it to current commit hash. In case no packs folders were
added/modified/deleted, all other steps are not performed.
Args:
index_folder_path (str): index folder full path.
content_repo (git.repo.base.Repo): content repo object.
current_commit_hash (str): last commit hash of head.
previous_commit_hash (str): the previous commit to diff with
storage_bucket: public storage bucket.
"""
skipping_build_task_message = "Skipping Upload Packs To Marketplace Storage Step."
try:
if storage_bucket.name not in (GCPConfig.CI_BUILD_BUCKET, GCPConfig.PRODUCTION_BUCKET):
logging.info("Skipping index update check in non production/build bucket")
return
if not os.path.exists(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")):
# will happen only in init bucket run
logging.warning(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
return
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")) as index_file:
index_json = json.load(index_file)
index_commit_hash = index_json.get('commit', previous_commit_hash)
try:
index_commit = content_repo.commit(index_commit_hash)
except Exception:
# a build that is not up to date will hit this exception because its checkout is missing the newer commit
logging.exception(f"Index is already updated. {skipping_build_task_message}")
sys.exit()
current_commit = content_repo.commit(current_commit_hash)
if current_commit.committed_datetime <= index_commit.committed_datetime:
logging.warning(
f"Current commit {current_commit.hexsha} committed time: {current_commit.committed_datetime}")
logging.warning(f"Index commit {index_commit.hexsha} committed time: {index_commit.committed_datetime}")
logging.warning("Index is already updated.")
logging.warning(skipping_build_task_message)
sys.exit()
for changed_file in current_commit.diff(index_commit):
if changed_file.a_path.startswith(PACKS_FOLDER):
logging.info(
f"Found changed packs between index commit {index_commit.hexsha} and {current_commit.hexsha}")
break
else:
logging.warning(f"No changes found between index commit {index_commit.hexsha} and {current_commit.hexsha}")
logging.warning(skipping_build_task_message)
sys.exit()
except Exception:
logging.exception("Failed in checking status of index")
sys.exit(1)
def print_packs_summary(successful_packs: list, skipped_packs: list, failed_packs: list,
fail_build: bool = True):
"""Prints summary of packs uploaded to gcs.
Args:
successful_packs (list): list of packs that were successfully uploaded.
skipped_packs (list): list of packs that were skipped during upload.
failed_packs (list): list of packs that were failed during upload.
fail_build (bool): indicates whether to fail the build when a pack fails to upload.
"""
logging.info(
f"""\n
------------------------------------------ Packs Upload Summary ------------------------------------------
Total number of packs: {len(successful_packs + skipped_packs + failed_packs)}
----------------------------------------------------------------------------------------------------------""")
if successful_packs:
successful_packs_table = _build_summary_table(successful_packs)
logging.success(f"Number of successful uploaded packs: {len(successful_packs)}")
logging.success(f"Uploaded packs:\n{successful_packs_table}")
with open('pack_list.txt', 'w') as f:
f.write(successful_packs_table.get_string())
if skipped_packs:
skipped_packs_table = _build_summary_table(skipped_packs, include_pack_status=True)
logging.warning(f"Number of skipped packs: {len(skipped_packs)}")
logging.warning(f"Skipped packs:\n{skipped_packs_table}")
if failed_packs:
failed_packs_table = _build_summary_table(failed_packs, include_pack_status=True)
logging.critical(f"Number of failed packs: {len(failed_packs)}")
logging.critical(f"Failed packs:\n{failed_packs_table}")
if fail_build:
# fail_build is False in the bucket upload flow, so the Prepare Content step does not fail when a pack upload fails.
sys.exit(1)
# for external pull requests - when there is no failed packs, add the build summary to the pull request
branch_name = os.environ.get('CIRCLE_BRANCH')
if branch_name and branch_name.startswith('pull/'):
successful_packs_table = build_summary_table_md(successful_packs)
build_num = os.environ['CIRCLE_BUILD_NUM']
bucket_path = f'https://console.cloud.google.com/storage/browser/' \
f'marketplace-ci-build/content/builds/{branch_name}/{build_num}'
pr_comment = f'Number of successful uploaded packs: {len(successful_packs)}\n' \
f'Uploaded packs:\n{successful_packs_table}\n\n' \
f'Browse to the build bucket with this address:\n{bucket_path}'
add_pr_comment(pr_comment)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-e', '--extract_path', help="Full path of folder to extract wanted packs", required=True)
parser.add_argument('-b', '--bucket_name', help="Storage bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-i', '--id_set_path', help="The full path of id_set.json", required=False)
parser.add_argument('-d', '--pack_dependencies', help="Full path to pack dependencies json file.", required=False)
parser.add_argument('-p', '--pack_names',
help=("Target packs to upload to gcs. Optional values are: `All`, "
"`Modified` or csv list of packs "
"Default is set to `All`"),
required=False, default="All")
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=False)
parser.add_argument('-o', '--override_all_packs', help="Override all existing packs in cloud storage",
type=str2bool, default=False, required=True)
parser.add_argument('-k', '--key_string', help="Base64 encoded signature key used for signing packs.",
required=False)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-rt', '--remove_test_playbooks', type=str2bool,
help='Should remove test playbooks from content packs or not.', default=True)
parser.add_argument('-bu', '--bucket_upload', help='is bucket upload build?', type=str2bool, required=True)
parser.add_argument('-pb', '--private_bucket_name', help="Private storage bucket name", required=False)
parser.add_argument('-c', '--circle_branch', help="CircleCi branch of current build", required=True)
parser.add_argument('-f', '--force_upload', help="is force upload build?", type=str2bool, required=True)
# disable-secrets-detection-end
return parser.parse_args()
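# Example invocation (illustrative values only; the script file name is assumed):
# python upload_packs.py -a /path/to/content_packs.zip -e /tmp/extracted -b marketplace-dist \
#     -o false -bu true -c master -f false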
def add_pr_comment(comment: str):
"""Add comment to the pull request.
Args:
comment (string): The comment text.
"""
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = f'?q={sha1}+repo:demisto/content+is:pr+is:open+head:{branch_name}'  # deduplicated is:open qualifier
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
logging.warning(
f'Add pull request comment failed: There is more than one open pull request for branch {branch_name}.')
except Exception:
logging.exception('Add pull request comment failed.')
def handle_github_response(response: requests.Response) -> dict:
"""
Handles the response from the GitHub server after making a request.
:param response: Response from the server.
:return: The returned response.
"""
res_dict = response.json()
if not res_dict.get('ok'):
logging.warning(f'Add pull request comment failed: {res_dict.get("message")}')
return res_dict
def get_packs_summary(packs_list):
""" Returns the packs list divided into 3 lists by their status
Args:
packs_list (list): The full packs list
Returns: 3 lists of packs - successful_packs, skipped_packs & failed_packs
"""
successful_packs = [pack for pack in packs_list if pack.status == PackStatus.SUCCESS.name]
skipped_packs = [pack for pack in packs_list if
pack.status == PackStatus.PACK_ALREADY_EXISTS.name
or pack.status == PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name]
failed_packs = [pack for pack in packs_list if pack not in successful_packs and pack not in skipped_packs]
return successful_packs, skipped_packs, failed_packs
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
id_set_path = option.id_set_path
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
circle_branch = option.circle_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, circle_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket)
# google cloud bigquery client initialized
bq_client = init_bigquery_client(service_account)
packs_statistic_df = get_packs_statistics_dataframe(bq_client)
updated_private_packs_ids = []
if private_bucket_name: # Add private packs to the index
private_storage_bucket = storage_client.bucket(private_bucket_name)
private_packs, _, _, updated_private_packs_ids = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
index_folder_path, pack_names)
else: # skipping private packs
logging.debug("Skipping index update of priced packs")
private_packs = []
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.name  # use .name like the other statuses so summary-table lookups work
pack.cleanup()
continue
task_status, pack_content_items = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status, integration_images = pack.upload_integration_images(storage_bucket)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status, author_image = pack.upload_author_image(storage_bucket)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, pack_was_modified = pack.detect_modified(content_repo, index_folder_path, current_commit_hash,
previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items,
integration_images=integration_images, author_image=author_image,
index_folder_path=index_folder_path,
packs_dependencies_mapping=packs_dependencies_mapping,
build_number=build_number, commit_hash=current_commit_hash,
packs_statistic_df=packs_statistic_df,
pack_was_modified=pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
(task_status, skipped_pack_uploading, full_pack_path) = \
pack.upload_to_storage(zip_pack_path, pack.latest_version,
storage_bucket, override_all_packs
or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# if the pack already exists both in the cloud storage path and in the index, don't mark it as changed
if skipped_pack_uploading and exists_in_index:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
upload_core_packs_config(storage_bucket, build_number, index_folder_path)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash)
# upload id_set.json to bucket
upload_id_set(storage_bucket, id_set_path)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
if __name__ == '__main__':
main()
|
import json
import os
import sys
import argparse
import shutil
import uuid
import prettytable
import glob
import requests
import logging
from datetime import datetime
from zipfile import ZipFile
from typing import Any, Tuple, Union
from Tests.Marketplace.marketplace_services import init_storage_client, init_bigquery_client, Pack, PackStatus, \
GCPConfig, PACKS_FULL_PATH, IGNORED_FILES, PACKS_FOLDER, IGNORED_PATHS, Metadata, CONTENT_ROOT_PATH, \
get_packs_statistics_dataframe, BucketUploadFlow, load_json, get_content_git_client, get_recent_commits_data, \
store_successful_and_failed_packs_in_ci_artifacts
from demisto_sdk.commands.common.tools import run_command, str2bool
from Tests.scripts.utils.log_util import install_logging
METADATA_TO_REMOVE = {
'IAM/metadata-1.1.0.json',
'IAM/metadata-1.2.0.json',
'IAM/metadata-1.0.0.json',
'IAM/metadata-1.3.0.json',
'HelloWorldPremium/metadata-1.0.0.json',
'HelloWorldPremium/metadata-1.1.0.json',
'HelloWorldPremium/metadata-1.0.8.json',
'HelloWorldPremium/metadata-1.0.9.json',
}
def get_packs_names(target_packs: str, previous_commit_hash: str = "HEAD^") -> set:
"""Detects and returns packs names to upload.
In case that `Modified` is passed in target_packs input, checks the git difference between two commits,
current and previous and greps only ones with prefix Packs/.
By default this function will receive `All` as target_packs and will return all packs names from content repo.
Args:
target_packs (str): csv packs names or `All` for all available packs in content
or `Modified` for only modified packs (currently not in use).
previous_commit_hash (str): the previous commit to diff with.
Returns:
set: unique collection of packs names to upload.
"""
if target_packs.lower() == "all":
if os.path.exists(PACKS_FULL_PATH):
all_packs = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(all_packs)}")
# return all available packs names
return all_packs
else:
logging.error(f"Folder {PACKS_FOLDER} was not found at the following path: {PACKS_FULL_PATH}")
sys.exit(1)
elif target_packs.lower() == "modified":
cmd = f"git diff --name-only HEAD..{previous_commit_hash} | grep 'Packs/'"
modified_packs_path = run_command(cmd).splitlines()
modified_packs = {p.split('/')[1] for p in modified_packs_path if p not in IGNORED_PATHS}
logging.info(f"Number of modified packs is: {len(modified_packs)}")
# return only modified packs between two commits
return modified_packs
elif target_packs and isinstance(target_packs, str):
modified_packs = {p.strip() for p in target_packs.split(',') if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(modified_packs)}")
# return only packs from csv list
return modified_packs
else:
logging.critical("Not correct usage of flag -p. Please check help section of upload packs script.")
sys.exit(1)
def extract_packs_artifacts(packs_artifacts_path: str, extract_destination_path: str):
"""Extracts all packs from content pack artifact zip.
Args:
packs_artifacts_path (str): full path to content artifacts zip file.
extract_destination_path (str): full path to directory where to extract the packs.
"""
with ZipFile(packs_artifacts_path) as packs_artifacts:
packs_artifacts.extractall(extract_destination_path)
logging.info("Finished extracting packs artifacts")
def download_and_extract_index(storage_bucket: Any, extract_destination_path: str) -> Tuple[str, Any, int]:
"""Downloads and extracts index zip from cloud storage.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
extract_destination_path (str): the full path of extract folder.
Returns:
str: extracted index folder full path.
Blob: google cloud storage object that represents index.zip blob.
str: downloaded index generation.
"""
if storage_bucket.name == GCPConfig.PRODUCTION_PRIVATE_BUCKET:
index_storage_path = os.path.join(GCPConfig.PRIVATE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
else:
index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
download_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip")
index_blob = storage_bucket.blob(index_storage_path)
index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME)
index_generation = 0 # Setting to 0 makes the operation succeed only if there are no live versions of the blob
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
if not index_blob.exists():
os.mkdir(index_folder_path)
logging.error(f"{storage_bucket.name} index blob does not exists")
return index_folder_path, index_blob, index_generation
index_blob.reload()
index_generation = index_blob.generation
index_blob.download_to_filename(download_index_path, if_generation_match=index_generation)
if os.path.exists(download_index_path):
with ZipFile(download_index_path, 'r') as index_zip:
index_zip.extractall(extract_destination_path)
if not os.path.exists(index_folder_path):
logging.critical(f"Failed creating {GCPConfig.INDEX_NAME} folder with extracted data.")
sys.exit(1)
os.remove(download_index_path)
logging.success(f"Finished downloading and extracting {GCPConfig.INDEX_NAME} file to "
f"{extract_destination_path}")
return index_folder_path, index_blob, index_generation
else:
logging.critical(f"Failed to download {GCPConfig.INDEX_NAME}.zip file from cloud storage.")
sys.exit(1)
def update_index_folder(index_folder_path: str, pack_name: str, pack_path: str, pack_version: str = '',
hidden_pack: bool = False) -> bool:
"""
Copies pack folder into index folder.
Args:
index_folder_path (str): full path to index folder.
pack_name (str): pack folder name to copy.
pack_path (str): pack folder full path.
pack_version (str): pack latest version.
hidden_pack (bool): whether pack is hidden/internal or regular pack.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
try:
index_folder_subdirectories = [d for d in os.listdir(index_folder_path) if
os.path.isdir(os.path.join(index_folder_path, d))]
index_pack_path = os.path.join(index_folder_path, pack_name)
metadata_files_in_index = glob.glob(f"{index_pack_path}/metadata-*.json")
new_metadata_path = os.path.join(index_pack_path, f"metadata-{pack_version}.json")
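# metadata-<version>.json files keep a copy of the metadata for every released pack version;
# everything else in the pack's index folder is refreshed below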
if pack_version:
# Update the latest metadata
if new_metadata_path in metadata_files_in_index:
metadata_files_in_index.remove(new_metadata_path)
# Remove old files but keep metadata files
if pack_name in index_folder_subdirectories:
for d in os.scandir(index_pack_path):
if d.path not in metadata_files_in_index:
os.remove(d.path)
# skipping index update in case hidden is set to True
if hidden_pack:
if os.path.exists(index_pack_path):
shutil.rmtree(index_pack_path) # remove pack folder inside index in case that it exists
logging.warning(f"Skipping updating {pack_name} pack files to index")
task_status = True
return True
# Copy new files and add metadata for latest version
for d in os.scandir(pack_path):
if not os.path.exists(index_pack_path):
os.mkdir(index_pack_path)
logging.info(f"Created {pack_name} pack folder in {GCPConfig.INDEX_NAME}")
shutil.copy(d.path, index_pack_path)
if pack_version and Pack.METADATA == d.name:
shutil.copy(d.path, new_metadata_path)
task_status = True
except Exception:
logging.exception(f"Failed in updating index folder for {pack_name} pack.")
finally:
return task_status
def clean_non_existing_packs(index_folder_path: str, private_packs: list, storage_bucket: Any) -> bool:
""" Detects packs that are not part of content repo or from private packs bucket.
In case such packs were detected, problematic pack is deleted from index and from content/packs/{target_pack} path.
Args:
index_folder_path (str): full path to downloaded index folder.
private_packs (list): priced packs from private bucket.
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
Returns:
bool: whether cleanup was skipped or not.
"""
if ('CI' not in os.environ) or (
os.environ.get('CIRCLE_BRANCH') != 'master' and storage_bucket.name == GCPConfig.PRODUCTION_BUCKET) or (
os.environ.get('CIRCLE_BRANCH') == 'master' and storage_bucket.name not in
(GCPConfig.PRODUCTION_BUCKET, GCPConfig.CI_BUILD_BUCKET)):
logging.info("Skipping cleanup of packs in gcs.") # skipping execution of cleanup in gcs bucket
return True
public_packs_names = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
private_packs_names = {p.get('id', '') for p in private_packs}
valid_packs_names = public_packs_names.union(private_packs_names)
# search for invalid packs folder inside index
invalid_packs_names = {(entry.name, entry.path) for entry in os.scandir(index_folder_path) if
entry.name not in valid_packs_names and entry.is_dir()}
if invalid_packs_names:
try:
logging.warning(f"Detected {len(invalid_packs_names)} non-existing packs inside index, starting cleanup.")
for invalid_pack in invalid_packs_names:
invalid_pack_name = invalid_pack[0]
invalid_pack_path = invalid_pack[1]
# remove pack from index
shutil.rmtree(invalid_pack_path)
logging.warning(f"Deleted {invalid_pack_name} pack from {GCPConfig.INDEX_NAME} folder")
# important to add trailing slash at the end of path in order to avoid packs with same prefix
invalid_pack_gcs_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, invalid_pack_name, "") # by design
for invalid_blob in [b for b in storage_bucket.list_blobs(prefix=invalid_pack_gcs_path)]:
logging.warning(f"Deleted invalid {invalid_pack_name} pack under url {invalid_blob.public_url}")
invalid_blob.delete() # delete invalid pack in gcs
except Exception:
logging.exception("Failed to cleanup non existing packs.")
else:
logging.info(f"No invalid packs detected inside {GCPConfig.INDEX_NAME} folder")
return False
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
build_number: str, private_packs: list, current_commit_hash: str,
index_generation: int, is_private: bool = False, force_upload: bool = False,
previous_commit_hash: str = None):
"""
Upload updated index zip to cloud storage.
:param index_folder_path: index folder full path.
:param extract_destination_path: extract folder full path.
:param index_blob: google cloud storage object that represents index.zip blob.
:param build_number: circleCI build number, used as an index revision.
:param private_packs: List of private packs and their price.
:param current_commit_hash: last commit hash of head.
:param index_generation: downloaded index generation.
:param is_private: Indicates if upload is private.
:param force_upload: Indicates if force upload or not.
:param previous_commit_hash: The previous commit hash to diff with.
:returns None.
"""
if force_upload:
# If we force upload we don't want to update the commit in the index.json file,
# this is to be able to identify all changed packs in the next upload
commit = previous_commit_hash
logging.info('Force upload flow - Index commit hash should not be changed')
else:
# Otherwise, update the index with the current commit hash (the commit of the upload)
commit = current_commit_hash
logging.info('Updating production index commit hash to master last commit hash')
logging.debug(f'commit hash is: {commit}')
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json"), "w+") as index_file:
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
'packs': private_packs,
'commit': commit
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(index_folder_path)
# REMOVE AFTER SUCCESSFUL RUN
logging.info("Starting to remove old meta files")
iam_metadata = glob.glob(f"{index_folder_path}/IAM/metadata-*.json")
for iam_meta in iam_metadata:
if any(x in iam_meta for x in METADATA_TO_REMOVE):
logging.info(f"Removing - {iam_meta}")
os.remove(iam_meta)
hwp_metadata = glob.glob(f"{index_folder_path}/HelloWorldPremium/metadata-*.json")
for hwp_meta in hwp_metadata:
if any(x in hwp_meta for x in METADATA_TO_REMOVE):
logging.info(f"Removing - {hwp_meta}")
os.remove(hwp_meta)
index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
index_blob.reload()
current_index_generation = index_blob.generation
index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
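# optimistic locking: upload only if the index generation is unchanged since it was downloaded (private uploads skip this check)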
if is_private or current_index_generation == index_generation:
index_blob.upload_from_filename(index_zip_path)
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
logging.critical(f"Downloaded index generation: {index_generation}")
logging.critical(f"Current index generation: {current_index_generation}")
sys.exit(0)
except Exception:
logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
sys.exit(1)
finally:
shutil.rmtree(index_folder_path)
def upload_core_packs_config(storage_bucket: Any, build_number: str, index_folder_path: str):
"""Uploads corepacks.json file configuration to bucket. Corepacks file includes core packs for server installation.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
build_number (str): circleCI build number.
index_folder_path (str): The index folder path.
"""
core_packs_public_urls = []
found_core_packs = set()
for pack in os.scandir(index_folder_path):
if pack.is_dir() and pack.name in GCPConfig.CORE_PACKS_LIST:
pack_metadata_path = os.path.join(index_folder_path, pack.name, Pack.METADATA)
if not os.path.exists(pack_metadata_path):
logging.critical(f"{pack.name} pack {Pack.METADATA} is missing in {GCPConfig.INDEX_NAME}")
sys.exit(1)
with open(pack_metadata_path, 'r') as metadata_file:
metadata = json.load(metadata_file)
pack_current_version = metadata.get('currentVersion', Pack.PACK_INITIAL_VERSION)
core_pack_relative_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, pack.name,
pack_current_version, f"{pack.name}.zip")
core_pack_public_url = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name, core_pack_relative_path)
if not storage_bucket.blob(core_pack_relative_path).exists():
logging.critical(f"{pack.name} pack does not exist under {core_pack_relative_path} path")
sys.exit(1)
core_packs_public_urls.append(core_pack_public_url)
found_core_packs.add(pack.name)
if len(found_core_packs) != len(GCPConfig.CORE_PACKS_LIST):
missing_core_packs = set(GCPConfig.CORE_PACKS_LIST) ^ found_core_packs
logging.critical(f"Number of defined core packs is: {len(GCPConfig.CORE_PACKS_LIST)}")
logging.critical(f"Actual number of found core packs is: {len(found_core_packs)}")
logging.critical(f"Missing core packs are: {missing_core_packs}")
sys.exit(1)
# construct core pack data with public gcs urls
core_packs_data = {
'corePacks': core_packs_public_urls,
'buildNumber': build_number
}
# upload core pack json file to gcs
core_packs_config_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, GCPConfig.CORE_PACK_FILE_NAME)
blob = storage_bucket.blob(core_packs_config_path)
blob.upload_from_string(json.dumps(core_packs_data, indent=4))
logging.success(f"Finished uploading {GCPConfig.CORE_PACK_FILE_NAME} to storage.")
def upload_id_set(storage_bucket: Any, id_set_local_path: str = None):
"""
Uploads the id_set.json artifact to the bucket.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
id_set_local_path: path to the id_set.json file
"""
if not id_set_local_path:
logging.info("Skipping upload of id set to gcs.")
return
id_set_gcs_path = os.path.join(os.path.dirname(GCPConfig.STORAGE_BASE_PATH), 'id_set.json')
blob = storage_bucket.blob(id_set_gcs_path)
with open(id_set_local_path, mode='r') as f:
blob.upload_from_file(f)
logging.success("Finished uploading id_set.json to storage.")
def _build_summary_table(packs_input_list: list, include_pack_status: bool = False) -> Any:
"""Build summary table from pack list
Args:
packs_input_list (list): list of Packs
include_pack_status (bool): whether to include a status column for each pack
Returns:
PrettyTable: table with upload result of packs.
"""
table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Aggregated Pack Versions"]
if include_pack_status:
table_fields.append("Status")
table = prettytable.PrettyTable()
table.field_names = table_fields
for index, pack in enumerate(packs_input_list, start=1):
pack_status_message = PackStatus[pack.status].value
row = [index, pack.name, pack.display_name, pack.latest_version,
pack.aggregation_str if pack.aggregated and pack.aggregation_str else "False"]
if include_pack_status:
row.append(pack_status_message)
table.add_row(row)
return table
def build_summary_table_md(packs_input_list: list, include_pack_status: bool = False) -> str:
"""Build markdown summary table from pack list
Args:
packs_input_list (list): list of Packs
include_pack_status (bool): whether to include a status column for each pack
Returns:
str: markdown-formatted table with upload result of packs.
"""
table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Status"] if include_pack_status \
else ["Index", "Pack ID", "Pack Display Name", "Latest Version"]
table = ['|', '|']
for key in table_fields:
table[0] = f'{table[0]} {key} |'
table[1] = f'{table[1]} :- |'
for index, pack in enumerate(packs_input_list):
pack_status_message = PackStatus[pack.status].value if include_pack_status else ''
row = [index, pack.name, pack.display_name, pack.latest_version, pack_status_message] if include_pack_status \
else [index, pack.name, pack.display_name, pack.latest_version]
row_hr = '|'
for _value in row:
row_hr = f'{row_hr} {_value}|'
table.append(row_hr)
return '\n'.join(table)
def update_index_with_priced_packs(private_storage_bucket: Any, extract_destination_path: str,
index_folder_path: str, pack_names: set) \
-> Tuple[list, str, Any, list]:
""" Updates index with priced packs and returns list of priced packs data.
Args:
private_storage_bucket (google.cloud.storage.bucket.Bucket): google storage private bucket.
extract_destination_path (str): full path to extract directory.
index_folder_path (str): downloaded index folder directory path.
pack_names (set): Collection of pack names.
Returns:
list: priced packs from the private bucket.
str: extracted private index folder full path.
Blob: google cloud storage object that represents the private index.zip blob.
list: ids of the private packs that were updated.
"""
private_index_path = ""
private_packs = []
updated_private_packs = []
private_index_blob = None  # make sure the name is bound even if downloading the private index fails
try:
(private_index_path, private_index_blob, _) = \
download_and_extract_index(private_storage_bucket,
os.path.join(extract_destination_path,
'private'))
logging.info("get_private_packs")
private_packs = get_private_packs(private_index_path, pack_names,
extract_destination_path)
logging.info("get_updated_private_packs")
updated_private_packs = get_updated_private_packs(private_packs, index_folder_path)
logging.info("add_private_packs_to_index")
add_private_packs_to_index(index_folder_path, private_index_path)
logging.info("Finished updating index with priced packs")
except Exception:
logging.exception('Could not add private packs to the index.')
finally:
shutil.rmtree(os.path.dirname(private_index_path), ignore_errors=True)
return private_packs, private_index_path, private_index_blob, updated_private_packs
def get_updated_private_packs(private_packs, index_folder_path):
""" Checks for updated private packs by compering contentCommitHash between public index json and private pack
metadata files.
Args:
private_packs (list): List of dicts containing pack metadata information.
index_folder_path (str): The public index folder path.
Returns:
updated_private_packs (list): a list of the ids of all private packs that were updated.
"""
updated_private_packs = []
public_index_file_path = os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")
public_index_json = load_json(public_index_file_path)
private_packs_from_public_index = public_index_json.get("packs", {})
for pack in private_packs:
private_pack_id = pack.get('id')
private_commit_hash_from_metadata = pack.get('contentCommitHash', "")
private_commit_hash_from_content_repo = ""
for public_pack in private_packs_from_public_index:
if public_pack.get('id') == private_pack_id:
private_commit_hash_from_content_repo = public_pack.get('contentCommitHash', "")
private_pack_was_updated = private_commit_hash_from_metadata != private_commit_hash_from_content_repo
if private_pack_was_updated:
updated_private_packs.append(private_pack_id)
return updated_private_packs
def get_private_packs(private_index_path: str, pack_names: set = set(),
extract_destination_path: str = '') -> list:
"""
Gets a list of private packs.
:param private_index_path: Path to where the private index is located.
:param pack_names: Collection of pack names.
:param extract_destination_path: Path to where the files should be extracted to.
:return: List of dicts containing pack metadata information.
"""
try:
metadata_files = glob.glob(f"{private_index_path}/**/metadata.json")
except Exception:
logging.exception(f'Could not find metadata files in {private_index_path}.')
return []
if not metadata_files:
logging.warning(f'No metadata files found in [{private_index_path}]')
private_packs = []
for metadata_file_path in metadata_files:
try:
with open(metadata_file_path, "r") as metadata_file:
metadata = json.load(metadata_file)
pack_id = metadata.get('id')
is_changed_private_pack = pack_id in pack_names
if is_changed_private_pack: # Should take metadata from artifacts.
with open(os.path.join(extract_destination_path, pack_id, "pack_metadata.json"),
"r") as metadata_file:
metadata = json.load(metadata_file)
if metadata:
private_packs.append({
'id': metadata.get('id') if not is_changed_private_pack else metadata.get('name'),
'price': metadata.get('price'),
'vendorId': metadata.get('vendorId'),
'vendorName': metadata.get('vendorName'),
'contentCommitHash': metadata.get('contentCommitHash', "")
})
except ValueError:
logging.exception(f'Invalid JSON in the metadata file [{metadata_file_path}].')
return private_packs
def add_private_packs_to_index(index_folder_path: str, private_index_path: str):
""" Add the private packs to the index folder.
Args:
index_folder_path: The index folder path.
private_index_path: The path for the index of the private packs.
"""
for d in os.scandir(private_index_path):
if os.path.isdir(d.path):
update_index_folder(index_folder_path, d.name, d.path)
def check_if_index_is_updated(index_folder_path: str, content_repo: Any, current_commit_hash: str,
previous_commit_hash: str, storage_bucket: Any):
""" Checks stored at index.json commit hash and compares it to current commit hash. In case no packs folders were
added/modified/deleted, all other steps are not performed.
Args:
index_folder_path (str): index folder full path.
content_repo (git.repo.base.Repo): content repo object.
current_commit_hash (str): last commit hash of head.
previous_commit_hash (str): the previous commit to diff with
storage_bucket: public storage bucket.
"""
skipping_build_task_message = "Skipping Upload Packs To Marketplace Storage Step."
try:
if storage_bucket.name not in (GCPConfig.CI_BUILD_BUCKET, GCPConfig.PRODUCTION_BUCKET):
logging.info("Skipping index update check in non production/build bucket")
return
if not os.path.exists(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")):
# will happen only in init bucket run
logging.warning(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
return
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")) as index_file:
index_json = json.load(index_file)
index_commit_hash = index_json.get('commit', previous_commit_hash)
try:
index_commit = content_repo.commit(index_commit_hash)
except Exception:
# an outdated build will hit this exception because its repo does not contain the newer commit
logging.exception(f"Index is already updated. {skipping_build_task_message}")
sys.exit()
current_commit = content_repo.commit(current_commit_hash)
if current_commit.committed_datetime <= index_commit.committed_datetime:
logging.warning(
f"Current commit {current_commit.hexsha} committed time: {current_commit.committed_datetime}")
logging.warning(f"Index commit {index_commit.hexsha} committed time: {index_commit.committed_datetime}")
logging.warning("Index is already updated.")
logging.warning(skipping_build_task_message)
sys.exit()
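# for/else: the else branch runs only when the loop finishes without break, i.e. no changed file under the Packs folder was found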
for changed_file in current_commit.diff(index_commit):
if changed_file.a_path.startswith(PACKS_FOLDER):
logging.info(
f"Found changed packs between index commit {index_commit.hexsha} and {current_commit.hexsha}")
break
else:
logging.warning(f"No changes found between index commit {index_commit.hexsha} and {current_commit.hexsha}")
logging.warning(skipping_build_task_message)
sys.exit()
except Exception:
logging.exception("Failed in checking status of index")
sys.exit(1)
def print_packs_summary(successful_packs: list, skipped_packs: list, failed_packs: list,
fail_build: bool = True):
"""Prints summary of packs uploaded to gcs.
Args:
successful_packs (list): list of packs that were successfully uploaded.
skipped_packs (list): list of packs that were skipped during upload.
failed_packs (list): list of packs that were failed during upload.
fail_build (bool): indicates whether to fail the build upon failing pack to upload or not
"""
logging.info(
f"""\n
------------------------------------------ Packs Upload Summary ------------------------------------------
Total number of packs: {len(successful_packs + skipped_packs + failed_packs)}
----------------------------------------------------------------------------------------------------------""")
if successful_packs:
successful_packs_table = _build_summary_table(successful_packs)
logging.success(f"Number of successful uploaded packs: {len(successful_packs)}")
logging.success(f"Uploaded packs:\n{successful_packs_table}")
with open('pack_list.txt', 'w') as f:
f.write(successful_packs_table.get_string())
if skipped_packs:
skipped_packs_table = _build_summary_table(skipped_packs, include_pack_status=True)
logging.warning(f"Number of skipped packs: {len(skipped_packs)}")
logging.warning(f"Skipped packs:\n{skipped_packs_table}")
if failed_packs:
failed_packs_table = _build_summary_table(failed_packs, include_pack_status=True)
logging.critical(f"Number of failed packs: {len(failed_packs)}")
logging.critical(f"Failed packs:\n{failed_packs_table}")
if fail_build:
# We don't want the bucket upload flow to fail in Prepare Content step if a pack has failed to upload.
sys.exit(1)
# for external pull requests - when there is no failed packs, add the build summary to the pull request
branch_name = os.environ.get('CIRCLE_BRANCH')
if branch_name and branch_name.startswith('pull/'):
successful_packs_table = build_summary_table_md(successful_packs)
build_num = os.environ['CIRCLE_BUILD_NUM']
bucket_path = f'https://console.cloud.google.com/storage/browser/' \
f'marketplace-ci-build/content/builds/{branch_name}/{build_num}'
pr_comment = f'Number of successful uploaded packs: {len(successful_packs)}\n' \
f'Uploaded packs:\n{successful_packs_table}\n\n' \
f'Browse to the build bucket with this address:\n{bucket_path}'
add_pr_comment(pr_comment)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-e', '--extract_path', help="Full path of folder to extract wanted packs", required=True)
parser.add_argument('-b', '--bucket_name', help="Storage bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-i', '--id_set_path', help="The full path of id_set.json", required=False)
parser.add_argument('-d', '--pack_dependencies', help="Full path to pack dependencies json file.", required=False)
parser.add_argument('-p', '--pack_names',
help=("Target packs to upload to gcs. Optional values are: `All`, "
"`Modified` or csv list of packs "
"Default is set to `All`"),
required=False, default="All")
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=False)
parser.add_argument('-o', '--override_all_packs', help="Override all existing packs in cloud storage",
type=str2bool, default=False, required=True)
parser.add_argument('-k', '--key_string', help="Base64 encoded signature key used for signing packs.",
required=False)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-rt', '--remove_test_playbooks', type=str2bool,
help='Should remove test playbooks from content packs or not.', default=True)
parser.add_argument('-bu', '--bucket_upload', help='is bucket upload build?', type=str2bool, required=True)
parser.add_argument('-pb', '--private_bucket_name', help="Private storage bucket name", required=False)
parser.add_argument('-c', '--circle_branch', help="CircleCi branch of current build", required=True)
parser.add_argument('-f', '--force_upload', help="is force upload build?", type=str2bool, required=True)
# disable-secrets-detection-end
return parser.parse_args()
def add_pr_comment(comment: str):
"""Add comment to the pull request.
Args:
comment (string): The comment text.
"""
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = f'?q={sha1}+repo:demisto/content+is:pr+is:open+head:{branch_name}'
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
logging.warning(
f'Add pull request comment failed: There is more than one open pull request for branch {branch_name}.')
except Exception:
logging.exception('Add pull request comment failed.')
def handle_github_response(response: json) -> dict:
"""
Handles the response from the GitHub server after making a request.
:param response: Response from the server.
:return: The returned response.
"""
res_dict = response.json()
if not res_dict.get('ok'):
logging.warning(f'Add pull request comment failed: {res_dict.get("message")}')
return res_dict
def get_packs_summary(packs_list):
""" Returns the packs list divided into 3 lists by their status
Args:
packs_list (list): The full packs list
Returns: 3 lists of packs - successful_packs, skipped_packs & failed_packs
"""
successful_packs = [pack for pack in packs_list if pack.status == PackStatus.SUCCESS.name]
skipped_packs = [pack for pack in packs_list if
pack.status == PackStatus.PACK_ALREADY_EXISTS.name
or pack.status == PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name]
failed_packs = [pack for pack in packs_list if pack not in successful_packs and pack not in skipped_packs]
return successful_packs, skipped_packs, failed_packs
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
id_set_path = option.id_set_path
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
circle_branch = option.circle_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, circle_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket)
# google cloud bigquery client initialized
bq_client = init_bigquery_client(service_account)
packs_statistic_df = get_packs_statistics_dataframe(bq_client)
updated_private_packs_ids = []
if private_bucket_name: # Add private packs to the index
private_storage_bucket = storage_client.bucket(private_bucket_name)
private_packs, _, _, updated_private_packs_ids = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
index_folder_path, pack_names)
else: # skipping private packs
logging.debug("Skipping index update of priced packs")
private_packs = []
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.name
pack.cleanup()
continue
task_status, pack_content_items = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status, integration_images = pack.upload_integration_images(storage_bucket)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status, author_image = pack.upload_author_image(storage_bucket)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, pack_was_modified = pack.detect_modified(content_repo, index_folder_path, current_commit_hash,
previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items,
integration_images=integration_images, author_image=author_image,
index_folder_path=index_folder_path,
packs_dependencies_mapping=packs_dependencies_mapping,
build_number=build_number, commit_hash=current_commit_hash,
packs_statistic_df=packs_statistic_df,
pack_was_modified=pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
(task_status, skipped_pack_uploading, full_pack_path) = \
pack.upload_to_storage(zip_pack_path, pack.latest_version,
storage_bucket, override_all_packs
or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_pack_uploading and exists_in_index:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
upload_core_packs_config(storage_bucket, build_number, index_folder_path)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash)
# upload id_set.json to bucket
upload_id_set(storage_bucket, id_set_path)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
if __name__ == '__main__':
main()
|
import shutil
from typing import Dict
from covid_shared import workflow
import covid_model_seiir_pipeline
from covid_model_seiir_pipeline.pipeline.parameter_fit.specification import FIT_JOBS, FitScenario
class BetaFitTaskTemplate(workflow.TaskTemplate):
tool = workflow.get_jobmon_tool(covid_model_seiir_pipeline)
task_name_template = f"{FIT_JOBS.fit}_{{scenario}}_{{draw_id}}"
command_template = (
f"{shutil.which("stask")} "
f"{FIT_JOBS.fit} "
"--fit-version {fit_version} "
"--scenario {scenario} "
"--draw-id {draw_id} "
"-vv"
)
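# Note (jobmon semantics, as understood here): node_args vary across tasks within a single workflow run
# (one task per scenario/draw), while task_args such as fit_version are shared by every task in the run.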
node_args = ['scenario', 'draw_id']
task_args = ['fit_version']
class FitWorkflow(workflow.WorkflowTemplate):
tool = workflow.get_jobmon_tool(covid_model_seiir_pipeline)
workflow_name_template = 'seiir-oos-fit-{version}'
task_template_classes = {
FIT_JOBS.fit: BetaFitTaskTemplate,
}
def attach_tasks(self, n_draws: int, scenarios: Dict[str, FitScenario]):
fit_template = self.task_templates[FIT_JOBS.fit]
for scenario_name, scenario_spec in scenarios.items():
for draw in range(n_draws):
fit_task = fit_template.get_task(
fit_version=self.version,
draw_id=draw,
scenario=scenario_name,
)
self.workflow.add_task(fit_task)
|
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
from __future__ import annotations
import os
import math
import logging
import datetime
import dateutil
from pyomo.environ import value, Suffix
from egret.common.log import logger as egret_logger
from egret.data.model_data import ModelData
from egret.parsers.prescient_dat_parser import get_uc_model, create_model_data_dict_params
from egret.models.unit_commitment import _time_series_dict, _preallocated_list, _solve_unit_commitment, \
_save_uc_results, create_tight_unit_commitment_model, \
_get_uc_model
from prescient.util import DEFAULT_MAX_LABEL_LENGTH
from prescient.util.math_utils import round_small_values
from prescient.simulator.data_manager import RucMarket
from ..modeling_engine import ForecastErrorMethod
from ..forecast_helper import get_forecastables
from . import reporting
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from prescient.data.data_provider import DataProvider
from prescient.data.simulation_state import SimulationState
from typing import Iterable, Optional
from egret.data.model_data import ModelData as EgretModel
uc_abstract_data_model = get_uc_model()
########################################################################################
# a utility to find the "nearest" - quantified via Euclidean distance - scenario among #
# a candidate set relative to the input scenario, up through and including the #
# specified simulation hour. #
########################################################################################
def call_solver(solver,instance,options,solver_options,relaxed=False, set_instance=True):
tee = options.output_solver_logs
if not tee:
egret_logger.setLevel(logging.WARNING)
symbolic_solver_labels = options.symbolic_solver_labels
mipgap = options.ruc_mipgap
solver_options_dict = dict()
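# solver options are passed as space-separated key=value strings; parse them into a dict, coercing numeric values to float where possible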
for s in solver_options:
opts = s.split(' ')
for opt in opts:
option, val = opt.split('=')
try:
val = float(val)
except:
pass
solver_options_dict[option] = val
m, results, solver = _solve_unit_commitment(instance, solver, mipgap, None,
tee, symbolic_solver_labels,
solver_options_dict, None, relaxed, set_instance=set_instance)
md = _save_uc_results(m, relaxed)
if hasattr(results, 'egret_metasolver_status'):
time = results.egret_metasolver_status['time']
else:
time = results.solver.wallclock_time
return md, time, solver
def _zero_out_costs(sced_model, hours_in_objective):
''' zero out certain costs in a sced model '''
# establish the objective function for the hour to simulate - which is simply to
# minimize production costs during this time period. no fixed costs to worry about.
# however, we do need to penalize load shedding across all time periods - otherwise,
# very bad things happen.
m = sced_model
hours_in = set()
for idx,t in enumerate(m.TimePeriods):
if idx < hours_in_objective:
hours_in.add(t)
else:
break
hours_out = set(m.TimePeriods) - hours_in
for t in hours_out:
for g in m.SingleFuelGenerators:
m.ProductionCostConstr[g,t].deactivate()
m.ProductionCost[g,t].value = 0.
m.ProductionCost[g,t].fix()
for g in m.DualFuelGenerators:
m.DualFuelProductionCost[g,t].expr = 0.
if m.regulation_service:
for g in m.AGC_Generators:
m.RegulationCostGeneration[g,t].expr = 0.
if m.spinning_reserve:
for g in m.ThermalGenerators:
m.SpinningReserveCostGeneration[g,t].expr = 0.
if m.non_spinning_reserve:
for g in m.ThermalGenerators:
m.NonSpinningReserveCostGeneration[g,t].expr = 0.
if m.supplemental_reserve:
for g in m.ThermalGenerators:
m.SupplementalReserveCostGeneration[g,t].expr = 0.
return
# TBD - we probably want to grab topology from somewhere, even if the stochastic
# RUC is not solved with the topology.
def create_sced_instance(data_provider:DataProvider,
current_state:SimulationState,
options,
sced_horizon,
forecast_error_method = ForecastErrorMethod.PRESCIENT
):
''' Create a deterministic economic dispatch instance, given current forecasts and commitments.
'''
assert current_state is not None
sced_md = data_provider.get_initial_model(options, sced_horizon, current_state.minutes_per_step)
# Set initial state
_copy_initial_state_into_model(options, current_state, sced_md)
################################################################################
# initialize the demand and renewables data, based on the forecast error model #
################################################################################
if forecast_error_method is ForecastErrorMethod.PRESCIENT:
# Warning: This method can see into the future!
future_actuals = current_state.get_future_actuals()
sced_forecastables = get_forecastables(sced_md)
for future, (sced_data,) in zip(future_actuals, sced_forecastables):
for t in range(sced_horizon):
sced_data[t] = future[t]
else: # persistent forecast error:
current_actuals = current_state.get_current_actuals()
forecasts = current_state.get_forecasts()
sced_forecastables = get_forecastables(sced_md)
# Go through each time series that can be forecasted
for current_actual, forecast, (sced_data,) in zip(current_actuals, forecasts, sced_forecastables):
# the first value is, by definition, the actual.
sced_data[0] = current_actual
# Find how much the first forecast was off from the actual, as a fraction of
# the forecast. For all subsequent times, adjust the forecast by the same fraction.
current_forecast = forecast[0]
if current_forecast == 0.0:
forecast_error_ratio = 0.0
else:
forecast_error_ratio = current_actual / forecast[0]
for t in range(1, sced_horizon):
sced_data[t] = forecast[t] * forecast_error_ratio
_ensure_reserve_factor_honored(options, sced_md, range(sced_horizon))
# Set generator commitments & future state
for g, g_dict in sced_md.elements(element_type='generator', generator_type='thermal'):
# Start by preparing an empty array of the correct size for each generator
fixed_commitment = [None]*sced_horizon
g_dict['fixed_commitment'] = _time_series_dict(fixed_commitment)
# Now fill it in with data
for t in range(sced_horizon):
fixed_commitment[t] = current_state.get_generator_commitment(g,t)
# Look as far into the future as we can for future startups / shutdowns
last_commitment = fixed_commitment[-1]
for t in range(sced_horizon, current_state.timestep_count):
this_commitment = current_state.get_generator_commitment(g,t)
if (this_commitment - last_commitment) > 0.5:
# future startup
future_status_time_steps = ( t - sced_horizon + 1 )
break
elif (last_commitment - this_commitment) > 0.5:
# future shutdown
future_status_time_steps = -( t - sced_horizon + 1 )
break
else: # no break
future_status_time_steps = 0
g_dict['future_status'] = (current_state.minutes_per_step/60.) * future_status_time_steps
if not options.no_startup_shutdown_curves:
minutes_per_step = current_state.minutes_per_step
for g, g_dict in sced_md.elements(element_type='generator', generator_type='thermal'):
if 'startup_curve' in g_dict:
continue
ramp_up_rate_sced = g_dict['ramp_up_60min'] * minutes_per_step/60.
if 'startup_capacity' not in g_dict:
sced_startup_capacity = _calculate_sced_startup_shutdown_capacity_from_none(
g_dict['p_min'], ramp_up_rate_sced)
else:
sced_startup_capacity = _calculate_sced_startup_shutdown_capacity_from_existing(
g_dict['startup_capacity'], g_dict['p_min'], minutes_per_step)
g_dict['startup_curve'] = [ sced_startup_capacity - i*ramp_up_rate_sced \
for i in range(1,int(math.ceil(sced_startup_capacity/ramp_up_rate_sced))) ]
for g, g_dict in sced_md.elements(element_type='generator', generator_type='thermal'):
if 'shutdown_curve' in g_dict:
continue
ramp_down_rate_sced = g_dict['ramp_down_60min'] * minutes_per_step/60.
# compute a new shutdown curve if we go from "on" to "off"
if g_dict['initial_status'] > 0 and g_dict['fixed_commitment']['values'][0] == 0:
power_t0 = g_dict['initial_p_output']
# if we end up using a historical curve, it's important
# for the time-horizons to match, particularly since this
# function is also used to create long-horizon look-ahead
# SCEDs for the unit commitment process
create_sced_instance.shutdown_curves[g, minutes_per_step] = \
[ power_t0 - i*ramp_down_rate_sced for i in range(1,int(math.ceil(power_t0/ramp_down_rate_sced))) ]
if (g,minutes_per_step) in create_sced_instance.shutdown_curves:
g_dict['shutdown_curve'] = create_sced_instance.shutdown_curves[g,minutes_per_step]
else:
if 'shutdown_capacity' not in g_dict:
sced_shutdown_capacity = _calculate_sced_startup_shutdown_capacity_from_none(
g_dict['p_min'], ramp_down_rate_sced)
else:
sced_shutdown_capacity = _calculate_sced_startup_shutdown_capacity_from_existing(
g_dict['shutdown_capacity'], g_dict['p_min'], minutes_per_step)
g_dict['shutdown_curve'] = [ sced_shutdown_capacity - i*ramp_down_rate_sced \
for i in range(1,int(math.ceil(sced_shutdown_capacity/ramp_down_rate_sced))) ]
if not options.enforce_sced_shutdown_ramprate:
for g, g_dict in sced_md.elements(element_type='generator', generator_type='thermal'):
# make sure the generator can immediately turn off
g_dict['shutdown_capacity'] = max(g_dict['shutdown_capacity'], (60./current_state.minutes_per_step)*g_dict['initial_p_output'] + 1.)
return sced_md
# cache for shutdown curves
create_sced_instance.shutdown_curves = dict()
def _calculate_sced_startup_shutdown_capacity_from_none(p_min, ramp_rate_sced):
if isinstance(p_min, dict):
sced_susd_capacity = [pm+ramp_rate_sced/2. for pm in p_min['values']]
return sum(sced_susd_capacity)/len(sced_susd_capacity)
else:
return p_min + ramp_rate_sced/2.
def _calculate_sced_startup_shutdown_capacity_from_existing(startup_shutdown, p_min, minutes_per_step):
susd_capacity_time_varying = isinstance(startup_shutdown, dict)
p_min_time_varying = isinstance(p_min, dict)
if p_min_time_varying and susd_capacity_time_varying:
sced_susd_capacity = [ (susd - pm)*(minutes_per_step/60.) + pm \
for pm, susd in zip(p_min['values'], startup_shutdown['values']) ]
return sum(sced_susd_capacity)/len(sced_susd_capacity)
elif p_min_time_varying:
sced_susd_capacity = [ (startup_shutdown - pm)*(minutes_per_step/60.) + pm \
for pm in p_min['values'] ]
return sum(sced_susd_capacity)/len(sced_susd_capacity)
elif susd_capacity_time_varying:
sced_susd_capacity = [ (susd - p_min)*(minutes_per_step/60.) + p_min \
for susd in startup_shutdown['values'] ]
return sum(sced_susd_capacity)/len(sced_susd_capacity)
else:
return (startup_shutdown - p_min)*(minutes_per_step/60.) + p_min
##### BEGIN Deterministic RUC solvers and helper functions ########
###################################################################
# utility functions for computing various aspects #
# of a deterministic unit commitment solution. #
###################################################################
## NOTE: in closure for deterministic_ruc_solver_plugin
def create_solve_deterministic_ruc(deterministic_ruc_solver):
def solve_deterministic_ruc(solver, options,
ruc_instance_for_this_period,
this_date,
this_hour,
ptdf_manager):
ruc_instance_for_this_period = deterministic_ruc_solver(ruc_instance_for_this_period, solver, options, ptdf_manager)
if options.write_deterministic_ruc_instances:
current_ruc_filename = options.output_directory + os.sep + str(this_date) + \
os.sep + "ruc_hour_" + str(this_hour) + ".json"
ruc_instance_for_this_period.write(current_ruc_filename)
print("RUC instance written to file=" + current_ruc_filename)
total_cost = ruc_instance_for_this_period.data['system']['total_cost']
print("")
print("Deterministic RUC Cost: {0:.2f}".format(total_cost))
if options.output_ruc_solutions:
print("")
reporting.output_solution_for_deterministic_ruc(
ruc_instance_for_this_period,
options.ruc_every_hours)
print("")
reporting.report_fixed_costs_for_deterministic_ruc(ruc_instance_for_this_period)
reporting.report_generation_costs_for_deterministic_ruc(ruc_instance_for_this_period)
print("")
reporting.report_load_generation_mismatch_for_deterministic_ruc(ruc_instance_for_this_period)
print("")
reporting.report_curtailment_for_deterministic_ruc(ruc_instance_for_this_period)
return ruc_instance_for_this_period
return solve_deterministic_ruc
def _solve_deterministic_ruc(deterministic_ruc_instance,
solver,
options,
ptdf_manager):
ptdf_manager.mark_active(deterministic_ruc_instance)
pyo_model = create_tight_unit_commitment_model(deterministic_ruc_instance,
ptdf_options=ptdf_manager.ruc_ptdf_options,
PTDF_matrix_dict=ptdf_manager.PTDF_matrix_dict)
# update in case lines were taken out
ptdf_manager.PTDF_matrix_dict = pyo_model._PTDFs
try:
ruc_results, pyo_results, _ = call_solver(solver,
pyo_model,
options,
options.deterministic_ruc_solver_options)
except:
print("Failed to solve deterministic RUC instance - likely because no feasible solution exists!")
output_filename = "bad_ruc.json"
deterministic_ruc_instance.write(output_filename)
print("Wrote failed RUC model to file=" + output_filename)
raise
ptdf_manager.update_active(ruc_results)
return ruc_results
## create this function with default solver
solve_deterministic_ruc = create_solve_deterministic_ruc(_solve_deterministic_ruc)
# utilities for creating a deterministic RUC instance, and a standard way to solve them.
def create_deterministic_ruc(options,
data_provider:DataProvider,
this_date,
this_hour,
current_state:SimulationState,
ruc_horizon,
use_next_day_in_ruc):
ruc_every_hours = options.ruc_every_hours
start_day = this_date
start_time = datetime.datetime.combine(start_day, datetime.time(hour=this_hour))
# Create a new model
md = data_provider.get_initial_model(options, ruc_horizon, 60)
# Populate the T0 data
if current_state is None or current_state.timestep_count == 0:
data_provider.populate_initial_state_data(options, start_day, md)
else:
_copy_initial_state_into_model(options, current_state, md)
# Populate forecasts
copy_first_day = (not use_next_day_in_ruc) and (this_hour != 0)
forecast_request_count = 24 if copy_first_day else ruc_horizon
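# when the RUC starts mid-day and next-day data is not used, only the first 24 hours of forecasts are pulled;
# the remainder of the horizon is filled by repeating day one (see the copy block at the end of this function)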
data_provider.populate_with_forecast_data(options, start_time, forecast_request_count,
60, md)
# Make some near-term forecasts more accurate
ruc_delay = -(options.ruc_execution_hour%(-options.ruc_every_hours))
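# ruc_delay: hours between when the RUC is solved (ruc_execution_hour) and when its commitments take effect;
# the negative-modulus form rounds up to the next ruc_every_hours boundary (e.g. hour 16 with a 24-hour cycle -> 8)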
if options.ruc_prescience_hour > ruc_delay + 1:
improved_hour_count = options.ruc_prescience_hour - ruc_delay - 1
for forecast, actuals in zip(get_forecastables(md),
current_state.get_future_actuals()):
for t in range(0, improved_hour_count):
forecast_portion = (ruc_delay+t)/options.ruc_prescience_hour
actuals_portion = 1-forecast_portion
forecast[t] = forecast_portion*forecast[t] + actuals_portion*actuals[t]
# Ensure the reserve requirement is satisfied
_ensure_reserve_factor_honored(options, md, range(forecast_request_count))
# Copy from first 24 to second 24, if necessary
if copy_first_day:
for vals, in get_forecastables(md):
for t in range(24, ruc_horizon):
vals[t] = vals[t-24]
return md
###### END Deterministic RUC solvers and helper functions #########
def create_pricing_model(model_data,
network_constraints='ptdf_power_flow',
relaxed=True,
**kwargs):
'''
Create a model appropriate for pricing
'''
formulation_list = [
'garver_3bin_vars',
'garver_power_vars',
'MLR_reserve_vars',
'pan_guan_gentile_KOW_generation_limits',
'damcikurt_ramping',
'KOW_production_costs_tightened',
'rajan_takriti_UT_DT',
'KOW_startup_costs',
network_constraints,
]
return _get_uc_model(model_data, formulation_list, relaxed, **kwargs)
def _get_fixed_if_off(cur_commit, cur_fixed):
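# For ELMP pricing: build a fixed_commitment time series that pins the unit to 0 wherever the RUC left it off,
# and leaves on-periods unfixed (None) unless they were already fixed.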
cur_commit = cur_commit['values']
if cur_fixed is None:
cur_fixed = [None for _ in cur_commit]
elif isinstance(cur_fixed, dict):
cur_fixed = cur_fixed['values']
else:
cur_fixed = [cur_fixed for _ in cur_commit]
new_fixed = [None]*len(cur_commit)
for idx, (fixed, committed) in enumerate(zip(cur_fixed, cur_commit)):
if fixed is None:
new_fixed[idx] = None if committed == 1 else 0
else:
new_fixed[idx] = fixed
return {'data_type':'time_series', 'values':new_fixed}
def solve_deterministic_day_ahead_pricing_problem(solver, ruc_results, options, ptdf_manager):
## create a copy because we want to maintain the solution data
## in ruc_results
pricing_type = options.day_ahead_pricing
print("Computing day-ahead prices using method "+pricing_type+".")
pricing_instance = ruc_results.clone()
if pricing_type == "LMP":
for g, g_dict in pricing_instance.elements(element_type='generator', generator_type='thermal'):
g_dict['fixed_commitment'] = g_dict['commitment']
if 'reg_provider' in g_dict:
g_dict['fixed_regulation'] = g_dict['reg_provider']
## TODO: add fixings for storage; need hooks in EGRET
elif pricing_type == "ELMP":
## for ELMP we fix all commitment binaries that were 0 in the RUC solve
time_periods = pricing_instance.data['system']['time_keys']
for g, g_dict in pricing_instance.elements(element_type='generator', generator_type='thermal'):
g_dict['fixed_commitment'] = _get_fixed_if_off(g_dict['commitment'],
g_dict.get('fixed_commitment', None))
if 'reg_provider' in g_dict:
g_dict['fixed_regulation'] = _get_fixed_if_off(g_dict['reg_provider'],
g_dict.get('fixed_regulation', None))
## TODO: add fixings for storage; need hooks in EGRET
elif pricing_type == "aCHP":
# don't do anything
pass
else:
raise RuntimeError("Unknown pricing type "+pricing_type+".")
## change the penalty prices to the caps, if necessary
reserve_requirement = ('reserve_requirement' in pricing_instance.data['system'])
# In case of demand shortfall, the price skyrockets, so we threshold the value.
if pricing_instance.data['system']['load_mismatch_cost'] > options.price_threshold:
pricing_instance.data['system']['load_mismatch_cost'] = options.price_threshold
# In case of reserve shortfall, the price skyrockets, so we threshold the value.
if reserve_requirement:
if pricing_instance.data['system']['reserve_shortfall_cost'] > options.reserve_price_threshold:
pricing_instance.data['system']['reserve_shortfall_cost'] = options.reserve_price_threshold
ptdf_manager.mark_active(pricing_instance)
pyo_model = create_pricing_model(pricing_instance, relaxed=True,
ptdf_options=ptdf_manager.damarket_ptdf_options,
PTDF_matrix_dict=ptdf_manager.PTDF_matrix_dict)
pyo_model.dual = Suffix(direction=Suffix.IMPORT)
try:
## TODO: Should there be separate options for this run?
pricing_results, _, _ = call_solver(solver,
pyo_model,
options,
options.deterministic_ruc_solver_options,
relaxed=True)
except:
print("Failed to solve pricing instance - likely because no feasible solution exists!")
output_filename = "bad_pricing.json"
pricing_instance.write(output_filename)
print("Wrote failed RUC model to file=" + output_filename)
raise
ptdf_manager.update_active(pricing_results)
## Debugging
if pricing_results.data['system']['total_cost'] > ruc_results.data['system']['total_cost']*(1.+1.e-06):
print("The pricing run had a higher objective value than the MIP run. This is indicative of a bug.")
print("Writing LP pricing_problem.json")
output_filename = 'pricing_instance.json'
pricing_results.write(output_filename)
output_filename = 'ruc_results.json'
ruc_results.write(output_filename)
raise RuntimeError("Halting due to bug in pricing.")
day_ahead_prices = {}
for b, b_dict in pricing_results.elements(element_type='bus'):
for t,lmp in enumerate(b_dict['lmp']['values']):
day_ahead_prices[b,t] = lmp
if reserve_requirement:
day_ahead_reserve_prices = {}
for t,price in enumerate(pricing_results.data['system']['reserve_price']['values']):
# Thresholding the value of the reserve price to the passed in option
day_ahead_reserve_prices[t] = price
print("Recalculating RUC reserve procurement")
## scale the provided reserves by the amount over we are
thermal_reserve_cleared_DA = {}
g_reserve_values = { g : g_dict['rg']['values'] for g, g_dict in ruc_results.elements(element_type='generator', generator_type='thermal') }
reserve_shortfall = ruc_results.data['system']['reserve_shortfall']['values']
reserve_requirement = ruc_results.data['system']['reserve_requirement']['values']
for t in range(0,options.ruc_every_hours):
reserve_provided_t = sum(reserve_vals[t] for reserve_vals in g_reserve_values.values())
reserve_shortfall_t = reserve_shortfall[t]
reserve_requirement_t = reserve_requirement[t]
surplus_reserves_t = reserve_provided_t + reserve_shortfall_t - reserve_requirement_t
## if there's a shortfall, grab the full amount from the RUC solve
## or if there's no provided reserves this can safely be set to 1.
if round_small_values(reserve_shortfall_t) > 0 or reserve_provided_t == 0:
surplus_multiple_t = 1.
else:
## scale the reserves from the RUC down by the same fraction
## so that they exactly meet the needed reserves
surplus_multiple_t = reserve_requirement_t/reserve_provided_t
for g, reserve_vals in g_reserve_values.items():
thermal_reserve_cleared_DA[g,t] = reserve_vals[t]*surplus_multiple_t
else:
day_ahead_reserve_prices = { t : 0. for t,_ in enumerate(ruc_results.data['system']['time_keys']) }
thermal_reserve_cleared_DA = { (g,t) : 0. \
for t,_ in enumerate(ruc_results.data['system']['time_keys']) \
for g,_ in ruc_results.elements(element_type='generator', generator_type='thermal') }
thermal_gen_cleared_DA = {}
renewable_gen_cleared_DA = {}
for g, g_dict in ruc_results.elements(element_type='generator'):
pg = g_dict['pg']['values']
if g_dict['generator_type'] == 'thermal':
store_dict = thermal_gen_cleared_DA
elif g_dict['generator_type'] == 'renewable':
store_dict = renewable_gen_cleared_DA
else:
raise RuntimeError(f"Unrecognized generator type {g_dict["generator_type"]}")
for t in range(0,options.ruc_every_hours):
store_dict[g,t] = pg[t]
return RucMarket(day_ahead_prices=day_ahead_prices,
day_ahead_reserve_prices=day_ahead_reserve_prices,
thermal_gen_cleared_DA=thermal_gen_cleared_DA,
thermal_reserve_cleared_DA=thermal_reserve_cleared_DA,
renewable_gen_cleared_DA=renewable_gen_cleared_DA)
def create_simulation_actuals(
options:Options, data_provider:DataProvider,
this_date:datetime.date, this_hour:int,
step_size_minutes:int) -> EgretModel:
''' Get an Egret model consisting of data to be treated as actuals, starting at a given time.
Parameters
----------
options:Options
Global option values
data_provider: DataProvider
An object that can provide actual and/or forecast data for the requested days
this_date: date
The date of the first time step for which data should be retrieved
this_hour: int
0-based index of the first hour of the day for which data should be retrieved
step_size_minutes: int
The number of minutes between each time step
'''
# Convert time string to time
start_time = datetime.datetime.combine(this_date,
datetime.time(hour=this_hour))
# Pick whether we're getting actuals or forecasts
if options.simulate_out_of_sample:
get_data_func = data_provider.populate_with_actuals
else:
print("")
print("***WARNING: Simulating the forecast scenario when running deterministic RUC - "
"time consistency across midnight boundaries is not guaranteed, and may lead to threshold events.")
get_data_func = data_provider.populate_with_forecast_data
# Get a new model
total_step_count = options.ruc_horizon * 60 // step_size_minutes
md = data_provider.get_initial_model(options, total_step_count, step_size_minutes)
# Fill it in with data
data_provider.populate_initial_state_data(options, start_time.date(), md)
if this_hour == 0:
get_data_func(options, start_time, total_step_count, step_size_minutes, md)
else:
# only get up to 24 hours of data, then copy it
        timesteps_per_day = 24 * 60 // step_size_minutes
        steps_to_request = min(timesteps_per_day, total_step_count)
get_data_func(options, start_time, steps_to_request, step_size_minutes, md)
for vals, in get_forecastables(md):
for t in range(timesteps_per_day, total_step_count):
vals[t] = vals[t-timesteps_per_day]
return md
def _ensure_reserve_factor_honored(options:Options, md:EgretModel, time_periods:Iterable[int]) -> None:
''' Adjust reserve requirements to satisfy the reserve factor.
For each time period in time_periods, ensure that the reserve requirement is no less than
the total load for that time period multiplied by the reserve factor. If the reserve
requirement for a time is too low it is raised, otherwise it is left alone.
'''
if options.reserve_factor > 0:
reserve_factor = options.reserve_factor
reserve_reqs = md.data['system']['reserve_requirement']['values']
for t in time_periods:
total_load = sum(bdata['p_load']['values'][t]
for bus, bdata in md.elements('load'))
min_reserve = reserve_factor*total_load
if reserve_reqs[t] < min_reserve:
reserve_reqs[t] = min_reserve
def _copy_initial_state_into_model(options:Options,
current_state:SimulationState,
md:EgretModel):
for g, g_dict in md.elements('generator', generator_type='thermal'):
g_dict['initial_status'] = current_state.get_initial_generator_state(g)
g_dict['initial_p_output'] = current_state.get_initial_power_generated(g)
for s,s_dict in md.elements('storage'):
s_dict['initial_state_of_charge'] = current_state.get_initial_state_of_charge(s)
|
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
from __future__ import annotations
import os
import math
import logging
import datetime
import dateutil
from pyomo.environ import value, Suffix
from egret.common.log import logger as egret_logger
from egret.data.model_data import ModelData
from egret.parsers.prescient_dat_parser import get_uc_model, create_model_data_dict_params
from egret.models.unit_commitment import _time_series_dict, _preallocated_list, _solve_unit_commitment, \
_save_uc_results, create_tight_unit_commitment_model, \
_get_uc_model
from prescient.util import DEFAULT_MAX_LABEL_LENGTH
from prescient.util.math_utils import round_small_values
from prescient.simulator.data_manager import RucMarket
from ..modeling_engine import ForecastErrorMethod
from ..forecast_helper import get_forecastables
from . import reporting
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from prescient.data.data_provider import DataProvider
from prescient.data.simulation_state import SimulationState
    from typing import Iterable, Optional
from egret.data.model_data import ModelData as EgretModel
uc_abstract_data_model = get_uc_model()
########################################################################################
# a utility to find the "nearest" - quantified via Euclidean distance - scenario among #
# a candidate set relative to the input scenario, up through and including the #
# specified simulation hour. #
########################################################################################
def call_solver(solver,instance,options,solver_options,relaxed=False, set_instance=True):
tee = options.output_solver_logs
if not tee:
egret_logger.setLevel(logging.WARNING)
symbolic_solver_labels = options.symbolic_solver_labels
mipgap = options.ruc_mipgap
solver_options_dict = dict()
for s in solver_options:
opts = s.split(' ')
for opt in opts:
option, val = opt.split('=')
try:
val = float(val)
except:
pass
solver_options_dict[option] = val
m, results, solver = _solve_unit_commitment(instance, solver, mipgap, None,
tee, symbolic_solver_labels,
solver_options_dict, None, relaxed, set_instance=set_instance)
md = _save_uc_results(m, relaxed)
if hasattr(results, 'egret_metasolver_status'):
time = results.egret_metasolver_status['time']
else:
time = results.solver.wallclock_time
return md, time, solver
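# Illustrative sketch (not part of the original module): each entry of
# ``solver_options`` passed to call_solver is a space-separated string of
# "name=value" pairs, which the loop above flattens into one dict, coercing
# numeric values to float. The option names below are hypothetical examples.
def _example_parse_solver_options(solver_options=("threads=2 mipgap=0.01",)):
    parsed = {}
    for s in solver_options:
        for opt in s.split(' '):
            option, val = opt.split('=')
            try:
                val = float(val)
            except ValueError:
                pass
            parsed[option] = val
    return parsed  # e.g. {'threads': 2.0, 'mipgap': 0.01}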
def _zero_out_costs(sced_model, hours_in_objective):
''' zero out certain costs in a sced model '''
# establish the objective function for the hour to simulate - which is simply to
# minimize production costs during this time period. no fixed costs to worry about.
# however, we do need to penalize load shedding across all time periods - otherwise,
# very bad things happen.
m = sced_model
hours_in = set()
for idx,t in enumerate(m.TimePeriods):
if idx < hours_in_objective:
hours_in.add(t)
else:
break
hours_out = set(m.TimePeriods) - hours_in
for t in hours_out:
for g in m.SingleFuelGenerators:
m.ProductionCostConstr[g,t].deactivate()
m.ProductionCost[g,t].value = 0.
m.ProductionCost[g,t].fix()
for g in m.DualFuelGenerators:
m.DualFuelProductionCost[g,t].expr = 0.
if m.regulation_service:
for g in m.AGC_Generators:
m.RegulationCostGeneration[g,t].expr = 0.
if m.spinning_reserve:
for g in m.ThermalGenerators:
m.SpinningReserveCostGeneration[g,t].expr = 0.
if m.non_spinning_reserve:
for g in m.ThermalGenerators:
m.NonSpinningReserveCostGeneration[g,t].expr = 0.
if m.supplemental_reserve:
for g in m.ThermalGenerators:
m.SupplementalReserveCostGeneration[g,t].expr = 0.
return
# TBD - we probably want to grab topology from somewhere, even if the stochastic
# RUC is not solved with the topology.
def create_sced_instance(data_provider:DataProvider,
current_state:SimulationState,
options,
sced_horizon,
forecast_error_method = ForecastErrorMethod.PRESCIENT
):
''' Create a deterministic economic dispatch instance, given current forecasts and commitments.
'''
assert current_state is not None
sced_md = data_provider.get_initial_model(options, sced_horizon, current_state.minutes_per_step)
# Set initial state
_copy_initial_state_into_model(options, current_state, sced_md)
################################################################################
# initialize the demand and renewables data, based on the forecast error model #
################################################################################
if forecast_error_method is ForecastErrorMethod.PRESCIENT:
# Warning: This method can see into the future!
future_actuals = current_state.get_future_actuals()
        sced_forecastables = get_forecastables(sced_md)
        for future, (sced_data,) in zip(future_actuals, sced_forecastables):
for t in range(sced_horizon):
sced_data[t] = future[t]
else: # persistent forecast error:
current_actuals = current_state.get_current_actuals()
forecasts = current_state.get_forecasts()
sced_forecastables = get_forecastables(sced_md)
# Go through each time series that can be forecasted
for current_actual, forecast, (sced_data,) in zip(current_actuals, forecasts, sced_forecastables):
# the first value is, by definition, the actual.
sced_data[0] = current_actual
# Find how much the first forecast was off from the actual, as a fraction of
# the forecast. For all subsequent times, adjust the forecast by the same fraction.
current_forecast = forecast[0]
if current_forecast == 0.0:
forecast_error_ratio = 0.0
else:
forecast_error_ratio = current_actual / forecast[0]
for t in range(1, sced_horizon):
sced_data[t] = forecast[t] * forecast_error_ratio
_ensure_reserve_factor_honored(options, sced_md, range(sced_horizon))
# Set generator commitments & future state
for g, g_dict in sced_md.elements(element_type='generator', generator_type='thermal'):
# Start by preparing an empty array of the correct size for each generator
fixed_commitment = [None]*sced_horizon
g_dict['fixed_commitment'] = _time_series_dict(fixed_commitment)
# Now fill it in with data
for t in range(sced_horizon):
fixed_commitment[t] = current_state.get_generator_commitment(g,t)
# Look as far into the future as we can for future startups / shutdowns
last_commitment = fixed_commitment[-1]
for t in range(sced_horizon, current_state.timestep_count):
this_commitment = current_state.get_generator_commitment(g,t)
if (this_commitment - last_commitment) > 0.5:
# future startup
future_status_time_steps = ( t - sced_horizon + 1 )
break
elif (last_commitment - this_commitment) > 0.5:
# future shutdown
future_status_time_steps = -( t - sced_horizon + 1 )
break
else: # no break
future_status_time_steps = 0
g_dict['future_status'] = (current_state.minutes_per_step/60.) * future_status_time_steps
if not options.no_startup_shutdown_curves:
minutes_per_step = current_state.minutes_per_step
for g, g_dict in sced_md.elements(element_type='generator', generator_type='thermal'):
if 'startup_curve' in g_dict:
continue
ramp_up_rate_sced = g_dict['ramp_up_60min'] * minutes_per_step/60.
if 'startup_capacity' not in g_dict:
sced_startup_capacity = _calculate_sced_startup_shutdown_capacity_from_none(
g_dict['p_min'], ramp_up_rate_sced)
else:
sced_startup_capacity = _calculate_sced_startup_shutdown_capacity_from_existing(
g_dict['startup_capacity'], g_dict['p_min'], minutes_per_step)
g_dict['startup_curve'] = [ sced_startup_capacity - i*ramp_up_rate_sced \
for i in range(1,int(math.ceil(sced_startup_capacity/ramp_up_rate_sced))) ]
for g, g_dict in sced_md.elements(element_type='generator', generator_type='thermal'):
if 'shutdown_curve' in g_dict:
continue
ramp_down_rate_sced = g_dict['ramp_down_60min'] * minutes_per_step/60.
# compute a new shutdown curve if we go from "on" to "off"
if g_dict['initial_status'] > 0 and g_dict['fixed_commitment']['values'][0] == 0:
power_t0 = g_dict['initial_p_output']
# if we end up using a historical curve, it's important
# for the time-horizons to match, particularly since this
# function is also used to create long-horizon look-ahead
# SCEDs for the unit commitment process
create_sced_instance.shutdown_curves[g, minutes_per_step] = \
[ power_t0 - i*ramp_down_rate_sced for i in range(1,int(math.ceil(power_t0/ramp_down_rate_sced))) ]
if (g,minutes_per_step) in create_sced_instance.shutdown_curves:
g_dict['shutdown_curve'] = create_sced_instance.shutdown_curves[g,minutes_per_step]
else:
if 'shutdown_capacity' not in g_dict:
sced_shutdown_capacity = _calculate_sced_startup_shutdown_capacity_from_none(
g_dict['p_min'], ramp_down_rate_sced)
else:
sced_shutdown_capacity = _calculate_sced_startup_shutdown_capacity_from_existing(
g_dict['shutdown_capacity'], g_dict['p_min'], minutes_per_step)
g_dict['shutdown_curve'] = [ sced_shutdown_capacity - i*ramp_down_rate_sced \
for i in range(1,int(math.ceil(sced_shutdown_capacity/ramp_down_rate_sced))) ]
if not options.enforce_sced_shutdown_ramprate:
for g, g_dict in sced_md.elements(element_type='generator', generator_type='thermal'):
# make sure the generator can immediately turn off
g_dict['shutdown_capacity'] = max(g_dict['shutdown_capacity'], (60./current_state.minutes_per_step)*g_dict['initial_p_output'] + 1.)
return sced_md
# cache for shutdown curves
create_sced_instance.shutdown_curves = dict()
def _calculate_sced_startup_shutdown_capacity_from_none(p_min, ramp_rate_sced):
if isinstance(p_min, dict):
sced_susd_capacity = [pm+ramp_rate_sced/2. for pm in p_min['values']]
return sum(sced_susd_capacity)/len(sced_susd_capacity)
else:
return p_min + ramp_rate_sced/2.
def _calculate_sced_startup_shutdown_capacity_from_existing(startup_shutdown, p_min, minutes_per_step):
susd_capacity_time_varying = isinstance(startup_shutdown, dict)
p_min_time_varying = isinstance(p_min, dict)
if p_min_time_varying and susd_capacity_time_varying:
sced_susd_capacity = [ (susd - pm)*(minutes_per_step/60.) + pm \
for pm, susd in zip(p_min['values'], startup_shutdown['values']) ]
return sum(sced_susd_capacity)/len(sced_susd_capacity)
elif p_min_time_varying:
sced_susd_capacity = [ (startup_shutdown - pm)*(minutes_per_step/60.) + pm \
for pm in p_min['values'] ]
return sum(sced_susd_capacity)/len(sced_susd_capacity)
elif susd_capacity_time_varying:
sced_susd_capacity = [ (susd - p_min)*(minutes_per_step/60.) + p_min \
for susd in startup_shutdown['values'] ]
return sum(sced_susd_capacity)/len(sced_susd_capacity)
else:
return (startup_shutdown - p_min)*(minutes_per_step/60.) + p_min
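# Worked example (illustrative, not part of the original module): the helper above
# scales an hourly startup/shutdown capacity down to one SCED time step, keeping
# p_min as the floor. With a 100 MW hourly startup capacity, a 40 MW p_min, and
# 15-minute steps, the scaled capacity is (100 - 40)*(15/60) + 40 = 55 MW.
def _example_scaled_startup_capacity():
    return _calculate_sced_startup_shutdown_capacity_from_existing(100., 40., 15)  # == 55.0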
##### BEGIN Deterministic RUC solvers and helper functions ########
###################################################################
# utility functions for computing various aspects #
# of a deterministic unit commitment solution. #
###################################################################
## NOTE: in closure for deterministic_ruc_solver_plugin
def create_solve_deterministic_ruc(deterministic_ruc_solver):
def solve_deterministic_ruc(solver, options,
ruc_instance_for_this_period,
this_date,
this_hour,
ptdf_manager):
ruc_instance_for_this_period = deterministic_ruc_solver(ruc_instance_for_this_period, solver, options, ptdf_manager)
if options.write_deterministic_ruc_instances:
current_ruc_filename = options.output_directory + os.sep + str(this_date) + \
os.sep + "ruc_hour_" + str(this_hour) + ".json"
ruc_instance_for_this_period.write(current_ruc_filename)
print("RUC instance written to file=" + current_ruc_filename)
total_cost = ruc_instance_for_this_period.data['system']['total_cost']
print("")
print("Deterministic RUC Cost: {0:.2f}".format(total_cost))
if options.output_ruc_solutions:
print("")
reporting.output_solution_for_deterministic_ruc(
ruc_instance_for_this_period,
options.ruc_every_hours)
print("")
reporting.report_fixed_costs_for_deterministic_ruc(ruc_instance_for_this_period)
reporting.report_generation_costs_for_deterministic_ruc(ruc_instance_for_this_period)
print("")
reporting.report_load_generation_mismatch_for_deterministic_ruc(ruc_instance_for_this_period)
print("")
reporting.report_curtailment_for_deterministic_ruc(ruc_instance_for_this_period)
return ruc_instance_for_this_period
return solve_deterministic_ruc
def _solve_deterministic_ruc(deterministic_ruc_instance,
solver,
options,
ptdf_manager):
ptdf_manager.mark_active(deterministic_ruc_instance)
pyo_model = create_tight_unit_commitment_model(deterministic_ruc_instance,
ptdf_options=ptdf_manager.ruc_ptdf_options,
PTDF_matrix_dict=ptdf_manager.PTDF_matrix_dict)
# update in case lines were taken out
ptdf_manager.PTDF_matrix_dict = pyo_model._PTDFs
try:
ruc_results, pyo_results, _ = call_solver(solver,
pyo_model,
options,
options.deterministic_ruc_solver_options)
except:
print("Failed to solve deterministic RUC instance - likely because no feasible solution exists!")
output_filename = "bad_ruc.json"
deterministic_ruc_instance.write(output_filename)
print("Wrote failed RUC model to file=" + output_filename)
raise
ptdf_manager.update_active(ruc_results)
return ruc_results
## create this function with default solver
solve_deterministic_ruc = create_solve_deterministic_ruc(_solve_deterministic_ruc)
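# Sketch (hypothetical, not part of the original module): an alternative RUC solver
# only needs the (instance, solver, options, ptdf_manager) signature used by
# _solve_deterministic_ruc above; wrapping it with create_solve_deterministic_ruc
# reuses the shared instance-writing and reporting logic.
def _example_custom_ruc_solver(deterministic_ruc_instance, solver, options, ptdf_manager):
    # A real implementation would build and solve its own model here and return an
    # Egret results object; this sketch simply defers to the default solver.
    return _solve_deterministic_ruc(deterministic_ruc_instance, solver, options, ptdf_manager)
example_solve_deterministic_ruc = create_solve_deterministic_ruc(_example_custom_ruc_solver)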
# utilities for creating a deterministic RUC instance, and a standard way to solve them.
def create_deterministic_ruc(options,
data_provider:DataProvider,
this_date,
this_hour,
current_state:SimulationState,
ruc_horizon,
use_next_day_in_ruc):
ruc_every_hours = options.ruc_every_hours
start_day = this_date
start_time = datetime.datetime.combine(start_day, datetime.time(hour=this_hour))
# Create a new model
md = data_provider.get_initial_model(options, ruc_horizon, 60)
# Populate the T0 data
if current_state is None or current_state.timestep_count == 0:
data_provider.populate_initial_state_data(options, start_day, md)
else:
_copy_initial_state_into_model(options, current_state, md)
# Populate forecasts
copy_first_day = (not use_next_day_in_ruc) and (this_hour != 0)
forecast_request_count = 24 if copy_first_day else ruc_horizon
data_provider.populate_with_forecast_data(options, start_time, forecast_request_count,
60, md)
# Make some near-term forecasts more accurate
ruc_delay = -(options.ruc_execution_hour%(-options.ruc_every_hours))
if options.ruc_prescience_hour > ruc_delay + 1:
improved_hour_count = options.ruc_prescience_hour - ruc_delay - 1
        for (forecast,), actuals in zip(get_forecastables(md),
                                        current_state.get_future_actuals()):
for t in range(0, improved_hour_count):
forecast_portion = (ruc_delay+t)/options.ruc_prescience_hour
actuals_portion = 1-forecast_portion
forecast[t] = forecast_portion*forecast[t] + actuals_portion*actuals[t]
# Ensure the reserve requirement is satisfied
_ensure_reserve_factor_honored(options, md, range(forecast_request_count))
# Copy from first 24 to second 24, if necessary
if copy_first_day:
for vals, in get_forecastables(md):
for t in range(24, ruc_horizon):
vals[t] = vals[t-24]
return md
###### END Deterministic RUC solvers and helper functions #########
def create_pricing_model(model_data,
network_constraints='ptdf_power_flow',
relaxed=True,
**kwargs):
'''
Create a model appropriate for pricing
'''
formulation_list = [
'garver_3bin_vars',
'garver_power_vars',
'MLR_reserve_vars',
'pan_guan_gentile_KOW_generation_limits',
'damcikurt_ramping',
'KOW_production_costs_tightened',
'rajan_takriti_UT_DT',
'KOW_startup_costs',
network_constraints,
]
return _get_uc_model(model_data, formulation_list, relaxed, **kwargs)
def _get_fixed_if_off(cur_commit, cur_fixed):
cur_commit = cur_commit['values']
if cur_fixed is None:
cur_fixed = [None for _ in cur_commit]
elif isinstance(cur_fixed, dict):
cur_fixed = cur_fixed['values']
else:
cur_fixed = [cur_fixed for _ in cur_commit]
new_fixed = [None]*len(cur_commit)
for idx, (fixed, committed) in enumerate(zip(cur_fixed, cur_commit)):
if fixed is None:
new_fixed[idx] = None if committed == 1 else 0
else:
new_fixed[idx] = fixed
return {'data_type':'time_series', 'values':new_fixed}
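# Illustrative check (not part of the original module): _get_fixed_if_off fixes to 0
# every period in which the unit was off in the RUC solve, and leaves committed
# periods free (None) unless an explicit fixing already existed.
def _example_get_fixed_if_off():
    commitment = {'data_type': 'time_series', 'values': [1, 0, 1, 0]}
    fixed = _get_fixed_if_off(commitment, None)
    assert fixed['values'] == [None, 0, None, 0]
    return fixed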
def solve_deterministic_day_ahead_pricing_problem(solver, ruc_results, options, ptdf_manager):
## create a copy because we want to maintain the solution data
## in ruc_results
pricing_type = options.day_ahead_pricing
print("Computing day-ahead prices using method "+pricing_type+".")
pricing_instance = ruc_results.clone()
if pricing_type == "LMP":
for g, g_dict in pricing_instance.elements(element_type='generator', generator_type='thermal'):
g_dict['fixed_commitment'] = g_dict['commitment']
if 'reg_provider' in g_dict:
g_dict['fixed_regulation'] = g_dict['reg_provider']
## TODO: add fixings for storage; need hooks in EGRET
elif pricing_type == "ELMP":
## for ELMP we fix all commitment binaries that were 0 in the RUC solve
time_periods = pricing_instance.data['system']['time_keys']
for g, g_dict in pricing_instance.elements(element_type='generator', generator_type='thermal'):
g_dict['fixed_commitment'] = _get_fixed_if_off(g_dict['commitment'],
g_dict.get('fixed_commitment', None))
if 'reg_provider' in g_dict:
g_dict['fixed_regulation'] = _get_fixed_if_off(g_dict['reg_provider'],
g_dict.get('fixed_regulation', None))
## TODO: add fixings for storage; need hooks in EGRET
elif pricing_type == "aCHP":
# don't do anything
pass
else:
raise RuntimeError("Unknown pricing type "+pricing_type+".")
## change the penalty prices to the caps, if necessary
reserve_requirement = ('reserve_requirement' in pricing_instance.data['system'])
# In case of demand shortfall, the price skyrockets, so we threshold the value.
if pricing_instance.data['system']['load_mismatch_cost'] > options.price_threshold:
pricing_instance.data['system']['load_mismatch_cost'] = options.price_threshold
# In case of reserve shortfall, the price skyrockets, so we threshold the value.
if reserve_requirement:
if pricing_instance.data['system']['reserve_shortfall_cost'] > options.reserve_price_threshold:
pricing_instance.data['system']['reserve_shortfall_cost'] = options.reserve_price_threshold
ptdf_manager.mark_active(pricing_instance)
pyo_model = create_pricing_model(pricing_instance, relaxed=True,
ptdf_options=ptdf_manager.damarket_ptdf_options,
PTDF_matrix_dict=ptdf_manager.PTDF_matrix_dict)
pyo_model.dual = Suffix(direction=Suffix.IMPORT)
try:
## TODO: Should there be separate options for this run?
pricing_results, _, _ = call_solver(solver,
pyo_model,
options,
options.deterministic_ruc_solver_options,
relaxed=True)
except:
print("Failed to solve pricing instance - likely because no feasible solution exists!")
output_filename = "bad_pricing.json"
pricing_instance.write(output_filename)
print("Wrote failed RUC model to file=" + output_filename)
raise
ptdf_manager.update_active(pricing_results)
## Debugging
if pricing_results.data['system']['total_cost'] > ruc_results.data['system']['total_cost']*(1.+1.e-06):
print("The pricing run had a higher objective value than the MIP run. This is indicative of a bug.")
print("Writing LP pricing_problem.json")
output_filename = 'pricing_instance.json'
pricing_results.write(output_filename)
output_filename = 'ruc_results.json'
ruc_results.write(output_filename)
raise RuntimeError("Halting due to bug in pricing.")
day_ahead_prices = {}
for b, b_dict in pricing_results.elements(element_type='bus'):
for t,lmp in enumerate(b_dict['lmp']['values']):
day_ahead_prices[b,t] = lmp
if reserve_requirement:
day_ahead_reserve_prices = {}
for t,price in enumerate(pricing_results.data['system']['reserve_price']['values']):
            # Thresholding the value of the reserve price to the passed-in option
            day_ahead_reserve_prices[t] = min(price, options.reserve_price_threshold)
print("Recalculating RUC reserve procurement")
        ## scale the provided reserves by the amount by which we are over
thermal_reserve_cleared_DA = {}
g_reserve_values = { g : g_dict['rg']['values'] for g, g_dict in ruc_results.elements(element_type='generator', generator_type='thermal') }
reserve_shortfall = ruc_results.data['system']['reserve_shortfall']['values']
reserve_requirement = ruc_results.data['system']['reserve_requirement']['values']
for t in range(0,options.ruc_every_hours):
reserve_provided_t = sum(reserve_vals[t] for reserve_vals in g_reserve_values.values())
reserve_shortfall_t = reserve_shortfall[t]
reserve_requirement_t = reserve_requirement[t]
surplus_reserves_t = reserve_provided_t + reserve_shortfall_t - reserve_requirement_t
## if there's a shortfall, grab the full amount from the RUC solve
## or if there's no provided reserves this can safely be set to 1.
if round_small_values(reserve_shortfall_t) > 0 or reserve_provided_t == 0:
surplus_multiple_t = 1.
else:
## scale the reserves from the RUC down by the same fraction
                ## so that they exactly meet the needed reserves
surplus_multiple_t = reserve_requirement_t/reserve_provided_t
for g, reserve_vals in g_reserve_values.items():
thermal_reserve_cleared_DA[g,t] = reserve_vals[t]*surplus_multiple_t
else:
        day_ahead_reserve_prices = { t : 0. for t,_ in enumerate(ruc_results.data['system']['time_keys']) }
        thermal_reserve_cleared_DA = { (g,t) : 0. \
                for t,_ in enumerate(ruc_results.data['system']['time_keys']) \
                for g,_ in ruc_results.elements(element_type='generator', generator_type='thermal') }
thermal_gen_cleared_DA = {}
renewable_gen_cleared_DA = {}
for g, g_dict in ruc_results.elements(element_type='generator'):
pg = g_dict['pg']['values']
if g_dict['generator_type'] == 'thermal':
store_dict = thermal_gen_cleared_DA
elif g_dict['generator_type'] == 'renewable':
store_dict = renewable_gen_cleared_DA
else:
raise RuntimeError(f"Unrecognized generator type {g_dict['generator_type']}")
for t in range(0,options.ruc_every_hours):
store_dict[g,t] = pg[t]
return RucMarket(day_ahead_prices=day_ahead_prices,
day_ahead_reserve_prices=day_ahead_reserve_prices,
thermal_gen_cleared_DA=thermal_gen_cleared_DA,
thermal_reserve_cleared_DA=thermal_reserve_cleared_DA,
renewable_gen_cleared_DA=renewable_gen_cleared_DA)
def create_simulation_actuals(
options:Options, data_provider:DataProvider,
this_date:datetime.date, this_hour:int,
step_size_minutes:int) -> EgretModel:
''' Get an Egret model consisting of data to be treated as actuals, starting at a given time.
Parameters
----------
options:Options
Global option values
data_provider: DataProvider
An object that can provide actual and/or forecast data for the requested days
this_date: date
The date of the first time step for which data should be retrieved
this_hour: int
0-based index of the first hour of the day for which data should be retrieved
step_size_minutes: int
The number of minutes between each time step
'''
# Convert time string to time
start_time = datetime.datetime.combine(this_date,
datetime.time(hour=this_hour))
# Pick whether we're getting actuals or forecasts
if options.simulate_out_of_sample:
get_data_func = data_provider.populate_with_actuals
else:
print("")
print("***WARNING: Simulating the forecast scenario when running deterministic RUC - "
"time consistency across midnight boundaries is not guaranteed, and may lead to threshold events.")
get_data_func = data_provider.populate_with_forecast_data
# Get a new model
total_step_count = options.ruc_horizon * 60 // step_size_minutes
md = data_provider.get_initial_model(options, total_step_count, step_size_minutes)
# Fill it in with data
data_provider.populate_initial_state_data(options, start_time.date(), md)
if this_hour == 0:
get_data_func(options, start_time, total_step_count, step_size_minutes, md)
else:
# only get up to 24 hours of data, then copy it
        timesteps_per_day = 24 * 60 // step_size_minutes
        steps_to_request = min(timesteps_per_day, total_step_count)
get_data_func(options, start_time, steps_to_request, step_size_minutes, md)
for vals, in get_forecastables(md):
for t in range(timesteps_per_day, total_step_count):
vals[t] = vals[t-timesteps_per_day]
return md
def _ensure_reserve_factor_honored(options:Options, md:EgretModel, time_periods:Iterable[int]) -> None:
''' Adjust reserve requirements to satisfy the reserve factor.
For each time period in time_periods, ensure that the reserve requirement is no less than
the total load for that time period multiplied by the reserve factor. If the reserve
requirement for a time is too low it is raised, otherwise it is left alone.
'''
if options.reserve_factor > 0:
reserve_factor = options.reserve_factor
reserve_reqs = md.data['system']['reserve_requirement']['values']
for t in time_periods:
total_load = sum(bdata['p_load']['values'][t]
for bus, bdata in md.elements('load'))
min_reserve = reserve_factor*total_load
if reserve_reqs[t] < min_reserve:
reserve_reqs[t] = min_reserve
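# Example (illustrative): with options.reserve_factor = 0.05 and a total load of
# 1000 MW in hour t, the reserve requirement for hour t is raised to at least
# 50 MW; requirements already above that level are left unchanged.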
def _copy_initial_state_into_model(options:Options,
current_state:SimulationState,
md:EgretModel):
for g, g_dict in md.elements('generator', generator_type='thermal'):
g_dict['initial_status'] = current_state.get_initial_generator_state(g)
g_dict['initial_p_output'] = current_state.get_initial_power_generated(g)
for s,s_dict in md.elements('storage'):
s_dict['initial_state_of_charge'] = current_state.get_initial_state_of_charge(s)
|
"""A mixing that extends a HasDriver class with Galaxy-specific utilities.
Implementer must provide a self.build_url method to target Galaxy.
"""
import collections
import contextlib
import random
import string
import time
from abc import abstractmethod
from functools import (
partial,
wraps,
)
from typing import (
Any,
cast,
Dict,
Optional,
Union,
)
import requests
import yaml
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.webdriver import WebDriver
from galaxy.util import DEFAULT_SOCKET_TIMEOUT
from . import sizzle
from .components import (
Component,
HasText,
)
from .data import load_root_component
from .has_driver import (
exception_indicates_click_intercepted,
exception_indicates_not_clickable,
exception_indicates_stale_element,
HasDriver,
TimeoutException,
)
from .smart_components import SmartComponent
# Test case data
DEFAULT_PASSWORD = "123456"
RETRY_DURING_TRANSITIONS_SLEEP_DEFAULT = 0.1
RETRY_DURING_TRANSITIONS_ATTEMPTS_DEFAULT = 10
GALAXY_MAIN_FRAME_ID = "galaxy_main"
WaitType = collections.namedtuple("WaitType", ["name", "default_length"])
# Default wait times should make sense for a development server under low
# load. Wait times for production servers can be scaled up with a multiplier.
class WAIT_TYPES:
# Rendering a form and registering callbacks, etc...
UX_RENDER = WaitType("ux_render", 1)
# Fade in, fade out, etc...
UX_TRANSITION = WaitType("ux_transition", 5)
# Toastr popup and dismissal, etc...
UX_POPUP = WaitType("ux_popup", 15)
# Creating a new history and loading it into the panel.
DATABASE_OPERATION = WaitType("database_operation", 10)
# Wait time for jobs to complete in default environment.
JOB_COMPLETION = WaitType("job_completion", 30)
# Wait time for a GIE to spawn.
GIE_SPAWN = WaitType("gie_spawn", 30)
# Wait time for toolshed search
SHED_SEARCH = WaitType("shed_search", 30)
# Wait time for repository installation
REPO_INSTALL = WaitType("repo_install", 60)
# Beta history Polling Duration
HISTORY_POLL = WaitType("history_poll", 3)
# Choose a moderate wait type for operations that don't specify a type.
DEFAULT_WAIT_TYPE = WAIT_TYPES.DATABASE_OPERATION
class NullTourCallback:
def handle_step(self, step, step_index):
pass
def exception_seems_to_indicate_transition(e):
"""True if exception seems to indicate the page state is transitioning.
Galaxy features many different transition effects that change the page state over time.
These transitions make it slightly more difficult to test Galaxy because atomic input
actions take an indeterminate amount of time to be reflected on the screen. This method
    takes a Selenium exception and tries to infer if such a transition could be the root
cause of the exception. The methods that follow use it to allow retrying actions during
transitions.
Currently the two kinds of exceptions that we say may indicate a transition are
StaleElement exceptions (a DOM element grabbed at one step is no longer available)
and "not clickable" exceptions (so perhaps a popup modal is blocking a click).
"""
return (
exception_indicates_stale_element(e)
or exception_indicates_not_clickable(e)
or exception_indicates_click_intercepted(e)
)
def retry_call_during_transitions(
f,
attempts=RETRY_DURING_TRANSITIONS_ATTEMPTS_DEFAULT,
sleep=RETRY_DURING_TRANSITIONS_SLEEP_DEFAULT,
exception_check=exception_seems_to_indicate_transition,
):
previous_attempts = 0
while True:
try:
return f()
except Exception as e:
if previous_attempts > attempts:
raise
if not exception_check(e):
raise
time.sleep(sleep)
previous_attempts += 1
def retry_during_transitions(
f,
attempts=RETRY_DURING_TRANSITIONS_ATTEMPTS_DEFAULT,
sleep=RETRY_DURING_TRANSITIONS_SLEEP_DEFAULT,
exception_check=exception_seems_to_indicate_transition,
):
@wraps(f)
def _retry(*args, **kwds):
return retry_call_during_transitions(
partial(f, *args, **kwds), attempts=attempts, sleep=sleep, exception_check=exception_check
)
return _retry
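# Usage sketch (illustrative, not part of the original module): any UI action can be
# retried through page transitions either by decorating it, as below, or by calling
# retry_call_during_transitions directly on a zero-argument callable.
@retry_during_transitions
def _example_flaky_text_read(element):
    # A stale-element or "not clickable" error raised here is retried up to the
    # configured number of attempts before being re-raised.
    return element.text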
def edit_details(f, scope=".history-index"):
"""Open the editor, run the edits, hit the save button"""
@wraps(f)
def func_wrapper(self, *args, **kwds):
# open editor
if self.is_beta_history():
self.open_history_editor(scope=scope)
# run edits
result = f(self, *args, **kwds)
# save edits
if self.is_beta_history():
self.history_click_editor_save()
return result
return func_wrapper
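# Usage sketch (hypothetical method name): edit_details wraps a NavigatesGalaxy
# method so that, when the beta history panel is active, the editor is opened
# before the edit and saved afterwards - see __beta_rename_collection below for
# a real use.
#
#     @edit_details
#     def history_panel_set_annotation(self, annotation):
#         ...  # fill in the annotation field only; open/save is handled by the wrapper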
class NavigatesGalaxy(HasDriver):
"""Class with helpers methods for driving components of the Galaxy interface.
In most cases, methods for interacting with Galaxy components that appear in
multiple tests or applications should be refactored into this class for now.
Keep in mind that this class is used outside the context of ``TestCase`` s as
well - so some methods more explicitly related to test data or assertion checking
may make more sense in SeleniumTestCase for instance.
Some day this class will likely be split up into smaller mixins for particular
components of Galaxy, but until that day the best practice is to prefix methods
for driving or querying the interface with the name of the component or page
the method operates on. These serve as pseudo-namespaces until we decompose this
class. For instance, the method for clicking an option in the workflow editor is
workflow_editor_click_option instead of click_workflow_editor_option.
"""
timeout_multiplier: float
driver: WebDriver
@abstractmethod
def build_url(self, url: str, for_selenium: bool = True) -> str:
"""Build URL to the target Galaxy."""
default_password = DEFAULT_PASSWORD
wait_types = WAIT_TYPES
# set to True to reload each invocation (good for interactive test building)
_interactive_components: bool = False
_root_component: Component = load_root_component()
def get(self, url: str = ""):
"""Expand supplied relative URL and navigate to page using Selenium driver."""
full_url = self.build_url(url)
return self.driver.get(full_url)
@property
def navigation(self) -> Component:
if self._interactive_components:
return load_root_component()
else:
return self._root_component
@property
def components(self) -> SmartComponent:
"""Fetch root component describing the Galaxy DOM."""
return SmartComponent(self.navigation, self)
def wait_length(self, wait_type: WaitType) -> float:
"""Return the wait time specified by wait_type after applying `timeout_multipler`.
`timeout_multiplier` is used in production CI tests to reduce transient failures
in a uniform way across test suites to expand waiting.
"""
return wait_type.default_length * self.timeout_multiplier
def sleep_for(self, wait_type: WaitType) -> None:
"""Sleep on the Python client side for the specified wait_type.
This method uses `wait_length` to apply any `timeout_multiplier`.
"""
self.sleep_for_seconds(self.wait_length(wait_type))
def sleep_for_seconds(self, duration: float) -> None:
"""Sleep in the local thread for specified number of seconds.
Ideally, we would be sleeping on the Selenium server instead of in the local client
(e.g. test) thread.
"""
time.sleep(duration)
def timeout_for(self, wait_type: WaitType = DEFAULT_WAIT_TYPE, **kwd) -> float:
return self.wait_length(wait_type)
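    # Example (illustrative): with timeout_multiplier = 2.0, a JOB_COMPLETION wait
    # (default 30s) yields wait_length(WAIT_TYPES.JOB_COMPLETION) == 60.0, and
    # sleep_for / timeout_for scale by the same factor.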
def home(self) -> None:
"""Return to root Galaxy page and wait for some basic widgets to appear."""
self.get()
self.components.masthead._.wait_for_visible()
def go_to_trs_search(self) -> None:
self.driver.get(self.build_url("workflows/trs_search"))
self.components.masthead._.wait_for_visible()
def go_to_trs_by_id(self) -> None:
self.driver.get(self.build_url("workflows/trs_import"))
self.components.masthead._.wait_for_visible()
def go_to_workflow_sharing(self, workflow_id: str) -> None:
self.driver.get(self.build_url(f"workflows/sharing?id={workflow_id}"))
def go_to_workflow_export(self, workflow_id: str) -> None:
self.driver.get(self.build_url(f"workflow/export?id={workflow_id}"))
def go_to_history_sharing(self, history_id: str) -> None:
self.driver.get(self.build_url(f"histories/sharing?id={history_id}"))
def switch_to_main_panel(self):
self.driver.switch_to.frame(GALAXY_MAIN_FRAME_ID)
@contextlib.contextmanager
def local_storage(self, key: str, value: Union[float, str]):
"""Method decorator to modify localStorage for the scope of the supplied context."""
self.driver.execute_script(f"""window.localStorage.setItem("{key}", {value});""")
try:
yield
finally:
self.driver.execute_script(f"""window.localStorage.removeItem("{key}");""")
@contextlib.contextmanager
def main_panel(self):
"""Decorator to operate within the context of Galaxy's main frame."""
try:
self.switch_to_main_panel()
yield
finally:
self.driver.switch_to.default_content()
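    # Usage sketch: run driver interactions against Galaxy's main iframe and always
    # switch back to the default content afterwards (selector is hypothetical):
    #
    #     with self.main_panel():
    #         self.wait_for_selector_visible("#tool-form")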
def api_get(self, endpoint, data=None, raw=False):
data = data or {}
full_url = self.build_url(f"api/{endpoint}", for_selenium=False)
response = requests.get(
full_url, data=data, cookies=self.selenium_to_requests_cookies(), timeout=DEFAULT_SOCKET_TIMEOUT
)
if raw:
return response
else:
return response.json()
def api_post(self, endpoint, data=None):
data = data or {}
full_url = self.build_url(f"api/{endpoint}", for_selenium=False)
response = requests.post(
full_url, data=data, cookies=self.selenium_to_requests_cookies(), timeout=DEFAULT_SOCKET_TIMEOUT
)
return response.json()
def api_delete(self, endpoint, raw=False):
full_url = self.build_url(f"api/{endpoint}", for_selenium=False)
response = requests.delete(
full_url, cookies=self.selenium_to_requests_cookies(), timeout=DEFAULT_SOCKET_TIMEOUT
)
if raw:
return response
else:
return response.json()
def get_galaxy_session(self):
for cookie in self.driver.get_cookies():
if cookie["name"] == "galaxysession":
return cookie["value"]
def selenium_to_requests_cookies(self):
return {"galaxysession": self.get_galaxy_session()}
def history_panel_name_element(self):
if self.is_beta_history():
component = self.beta_history_element("name display")
else:
component = self.components.history_panel.name
return component.wait_for_present()
@retry_during_transitions
def history_panel_name(self):
return self.history_panel_name_element().text
def history_panel_collection_rename(self, hid: int, new_name: str, assert_old_name: Optional[str] = None):
collection_view = self.history_panel_expand_collection(hid)
if self.is_beta_history():
self.__beta_rename_collection(new_name)
self.sleep_for(WAIT_TYPES.UX_RENDER)
else:
title_element = collection_view.title.wait_for_visible()
if assert_old_name is not None:
assert title_element.text == assert_old_name
title_element.click()
title_rename_element = collection_view.title_input.wait_for_visible()
title_rename_element.send_keys(new_name)
self.send_enter(title_rename_element)
def history_panel_expand_collection(self, collection_hid: int) -> SmartComponent:
self.history_panel_click_item_title(collection_hid)
collection_view = self.components.history_panel.collection_view
collection_view._.wait_for_present()
return collection_view
@edit_details
def __beta_rename_collection(self, new_name):
title_element = self.beta_history_element("name input").wait_for_clickable()
title_element.clear()
title_element.send_keys(new_name)
def history_panel_collection_name_element(self):
if self.is_beta_history():
title_element = self.beta_history_element("collection name display").wait_for_present()
else:
collection_view = self.components.history_panel.collection_view
collection_view._.wait_for_present()
title_element = collection_view.title.wait_for_visible()
return title_element
def make_accessible_and_publishable(self):
self.components.histories.sharing.make_accessible.wait_for_and_click()
self.components.histories.sharing.make_publishable.wait_for_and_click()
def history_contents(self, history_id=None, view="summary", datasets_only=True):
if history_id is None:
history_id = self.current_history_id()
histories = self.api_get("histories?keys=id")
if history_id not in [h["id"] for h in histories]:
return {}
if datasets_only:
endpoint = f"histories/{history_id}/contents?view={view}"
else:
endpoint = f"histories/{history_id}?view={view}"
return self.api_get(endpoint)
def current_history(self):
full_url = self.build_url("history/current_history_json", for_selenium=False)
response = requests.get(full_url, cookies=self.selenium_to_requests_cookies(), timeout=DEFAULT_SOCKET_TIMEOUT)
return response.json()
def current_history_id(self):
return self.current_history()["id"]
def latest_history_item(self):
history_contents = self.history_contents()
assert len(history_contents) > 0
return history_contents[-1]
def wait_for_history(self, assert_ok=True):
def history_becomes_terminal(driver):
current_history_id = self.current_history_id()
state = self.api_get(f"histories/{current_history_id}")["state"]
if state not in ["running", "queued", "new", "ready"]:
return state
else:
return None
timeout = self.timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
final_state = self.wait(timeout=timeout).until(history_becomes_terminal)
if assert_ok:
assert final_state == "ok", final_state
return final_state
def history_panel_create_new_with_name(self, name):
self.history_panel_create_new()
self.history_panel_rename(name)
def history_panel_create_new(self):
"""Click create new and pause a bit for the history to begin to refresh."""
self.history_click_create_new()
self.sleep_for(WAIT_TYPES.UX_RENDER)
def history_panel_wait_for_hid_ok(self, hid, allowed_force_refreshes=0):
return self.history_panel_wait_for_hid_state(hid, "ok", allowed_force_refreshes=allowed_force_refreshes)
def history_panel_wait_for_hid_deferred(self, hid, allowed_force_refreshes=0):
return self.history_panel_wait_for_hid_state(hid, "deferred", allowed_force_refreshes=allowed_force_refreshes)
def history_panel_item_component(self, history_item=None, hid=None, multi_history_panel=False):
if self.is_beta_history():
assert hid
return self.content_item_by_attributes(hid=hid)
if history_item is None:
assert hid
history_item = self.hid_to_history_item(hid)
item = self.components.history_panel.item.selector
if multi_history_panel:
item = self.components.multi_history_view.item
return item(history_content_type=history_item["history_content_type"], id=history_item["id"])
def wait_for_history_to_have_hid(self, history_id, hid):
def get_hids():
contents = self.api_get(f"histories/{history_id}/contents")
return [d["hid"] for d in contents]
def history_has_hid(driver):
hids = get_hids()
return any(h == hid for h in hids)
timeout = self.timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
try:
self.wait(timeout).until(history_has_hid)
except self.TimeoutException as e:
hids = get_hids()
message = f"Timeout waiting for history {history_id} to have hid {hid} - have hids {hids}"
raise self.prepend_timeout_message(e, message)
def history_panel_wait_for_hid_visible(self, hid, allowed_force_refreshes=0, multi_history_panel=False):
current_history_id = self.current_history_id()
self.wait_for_history_to_have_hid(current_history_id, hid)
# TODO: just use HID and stop resolving history_item -or- place history item in DOM.
# I think Mason thought the first was cleaner based on recent changes, but I think duplicated
# HIDs due to conversions and such make using the actual history item ID more robust.
history_item = self.hid_to_history_item(hid, current_history_id=current_history_id)
history_item_selector = self.history_panel_item_component(
history_item, hid=hid, multi_history_panel=multi_history_panel
)
try:
self.history_item_wait_for(history_item_selector, allowed_force_refreshes)
except self.TimeoutException as e:
selector = self.navigation.history_panel.selectors.contents
if self.is_beta_history():
selector = self.navigation.history_panel.selectors.contents_beta
contents_elements = self.find_elements(selector)
div_ids = [f"#{d.get_attribute("id")}" for d in contents_elements]
template = "Failed waiting on history item %d to become visible, visible datasets include [%s]."
message = template % (hid, ",".join(div_ids))
raise self.prepend_timeout_message(e, message)
return history_item_selector
def hid_to_history_item(self, hid, current_history_id=None):
if current_history_id is None:
current_history_id = self.current_history_id()
contents = self.api_get(f"histories/{current_history_id}/contents")
history_item = [d for d in contents if d["hid"] == hid][0]
return history_item
def history_item_wait_for(self, history_item_selector, allowed_force_refreshes):
attempt = 0
while True:
try:
rval = self.wait_for_visible(history_item_selector, wait_type=WAIT_TYPES.JOB_COMPLETION)
break
except self.TimeoutException:
if attempt >= allowed_force_refreshes:
raise
attempt += 1
if not self.is_beta_history():
self.history_panel_refresh_click()
return rval
def history_panel_wait_for_history_loaded(self):
# Verify that the history has been loaded and is empty.
self.wait_for_visible(
self.navigation.history_panel.selectors.empty_message, wait_type=WAIT_TYPES.DATABASE_OPERATION
)
def history_panel_wait_for_hid_hidden(self, hid, multi_history_panel=False):
history_item_selector = self.history_panel_item_component(hid=hid, multi_history_panel=multi_history_panel)
self.wait_for_absent_or_hidden(history_item_selector, wait_type=WAIT_TYPES.JOB_COMPLETION)
return history_item_selector
def history_panel_wait_for_hid_state(self, hid, state, allowed_force_refreshes=0, multi_history_panel=False):
history_item_selector = self.history_panel_wait_for_hid_visible(
hid, allowed_force_refreshes=allowed_force_refreshes, multi_history_panel=multi_history_panel
)
if self.is_beta_history():
history_item_selector_state = self.content_item_by_attributes(hid=hid, state=state)
else:
# history_item_selector_state = history_item_selector.with_class(f"state-{state}")
history_item_selector_state = history_item_selector.with_data("state", state)
try:
self.history_item_wait_for(history_item_selector_state, allowed_force_refreshes)
except self.TimeoutException as e:
history_item = self.wait_for_visible(history_item_selector)
current_state = "UNKNOWN"
classes = history_item.get_attribute("class").split(" ")
for clazz in classes:
if clazz.startswith("state-"):
current_state = clazz[len("state-") :]
template = "Failed waiting on history item %d state to change to [%s] current state [%s]. "
message = template % (hid, state, current_state)
raise self.prepend_timeout_message(e, message)
return history_item_selector_state
def click_grid_popup_option(self, item_name, option_label):
item_button = None
grid = self.components.grids.body.wait_for_visible()
for row in grid.find_elements_by_tag_name("tr"):
name_cell = row.find_elements_by_tag_name("td")[1]
if name_cell.text == item_name:
item_button = name_cell
break
if item_button is None:
raise AssertionError(f"Failed to find item with name [{item_name}]")
popup_menu_button = item_button.find_element_by_css_selector(".dropdown-toggle")
popup_menu_button.click()
popup_option = self.driver.find_element_by_link_text(option_label)
popup_option.click()
def published_grid_search_for(self, search_term=None):
return self._inline_search_for(
self.navigation.grids.free_text_search,
search_term,
)
def get_logged_in_user(self):
return self.api_get("users/current")
def get_api_key(self, force=False):
# If force is false, use the form inputs API and allow the key to be absent.
if not force:
return self.api_get(f"users/{self.get_user_id()}/api_key/inputs")["inputs"][0]["value"]
else:
return self.api_post(f"users/{self.get_user_id()}/api_key")
def get_user_id(self):
user = self.get_logged_in_user()
return user["id"]
def is_logged_in(self):
return "email" in self.get_logged_in_user()
@retry_during_transitions
def _inline_search_for(self, selector, search_term=None, escape_to_clear=False):
# Clear tooltip resulting from clicking on the masthead to get here.
self.clear_tooltips()
search_box = self.wait_for_and_click(selector)
if escape_to_clear:
# The combination of DebouncedInput+b-input doesn't seem to uniformly respect
            # .clear() below. We use escape handling a lot - and that does cause the input
# to reset correctly and fire the required re-active events/handlers.
self.send_escape(search_box)
search_box.clear()
if search_term is not None:
search_box.send_keys(search_term)
self.send_enter(search_box)
return search_box
def _get_random_name(self, prefix=None, suffix=None, len=10):
return "{}{}{}".format(
prefix or "",
"".join(random.choice(string.ascii_lowercase + string.digits) for _ in range(len)),
suffix or "",
)
def _get_random_email(self, username=None, domain=None):
username = username or "test"
domain = domain or "test.test"
return self._get_random_name(prefix=username, suffix=f"@{domain}")
    # Creates a random password of length len by repeatedly using the random number
    # generator to pick one element from the set of ASCII letters and digits 0-9 and
    # concatenating it to the end of the password string until it has length len.
def _get_random_password(self, len=6):
return "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(len))
def submit_login(self, email, password=None, assert_valid=True, retries=0):
self.components.masthead.register_or_login.wait_for_and_click()
self.sleep_for(WAIT_TYPES.UX_RENDER)
self.fill_login_and_submit(email, password=password)
if assert_valid:
try:
self.wait_for_logged_in()
except NotLoggedInException:
self.snapshot("login-failed")
if retries > 0:
self.submit_login(email, password, assert_valid, retries - 1)
else:
raise
self.snapshot("logged-in")
def fill_login_and_submit(self, email, password=None):
if password is None:
password = self.default_password
login_info = {
"login": email,
"password": password,
}
form = self.wait_for_visible(self.navigation.login.selectors.form)
self.fill(form, login_info)
self.snapshot("logging-in")
self.wait_for_and_click(self.navigation.login.selectors.submit)
self.snapshot("login-submitted")
def register(self, email=None, password=None, username=None, confirm=None, assert_valid=True):
if email is None:
email = self._get_random_email()
if password is None:
password = self.default_password
if confirm is None:
confirm = password
if username is None:
username = email.split("@")[0]
self.home()
self.components.masthead.register_or_login.wait_for_and_click()
self.wait_for_and_click(self.navigation.registration.selectors.toggle)
form = self.wait_for_visible(self.navigation.registration.selectors.form)
self.fill(form, dict(email=email, password=password, username=username, confirm=confirm))
self.wait_for_and_click(self.navigation.registration.selectors.submit)
if assert_valid is False:
self.assert_error_message()
elif assert_valid:
self.wait_for_logged_in()
# Code below previously was needed because there was a bug that would prevent the masthead from changing,
# the bug seems maybe to be fixed though - so we could consider eliminating these extra checks to speed
# up tests.
self.home()
self.wait_for_logged_in()
self.click_masthead_user()
# Make sure the user menu was dropped down
user_menu = self.components.masthead.user_menu.wait_for_visible()
try:
username_element = self.components.masthead.username.wait_for_visible()
except self.TimeoutException as e:
menu_items = user_menu.find_elements_by_css_selector("li a")
menu_text = [mi.text for mi in menu_items]
message = f"Failed to find logged in message in menu items {", ".join(menu_text)}"
raise self.prepend_timeout_message(e, message)
text = username_element.text
assert username in text
assert self.get_logged_in_user()["email"] == email
# clicking away no longer closes menu post Masthead -> VueJS
self.click_masthead_user()
def wait_for_logged_in(self):
try:
self.components.masthead.logged_in_only.wait_for_visible()
except self.TimeoutException as e:
ui_logged_out = self.components.masthead.logged_out_only.is_displayed
if ui_logged_out:
dom_message = (
"Element a.loggedout-only is present in DOM, indicating Login or Register button still in masthead."
)
else:
dom_message = "Element a.loggedout-only is *not* present in DOM."
user_info = self.api_get("users/current")
if "username" in user_info:
template = "Failed waiting for masthead to update for login, but user API response indicates [%s] is logged in. This seems to be a bug in Galaxy. %s logged API response was [%s]. "
message = template % (user_info["username"], dom_message, user_info)
raise self.prepend_timeout_message(e, message)
else:
raise NotLoggedInException(e, user_info, dom_message)
def click_center(self):
action_chains = self.action_chains()
center_element = self.driver.find_element_by_css_selector("#center")
action_chains.move_to_element(center_element).click().perform()
def perform_upload(self, test_path, **kwd):
self._perform_upload(test_path=test_path, **kwd)
def perform_upload_of_pasted_content(self, paste_data, **kwd):
self._perform_upload(paste_data=paste_data, **kwd)
def _perform_upload(
self, test_path=None, paste_data=None, ext=None, genome=None, ext_all=None, genome_all=None, deferred=None
):
self.home()
self.upload_start_click()
self.upload_set_footer_extension(ext_all)
self.upload_set_footer_genome(genome_all)
if test_path:
self.upload_queue_local_file(test_path)
else:
assert paste_data is not None
self.upload_paste_data(paste_data)
if ext is not None:
self.wait_for_selector_visible(".upload-extension")
self.select2_set_value(".upload-extension", ext)
if genome is not None:
self.wait_for_selector_visible(".upload-genome")
self.select2_set_value(".upload-genome", genome)
if deferred is not None:
upload = self.components.upload
upload.settings_button(n=0).wait_for_and_click()
upload.settings.wait_for_visible()
setting = upload.setting_deferred.wait_for_visible()
classes = setting.get_attribute("class").split(" ")
if deferred is True and "fa-check-square-o" not in classes:
setting.click()
elif deferred is False and "fa-check-square-o" in classes:
setting.click()
self.upload_start()
self.wait_for_and_click_selector("button#btn-close")
def upload_list(self, test_paths, name="test", ext=None, genome=None, hide_source_items=True):
self._collection_upload_start(test_paths, ext, genome, "List")
if not hide_source_items:
self.collection_builder_hide_originals()
self.collection_builder_set_name(name)
self.collection_builder_create()
def upload_pair(self, test_paths, name="test", ext=None, genome=None, hide_source_items=True):
self._collection_upload_start(test_paths, ext, genome, "Pair")
if not hide_source_items:
self.collection_builder_hide_originals()
self.collection_builder_set_name(name)
self.collection_builder_create()
def upload_paired_list(self, test_paths, name="test", ext=None, genome=None, hide_source_items=True):
self._collection_upload_start(test_paths, ext, genome, "List of Pairs")
if not hide_source_items:
self.collection_builder_hide_originals()
self.collection_builder_clear_filters()
# TODO: generalize and loop these clicks so we don't need the assert
assert len(test_paths) == 2
self.collection_builder_click_paired_item("forward", 0)
self.collection_builder_click_paired_item("reverse", 1)
self.collection_builder_set_name(name)
self.collection_builder_create()
def _collection_upload_start(self, test_paths, ext, genome, collection_type):
# Perform upload of files and open the collection builder for specified
# type.
self.home()
self.upload_start_click()
self.upload_tab_click("collection")
self.upload_set_footer_extension(ext, tab_id="collection")
self.upload_set_footer_genome(genome, tab_id="collection")
self.upload_set_collection_type(collection_type)
for test_path in test_paths:
self.upload_queue_local_file(test_path, tab_id="collection")
self.upload_start(tab_id="collection")
self.upload_build()
def upload_tab_click(self, tab):
self.components.upload.tab(tab=tab).wait_for_and_click()
def upload_start_click(self):
self.components.upload.start.wait_for_and_click()
@retry_during_transitions
def upload_set_footer_extension(self, ext, tab_id="regular"):
if ext is not None:
selector = f"div#{tab_id} .upload-footer-extension"
self.wait_for_selector_visible(selector)
self.select2_set_value(selector, ext)
@retry_during_transitions
def upload_set_footer_genome(self, genome, tab_id="regular"):
if genome is not None:
selector = f"div#{tab_id} .upload-footer-genome"
self.wait_for_selector_visible(selector)
self.select2_set_value(selector, genome)
@retry_during_transitions
def upload_set_collection_type(self, collection_type):
self.wait_for_selector_visible(".upload-footer-collection-type")
self.select2_set_value(".upload-footer-collection-type", collection_type)
def upload_start(self, tab_id="regular"):
self.wait_for_and_click_selector(f"div#{tab_id} button#btn-start")
@retry_during_transitions
def upload_build(self, tab="collection"):
build_selector = f"div#{tab} button#btn-build"
# Pause a bit to let the callback on the build button be registered.
time.sleep(0.5)
# Click the Build button and make sure it disappears.
self.wait_for_and_click_selector(build_selector)
try:
self.wait_for_selector_absent_or_hidden(build_selector)
except TimeoutException:
            # Sometimes the callback in the JS hasn't been registered by the
            # time the build button is clicked. By the time this timeout has
            # elapsed, it should have been, so click again.
self.wait_for_and_click_selector(build_selector)
self.wait_for_selector_absent_or_hidden(build_selector)
def upload_queue_local_file(self, test_path, tab_id="regular"):
self.wait_for_and_click_selector(f"div#{tab_id} button#btn-local")
file_upload = self.wait_for_selector(f'div#{tab_id} input[type="file"]')
file_upload.send_keys(test_path)
def upload_paste_data(self, pasted_content, tab_id="regular"):
tab_locator = f"div#{tab_id}"
self.wait_for_and_click_selector(f"{tab_locator} button#btn-new")
textarea = self.wait_for_selector(f"{tab_locator} .upload-text-content")
textarea.send_keys(pasted_content)
def upload_rule_start(self):
self.upload_start_click()
self.upload_tab_click("rule-based")
def upload_rule_build(self):
self.upload_build(tab="rule-based")
def upload_rule_set_data_type(self, type_description):
upload = self.components.upload
data_type_element = upload.rule_select_data_type.wait_for_visible()
self.select2_set_value(data_type_element, type_description)
def upload_rule_set_input_type(self, input_description):
upload = self.components.upload
input_type_element = upload.rule_select_input_type.wait_for_visible()
self.select2_set_value(input_type_element, input_description)
def upload_rule_set_dataset(self, row=1):
upload = self.components.upload
upload.rule_dataset_selector.wait_for_visible()
upload.rule_dataset_selector_row(rowindex=row).wait_for_and_click()
def rule_builder_set_collection_name(self, name):
rule_builder = self.components.rule_builder
name_element = rule_builder.collection_name_input.wait_for_and_click()
name_element.send_keys(name)
self.sleep_for(WAIT_TYPES.UX_RENDER)
def rule_builder_set_extension(self, extension):
self.select2_set_value(self.navigation.rule_builder.selectors.extension_select, extension)
def rule_builder_filter_count(self, count=1):
rule_builder = self.components.rule_builder
rule_builder.menu_button_filter.wait_for_and_click()
with self.rule_builder_rule_editor("add-filter-count") as editor_element:
filter_input = editor_element.find_element_by_css_selector("input[type='number']")
filter_input.clear()
filter_input.send_keys(f"{count}")
def rule_builder_sort(self, column_label, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
with self.rule_builder_rule_editor("sort") as editor_element:
column_elem = editor_element.find_element_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elem, column_label)
self.screenshot_if(screenshot_name)
def rule_builder_add_regex_groups(self, column_label, group_count, regex, screenshot_name):
rule_builder = self.components.rule_builder
rule_builder.menu_button_column.wait_for_and_click()
with self.rule_builder_rule_editor("add-column-regex") as editor_element:
column_elem = editor_element.find_element_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elem, column_label)
groups_elem = editor_element.find_element_by_css_selector("input[type='radio'][value='groups']")
groups_elem.click()
regex_elem = editor_element.find_element_by_css_selector("input.rule-regular-expression")
regex_elem.clear()
regex_elem.send_keys(regex)
filter_input = editor_element.find_element_by_css_selector("input[type='number']")
filter_input.clear()
filter_input.send_keys(f"{group_count}")
self.screenshot_if(screenshot_name)
def rule_builder_add_regex_replacement(self, column_label, regex, replacement, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_column.wait_for_and_click()
with self.rule_builder_rule_editor("add-column-regex") as editor_element:
column_elem = editor_element.find_element_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elem, column_label)
groups_elem = editor_element.find_element_by_css_selector("input[type='radio'][value='replacement']")
groups_elem.click()
regex_elem = editor_element.find_element_by_css_selector("input.rule-regular-expression")
regex_elem.clear()
regex_elem.send_keys(regex)
filter_input = editor_element.find_element_by_css_selector("input.rule-replacement")
filter_input.clear()
filter_input.send_keys(f"{replacement}")
self.screenshot_if(screenshot_name)
def rule_builder_add_value(self, value, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_column.wait_for_and_click()
with self.rule_builder_rule_editor("add-column-value") as editor_element:
filter_input = editor_element.find_element_by_css_selector("input[type='text']")
filter_input.clear()
filter_input.send_keys(value)
self.screenshot_if(screenshot_name)
def rule_builder_remove_columns(self, column_labels, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
with self.rule_builder_rule_editor("remove-columns") as filter_editor_element:
column_elem = filter_editor_element.find_element_by_css_selector(".rule-column-selector")
for column_label in column_labels:
self.select2_set_value(column_elem, column_label)
self.screenshot_if(screenshot_name)
def rule_builder_concatenate_columns(self, column_label_1, column_label_2, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_column.wait_for_and_click()
with self.rule_builder_rule_editor("add-column-concatenate") as filter_editor_element:
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elems[0], column_label_1)
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elems[1], column_label_2)
self.screenshot_if(screenshot_name)
def rule_builder_split_columns(self, column_labels_1, column_labels_2, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
with self.rule_builder_rule_editor("split-columns") as filter_editor_element:
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
clear = True
for column_label_1 in column_labels_1:
self.select2_set_value(column_elems[0], column_label_1, clear_value=clear)
clear = False
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
clear = True
for column_label_2 in column_labels_2:
self.select2_set_value(column_elems[1], column_label_2, clear_value=clear)
clear = False
self.screenshot_if(screenshot_name)
def rule_builder_swap_columns(self, column_label_1, column_label_2, screenshot_name):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
with self.rule_builder_rule_editor("swap-columns") as filter_editor_element:
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elems[0], column_label_1)
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elems[1], column_label_2)
self.screenshot_if(screenshot_name)
@contextlib.contextmanager
def rule_builder_rule_editor(self, rule_type):
rule_builder = self.components.rule_builder
rule_builder.menu_item_rule_type(rule_type=rule_type).wait_for_and_click()
filter_editor = rule_builder.rule_editor(rule_type=rule_type)
filter_editor_element = filter_editor.wait_for_visible()
yield filter_editor_element
rule_builder.rule_editor_ok.wait_for_and_click()
def rule_builder_set_mapping(self, mapping_type, column_label, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
rule_builder.menu_item_rule_type(rule_type="mapping").wait_for_and_click()
rule_builder.add_mapping_menu.wait_for_and_click()
rule_builder.add_mapping_button(mapping_type=mapping_type).wait_for_and_click()
if mapping_type != "list-identifiers" or not isinstance(column_label, list):
mapping_elem = rule_builder.mapping_edit(mapping_type=mapping_type).wait_for_visible()
self.select2_set_value(mapping_elem, column_label)
self.screenshot_if(screenshot_name)
else:
assert len(column_label) > 0
column_labels = column_label
for i, column_label in enumerate(column_labels):
if i > 0:
rule_builder.mapping_add_column(mapping_type=mapping_type).wait_for_and_click()
mapping_elem = rule_builder.mapping_edit(mapping_type=mapping_type).wait_for_visible()
self.select2_set_value(mapping_elem, column_label)
self.screenshot_if(screenshot_name)
rule_builder.mapping_ok.wait_for_and_click()
def rule_builder_set_source(self, json):
rule_builder = self.components.rule_builder
rule_builder.view_source.wait_for_and_click()
self.rule_builder_enter_source_text(json)
rule_builder.main_button_ok.wait_for_and_click()
rule_builder.view_source.wait_for_visible()
def rule_builder_enter_source_text(self, json):
rule_builder = self.components.rule_builder
text_area_elem = rule_builder.source.wait_for_visible()
text_area_elem.clear()
text_area_elem.send_keys(json)
def workflow_editor_click_option(self, option_label):
self.workflow_editor_click_options()
menu_element = self.workflow_editor_options_menu_element()
option_elements = menu_element.find_elements_by_css_selector("a")
assert len(option_elements) > 0, "Failed to find workflow editor options"
self.sleep_for(WAIT_TYPES.UX_RENDER)
found_option = False
for option_element in option_elements:
if option_label in option_element.text:
action_chains = self.action_chains()
action_chains.move_to_element(option_element)
action_chains.click()
action_chains.perform()
found_option = True
break
if not found_option:
raise Exception(f"Failed to find workflow editor option with label [{option_label}]")
def workflow_editor_click_options(self):
return self.wait_for_and_click_selector("#workflow-options-button")
def workflow_editor_options_menu_element(self):
return self.wait_for_selector_visible("#workflow-options-button")
def workflow_editor_click_run(self):
return self.wait_for_and_click_selector("#workflow-run-button")
def workflow_editor_click_save(self):
self.wait_for_and_click_selector("#workflow-save-button")
self.sleep_for(self.wait_types.DATABASE_OPERATION)
def navigate_to_histories_page(self):
self.home()
self.click_masthead_user()
self.components.masthead.histories.wait_for_and_click()
def navigate_to_user_preferences(self):
self.home()
self.click_masthead_user()
self.components.masthead.preferences.wait_for_and_click()
def navigate_to_invocations(self):
self.home()
self.click_masthead_user()
self.components.masthead.invocations.wait_for_and_click()
def navigate_to_pages(self):
self.home()
self.click_masthead_user()
self.components.masthead.pages.wait_for_and_click()
def admin_open(self):
self.components.masthead.admin.wait_for_and_click()
def select_dataset_from_lib_import_modal(self, filenames):
for name in filenames:
self.components.libraries.folder.select_import_dir_item(name=name).wait_for_and_click()
self.components.libraries.folder.import_dir_btn.wait_for_and_click()
def create_new_library(self, login=True):
if login:
self.admin_login()
self.libraries_open()
self.name = self._get_random_name(prefix="testcontents")
self.libraries_index_create(self.name)
def libraries_open(self):
self.home()
self.click_masthead_shared_data()
self.components.masthead.libraries.wait_for_and_click()
self.components.libraries.selector.wait_for_visible()
def libraries_open_with_name(self, name):
self.libraries_open()
self.libraries_index_search_for(name)
self.libraries_index_table_elements()[0].find_element_by_css_selector("td a").click()
@retry_during_transitions
def libraries_index_table_elements(self):
container = self.components.libraries._.wait_for_visible()
elements = container.find_elements_by_css_selector("tbody")
if not elements:
return []
else:
assert len(elements) == 1
element = elements[0]
return element.find_elements_by_css_selector("tr") # [style='display: table-row']
def libraries_index_create(self, name):
self.components.libraries.create_new_library_btn.wait_for_and_click()
name_input_field = self.components.libraries.new_library_name_input.wait_for_visible()
input_field = self.components.libraries.new_library_description_input.wait_for_visible()
name_input_field.send_keys(name)
input_field.send_keys(self._get_random_name(prefix="description"))
self.components.libraries.save_new_library_btn.wait_for_and_click()
def libraries_index_click_search(self):
self.sleep_for(WAIT_TYPES.UX_RENDER)
search_element = self.components.libraries.search_field.wait_for_visible()
search_element.click()
return search_element
def libraries_index_sort_selector(self):
return "th[aria-sort]"
def libraries_index_sort_click(self):
sort_element = self.wait_for_selector_clickable(self.libraries_index_sort_selector())
sort_element.click()
return sort_element
def libraries_index_search_for(self, text):
self.wait_for_overlays_cleared()
search_box = self.libraries_index_click_search()
search_box.clear()
search_box.send_keys(text)
value = search_box.get_attribute("value")
assert value == text, value
def libraries_folder_create(self, name):
self.components.libraries.folder.add_folder.wait_for_and_click()
self.components.libraries.folder.input_folder_name.wait_for_and_send_keys(name)
self.components.libraries.folder.save_folder_btn.wait_for_and_click()
def libraries_click_dataset_import(self):
self.wait_for_and_click(self.navigation.libraries.folder.selectors.add_items_button)
self.wait_for_visible(self.navigation.libraries.folder.selectors.add_items_menu)
def libraries_dataset_import(self, btn):
self.libraries_click_dataset_import()
self.wait_for_and_click(btn)
def libraries_dataset_import_from_history_select(self, to_select_items):
self.wait_for_visible(self.navigation.libraries.folder.selectors.import_history_content)
history_elements = self.find_elements(self.navigation.libraries.folder.selectors.import_history_contents_items)
for to_select_item in to_select_items:
found = False
for history_element in history_elements:
if to_select_item in history_element.text:
history_element.find_element_by_css_selector("input").click()
found = True
break
if not found:
raise Exception(f"Failed to find history item [{to_select_item}] to select")
def libraries_dataset_import_from_history_click_ok(self, wait=True):
self.wait_for_and_click(self.navigation.libraries.folder.selectors.import_datasets_ok_button)
if wait:
# Let the progress bar disappear...
self.wait_for_absent_or_hidden(self.navigation.libraries.folder.selectors.import_progress_bar)
def libraries_table_elements(self):
tbody_element = self.wait_for_selector_visible("#folder_list_body > tbody")
return tbody_element.find_elements_by_css_selector("tr:not(.b-table-empty-row)")
def populate_library_folder_from_import_dir(self, library_name, filenames):
self.libraries_open_with_name(library_name)
self.libraries_dataset_import(self.navigation.libraries.folder.labels.from_import_dir)
self.select_dataset_from_lib_import_modal(filenames)
def navigate_to_new_library(self, login=True):
self.create_new_library(login)
self.libraries_open_with_name(self.name)
def wait_for_overlays_cleared(self):
"""Wait for modals and Toast notifications to disappear."""
self.wait_for_selector_absent_or_hidden(".ui-modal", wait_type=WAIT_TYPES.UX_POPUP)
self.wait_for_selector_absent_or_hidden(".toast", wait_type=WAIT_TYPES.UX_POPUP)
def clear_tooltips(self):
action_chains = self.action_chains()
center_element = self.driver.find_element_by_css_selector("#center")
action_chains.move_to_element(center_element).perform()
self.wait_for_selector_absent_or_hidden(".b-tooltip", wait_type=WAIT_TYPES.UX_POPUP)
def workflow_index_open(self):
self.home()
self.click_masthead_workflow()
def workflow_index_table_elements(self):
workflows = self.components.workflows
workflows.workflow_table.wait_for_visible()
return workflows.workflow_rows.all()
def workflow_index_table_row(self, workflow_index=0):
self.components.workflows.workflow_rows.wait_for_element_count_of_at_least(workflow_index + 1)
return self.workflow_index_table_elements()[workflow_index]
@retry_during_transitions
def workflow_index_column_text(self, column_index, workflow_index=0):
row_element = self.workflow_index_table_row(workflow_index=workflow_index)
columns = row_element.find_elements_by_css_selector("td")
return columns[column_index].text
def workflow_index_click_search(self):
return self.wait_for_and_click_selector("#workflow-search")
def workflow_index_search_for(self, search_term=None):
return self._inline_search_for(
self.navigation.workflows.search_box,
search_term,
escape_to_clear=True,
)
def workflow_index_click_import(self):
return self.components.workflows.import_button.wait_for_and_click()
def workflow_index_rename(self, new_name, workflow_index=0):
self.workflow_index_click_option("Rename", workflow_index=workflow_index)
alert = self.driver.switch_to.alert
alert.send_keys(new_name)
alert.accept()
@retry_during_transitions
def workflow_index_name(self, workflow_index=0):
"""Get workflow name for workflow_index'th row."""
row_element = self.workflow_index_table_row(workflow_index=workflow_index)
workflow_button = row_element.find_element_by_css_selector(".workflow-dropdown")
return workflow_button.text
@retry_during_transitions
def workflow_click_option(self, workflow_selector, workflow_index=0):
workflow_row = self.workflow_index_table_row(workflow_index=workflow_index)
workflow_button = workflow_row.find_element_by_css_selector(workflow_selector)
workflow_button.click()
def select_dropdown_item(self, option_title):
menu_element = self.wait_for_selector_visible(".dropdown-menu.show")
menu_options = menu_element.find_elements_by_css_selector("a.dropdown-item")
for menu_option in menu_options:
if option_title in menu_option.text:
menu_option.click()
return True
def workflow_index_click_option(self, option_title, workflow_index=0):
self.workflow_click_option(".workflow-dropdown", workflow_index)
if not self.select_dropdown_item(option_title):
raise AssertionError(f"Failed to find workflow action option with title [{option_title}]")
def workflow_index_click_tag_display(self, workflow_index=0):
workflow_row_element = self.workflow_index_table_row(workflow_index)
tag_display = workflow_row_element.find_element_by_css_selector(".tags-display")
tag_display.click()
def workflow_index_add_tag(self, tag: str, workflow_index: int = 0):
self.workflow_index_click_tag_display(workflow_index=workflow_index)
self.tagging_add([tag])
@retry_during_transitions
def workflow_index_tags(self, workflow_index=0):
tag_spans = self.workflow_index_tag_elements(workflow_index=workflow_index)
tags = []
for tag_span in tag_spans:
tags.append(tag_span.text)
return tags
@retry_during_transitions
def workflow_index_tag_elements(self, workflow_index=0):
workflow_row_element = self.workflow_index_table_row(workflow_index)
tag_display = workflow_row_element.find_element_by_css_selector(".tags-display")
tag_spans = tag_display.find_elements_by_css_selector(".tag-name")
return tag_spans
@retry_during_transitions
def workflow_index_click_tag(self, tag, workflow_index=0):
tag_spans = self.workflow_index_tag_elements(workflow_index=workflow_index)
clicked = False
for tag_span in tag_spans:
if tag_span.text == tag:
tag_span.click()
clicked = True
break
if not clicked:
raise KeyError(f"Failed to find tag {tag} on workflow with index {workflow_index}")
def workflow_import_submit_url(self, url):
form_button = self.wait_for_selector_visible("#workflow-import-button")
url_element = self.wait_for_selector_visible("#workflow-import-url-input")
url_element.send_keys(url)
form_button.click()
def workflow_sharing_click_publish(self):
self.wait_for_and_click_selector("input[name='make_accessible_and_publish']")
def tagging_add(self, tags, auto_closes=True, parent_selector=""):
for i, tag in enumerate(tags):
if auto_closes or i == 0:
tag_area = f"{parent_selector}.tags-input input[type='text']"
tag_area = self.wait_for_selector_clickable(tag_area)
tag_area.click()
tag_area.send_keys(tag)
self.send_enter(tag_area)
def workflow_run_with_name(self, name: str):
self.workflow_index_open()
self.workflow_index_search_for(name)
self.workflow_click_option(".workflow-run")
def workflow_run_specify_inputs(self, inputs: Dict[str, Any]):
workflow_run = self.components.workflow_run
for label, value in inputs.items():
input_div_element = workflow_run.input_data_div(label=label).wait_for_visible()
self.select2_set_value(input_div_element, "%d: " % value["hid"])
def workflow_run_submit(self):
self.components.workflow_run.run_workflow.wait_for_and_click()
def workflow_run_ensure_expanded(self):
workflow_run = self.components.workflow_run
if workflow_run.expanded_form.is_absent:
workflow_run.expand_form_link.wait_for_and_click()
workflow_run.expanded_form.wait_for_visible()
def workflow_create_new(self, annotation=None, clear_placeholder=False):
self.workflow_index_open()
self.sleep_for(self.wait_types.UX_RENDER)
self.click_button_new_workflow()
self.sleep_for(self.wait_types.UX_RENDER)
form_element = self.driver.find_element_by_id("submit")
name = self._get_random_name()
annotation = annotation or self._get_random_name()
inputs = self.driver.find_elements_by_class_name("ui-input")
if clear_placeholder:
inputs[0].clear()
inputs[0].send_keys(name)
inputs[1].send_keys(annotation)
form_element.click()
return name
def invocation_index_table_elements(self):
invocations = self.components.invocations
invocations.invocations_table.wait_for_visible()
return invocations.invocations_table_rows.all()
def tool_open(self, tool_id, outer=False):
if outer:
tool_link = self.components.tool_panel.outer_tool_link(tool_id=tool_id)
else:
tool_link = self.components.tool_panel.tool_link(tool_id=tool_id)
tool_element = tool_link.wait_for_present()
self.driver.execute_script("arguments[0].scrollIntoView(true);", tool_element)
tool_link.wait_for_and_click()
def create_page_and_edit(self, name=None, slug=None, content_format=None, screenshot_name=None):
name = self.create_page(name=name, slug=slug, content_format=content_format, screenshot_name=screenshot_name)
self.click_grid_popup_option(name, "Edit content")
self.components.pages.editor.wym_iframe.wait_for_visible()
return name
def create_page(self, name=None, slug=None, content_format=None, screenshot_name=None):
self.components.pages.create.wait_for_and_click()
name = name or self._get_random_name(prefix="page")
        slug = slug or self._get_random_name(prefix="pageslug")
content_format = content_format or "HTML"
self.tool_set_value("title", name)
self.tool_set_value("slug", slug)
self.tool_set_value("content_format", content_format, expected_type="select")
self.screenshot_if(screenshot_name)
# Sometimes 'submit' button not yet hooked up?
self.sleep_for(self.wait_types.UX_RENDER)
self.components.pages.submit.wait_for_and_click()
return name
def tool_parameter_div(self, expanded_parameter_id):
return self.components.tool_form.parameter_div(parameter=expanded_parameter_id).wait_for_clickable()
def tool_parameter_edit_rules(self):
rules_div_element = self.tool_parameter_div("rules")
edit_button_element = rules_div_element.find_element_by_css_selector("i.fa-edit")
edit_button_element.click()
def tool_set_value(self, expanded_parameter_id, value, expected_type=None):
div_element = self.tool_parameter_div(expanded_parameter_id)
assert div_element
if expected_type in ["select", "data", "data_collection"]:
div_selector = f"div.ui-form-element[id$='form-element-{expanded_parameter_id}']"
self.select2_set_value(div_selector, value)
else:
input_element = div_element.find_element_by_css_selector("input")
# Clear default value
input_element.clear()
input_element.send_keys(value)
def tool_form_generate_tour(self):
self.components.tool_form.options.wait_for_and_click()
self.components.tool_form.generate_tour.wait_for_and_click()
def tool_form_execute(self):
self.components.tool_form.execute.wait_for_and_click()
def click_masthead_user(self):
self.components.masthead.user.wait_for_and_click()
def click_masthead_shared_data(self):
self.components.masthead.shared_data.wait_for_and_click()
def click_masthead_workflow(self):
self.components.masthead.workflow.wait_for_and_click()
def click_button_new_workflow(self):
self.wait_for_and_click(self.navigation.workflows.selectors.new_button)
def wait_for_sizzle_selector_clickable(self, selector):
element = self._wait_on(
sizzle.sizzle_selector_clickable(selector),
f"sizzle/jQuery selector [{selector}] to become clickable",
)
return element
@retry_during_transitions
def click_history_options(self):
if self.is_beta_history():
component = self.components.history_panel.options_button_icon_beta
else:
component = self.components.history_panel.options_button_icon
component.wait_for_and_click()
def click_history_option_export_to_file(self):
if self.is_beta_history():
self.use_bootstrap_dropdown(option="export to file", menu="history options")
else:
self.click_history_options()
self.components.history_panel.options_show_export_history_to_file.wait_for_and_click()
def click_history_option_sharing(self):
if self.is_beta_history():
self.use_bootstrap_dropdown(option="share or publish", menu="history options")
else:
self.click_history_option("Share or Publish")
def click_history_option(self, option_label_or_component):
# Open menu
self.click_history_options()
if isinstance(option_label_or_component, str):
option_label = option_label_or_component
# Click labeled option
self.wait_for_visible(self.navigation.history_panel.options_menu)
menu_item_sizzle_selector = self.navigation.history_panel.options_menu_item(
option_label=option_label
).selector
menu_selection_element = self.wait_for_sizzle_selector_clickable(menu_item_sizzle_selector)
menu_selection_element.click()
else:
option_component = option_label_or_component
option_component.wait_for_and_click()
def use_beta_history(self):
if not self.is_beta_history():
self.click_history_option(self.components.history_panel.options_use_beta_history)
self.components.history_panel.beta.wait_for_present()
def is_beta_history(self):
old_panel = self.components.history_panel.beta.is_absent
new_panel = self.components.history_panel.new_history_button.is_absent
if old_panel and new_panel:
# both absent, let page render a bit...
self.sleep_for(self.wait_types.UX_RENDER)
else:
return new_panel
old_panel = self.components.history_panel.beta.is_absent
return not old_panel
    # Avoids relying on problematic IDs and classes in the markup.
def beta_history_element(self, attribute_value, attribute_name="data-description", scope=".history-index"):
return self.components._.by_attribute(name=attribute_name, value=attribute_value, scope=scope)
    # Join the attrs into CSS attribute selectors and append them to the base content_item selector.
def content_item_by_attributes(self, **attrs):
suffix_list = [f'[data-{k}="{v}"]' for (k, v) in attrs.items()]
suffix = "".join(suffix_list)
return self.components.history_panel.content_item.selector(suffix=suffix)
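    # For example, content_item_by_attributes(hid=3, state="ok") appends the suffix
    # '[data-hid="3"][data-state="ok"]' to the history panel content_item selector.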
def history_click_create_new(self):
if not self.is_beta_history():
self.components.history_panel.new_history_button.wait_for_and_click()
else:
option = self.beta_history_element("create new history")
option.wait_for_and_click()
def history_click_editor_save(self):
option = self.beta_history_element("editor save button")
option.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
def history_panel_click_copy_elements(self):
if self.is_beta_history():
self.use_bootstrap_dropdown(option="copy datasets", menu="history action menu")
else:
self.click_history_option("Copy Datasets")
    def use_bootstrap_dropdown(self, option=None, menu=None):
        """Use a bootstrap dropdown identified by data-description attributes."""
        if option is None:
            raise TypeError("option is required")
        if menu is None:
            raise TypeError("menu is required")
toggle = self.beta_history_element(menu).descendant("button")
self.wait_for_and_click(toggle)
return self.beta_history_element(option).wait_for_and_click()
@retry_during_transitions
def histories_click_advanced_search(self):
search_selector = "#standard-search .advanced-search-toggle"
self.wait_for_and_click_selector(search_selector)
@retry_during_transitions
def histories_get_history_names(self):
self.sleep_for(self.wait_types.UX_RENDER)
names = []
grid = self.wait_for_selector("#grid-table-body")
for row in grid.find_elements_by_tag_name("tr"):
td = row.find_elements_by_tag_name("td")
name = td[1].text if td[0].text == "" else td[0].text
if name != "No items" and not name.startswith("No matching entries found"):
names.append(name)
return names
@edit_details
def history_panel_add_tags(self, tags):
tag_icon = self.components.history_panel.tag_icon
tag_area = self.components.history_panel.tag_area
tag_area_input = self.components.history_panel.tag_area_input
# if the tag editor is not present but the tag_icon is, then click it
if not tag_icon.is_absent and (tag_area.is_absent or not tag_area.is_displayed):
tag_icon.wait_for_and_click()
input_element = tag_area_input.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
for tag in tags:
input_element.send_keys(tag)
self.send_enter(input_element)
self.sleep_for(self.wait_types.UX_RENDER)
@edit_details
def history_panel_rename(self, new_name):
editable_text_input_element = self.history_panel_name_input()
if self.is_beta_history():
editable_text_input_element.clear()
editable_text_input_element.send_keys(new_name)
self.send_enter(editable_text_input_element)
return editable_text_input_element
def history_panel_name_input(self):
if not self.is_beta_history():
editable_text_input_element = self.history_panel_click_to_rename()
history_panel = self.components.history_panel
edit = history_panel.name_edit_input
editable_text_input_element = edit.wait_for_visible()
return editable_text_input_element
def history_panel_click_to_rename(self):
history_panel = self.components.history_panel
name = history_panel.name
edit = history_panel.name_edit_input
name.wait_for_and_click()
return edit.wait_for_visible()
def history_panel_refresh_click(self):
self.wait_for_and_click(self.navigation.history_panel.selectors.refresh_button)
def history_panel_multi_operations_show(self):
return self.wait_for_and_click(self.navigation.history_panel.multi_operations.selectors.show_button)
def history_panel_muli_operation_select_hid(self, hid):
item_selector = self.history_panel_item_selector(hid, wait=True)
operation_radio_selector = f"{item_selector} .selector"
self.wait_for_and_click_selector(operation_radio_selector)
def history_panel_multi_operation_action_click(self, action):
# Maybe isn't needed?
# self.sleep_for(WAIT_TYPES.UX_RENDER)
self.wait_for_and_click(self.navigation.history_panel.multi_operations.selectors.action_button)
@retry_during_transitions
def _click_action_in_menu():
menu_element = self.wait_for_visible(self.navigation.history_panel.multi_operations.selectors.action_menu)
menu_element.find_element_by_link_text(action.text).click()
_click_action_in_menu()
def open_history_multi_view(self):
if self.is_beta_history():
self.components.history_panel.histories_operation_menu.wait_for_and_click()
self.components.history_panel.multi_view_button_beta.wait_for_and_click()
else:
self.components.history_panel.multi_view_button.wait_for_and_click()
def history_panel_show_structure(self):
if self.is_beta_history():
self.use_bootstrap_dropdown(option="show structure", menu="history options")
else:
self.click_history_option(self.components.history_panel.options_show_history_structure)
def history_multi_view_display_collection_contents(self, collection_hid, collection_type="list"):
self.open_history_multi_view()
selector = self.history_panel_wait_for_hid_state(collection_hid, "ok", multi_history_panel=True)
self.click(selector)
next_level_element_selector = selector
for _ in range(len(collection_type.split(":")) - 1):
next_level_element_selector = next_level_element_selector.descendant(".dataset-collection-element")
self.wait_for_and_click(next_level_element_selector)
dataset_selector = next_level_element_selector.descendant(".dataset")
self.wait_for_and_click(dataset_selector)
def history_panel_item_view_dataset_details(self, hid):
if not self.is_beta_history():
self.history_panel_ensure_showing_item_details(hid)
self.hda_click_details(hid)
self.components.dataset_details._.wait_for_visible()
else:
item = self.history_panel_item_component(hid=hid)
item.dataset_operations_dropdown.wait_for_and_click()
item.info_button.wait_for_and_click()
self.components.dataset_details._.wait_for_visible()
def history_panel_item_click_visualization_menu(self, hid):
viz_button_selector = f"{self.history_panel_item_selector(hid)} .visualizations-dropdown"
self.wait_for_and_click_selector(viz_button_selector)
self.wait_for_selector_visible(f"{viz_button_selector} .dropdown-menu")
def history_panel_item_available_visualizations_elements(self, hid):
# Precondition: viz menu has been opened with history_panel_item_click_visualization_menu
viz_menu_selectors = f"{self.history_panel_item_selector(hid)} a.visualization-link"
return self.driver.find_elements_by_css_selector(viz_menu_selectors)
def history_panel_item_get_tags(self, hid):
item_component = self.history_panel_item_component(hid=hid)
item_component.wait_for_visible()
return [e.text for e in item_component.alltags.all()]
def history_panel_item_available_visualizations(self, hid):
# Precondition: viz menu has been opened with history_panel_item_click_visualization_menu
return [e.text for e in self.history_panel_item_available_visualizations_elements(hid)]
def history_panel_item_click_visualization(self, hid, visualization_name):
# Precondition: viz menu has been opened with history_panel_item_click_visualization_menu
elements = self.history_panel_item_available_visualizations_elements(hid)
for element in elements:
if element.text == visualization_name:
element.click()
return element
raise ValueError(f"No visualization [{visualization_name}] found.")
def history_panel_item_selector(self, hid, wait=False):
current_history_id = self.current_history_id()
contents = self.api_get(f"histories/{current_history_id}/contents")
try:
history_item = [d for d in contents if d["hid"] == hid][0]
except IndexError:
raise Exception(f"Could not find history item with hid [{hid}] in contents [{contents}]")
        history_item_selector = f"#{history_item['history_content_type']}-{history_item['id']}"
if wait:
self.wait_for_selector_visible(history_item_selector)
return history_item_selector
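    # The selector returned by history_panel_item_selector has the form
    # "#<history_content_type>-<encoded id>", e.g. "#dataset-abc123" (the id here is a
    # hypothetical placeholder for the encoded API id).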
def modal_body_selector(self):
return ".modal-body"
def history_panel_item_body_component(self, hid, wait=False):
details_component = self.history_panel_item_component(hid=hid).details
if wait:
details_component.wait_for_visible()
return details_component
def hda_click_primary_action_button(self, hid: int, button_key: str):
self.history_panel_ensure_showing_item_details(hid)
item_component = self.history_panel_item_component(hid=hid)
button_component = item_component[f"{button_key}_button"]
button_component.wait_for_and_click()
def hda_click_details(self, hid: int):
self.hda_click_primary_action_button(hid, "info")
def history_panel_click_item_title(self, hid, **kwds):
item_component = self.history_panel_item_component(hid=hid)
details_component = item_component.details
details_displayed = not details_component.is_absent and details_component.is_displayed
item_component.title.wait_for_and_click()
if kwds.get("wait", False):
if details_displayed:
details_component.wait_for_absent_or_hidden()
else:
details_component.wait_for_visible()
return item_component
def history_panel_ensure_showing_item_details(self, hid):
if not self.history_panel_item_showing_details(hid):
self.history_panel_click_item_title(hid=hid, wait=True)
def history_panel_item_showing_details(self, hid):
item_component = self.history_panel_item_component(hid=hid)
item_component.wait_for_present()
if item_component.details.is_absent:
return False
return item_component.details.is_displayed
def collection_builder_set_name(self, name):
name_element = self.wait_for_selector_visible("input.collection-name")
name_element.send_keys(name)
def collection_builder_hide_originals(self):
self.wait_for_and_click_selector("input.hide-originals")
def collection_builder_create(self):
self.wait_for_and_click_selector("button.create-collection")
def collection_builder_clear_filters(self):
self.wait_for_and_click_selector("a.clear-filters-link")
def collection_builder_click_paired_item(self, forward_or_reverse, item):
assert forward_or_reverse in ["forward", "reverse"]
forward_column = self.wait_for_selector_visible(f".{forward_or_reverse}-column .column-datasets")
        first_dataset_forward = forward_column.find_elements_by_css_selector("li")[item]
        first_dataset_forward.click()
def logout_if_needed(self):
if self.is_logged_in():
self.home()
self.logout()
def logout(self):
self.components.masthead.logged_in_only.wait_for_visible()
self.click_masthead_user()
self.components.masthead.logout.wait_for_and_click()
try:
self.components.masthead.logged_out_only.wait_for_visible()
except self.TimeoutException as e:
            message = "Clicked logout button but waiting for 'Login or Registration' button failed, perhaps the logout button was clicked before the handler was set up?"
raise self.prepend_timeout_message(e, message)
assert (
not self.is_logged_in()
        ), "Clicked to log out and the UI reflects a logout, but the API still thinks a user is logged in."
def run_tour(self, path, skip_steps=None, sleep_on_steps=None, tour_callback=None):
skip_steps = skip_steps or []
sleep_on_steps = sleep_on_steps or {}
if tour_callback is None:
tour_callback = NullTourCallback()
self.home()
with open(path) as f:
tour_dict = yaml.safe_load(f)
steps = tour_dict["steps"]
for i, step in enumerate(steps):
title = step.get("title", None)
skip = False
if skip_steps:
for skip_step in skip_steps:
if title == skip_step:
skip = True
if title in sleep_on_steps:
time.sleep(sleep_on_steps[title])
if skip:
continue
self.run_tour_step(step, i, tour_callback)
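    # Minimal sketch of the tour YAML that run_tour consumes - only the keys read by
    # run_tour/run_tour_step are shown and the selectors are hypothetical; real Galaxy
    # tour files may carry additional keys that are simply ignored here:
    #     steps:
    #       - title: "Welcome"              # matched against skip_steps / sleep_on_steps
    #         element: "#masthead"          # waited on via tour_wait_for_element_present
    #         preclick: ["#some-button"]    # clicked before the step runs
    #         textinsert: "typed into element"
    #         postclick: ["#another-button"]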
def tour_wait_for_clickable_element(self, selector):
timeout = self.timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
wait = self.wait(timeout=timeout)
timeout_message = self._timeout_message(f"sizzle (jQuery) selector [{selector}] to become clickable")
element = wait.until(
sizzle.sizzle_selector_clickable(selector),
timeout_message,
)
return element
def tour_wait_for_element_present(self, selector):
timeout = self.timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
wait = self.wait(timeout=timeout)
timeout_message = self._timeout_message(f"sizzle (jQuery) selector [{selector}] to become present")
element = wait.until(
sizzle.sizzle_presence_of_selector(selector),
timeout_message,
)
return element
def get_tooltip_text(self, element, sleep=0, click_away=True):
tooltip_balloon = self.components._.tooltip_balloon
tooltip_balloon.wait_for_absent()
action_chains = self.action_chains()
action_chains.move_to_element(element)
action_chains.perform()
if sleep > 0:
time.sleep(sleep)
tooltip_element = tooltip_balloon.wait_for_visible()
text = tooltip_element.text
if click_away:
self.click_center()
return text
@retry_during_transitions
def assert_selector_absent_or_hidden_after_transitions(self, selector):
"""Variant of assert_selector_absent_or_hidden that retries during transitions.
        In the parent method, the element is found and then checked for visibility.
        It may disappear from the page in between and cause a StaleElement error.
        For checks where we only care about the final resting state after transitions,
        this method can be used to retry during those transitions.
"""
return self.assert_selector_absent_or_hidden(selector)
@retry_during_transitions
def assert_absent_or_hidden_after_transitions(self, selector):
"""Variant of assert_absent_or_hidden that retries during transitions.
See details above for more information about this.
"""
return self.assert_absent_or_hidden(selector)
def assert_tooltip_text(self, element, expected: Union[str, HasText], sleep: int = 0, click_away: bool = True):
if hasattr(expected, "text"):
expected = cast(HasText, expected).text
text = self.get_tooltip_text(element, sleep=sleep, click_away=click_away)
assert text == expected, f"Tooltip text [{text}] was not expected text [{expected}]."
def assert_tooltip_text_contains(
self, element, expected: Union[str, HasText], sleep: int = 0, click_away: bool = True
):
if hasattr(expected, "text"):
expected = cast(HasText, expected).text
text = self.get_tooltip_text(element, sleep=sleep, click_away=click_away)
assert expected in text, f"Tooltip text [{text}] was not expected text [{expected}]."
def assert_error_message(self, contains=None):
self.components._.messages.error.wait_for_visible()
elements = self.find_elements(self.components._.messages.selectors.error)
return self.assert_message(elements, contains=contains)
def assert_warning_message(self, contains=None):
element = self.components._.messages["warning"]
return self.assert_message(element, contains=contains)
def assert_message(self, element, contains=None):
if contains is not None:
            if isinstance(element, list):
assert any(
contains in el.text for el in element
), f"{contains} was not found in {[el.text for el in element]}"
return
element = element.wait_for_visible()
text = element.text
if contains not in text:
message = f"Text [{contains}] expected inside of [{text}] but not found."
raise AssertionError(message)
def assert_no_error_message(self):
self.components._.messages.error.assert_absent_or_hidden()
def run_tour_step(self, step, step_index, tour_callback):
preclick = step.get("preclick", [])
for preclick_selector in preclick:
print(f"(Pre)Clicking {preclick_selector}")
self._tour_wait_for_and_click_element(preclick_selector)
element_str = step.get("element", None)
if element_str is not None:
print(f"Waiting for element {element_str}")
element = self.tour_wait_for_element_present(element_str)
assert element is not None
textinsert = step.get("textinsert", None)
if textinsert is not None:
element.send_keys(textinsert)
tour_callback.handle_step(step, step_index)
postclick = step.get("postclick", [])
for postclick_selector in postclick:
print(f"(Post)Clicking {postclick_selector}")
self._tour_wait_for_and_click_element(postclick_selector)
@retry_during_transitions
def _tour_wait_for_and_click_element(self, selector):
element = self.tour_wait_for_clickable_element(selector)
element.click()
@retry_during_transitions
def wait_for_and_click_selector(self, selector):
element = self.wait_for_selector_clickable(selector)
element.click()
return element
@retry_during_transitions
def wait_for_and_click(self, selector_template):
element = self.wait_for_clickable(selector_template)
element.click()
return element
def set_history_annotation(self, annotation, clear_text=False):
history_panel = self.components.history_panel
if self.is_beta_history():
toggle = self.beta_history_element("editor toggle")
toggle.wait_for_and_click()
annotation_input = self.beta_history_element("annotation input").wait_for_visible()
if clear_text:
annotation_input.clear()
annotation_input.send_keys(annotation)
self.history_click_editor_save()
else:
self.ensure_history_annotation_area_displayed()
editable = history_panel.annotation_editable_text
edit = history_panel.annotation_edit
editable.wait_for_and_click()
edit_el = edit.wait_for_and_click()
if clear_text:
# previously this was just edit_el.clear() but
# .clear() doesn't work with beta history panel
action_chains = self.action_chains()
for _ in range(40):
action_chains.send_keys(Keys.BACKSPACE)
action_chains.perform()
edit_el.send_keys(annotation)
history_panel.annotation_done.wait_for_and_click()
def ensure_history_annotation_area_displayed(self):
annotation_area = self.components.history_panel.annotation_area
annotation_icon = self.components.history_panel.annotation_icon
if annotation_area.is_absent or not annotation_area.is_displayed:
annotation_icon.wait_for_and_click()
def select2_set_value(self, container_selector_or_elem, value, with_click=True, clear_value=False):
        # There are two hacky ways to select things from the select2 widget -
        # with_click=True: simulate a mouse click once the suggestion list
        # contains only the selected value.
        # with_click=False: press enter on the selection (not sure why).
        # with_click seems to work in all situations - the enter method doesn't
        # seem to work with the tool form for some reason.
if hasattr(container_selector_or_elem, "selector"):
container_selector_or_elem = container_selector_or_elem.selector
if not hasattr(container_selector_or_elem, "find_element_by_css_selector"):
container_elem = self.wait_for_selector(container_selector_or_elem)
else:
container_elem = container_selector_or_elem
text_element = container_elem.find_element_by_css_selector("input[type='text']")
if clear_value:
self.send_backspace(text_element)
self.send_backspace(text_element)
text_element.send_keys(value)
# Wait for select2 options to load and then click to add this one.
drop_elem = self.wait_for_selector_visible("#select2-drop")
# Sleep seems to be needed - at least for send_enter.
time.sleep(0.5)
if not with_click:
            # Press enter to select the highlighted suggestion.
self.send_enter(text_element)
else:
select_elem = drop_elem.find_elements_by_css_selector(".select2-result-label")[0]
action_chains = self.action_chains()
action_chains.move_to_element(select_elem).click().perform()
self.wait_for_selector_absent_or_hidden("#select2-drop")
def snapshot(self, description):
"""Test case subclass overrides this to provide detailed logging."""
def open_history_editor(self, scope=".history-index"):
if self.is_beta_history():
panel = self.components.history_panel.editor.selector(scope=scope)
if panel.name_input.is_absent:
toggle = panel.toggle
toggle.wait_for_and_click()
editor = panel.form
editor.wait_for_present()
def close_history_editor(self, scope=".history-index"):
if self.is_beta_history():
toggle = self.components.history_panel.edit_toggle
toggle.wait_for_and_click()
editor = self.components.history_panel.editor.selector(scope=scope)
self.assert_absent_or_hidden(editor)
def share_ensure_by_user_available(self, sharing_component):
collapse = sharing_component.share_with_collapse
collapse.wait_for_visible()
if collapse.has_class("collapsed"):
collapse.wait_for_and_click()
sharing_component.share_with_multiselect.wait_for_visible()
def share_unshare_with_user(self, sharing_component, email):
self.share_ensure_by_user_available(sharing_component)
unshare_user_button = self.components.histories.sharing.unshare_with_user_button(email=email)
unshare_user_button.wait_for_and_click()
self.components.histories.sharing.submit_sharing_with.wait_for_and_click()
unshare_user_button.wait_for_absent_or_hidden()
def share_with_user(
self,
sharing_component,
user_id=None,
user_email=None,
screenshot_before_submit=None,
screenshot_after_submit=None,
assert_valid=False,
):
self.share_ensure_by_user_available(sharing_component)
multiselect = sharing_component.share_with_multiselect.wait_for_and_click()
sharing_component.share_with_input.wait_for_and_send_keys(user_id or user_email)
self.send_enter(multiselect)
self.screenshot_if(screenshot_before_submit)
sharing_component.submit_sharing_with.wait_for_and_click()
if assert_valid:
self.assert_no_error_message()
xpath = f'//span[contains(text(), "{user_email}")]'
self.wait_for_xpath_visible(xpath)
self.screenshot_if(screenshot_after_submit)
class NotLoggedInException(TimeoutException):
def __init__(self, timeout_exception, user_info, dom_message):
        template = "Waiting for the UI to reflect a logged-in user but it did not occur. API indicates no user is currently logged in. %s API response was [%s]. %s"
msg = template % (dom_message, user_info, timeout_exception.msg)
super().__init__(msg=msg, screen=timeout_exception.screen, stacktrace=timeout_exception.stacktrace)
|
"""A mixing that extends a HasDriver class with Galaxy-specific utilities.
Implementer must provide a self.build_url method to target Galaxy.
"""
import collections
import contextlib
import random
import string
import time
from abc import abstractmethod
from functools import (
partial,
wraps,
)
from typing import (
Any,
cast,
Dict,
Optional,
Union,
)
import requests
import yaml
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.webdriver import WebDriver
from galaxy.util import DEFAULT_SOCKET_TIMEOUT
from . import sizzle
from .components import (
Component,
HasText,
)
from .data import load_root_component
from .has_driver import (
exception_indicates_click_intercepted,
exception_indicates_not_clickable,
exception_indicates_stale_element,
HasDriver,
TimeoutException,
)
from .smart_components import SmartComponent
# Test case data
DEFAULT_PASSWORD = "123456"
RETRY_DURING_TRANSITIONS_SLEEP_DEFAULT = 0.1
RETRY_DURING_TRANSITIONS_ATTEMPTS_DEFAULT = 10
GALAXY_MAIN_FRAME_ID = "galaxy_main"
WaitType = collections.namedtuple("WaitType", ["name", "default_length"])
# Default wait times should make sense for a development server under low
# load. Wait times for production servers can be scaled up with a multiplier.
class WAIT_TYPES:
# Rendering a form and registering callbacks, etc...
UX_RENDER = WaitType("ux_render", 1)
# Fade in, fade out, etc...
UX_TRANSITION = WaitType("ux_transition", 5)
# Toastr popup and dismissal, etc...
UX_POPUP = WaitType("ux_popup", 15)
# Creating a new history and loading it into the panel.
DATABASE_OPERATION = WaitType("database_operation", 10)
# Wait time for jobs to complete in default environment.
JOB_COMPLETION = WaitType("job_completion", 30)
# Wait time for a GIE to spawn.
GIE_SPAWN = WaitType("gie_spawn", 30)
# Wait time for toolshed search
SHED_SEARCH = WaitType("shed_search", 30)
# Wait time for repository installation
REPO_INSTALL = WaitType("repo_install", 60)
# Beta history Polling Duration
HISTORY_POLL = WaitType("history_poll", 3)
# Choose a moderate wait type for operations that don't specify a type.
DEFAULT_WAIT_TYPE = WAIT_TYPES.DATABASE_OPERATION
class NullTourCallback:
def handle_step(self, step, step_index):
pass
def exception_seems_to_indicate_transition(e):
"""True if exception seems to indicate the page state is transitioning.
Galaxy features many different transition effects that change the page state over time.
These transitions make it slightly more difficult to test Galaxy because atomic input
actions take an indeterminate amount of time to be reflected on the screen. This method
    takes a Selenium exception and tries to infer if such a transition could be the root
cause of the exception. The methods that follow use it to allow retrying actions during
transitions.
    Currently the kinds of exceptions that we say may indicate a transition are
    StaleElement exceptions (a DOM element grabbed at one step is no longer available),
    "not clickable" exceptions, and "click intercepted" exceptions (so perhaps a popup
    modal is blocking a click).
"""
return (
exception_indicates_stale_element(e)
or exception_indicates_not_clickable(e)
or exception_indicates_click_intercepted(e)
)
def retry_call_during_transitions(
f,
attempts=RETRY_DURING_TRANSITIONS_ATTEMPTS_DEFAULT,
sleep=RETRY_DURING_TRANSITIONS_SLEEP_DEFAULT,
exception_check=exception_seems_to_indicate_transition,
):
previous_attempts = 0
while True:
try:
return f()
except Exception as e:
if previous_attempts > attempts:
raise
if not exception_check(e):
raise
time.sleep(sleep)
previous_attempts += 1
def retry_during_transitions(
f,
attempts=RETRY_DURING_TRANSITIONS_ATTEMPTS_DEFAULT,
sleep=RETRY_DURING_TRANSITIONS_SLEEP_DEFAULT,
exception_check=exception_seems_to_indicate_transition,
):
@wraps(f)
def _retry(*args, **kwds):
return retry_call_during_transitions(
partial(f, *args, **kwds), attempts=attempts, sleep=sleep, exception_check=exception_check
)
return _retry
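# Illustrative sketch only (not part of the original module): a helper decorated with
# retry_during_transitions is retried automatically when a click races a page
# transition. The ``page`` argument and selector below are hypothetical stand-ins for a
# NavigatesGalaxy instance and a real CSS selector.
@retry_during_transitions
def _example_click_through_transition(page, selector=".example-button"):
    """Click an element, retrying if a transition makes it momentarily stale."""
    element = page.wait_for_selector_clickable(selector)
    element.click()
    return element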
def edit_details(f, scope=".history-index"):
"""Open the editor, run the edits, hit the save button"""
@wraps(f)
def func_wrapper(self, *args, **kwds):
# open editor
if self.is_beta_history():
self.open_history_editor(scope=scope)
# run edits
result = f(self, *args, **kwds)
# save edits
if self.is_beta_history():
self.history_click_editor_save()
return result
return func_wrapper
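# Note: edit_details is applied as a bare decorator to NavigatesGalaxy methods below
# (e.g. history_panel_add_tags and history_panel_rename); on the beta history panel it
# opens the editor before the wrapped edit runs and clicks save afterwards.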
class NavigatesGalaxy(HasDriver):
    """Class with helper methods for driving components of the Galaxy interface.
In most cases, methods for interacting with Galaxy components that appear in
multiple tests or applications should be refactored into this class for now.
    Keep in mind that this class is also used outside the context of ``TestCase``s,
    so some methods more explicitly related to test data or assertion checking
    may make more sense in SeleniumTestCase, for instance.
Some day this class will likely be split up into smaller mixins for particular
components of Galaxy, but until that day the best practice is to prefix methods
for driving or querying the interface with the name of the component or page
the method operates on. These serve as pseudo-namespaces until we decompose this
class. For instance, the method for clicking an option in the workflow editor is
workflow_editor_click_option instead of click_workflow_editor_option.
"""
timeout_multiplier: float
driver: WebDriver
@abstractmethod
def build_url(self, url: str, for_selenium: bool = True) -> str:
"""Build URL to the target Galaxy."""
default_password = DEFAULT_PASSWORD
wait_types = WAIT_TYPES
# set to True to reload each invocation (good for interactive test building)
_interactive_components: bool = False
_root_component: Component = load_root_component()
def get(self, url: str = ""):
"""Expand supplied relative URL and navigate to page using Selenium driver."""
full_url = self.build_url(url)
return self.driver.get(full_url)
@property
def navigation(self) -> Component:
if self._interactive_components:
return load_root_component()
else:
return self._root_component
@property
def components(self) -> SmartComponent:
"""Fetch root component describing the Galaxy DOM."""
return SmartComponent(self.navigation, self)
    def wait_length(self, wait_type: WaitType) -> float:
        """Return the wait time specified by wait_type after applying `timeout_multiplier`.
        `timeout_multiplier` is used in production CI tests to expand waits uniformly
        across test suites and so reduce transient failures.
"""
return wait_type.default_length * self.timeout_multiplier
def sleep_for(self, wait_type: WaitType) -> None:
"""Sleep on the Python client side for the specified wait_type.
This method uses `wait_length` to apply any `timeout_multiplier`.
"""
self.sleep_for_seconds(self.wait_length(wait_type))
def sleep_for_seconds(self, duration: float) -> None:
"""Sleep in the local thread for specified number of seconds.
Ideally, we would be sleeping on the Selenium server instead of in the local client
(e.g. test) thread.
"""
time.sleep(duration)
def timeout_for(self, wait_type: WaitType = DEFAULT_WAIT_TYPE, **kwd) -> float:
return self.wait_length(wait_type)
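    # For example, with timeout_multiplier = 2.0, timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
    # returns 30 * 2.0 == 60.0 seconds; sleep_for scales its client-side sleeps the same way.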
def home(self) -> None:
"""Return to root Galaxy page and wait for some basic widgets to appear."""
self.get()
self.components.masthead._.wait_for_visible()
def go_to_trs_search(self) -> None:
self.driver.get(self.build_url("workflows/trs_search"))
self.components.masthead._.wait_for_visible()
def go_to_trs_by_id(self) -> None:
self.driver.get(self.build_url("workflows/trs_import"))
self.components.masthead._.wait_for_visible()
def go_to_workflow_sharing(self, workflow_id: str) -> None:
self.driver.get(self.build_url(f"workflows/sharing?id={workflow_id}"))
def go_to_workflow_export(self, workflow_id: str) -> None:
self.driver.get(self.build_url(f"workflow/export?id={workflow_id}"))
def go_to_history_sharing(self, history_id: str) -> None:
self.driver.get(self.build_url(f"histories/sharing?id={history_id}"))
def switch_to_main_panel(self):
self.driver.switch_to.frame(GALAXY_MAIN_FRAME_ID)
@contextlib.contextmanager
    def local_storage(self, key: str, value: Union[float, str]):
        """Context manager that sets a localStorage key for the scope of the supplied block."""
self.driver.execute_script(f"""window.localStorage.setItem("{key}", {value});""")
try:
yield
finally:
self.driver.execute_script(f"""window.localStorage.removeItem("{key}");""")
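    # Usage sketch for local_storage (hypothetical key): the value is interpolated into
    # JavaScript verbatim, so numbers work directly while strings need embedded quotes:
    #     with self.local_storage("example-flag", 1):
    #         ...  # localStorage["example-flag"] is set only for the duration of the block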
@contextlib.contextmanager
    def main_panel(self):
        """Context manager to switch into Galaxy's main frame for the scope of the block."""
try:
self.switch_to_main_panel()
yield
finally:
self.driver.switch_to.default_content()
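# Usage sketch for main_panel (selector below is hypothetical): wraps interactions
# with content rendered inside Galaxy's main iframe and always switches the driver
# back to the default content, even on error.
#
#   with self.main_panel():
#       self.wait_for_selector_visible("#some-element-in-main-frame")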
def api_get(self, endpoint, data=None, raw=False):
data = data or {}
full_url = self.build_url(f"api/{endpoint}", for_selenium=False)
response = requests.get(
full_url, data=data, cookies=self.selenium_to_requests_cookies(), timeout=DEFAULT_SOCKET_TIMEOUT
)
if raw:
return response
else:
return response.json()
def api_post(self, endpoint, data=None):
data = data or {}
full_url = self.build_url(f"api/{endpoint}", for_selenium=False)
response = requests.post(
full_url, data=data, cookies=self.selenium_to_requests_cookies(), timeout=DEFAULT_SOCKET_TIMEOUT
)
return response.json()
def api_delete(self, endpoint, raw=False):
full_url = self.build_url(f"api/{endpoint}", for_selenium=False)
response = requests.delete(
full_url, cookies=self.selenium_to_requests_cookies(), timeout=DEFAULT_SOCKET_TIMEOUT
)
if raw:
return response
else:
return response.json()
def get_galaxy_session(self):
for cookie in self.driver.get_cookies():
if cookie["name"] == "galaxysession":
return cookie["value"]
def selenium_to_requests_cookies(self):
return {"galaxysession": self.get_galaxy_session()}
def history_panel_name_element(self):
if self.is_beta_history():
component = self.beta_history_element("name display")
else:
component = self.components.history_panel.name
return component.wait_for_present()
@retry_during_transitions
def history_panel_name(self):
return self.history_panel_name_element().text
def history_panel_collection_rename(self, hid: int, new_name: str, assert_old_name: Optional[str] = None):
collection_view = self.history_panel_expand_collection(hid)
if self.is_beta_history():
self.__beta_rename_collection(new_name)
self.sleep_for(WAIT_TYPES.UX_RENDER)
else:
title_element = collection_view.title.wait_for_visible()
if assert_old_name is not None:
assert title_element.text == assert_old_name
title_element.click()
title_rename_element = collection_view.title_input.wait_for_visible()
title_rename_element.send_keys(new_name)
self.send_enter(title_rename_element)
def history_panel_expand_collection(self, collection_hid: int) -> SmartComponent:
self.history_panel_click_item_title(collection_hid)
collection_view = self.components.history_panel.collection_view
collection_view._.wait_for_present()
return collection_view
@edit_details
def __beta_rename_collection(self, new_name):
title_element = self.beta_history_element("name input").wait_for_clickable()
title_element.clear()
title_element.send_keys(new_name)
def history_panel_collection_name_element(self):
if self.is_beta_history():
title_element = self.beta_history_element("collection name display").wait_for_present()
else:
collection_view = self.components.history_panel.collection_view
collection_view._.wait_for_present()
title_element = collection_view.title.wait_for_visible()
return title_element
def make_accessible_and_publishable(self):
self.components.histories.sharing.make_accessible.wait_for_and_click()
self.components.histories.sharing.make_publishable.wait_for_and_click()
def history_contents(self, history_id=None, view="summary", datasets_only=True):
if history_id is None:
history_id = self.current_history_id()
histories = self.api_get("histories?keys=id")
if history_id not in [h["id"] for h in histories]:
return {}
if datasets_only:
endpoint = f"histories/{history_id}/contents?view={view}"
else:
endpoint = f"histories/{history_id}?view={view}"
return self.api_get(endpoint)
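# Usage sketch for history_contents: with the default arguments it returns a list of
# dataset entries for the current history, e.g.:
#
#   contents = self.history_contents()
#   hids = [c["hid"] for c in contents]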
def current_history(self):
full_url = self.build_url("history/current_history_json", for_selenium=False)
response = requests.get(full_url, cookies=self.selenium_to_requests_cookies(), timeout=DEFAULT_SOCKET_TIMEOUT)
return response.json()
def current_history_id(self):
return self.current_history()["id"]
def latest_history_item(self):
history_contents = self.history_contents()
assert len(history_contents) > 0
return history_contents[-1]
def wait_for_history(self, assert_ok=True):
def history_becomes_terminal(driver):
current_history_id = self.current_history_id()
state = self.api_get(f"histories/{current_history_id}")["state"]
if state not in ["running", "queued", "new", "ready"]:
return state
else:
return None
timeout = self.timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
final_state = self.wait(timeout=timeout).until(history_becomes_terminal)
if assert_ok:
assert final_state == "ok", final_state
return final_state
def history_panel_create_new_with_name(self, name):
self.history_panel_create_new()
self.history_panel_rename(name)
def history_panel_create_new(self):
"""Click create new and pause a bit for the history to begin to refresh."""
self.history_click_create_new()
self.sleep_for(WAIT_TYPES.UX_RENDER)
def history_panel_wait_for_hid_ok(self, hid, allowed_force_refreshes=0):
return self.history_panel_wait_for_hid_state(hid, "ok", allowed_force_refreshes=allowed_force_refreshes)
def history_panel_wait_for_hid_deferred(self, hid, allowed_force_refreshes=0):
return self.history_panel_wait_for_hid_state(hid, "deferred", allowed_force_refreshes=allowed_force_refreshes)
def history_panel_item_component(self, history_item=None, hid=None, multi_history_panel=False):
if self.is_beta_history():
assert hid
return self.content_item_by_attributes(hid=hid)
if history_item is None:
assert hid
history_item = self.hid_to_history_item(hid)
item = self.components.history_panel.item.selector
if multi_history_panel:
item = self.components.multi_history_view.item
return item(history_content_type=history_item["history_content_type"], id=history_item["id"])
def wait_for_history_to_have_hid(self, history_id, hid):
def get_hids():
contents = self.api_get(f"histories/{history_id}/contents")
return [d["hid"] for d in contents]
def history_has_hid(driver):
hids = get_hids()
return any(h == hid for h in hids)
timeout = self.timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
try:
self.wait(timeout).until(history_has_hid)
except self.TimeoutException as e:
hids = get_hids()
message = f"Timeout waiting for history {history_id} to have hid {hid} - have hids {hids}"
raise self.prepend_timeout_message(e, message)
def history_panel_wait_for_hid_visible(self, hid, allowed_force_refreshes=0, multi_history_panel=False):
current_history_id = self.current_history_id()
self.wait_for_history_to_have_hid(current_history_id, hid)
# TODO: just use HID and stop resolving history_item -or- place history item in DOM.
# I think Mason thought the first was cleaner based on recent changes, but I think duplicated
# HIDs due to conversions and such make using the actual history item ID more robust.
history_item = self.hid_to_history_item(hid, current_history_id=current_history_id)
history_item_selector = self.history_panel_item_component(
history_item, hid=hid, multi_history_panel=multi_history_panel
)
try:
self.history_item_wait_for(history_item_selector, allowed_force_refreshes)
except self.TimeoutException as e:
selector = self.navigation.history_panel.selectors.contents
if self.is_beta_history():
selector = self.navigation.history_panel.selectors.contents_beta
contents_elements = self.find_elements(selector)
div_ids = [f"#{d.get_attribute('id')}" for d in contents_elements]
template = "Failed waiting on history item %d to become visible, visible datasets include [%s]."
message = template % (hid, ",".join(div_ids))
raise self.prepend_timeout_message(e, message)
return history_item_selector
def hid_to_history_item(self, hid, current_history_id=None):
if current_history_id is None:
current_history_id = self.current_history_id()
contents = self.api_get(f"histories/{current_history_id}/contents")
history_item = [d for d in contents if d["hid"] == hid][0]
return history_item
def history_item_wait_for(self, history_item_selector, allowed_force_refreshes):
attempt = 0
while True:
try:
rval = self.wait_for_visible(history_item_selector, wait_type=WAIT_TYPES.JOB_COMPLETION)
break
except self.TimeoutException:
if attempt >= allowed_force_refreshes:
raise
attempt += 1
if not self.is_beta_history():
self.history_panel_refresh_click()
return rval
def history_panel_wait_for_history_loaded(self):
# Verify that the history has been loaded and is empty.
self.wait_for_visible(
self.navigation.history_panel.selectors.empty_message, wait_type=WAIT_TYPES.DATABASE_OPERATION
)
def history_panel_wait_for_hid_hidden(self, hid, multi_history_panel=False):
history_item_selector = self.history_panel_item_component(hid=hid, multi_history_panel=multi_history_panel)
self.wait_for_absent_or_hidden(history_item_selector, wait_type=WAIT_TYPES.JOB_COMPLETION)
return history_item_selector
def history_panel_wait_for_hid_state(self, hid, state, allowed_force_refreshes=0, multi_history_panel=False):
history_item_selector = self.history_panel_wait_for_hid_visible(
hid, allowed_force_refreshes=allowed_force_refreshes, multi_history_panel=multi_history_panel
)
if self.is_beta_history():
history_item_selector_state = self.content_item_by_attributes(hid=hid, state=state)
else:
# history_item_selector_state = history_item_selector.with_class(f"state-{state}")
history_item_selector_state = history_item_selector.with_data("state", state)
try:
self.history_item_wait_for(history_item_selector_state, allowed_force_refreshes)
except self.TimeoutException as e:
history_item = self.wait_for_visible(history_item_selector)
current_state = "UNKNOWN"
classes = history_item.get_attribute("class").split(" ")
for clazz in classes:
if clazz.startswith("state-"):
current_state = clazz[len("state-") :]
template = "Failed waiting on history item %d state to change to [%s] current state [%s]. "
message = template % (hid, state, current_state)
raise self.prepend_timeout_message(e, message)
return history_item_selector_state
def click_grid_popup_option(self, item_name, option_label):
item_button = None
grid = self.components.grids.body.wait_for_visible()
for row in grid.find_elements_by_tag_name("tr"):
name_cell = row.find_elements_by_tag_name("td")[1]
if name_cell.text == item_name:
item_button = name_cell
break
if item_button is None:
raise AssertionError(f"Failed to find item with name [{item_name}]")
popup_menu_button = item_button.find_element_by_css_selector(".dropdown-toggle")
popup_menu_button.click()
popup_option = self.driver.find_element_by_link_text(option_label)
popup_option.click()
def published_grid_search_for(self, search_term=None):
return self._inline_search_for(
self.navigation.grids.free_text_search,
search_term,
)
def get_logged_in_user(self):
return self.api_get("users/current")
def get_api_key(self, force=False):
# If force is false, use the form inputs API and allow the key to be absent.
if not force:
return self.api_get(f"users/{self.get_user_id()}/api_key/inputs")["inputs"][0]["value"]
else:
return self.api_post(f"users/{self.get_user_id()}/api_key")
def get_user_id(self):
user = self.get_logged_in_user()
return user["id"]
def is_logged_in(self):
return "email" in self.get_logged_in_user()
@retry_during_transitions
def _inline_search_for(self, selector, search_term=None, escape_to_clear=False):
# Clear tooltip resulting from clicking on the masthead to get here.
self.clear_tooltips()
search_box = self.wait_for_and_click(selector)
if escape_to_clear:
# The combination of DebouncedInput+b-input doesn't seem to uniformly respect
# .clear() below. We use escape handling a lot - and that does cause the input
# to reset correctly and fire the required reactive events/handlers.
self.send_escape(search_box)
search_box.clear()
if search_term is not None:
search_box.send_keys(search_term)
self.send_enter(search_box)
return search_box
def _get_random_name(self, prefix=None, suffix=None, len=10):
return "{}{}{}".format(
prefix or "",
"".join(random.choice(string.ascii_lowercase + string.digits) for _ in range(len)),
suffix or "",
)
def _get_random_email(self, username=None, domain=None):
username = username or "test"
domain = domain or "test.test"
return self._get_random_name(prefix=username, suffix=f"@{domain}")
# Creates a random password of length len by repeatedly picking a random character
# from the ASCII letters and the digits 0 to 9 and concatenating it to the end of
# the password string until the password has length len.
def _get_random_password(self, len=6):
return "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(len))
def submit_login(self, email, password=None, assert_valid=True, retries=0):
self.components.masthead.register_or_login.wait_for_and_click()
self.sleep_for(WAIT_TYPES.UX_RENDER)
self.fill_login_and_submit(email, password=password)
if assert_valid:
try:
self.wait_for_logged_in()
except NotLoggedInException:
self.snapshot("login-failed")
if retries > 0:
self.submit_login(email, password, assert_valid, retries - 1)
else:
raise
self.snapshot("logged-in")
def fill_login_and_submit(self, email, password=None):
if password is None:
password = self.default_password
login_info = {
"login": email,
"password": password,
}
form = self.wait_for_visible(self.navigation.login.selectors.form)
self.fill(form, login_info)
self.snapshot("logging-in")
self.wait_for_and_click(self.navigation.login.selectors.submit)
self.snapshot("login-submitted")
def register(self, email=None, password=None, username=None, confirm=None, assert_valid=True):
if email is None:
email = self._get_random_email()
if password is None:
password = self.default_password
if confirm is None:
confirm = password
if username is None:
username = email.split("@")[0]
self.home()
self.components.masthead.register_or_login.wait_for_and_click()
self.wait_for_and_click(self.navigation.registration.selectors.toggle)
form = self.wait_for_visible(self.navigation.registration.selectors.form)
self.fill(form, dict(email=email, password=password, username=username, confirm=confirm))
self.wait_for_and_click(self.navigation.registration.selectors.submit)
if assert_valid is False:
self.assert_error_message()
elif assert_valid:
self.wait_for_logged_in()
# The code below was previously needed because a bug prevented the masthead from updating after
# registration. That bug appears to be fixed, so these extra checks could be eliminated to speed
# up tests.
self.home()
self.wait_for_logged_in()
self.click_masthead_user()
# Make sure the user menu was dropped down
user_menu = self.components.masthead.user_menu.wait_for_visible()
try:
username_element = self.components.masthead.username.wait_for_visible()
except self.TimeoutException as e:
menu_items = user_menu.find_elements_by_css_selector("li a")
menu_text = [mi.text for mi in menu_items]
message = f"Failed to find logged in message in menu items {', '.join(menu_text)}"
raise self.prepend_timeout_message(e, message)
text = username_element.text
assert username in text
assert self.get_logged_in_user()["email"] == email
# clicking away no longer closes menu post Masthead -> VueJS
self.click_masthead_user()
def wait_for_logged_in(self):
try:
self.components.masthead.logged_in_only.wait_for_visible()
except self.TimeoutException as e:
ui_logged_out = self.components.masthead.logged_out_only.is_displayed
if ui_logged_out:
dom_message = (
"Element a.loggedout-only is present in DOM, indicating Login or Register button still in masthead."
)
else:
dom_message = "Element a.loggedout-only is *not* present in DOM."
user_info = self.api_get("users/current")
if "username" in user_info:
template = "Failed waiting for masthead to update for login, but user API response indicates [%s] is logged in. This seems to be a bug in Galaxy. %s logged API response was [%s]. "
message = template % (user_info["username"], dom_message, user_info)
raise self.prepend_timeout_message(e, message)
else:
raise NotLoggedInException(e, user_info, dom_message)
def click_center(self):
action_chains = self.action_chains()
center_element = self.driver.find_element_by_css_selector("#center")
action_chains.move_to_element(center_element).click().perform()
def perform_upload(self, test_path, **kwd):
self._perform_upload(test_path=test_path, **kwd)
def perform_upload_of_pasted_content(self, paste_data, **kwd):
self._perform_upload(paste_data=paste_data, **kwd)
def _perform_upload(
self, test_path=None, paste_data=None, ext=None, genome=None, ext_all=None, genome_all=None, deferred=None
):
self.home()
self.upload_start_click()
self.upload_set_footer_extension(ext_all)
self.upload_set_footer_genome(genome_all)
if test_path:
self.upload_queue_local_file(test_path)
else:
assert paste_data is not None
self.upload_paste_data(paste_data)
if ext is not None:
self.wait_for_selector_visible(".upload-extension")
self.select2_set_value(".upload-extension", ext)
if genome is not None:
self.wait_for_selector_visible(".upload-genome")
self.select2_set_value(".upload-genome", genome)
if deferred is not None:
upload = self.components.upload
upload.settings_button(n=0).wait_for_and_click()
upload.settings.wait_for_visible()
setting = upload.setting_deferred.wait_for_visible()
classes = setting.get_attribute("class").split(" ")
if deferred is True and "fa-check-square-o" not in classes:
setting.click()
elif deferred is False and "fa-check-square-o" in classes:
setting.click()
self.upload_start()
self.wait_for_and_click_selector("button#btn-close")
def upload_list(self, test_paths, name="test", ext=None, genome=None, hide_source_items=True):
self._collection_upload_start(test_paths, ext, genome, "List")
if not hide_source_items:
self.collection_builder_hide_originals()
self.collection_builder_set_name(name)
self.collection_builder_create()
def upload_pair(self, test_paths, name="test", ext=None, genome=None, hide_source_items=True):
self._collection_upload_start(test_paths, ext, genome, "Pair")
if not hide_source_items:
self.collection_builder_hide_originals()
self.collection_builder_set_name(name)
self.collection_builder_create()
def upload_paired_list(self, test_paths, name="test", ext=None, genome=None, hide_source_items=True):
self._collection_upload_start(test_paths, ext, genome, "List of Pairs")
if not hide_source_items:
self.collection_builder_hide_originals()
self.collection_builder_clear_filters()
# TODO: generalize and loop these clicks so we don't need the assert
assert len(test_paths) == 2
self.collection_builder_click_paired_item("forward", 0)
self.collection_builder_click_paired_item("reverse", 1)
self.collection_builder_set_name(name)
self.collection_builder_create()
def _collection_upload_start(self, test_paths, ext, genome, collection_type):
# Perform upload of files and open the collection builder for specified
# type.
self.home()
self.upload_start_click()
self.upload_tab_click("collection")
self.upload_set_footer_extension(ext, tab_id="collection")
self.upload_set_footer_genome(genome, tab_id="collection")
self.upload_set_collection_type(collection_type)
for test_path in test_paths:
self.upload_queue_local_file(test_path, tab_id="collection")
self.upload_start(tab_id="collection")
self.upload_build()
def upload_tab_click(self, tab):
self.components.upload.tab(tab=tab).wait_for_and_click()
def upload_start_click(self):
self.components.upload.start.wait_for_and_click()
@retry_during_transitions
def upload_set_footer_extension(self, ext, tab_id="regular"):
if ext is not None:
selector = f"div#{tab_id} .upload-footer-extension"
self.wait_for_selector_visible(selector)
self.select2_set_value(selector, ext)
@retry_during_transitions
def upload_set_footer_genome(self, genome, tab_id="regular"):
if genome is not None:
selector = f"div#{tab_id} .upload-footer-genome"
self.wait_for_selector_visible(selector)
self.select2_set_value(selector, genome)
@retry_during_transitions
def upload_set_collection_type(self, collection_type):
self.wait_for_selector_visible(".upload-footer-collection-type")
self.select2_set_value(".upload-footer-collection-type", collection_type)
def upload_start(self, tab_id="regular"):
self.wait_for_and_click_selector(f"div#{tab_id} button#btn-start")
@retry_during_transitions
def upload_build(self, tab="collection"):
build_selector = f"div#{tab} button#btn-build"
# Pause a bit to let the callback on the build button be registered.
time.sleep(0.5)
# Click the Build button and make sure it disappears.
self.wait_for_and_click_selector(build_selector)
try:
self.wait_for_selector_absent_or_hidden(build_selector)
except TimeoutException:
# Sometimes the callback in the JS hasn't been registered by the
# time the build button is clicked. By the time the timeout has
# expired, it should have been - so retry the click.
self.wait_for_and_click_selector(build_selector)
self.wait_for_selector_absent_or_hidden(build_selector)
def upload_queue_local_file(self, test_path, tab_id="regular"):
self.wait_for_and_click_selector(f"div#{tab_id} button#btn-local")
file_upload = self.wait_for_selector(f'div#{tab_id} input[type="file"]')
file_upload.send_keys(test_path)
def upload_paste_data(self, pasted_content, tab_id="regular"):
tab_locator = f"div#{tab_id}"
self.wait_for_and_click_selector(f"{tab_locator} button#btn-new")
textarea = self.wait_for_selector(f"{tab_locator} .upload-text-content")
textarea.send_keys(pasted_content)
def upload_rule_start(self):
self.upload_start_click()
self.upload_tab_click("rule-based")
def upload_rule_build(self):
self.upload_build(tab="rule-based")
def upload_rule_set_data_type(self, type_description):
upload = self.components.upload
data_type_element = upload.rule_select_data_type.wait_for_visible()
self.select2_set_value(data_type_element, type_description)
def upload_rule_set_input_type(self, input_description):
upload = self.components.upload
input_type_element = upload.rule_select_input_type.wait_for_visible()
self.select2_set_value(input_type_element, input_description)
def upload_rule_set_dataset(self, row=1):
upload = self.components.upload
upload.rule_dataset_selector.wait_for_visible()
upload.rule_dataset_selector_row(rowindex=row).wait_for_and_click()
def rule_builder_set_collection_name(self, name):
rule_builder = self.components.rule_builder
name_element = rule_builder.collection_name_input.wait_for_and_click()
name_element.send_keys(name)
self.sleep_for(WAIT_TYPES.UX_RENDER)
def rule_builder_set_extension(self, extension):
self.select2_set_value(self.navigation.rule_builder.selectors.extension_select, extension)
def rule_builder_filter_count(self, count=1):
rule_builder = self.components.rule_builder
rule_builder.menu_button_filter.wait_for_and_click()
with self.rule_builder_rule_editor("add-filter-count") as editor_element:
filter_input = editor_element.find_element_by_css_selector("input[type='number']")
filter_input.clear()
filter_input.send_keys(f"{count}")
def rule_builder_sort(self, column_label, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
with self.rule_builder_rule_editor("sort") as editor_element:
column_elem = editor_element.find_element_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elem, column_label)
self.screenshot_if(screenshot_name)
def rule_builder_add_regex_groups(self, column_label, group_count, regex, screenshot_name):
rule_builder = self.components.rule_builder
rule_builder.menu_button_column.wait_for_and_click()
with self.rule_builder_rule_editor("add-column-regex") as editor_element:
column_elem = editor_element.find_element_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elem, column_label)
groups_elem = editor_element.find_element_by_css_selector("input[type='radio'][value='groups']")
groups_elem.click()
regex_elem = editor_element.find_element_by_css_selector("input.rule-regular-expression")
regex_elem.clear()
regex_elem.send_keys(regex)
filter_input = editor_element.find_element_by_css_selector("input[type='number']")
filter_input.clear()
filter_input.send_keys(f"{group_count}")
self.screenshot_if(screenshot_name)
def rule_builder_add_regex_replacement(self, column_label, regex, replacement, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_column.wait_for_and_click()
with self.rule_builder_rule_editor("add-column-regex") as editor_element:
column_elem = editor_element.find_element_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elem, column_label)
groups_elem = editor_element.find_element_by_css_selector("input[type='radio'][value='replacement']")
groups_elem.click()
regex_elem = editor_element.find_element_by_css_selector("input.rule-regular-expression")
regex_elem.clear()
regex_elem.send_keys(regex)
filter_input = editor_element.find_element_by_css_selector("input.rule-replacement")
filter_input.clear()
filter_input.send_keys(f"{replacement}")
self.screenshot_if(screenshot_name)
def rule_builder_add_value(self, value, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_column.wait_for_and_click()
with self.rule_builder_rule_editor("add-column-value") as editor_element:
filter_input = editor_element.find_element_by_css_selector("input[type='text']")
filter_input.clear()
filter_input.send_keys(value)
self.screenshot_if(screenshot_name)
def rule_builder_remove_columns(self, column_labels, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
with self.rule_builder_rule_editor("remove-columns") as filter_editor_element:
column_elem = filter_editor_element.find_element_by_css_selector(".rule-column-selector")
for column_label in column_labels:
self.select2_set_value(column_elem, column_label)
self.screenshot_if(screenshot_name)
def rule_builder_concatenate_columns(self, column_label_1, column_label_2, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_column.wait_for_and_click()
with self.rule_builder_rule_editor("add-column-concatenate") as filter_editor_element:
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elems[0], column_label_1)
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elems[1], column_label_2)
self.screenshot_if(screenshot_name)
def rule_builder_split_columns(self, column_labels_1, column_labels_2, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
with self.rule_builder_rule_editor("split-columns") as filter_editor_element:
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
clear = True
for column_label_1 in column_labels_1:
self.select2_set_value(column_elems[0], column_label_1, clear_value=clear)
clear = False
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
clear = True
for column_label_2 in column_labels_2:
self.select2_set_value(column_elems[1], column_label_2, clear_value=clear)
clear = False
self.screenshot_if(screenshot_name)
def rule_builder_swap_columns(self, column_label_1, column_label_2, screenshot_name):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
with self.rule_builder_rule_editor("swap-columns") as filter_editor_element:
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elems[0], column_label_1)
column_elems = filter_editor_element.find_elements_by_css_selector(".rule-column-selector")
self.select2_set_value(column_elems[1], column_label_2)
self.screenshot_if(screenshot_name)
@contextlib.contextmanager
def rule_builder_rule_editor(self, rule_type):
rule_builder = self.components.rule_builder
rule_builder.menu_item_rule_type(rule_type=rule_type).wait_for_and_click()
filter_editor = rule_builder.rule_editor(rule_type=rule_type)
filter_editor_element = filter_editor.wait_for_visible()
yield filter_editor_element
rule_builder.rule_editor_ok.wait_for_and_click()
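# Usage sketch for rule_builder_rule_editor: the context manager opens the editor
# for the given rule type, yields its root element, and clicks OK when the block
# exits - the same pattern used by the rule_builder_* helpers above, e.g.:
#
#   with self.rule_builder_rule_editor("add-filter-count") as editor:
#       count_input = editor.find_element_by_css_selector("input[type='number']")
#       count_input.clear()
#       count_input.send_keys("2")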
def rule_builder_set_mapping(self, mapping_type, column_label, screenshot_name=None):
rule_builder = self.components.rule_builder
rule_builder.menu_button_rules.wait_for_and_click()
rule_builder.menu_item_rule_type(rule_type="mapping").wait_for_and_click()
rule_builder.add_mapping_menu.wait_for_and_click()
rule_builder.add_mapping_button(mapping_type=mapping_type).wait_for_and_click()
if mapping_type != "list-identifiers" or not isinstance(column_label, list):
mapping_elem = rule_builder.mapping_edit(mapping_type=mapping_type).wait_for_visible()
self.select2_set_value(mapping_elem, column_label)
self.screenshot_if(screenshot_name)
else:
assert len(column_label) > 0
column_labels = column_label
for i, column_label in enumerate(column_labels):
if i > 0:
rule_builder.mapping_add_column(mapping_type=mapping_type).wait_for_and_click()
mapping_elem = rule_builder.mapping_edit(mapping_type=mapping_type).wait_for_visible()
self.select2_set_value(mapping_elem, column_label)
self.screenshot_if(screenshot_name)
rule_builder.mapping_ok.wait_for_and_click()
def rule_builder_set_source(self, json):
rule_builder = self.components.rule_builder
rule_builder.view_source.wait_for_and_click()
self.rule_builder_enter_source_text(json)
rule_builder.main_button_ok.wait_for_and_click()
rule_builder.view_source.wait_for_visible()
def rule_builder_enter_source_text(self, json):
rule_builder = self.components.rule_builder
text_area_elem = rule_builder.source.wait_for_visible()
text_area_elem.clear()
text_area_elem.send_keys(json)
def workflow_editor_click_option(self, option_label):
self.workflow_editor_click_options()
menu_element = self.workflow_editor_options_menu_element()
option_elements = menu_element.find_elements_by_css_selector("a")
assert len(option_elements) > 0, "Failed to find workflow editor options"
self.sleep_for(WAIT_TYPES.UX_RENDER)
found_option = False
for option_element in option_elements:
if option_label in option_element.text:
action_chains = self.action_chains()
action_chains.move_to_element(option_element)
action_chains.click()
action_chains.perform()
found_option = True
break
if not found_option:
raise Exception(f"Failed to find workflow editor option with label [{option_label}]")
def workflow_editor_click_options(self):
return self.wait_for_and_click_selector("#workflow-options-button")
def workflow_editor_options_menu_element(self):
return self.wait_for_selector_visible("#workflow-options-button")
def workflow_editor_click_run(self):
return self.wait_for_and_click_selector("#workflow-run-button")
def workflow_editor_click_save(self):
self.wait_for_and_click_selector("#workflow-save-button")
self.sleep_for(self.wait_types.DATABASE_OPERATION)
def navigate_to_histories_page(self):
self.home()
self.click_masthead_user()
self.components.masthead.histories.wait_for_and_click()
def navigate_to_user_preferences(self):
self.home()
self.click_masthead_user()
self.components.masthead.preferences.wait_for_and_click()
def navigate_to_invocations(self):
self.home()
self.click_masthead_user()
self.components.masthead.invocations.wait_for_and_click()
def navigate_to_pages(self):
self.home()
self.click_masthead_user()
self.components.masthead.pages.wait_for_and_click()
def admin_open(self):
self.components.masthead.admin.wait_for_and_click()
def select_dataset_from_lib_import_modal(self, filenames):
for name in filenames:
self.components.libraries.folder.select_import_dir_item(name=name).wait_for_and_click()
self.components.libraries.folder.import_dir_btn.wait_for_and_click()
def create_new_library(self, login=True):
if login:
self.admin_login()
self.libraries_open()
self.name = self._get_random_name(prefix="testcontents")
self.libraries_index_create(self.name)
def libraries_open(self):
self.home()
self.click_masthead_shared_data()
self.components.masthead.libraries.wait_for_and_click()
self.components.libraries.selector.wait_for_visible()
def libraries_open_with_name(self, name):
self.libraries_open()
self.libraries_index_search_for(name)
self.libraries_index_table_elements()[0].find_element_by_css_selector("td a").click()
@retry_during_transitions
def libraries_index_table_elements(self):
container = self.components.libraries._.wait_for_visible()
elements = container.find_elements_by_css_selector("tbody")
if not elements:
return []
else:
assert len(elements) == 1
element = elements[0]
return element.find_elements_by_css_selector("tr") # [style='display: table-row']
def libraries_index_create(self, name):
self.components.libraries.create_new_library_btn.wait_for_and_click()
name_input_field = self.components.libraries.new_library_name_input.wait_for_visible()
input_field = self.components.libraries.new_library_description_input.wait_for_visible()
name_input_field.send_keys(name)
input_field.send_keys(self._get_random_name(prefix="description"))
self.components.libraries.save_new_library_btn.wait_for_and_click()
def libraries_index_click_search(self):
self.sleep_for(WAIT_TYPES.UX_RENDER)
search_element = self.components.libraries.search_field.wait_for_visible()
search_element.click()
return search_element
def libraries_index_sort_selector(self):
return "th[aria-sort]"
def libraries_index_sort_click(self):
sort_element = self.wait_for_selector_clickable(self.libraries_index_sort_selector())
sort_element.click()
return sort_element
def libraries_index_search_for(self, text):
self.wait_for_overlays_cleared()
search_box = self.libraries_index_click_search()
search_box.clear()
search_box.send_keys(text)
value = search_box.get_attribute("value")
assert value == text, value
def libraries_folder_create(self, name):
self.components.libraries.folder.add_folder.wait_for_and_click()
self.components.libraries.folder.input_folder_name.wait_for_and_send_keys(name)
self.components.libraries.folder.save_folder_btn.wait_for_and_click()
def libraries_click_dataset_import(self):
self.wait_for_and_click(self.navigation.libraries.folder.selectors.add_items_button)
self.wait_for_visible(self.navigation.libraries.folder.selectors.add_items_menu)
def libraries_dataset_import(self, btn):
self.libraries_click_dataset_import()
self.wait_for_and_click(btn)
def libraries_dataset_import_from_history_select(self, to_select_items):
self.wait_for_visible(self.navigation.libraries.folder.selectors.import_history_content)
history_elements = self.find_elements(self.navigation.libraries.folder.selectors.import_history_contents_items)
for to_select_item in to_select_items:
found = False
for history_element in history_elements:
if to_select_item in history_element.text:
history_element.find_element_by_css_selector("input").click()
found = True
break
if not found:
raise Exception(f"Failed to find history item [{to_select_item}] to select")
def libraries_dataset_import_from_history_click_ok(self, wait=True):
self.wait_for_and_click(self.navigation.libraries.folder.selectors.import_datasets_ok_button)
if wait:
# Let the progress bar disappear...
self.wait_for_absent_or_hidden(self.navigation.libraries.folder.selectors.import_progress_bar)
def libraries_table_elements(self):
tbody_element = self.wait_for_selector_visible("#folder_list_body > tbody")
return tbody_element.find_elements_by_css_selector("tr:not(.b-table-empty-row)")
def populate_library_folder_from_import_dir(self, library_name, filenames):
self.libraries_open_with_name(library_name)
self.libraries_dataset_import(self.navigation.libraries.folder.labels.from_import_dir)
self.select_dataset_from_lib_import_modal(filenames)
def navigate_to_new_library(self, login=True):
self.create_new_library(login)
self.libraries_open_with_name(self.name)
def wait_for_overlays_cleared(self):
"""Wait for modals and Toast notifications to disappear."""
self.wait_for_selector_absent_or_hidden(".ui-modal", wait_type=WAIT_TYPES.UX_POPUP)
self.wait_for_selector_absent_or_hidden(".toast", wait_type=WAIT_TYPES.UX_POPUP)
def clear_tooltips(self):
action_chains = self.action_chains()
center_element = self.driver.find_element_by_css_selector("#center")
action_chains.move_to_element(center_element).perform()
self.wait_for_selector_absent_or_hidden(".b-tooltip", wait_type=WAIT_TYPES.UX_POPUP)
def workflow_index_open(self):
self.home()
self.click_masthead_workflow()
def workflow_index_table_elements(self):
workflows = self.components.workflows
workflows.workflow_table.wait_for_visible()
return workflows.workflow_rows.all()
def workflow_index_table_row(self, workflow_index=0):
self.components.workflows.workflow_rows.wait_for_element_count_of_at_least(workflow_index + 1)
return self.workflow_index_table_elements()[workflow_index]
@retry_during_transitions
def workflow_index_column_text(self, column_index, workflow_index=0):
row_element = self.workflow_index_table_row(workflow_index=workflow_index)
columns = row_element.find_elements_by_css_selector("td")
return columns[column_index].text
def workflow_index_click_search(self):
return self.wait_for_and_click_selector("#workflow-search")
def workflow_index_search_for(self, search_term=None):
return self._inline_search_for(
self.navigation.workflows.search_box,
search_term,
escape_to_clear=True,
)
def workflow_index_click_import(self):
return self.components.workflows.import_button.wait_for_and_click()
def workflow_index_rename(self, new_name, workflow_index=0):
self.workflow_index_click_option("Rename", workflow_index=workflow_index)
alert = self.driver.switch_to.alert
alert.send_keys(new_name)
alert.accept()
@retry_during_transitions
def workflow_index_name(self, workflow_index=0):
"""Get workflow name for workflow_index'th row."""
row_element = self.workflow_index_table_row(workflow_index=workflow_index)
workflow_button = row_element.find_element_by_css_selector(".workflow-dropdown")
return workflow_button.text
@retry_during_transitions
def workflow_click_option(self, workflow_selector, workflow_index=0):
workflow_row = self.workflow_index_table_row(workflow_index=workflow_index)
workflow_button = workflow_row.find_element_by_css_selector(workflow_selector)
workflow_button.click()
def select_dropdown_item(self, option_title):
menu_element = self.wait_for_selector_visible(".dropdown-menu.show")
menu_options = menu_element.find_elements_by_css_selector("a.dropdown-item")
for menu_option in menu_options:
if option_title in menu_option.text:
menu_option.click()
return True
def workflow_index_click_option(self, option_title, workflow_index=0):
self.workflow_click_option(".workflow-dropdown", workflow_index)
if not self.select_dropdown_item(option_title):
raise AssertionError(f"Failed to find workflow action option with title [{option_title}]")
def workflow_index_click_tag_display(self, workflow_index=0):
workflow_row_element = self.workflow_index_table_row(workflow_index)
tag_display = workflow_row_element.find_element_by_css_selector(".tags-display")
tag_display.click()
def workflow_index_add_tag(self, tag: str, workflow_index: int = 0):
self.workflow_index_click_tag_display(workflow_index=workflow_index)
self.tagging_add([tag])
@retry_during_transitions
def workflow_index_tags(self, workflow_index=0):
tag_spans = self.workflow_index_tag_elements(workflow_index=workflow_index)
tags = []
for tag_span in tag_spans:
tags.append(tag_span.text)
return tags
@retry_during_transitions
def workflow_index_tag_elements(self, workflow_index=0):
workflow_row_element = self.workflow_index_table_row(workflow_index)
tag_display = workflow_row_element.find_element_by_css_selector(".tags-display")
tag_spans = tag_display.find_elements_by_css_selector(".tag-name")
return tag_spans
@retry_during_transitions
def workflow_index_click_tag(self, tag, workflow_index=0):
tag_spans = self.workflow_index_tag_elements(workflow_index=workflow_index)
clicked = False
for tag_span in tag_spans:
if tag_span.text == tag:
tag_span.click()
clicked = True
break
if not clicked:
raise KeyError(f"Failed to find tag {tag} on workflow with index {workflow_index}")
def workflow_import_submit_url(self, url):
form_button = self.wait_for_selector_visible("#workflow-import-button")
url_element = self.wait_for_selector_visible("#workflow-import-url-input")
url_element.send_keys(url)
form_button.click()
def workflow_sharing_click_publish(self):
self.wait_for_and_click_selector("input[name='make_accessible_and_publish']")
def tagging_add(self, tags, auto_closes=True, parent_selector=""):
for i, tag in enumerate(tags):
if auto_closes or i == 0:
tag_area = f"{parent_selector}.tags-input input[type='text']"
tag_area = self.wait_for_selector_clickable(tag_area)
tag_area.click()
tag_area.send_keys(tag)
self.send_enter(tag_area)
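# Usage sketch for tagging_add (tag values hypothetical): each tag is typed into the
# tag input and committed with Enter; parent_selector can scope the input element.
#
#   self.tagging_add(["rna-seq", "qc"])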
def workflow_run_with_name(self, name: str):
self.workflow_index_open()
self.workflow_index_search_for(name)
self.workflow_click_option(".workflow-run")
def workflow_run_specify_inputs(self, inputs: Dict[str, Any]):
workflow_run = self.components.workflow_run
for label, value in inputs.items():
input_div_element = workflow_run.input_data_div(label=label).wait_for_visible()
self.select2_set_value(input_div_element, "%d: " % value["hid"])
def workflow_run_submit(self):
self.components.workflow_run.run_workflow.wait_for_and_click()
def workflow_run_ensure_expanded(self):
workflow_run = self.components.workflow_run
if workflow_run.expanded_form.is_absent:
workflow_run.expand_form_link.wait_for_and_click()
workflow_run.expanded_form.wait_for_visible()
def workflow_create_new(self, annotation=None, clear_placeholder=False):
self.workflow_index_open()
self.sleep_for(self.wait_types.UX_RENDER)
self.click_button_new_workflow()
self.sleep_for(self.wait_types.UX_RENDER)
form_element = self.driver.find_element_by_id("submit")
name = self._get_random_name()
annotation = annotation or self._get_random_name()
inputs = self.driver.find_elements_by_class_name("ui-input")
if clear_placeholder:
inputs[0].clear()
inputs[0].send_keys(name)
inputs[1].send_keys(annotation)
form_element.click()
return name
def invocation_index_table_elements(self):
invocations = self.components.invocations
invocations.invocations_table.wait_for_visible()
return invocations.invocations_table_rows.all()
def tool_open(self, tool_id, outer=False):
if outer:
tool_link = self.components.tool_panel.outer_tool_link(tool_id=tool_id)
else:
tool_link = self.components.tool_panel.tool_link(tool_id=tool_id)
tool_element = tool_link.wait_for_present()
self.driver.execute_script("arguments[0].scrollIntoView(true);", tool_element)
tool_link.wait_for_and_click()
def create_page_and_edit(self, name=None, slug=None, content_format=None, screenshot_name=None):
name = self.create_page(name=name, slug=slug, content_format=content_format, screenshot_name=screenshot_name)
self.click_grid_popup_option(name, "Edit content")
self.components.pages.editor.wym_iframe.wait_for_visible()
return name
def create_page(self, name=None, slug=None, content_format=None, screenshot_name=None):
self.components.pages.create.wait_for_and_click()
name = name or self._get_random_name(prefix="page")
slug = slug or self._get_random_name(prefix="pageslug")
content_format = content_format or "HTML"
self.tool_set_value("title", name)
self.tool_set_value("slug", slug)
self.tool_set_value("content_format", content_format, expected_type="select")
self.screenshot_if(screenshot_name)
# Sometimes the 'submit' button is not yet hooked up?
self.sleep_for(self.wait_types.UX_RENDER)
self.components.pages.submit.wait_for_and_click()
return name
def tool_parameter_div(self, expanded_parameter_id):
return self.components.tool_form.parameter_div(parameter=expanded_parameter_id).wait_for_clickable()
def tool_parameter_edit_rules(self):
rules_div_element = self.tool_parameter_div("rules")
edit_button_element = rules_div_element.find_element_by_css_selector("i.fa-edit")
edit_button_element.click()
def tool_set_value(self, expanded_parameter_id, value, expected_type=None):
div_element = self.tool_parameter_div(expanded_parameter_id)
assert div_element
if expected_type in ["select", "data", "data_collection"]:
div_selector = f"div.ui-form-element[id$='form-element-{expanded_parameter_id}']"
self.select2_set_value(div_selector, value)
else:
input_element = div_element.find_element_by_css_selector("input")
# Clear default value
input_element.clear()
input_element.send_keys(value)
def tool_form_generate_tour(self):
self.components.tool_form.options.wait_for_and_click()
self.components.tool_form.generate_tour.wait_for_and_click()
def tool_form_execute(self):
self.components.tool_form.execute.wait_for_and_click()
def click_masthead_user(self):
self.components.masthead.user.wait_for_and_click()
def click_masthead_shared_data(self):
self.components.masthead.shared_data.wait_for_and_click()
def click_masthead_workflow(self):
self.components.masthead.workflow.wait_for_and_click()
def click_button_new_workflow(self):
self.wait_for_and_click(self.navigation.workflows.selectors.new_button)
def wait_for_sizzle_selector_clickable(self, selector):
element = self._wait_on(
sizzle.sizzle_selector_clickable(selector),
f"sizzle/jQuery selector [{selector}] to become clickable",
)
return element
@retry_during_transitions
def click_history_options(self):
if self.is_beta_history():
component = self.components.history_panel.options_button_icon_beta
else:
component = self.components.history_panel.options_button_icon
component.wait_for_and_click()
def click_history_option_export_to_file(self):
if self.is_beta_history():
self.use_bootstrap_dropdown(option="export to file", menu="history options")
else:
self.click_history_options()
self.components.history_panel.options_show_export_history_to_file.wait_for_and_click()
def click_history_option_sharing(self):
if self.is_beta_history():
self.use_bootstrap_dropdown(option="share or publish", menu="history options")
else:
self.click_history_option("Share or Publish")
def click_history_option(self, option_label_or_component):
# Open menu
self.click_history_options()
if isinstance(option_label_or_component, str):
option_label = option_label_or_component
# Click labeled option
self.wait_for_visible(self.navigation.history_panel.options_menu)
menu_item_sizzle_selector = self.navigation.history_panel.options_menu_item(
option_label=option_label
).selector
menu_selection_element = self.wait_for_sizzle_selector_clickable(menu_item_sizzle_selector)
menu_selection_element.click()
else:
option_component = option_label_or_component
option_component.wait_for_and_click()
def use_beta_history(self):
if not self.is_beta_history():
self.click_history_option(self.components.history_panel.options_use_beta_history)
self.components.history_panel.beta.wait_for_present()
def is_beta_history(self):
old_panel = self.components.history_panel.beta.is_absent
new_panel = self.components.history_panel.new_history_button.is_absent
if old_panel and new_panel:
# both absent, let page render a bit...
self.sleep_for(self.wait_types.UX_RENDER)
else:
return new_panel
old_panel = self.components.history_panel.beta.is_absent
return not old_panel
# avoids problematic IDs and classes in the markup
def beta_history_element(self, attribute_value, attribute_name="data-description", scope=".history-index"):
return self.components._.by_attribute(name=attribute_name, value=attribute_value, scope=scope)
# join list of attrs into css attribute selectors and append to base beta_item selector
def content_item_by_attributes(self, **attrs):
suffix_list = [f'[data-{k}="{v}"]' for (k, v) in attrs.items()]
suffix = "".join(suffix_list)
return self.components.history_panel.content_item.selector(suffix=suffix)
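# Illustrative sketch: beta_history_element and content_item_by_attributes build
# attribute-based selectors for the beta history markup, e.g. (values hypothetical):
#
#   self.beta_history_element("name display")
#   # -> component matching [data-description="name display"] within .history-index
#   self.content_item_by_attributes(hid=3, state="ok")
#   # -> content_item selector with suffix '[data-hid="3"][data-state="ok"]'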
def history_click_create_new(self):
if not self.is_beta_history():
self.components.history_panel.new_history_button.wait_for_and_click()
else:
option = self.beta_history_element("create new history")
option.wait_for_and_click()
def history_click_editor_save(self):
option = self.beta_history_element("editor save button")
option.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
def history_panel_click_copy_elements(self):
if self.is_beta_history():
self.use_bootstrap_dropdown(option="copy datasets", menu="history action menu")
else:
self.click_history_option("Copy Datasets")
def use_bootstrap_dropdown(self, option=None, menu=None):
"""uses bootstrap dropdown by data-description attributes"""
if option is None:
raise TypeError
if menu is None:
raise TypeError
toggle = self.beta_history_element(menu).descendant("button")
self.wait_for_and_click(toggle)
return self.beta_history_element(option).wait_for_and_click()
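# Usage sketch (values taken from callers elsewhere in this class): open the
# "history options" dropdown and pick one of its data-description labelled items.
#
#   self.use_bootstrap_dropdown(option="show structure", menu="history options")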
@retry_during_transitions
def histories_click_advanced_search(self):
search_selector = "#standard-search .advanced-search-toggle"
self.wait_for_and_click_selector(search_selector)
@retry_during_transitions
def histories_get_history_names(self):
self.sleep_for(self.wait_types.UX_RENDER)
names = []
grid = self.wait_for_selector("#grid-table-body")
for row in grid.find_elements_by_tag_name("tr"):
td = row.find_elements_by_tag_name("td")
name = td[1].text if td[0].text == "" else td[0].text
if name != "No items" and not name.startswith("No matching entries found"):
names.append(name)
return names
@edit_details
def history_panel_add_tags(self, tags):
tag_icon = self.components.history_panel.tag_icon
tag_area = self.components.history_panel.tag_area
tag_area_input = self.components.history_panel.tag_area_input
# if the tag editor is not present but the tag_icon is, then click it
if not tag_icon.is_absent and (tag_area.is_absent or not tag_area.is_displayed):
tag_icon.wait_for_and_click()
input_element = tag_area_input.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
for tag in tags:
input_element.send_keys(tag)
self.send_enter(input_element)
self.sleep_for(self.wait_types.UX_RENDER)
@edit_details
def history_panel_rename(self, new_name):
editable_text_input_element = self.history_panel_name_input()
if self.is_beta_history():
editable_text_input_element.clear()
editable_text_input_element.send_keys(new_name)
self.send_enter(editable_text_input_element)
return editable_text_input_element
def history_panel_name_input(self):
if not self.is_beta_history():
editable_text_input_element = self.history_panel_click_to_rename()
history_panel = self.components.history_panel
edit = history_panel.name_edit_input
editable_text_input_element = edit.wait_for_visible()
return editable_text_input_element
def history_panel_click_to_rename(self):
history_panel = self.components.history_panel
name = history_panel.name
edit = history_panel.name_edit_input
name.wait_for_and_click()
return edit.wait_for_visible()
def history_panel_refresh_click(self):
self.wait_for_and_click(self.navigation.history_panel.selectors.refresh_button)
def history_panel_multi_operations_show(self):
return self.wait_for_and_click(self.navigation.history_panel.multi_operations.selectors.show_button)
def history_panel_muli_operation_select_hid(self, hid):
item_selector = self.history_panel_item_selector(hid, wait=True)
operation_radio_selector = f"{item_selector} .selector"
self.wait_for_and_click_selector(operation_radio_selector)
def history_panel_multi_operation_action_click(self, action):
# Maybe isn't needed?
# self.sleep_for(WAIT_TYPES.UX_RENDER)
self.wait_for_and_click(self.navigation.history_panel.multi_operations.selectors.action_button)
@retry_during_transitions
def _click_action_in_menu():
menu_element = self.wait_for_visible(self.navigation.history_panel.multi_operations.selectors.action_menu)
menu_element.find_element_by_link_text(action.text).click()
_click_action_in_menu()
def open_history_multi_view(self):
if self.is_beta_history():
self.components.history_panel.histories_operation_menu.wait_for_and_click()
self.components.history_panel.multi_view_button_beta.wait_for_and_click()
else:
self.components.history_panel.multi_view_button.wait_for_and_click()
def history_panel_show_structure(self):
if self.is_beta_history():
self.use_bootstrap_dropdown(option="show structure", menu="history options")
else:
self.click_history_option(self.components.history_panel.options_show_history_structure)
def history_multi_view_display_collection_contents(self, collection_hid, collection_type="list"):
self.open_history_multi_view()
selector = self.history_panel_wait_for_hid_state(collection_hid, "ok", multi_history_panel=True)
self.click(selector)
next_level_element_selector = selector
for _ in range(len(collection_type.split(":")) - 1):
next_level_element_selector = next_level_element_selector.descendant(".dataset-collection-element")
self.wait_for_and_click(next_level_element_selector)
dataset_selector = next_level_element_selector.descendant(".dataset")
self.wait_for_and_click(dataset_selector)
def history_panel_item_view_dataset_details(self, hid):
if not self.is_beta_history():
self.history_panel_ensure_showing_item_details(hid)
self.hda_click_details(hid)
self.components.dataset_details._.wait_for_visible()
else:
item = self.history_panel_item_component(hid=hid)
item.dataset_operations_dropdown.wait_for_and_click()
item.info_button.wait_for_and_click()
self.components.dataset_details._.wait_for_visible()
def history_panel_item_click_visualization_menu(self, hid):
viz_button_selector = f"{self.history_panel_item_selector(hid)} .visualizations-dropdown"
self.wait_for_and_click_selector(viz_button_selector)
self.wait_for_selector_visible(f"{viz_button_selector} .dropdown-menu")
def history_panel_item_available_visualizations_elements(self, hid):
# Precondition: viz menu has been opened with history_panel_item_click_visualization_menu
viz_menu_selectors = f"{self.history_panel_item_selector(hid)} a.visualization-link"
return self.driver.find_elements_by_css_selector(viz_menu_selectors)
def history_panel_item_get_tags(self, hid):
item_component = self.history_panel_item_component(hid=hid)
item_component.wait_for_visible()
return [e.text for e in item_component.alltags.all()]
def history_panel_item_available_visualizations(self, hid):
# Precondition: viz menu has been opened with history_panel_item_click_visualization_menu
return [e.text for e in self.history_panel_item_available_visualizations_elements(hid)]
def history_panel_item_click_visualization(self, hid, visualization_name):
# Precondition: viz menu has been opened with history_panel_item_click_visualization_menu
elements = self.history_panel_item_available_visualizations_elements(hid)
for element in elements:
if element.text == visualization_name:
element.click()
return element
raise ValueError(f"No visualization [{visualization_name}] found.")
def history_panel_item_selector(self, hid, wait=False):
current_history_id = self.current_history_id()
contents = self.api_get(f"histories/{current_history_id}/contents")
try:
history_item = [d for d in contents if d["hid"] == hid][0]
except IndexError:
raise Exception(f"Could not find history item with hid [{hid}] in contents [{contents}]")
history_item_selector = f"#{history_item['history_content_type']}-{history_item['id']}"
if wait:
self.wait_for_selector_visible(history_item_selector)
return history_item_selector
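# Illustrative sketch: for a dataset with API id "abc123" (hypothetical) at the
# requested hid, history_panel_item_selector returns "#dataset-abc123"; for a
# collection it would be "#dataset_collection-abc123".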
def modal_body_selector(self):
return ".modal-body"
def history_panel_item_body_component(self, hid, wait=False):
details_component = self.history_panel_item_component(hid=hid).details
if wait:
details_component.wait_for_visible()
return details_component
def hda_click_primary_action_button(self, hid: int, button_key: str):
self.history_panel_ensure_showing_item_details(hid)
item_component = self.history_panel_item_component(hid=hid)
button_component = item_component[f"{button_key}_button"]
button_component.wait_for_and_click()
def hda_click_details(self, hid: int):
self.hda_click_primary_action_button(hid, "info")
def history_panel_click_item_title(self, hid, **kwds):
item_component = self.history_panel_item_component(hid=hid)
details_component = item_component.details
details_displayed = not details_component.is_absent and details_component.is_displayed
item_component.title.wait_for_and_click()
if kwds.get("wait", False):
if details_displayed:
details_component.wait_for_absent_or_hidden()
else:
details_component.wait_for_visible()
return item_component
def history_panel_ensure_showing_item_details(self, hid):
if not self.history_panel_item_showing_details(hid):
self.history_panel_click_item_title(hid=hid, wait=True)
def history_panel_item_showing_details(self, hid):
item_component = self.history_panel_item_component(hid=hid)
item_component.wait_for_present()
if item_component.details.is_absent:
return False
return item_component.details.is_displayed
def collection_builder_set_name(self, name):
name_element = self.wait_for_selector_visible("input.collection-name")
name_element.send_keys(name)
def collection_builder_hide_originals(self):
self.wait_for_and_click_selector("input.hide-originals")
def collection_builder_create(self):
self.wait_for_and_click_selector("button.create-collection")
def collection_builder_clear_filters(self):
self.wait_for_and_click_selector("a.clear-filters-link")
def collection_builder_click_paired_item(self, forward_or_reverse, item):
assert forward_or_reverse in ["forward", "reverse"]
forward_column = self.wait_for_selector_visible(f".{forward_or_reverse}-column .column-datasets")
first_dataset_forward = forward_column.find_elements_by_css_selector("li")[item]
first_dataset_forward.click()
def logout_if_needed(self):
if self.is_logged_in():
self.home()
self.logout()
def logout(self):
self.components.masthead.logged_in_only.wait_for_visible()
self.click_masthead_user()
self.components.masthead.logout.wait_for_and_click()
try:
self.components.masthead.logged_out_only.wait_for_visible()
except self.TimeoutException as e:
message = "Clicked logout button but waiting for 'Login or Registration' button failed, perhaps the logout button was clicked before the handler was setup?"
raise self.prepend_timeout_message(e, message)
assert (
not self.is_logged_in()
), "Clicked to logged out and UI reflects a logout, but API still thinks a user is logged in."
def run_tour(self, path, skip_steps=None, sleep_on_steps=None, tour_callback=None):
skip_steps = skip_steps or []
sleep_on_steps = sleep_on_steps or {}
if tour_callback is None:
tour_callback = NullTourCallback()
self.home()
with open(path) as f:
tour_dict = yaml.safe_load(f)
steps = tour_dict["steps"]
for i, step in enumerate(steps):
title = step.get("title", None)
skip = False
if skip_steps:
for skip_step in skip_steps:
if title == skip_step:
skip = True
if title in sleep_on_steps:
time.sleep(sleep_on_steps[title])
if skip:
continue
self.run_tour_step(step, i, tour_callback)
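# Hedged usage sketch: run_tour() consumes a YAML file whose top-level "steps" list
# drives the loop above; each step may carry the keys read here and in run_tour_step()
# below (title, preclick, element, textinsert, postclick). The path and step titles
# below are hypothetical placeholders.
#
#     self.run_tour(
#         "tours/example_tour.yaml",            # hypothetical tour file
#         skip_steps=["Optional step title"],
#         sleep_on_steps={"Slow step title": 2},
#     )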
def tour_wait_for_clickable_element(self, selector):
timeout = self.timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
wait = self.wait(timeout=timeout)
timeout_message = self._timeout_message(f"sizzle (jQuery) selector [{selector}] to become clickable")
element = wait.until(
sizzle.sizzle_selector_clickable(selector),
timeout_message,
)
return element
def tour_wait_for_element_present(self, selector):
timeout = self.timeout_for(wait_type=WAIT_TYPES.JOB_COMPLETION)
wait = self.wait(timeout=timeout)
timeout_message = self._timeout_message(f"sizzle (jQuery) selector [{selector}] to become present")
element = wait.until(
sizzle.sizzle_presence_of_selector(selector),
timeout_message,
)
return element
def get_tooltip_text(self, element, sleep=0, click_away=True):
tooltip_balloon = self.components._.tooltip_balloon
tooltip_balloon.wait_for_absent()
action_chains = self.action_chains()
action_chains.move_to_element(element)
action_chains.perform()
if sleep > 0:
time.sleep(sleep)
tooltip_element = tooltip_balloon.wait_for_visible()
text = tooltip_element.text
if click_away:
self.click_center()
return text
@retry_during_transitions
def assert_selector_absent_or_hidden_after_transitions(self, selector):
"""Variant of assert_selector_absent_or_hidden that retries during transitions.
In the parent method - the element is found and then it is checked to see
if it is visible. It may disappear from the page in the middle there
and cause a StaleElement error. For checks where we care about the final
resting state after transitions - this method can be used to retry
during those transitions.
"""
return self.assert_selector_absent_or_hidden(selector)
@retry_during_transitions
def assert_absent_or_hidden_after_transitions(self, selector):
"""Variant of assert_absent_or_hidden that retries during transitions.
See details above for more information about this.
"""
return self.assert_absent_or_hidden(selector)
def assert_tooltip_text(self, element, expected: Union[str, HasText], sleep: int = 0, click_away: bool = True):
if hasattr(expected, "text"):
expected = cast(HasText, expected).text
text = self.get_tooltip_text(element, sleep=sleep, click_away=click_away)
assert text == expected, f"Tooltip text [{text}] was not expected text [{expected}]."
def assert_tooltip_text_contains(
self, element, expected: Union[str, HasText], sleep: int = 0, click_away: bool = True
):
if hasattr(expected, "text"):
expected = cast(HasText, expected).text
text = self.get_tooltip_text(element, sleep=sleep, click_away=click_away)
assert expected in text, f"Tooltip text [{text}] was not expected text [{expected}]."
def assert_error_message(self, contains=None):
self.components._.messages.error.wait_for_visible()
elements = self.find_elements(self.components._.messages.selectors.error)
return self.assert_message(elements, contains=contains)
def assert_warning_message(self, contains=None):
element = self.components._.messages["warning"]
return self.assert_message(element, contains=contains)
def assert_message(self, element, contains=None):
if contains is not None:
if isinstance(element, list):
assert any(
contains in el.text for el in element
), f"{contains} was not found in {[el.text for el in element]}"
return
element = element.wait_for_visible()
text = element.text
if contains is not None and contains not in text:
message = f"Text [{contains}] expected inside of [{text}] but not found."
raise AssertionError(message)
def assert_no_error_message(self):
self.components._.messages.error.assert_absent_or_hidden()
def run_tour_step(self, step, step_index, tour_callback):
preclick = step.get("preclick", [])
for preclick_selector in preclick:
print(f"(Pre)Clicking {preclick_selector}")
self._tour_wait_for_and_click_element(preclick_selector)
element_str = step.get("element", None)
if element_str is not None:
print(f"Waiting for element {element_str}")
element = self.tour_wait_for_element_present(element_str)
assert element is not None
textinsert = step.get("textinsert", None)
if textinsert is not None:
element.send_keys(textinsert)
tour_callback.handle_step(step, step_index)
postclick = step.get("postclick", [])
for postclick_selector in postclick:
print(f"(Post)Clicking {postclick_selector}")
self._tour_wait_for_and_click_element(postclick_selector)
@retry_during_transitions
def _tour_wait_for_and_click_element(self, selector):
element = self.tour_wait_for_clickable_element(selector)
element.click()
@retry_during_transitions
def wait_for_and_click_selector(self, selector):
element = self.wait_for_selector_clickable(selector)
element.click()
return element
@retry_during_transitions
def wait_for_and_click(self, selector_template):
element = self.wait_for_clickable(selector_template)
element.click()
return element
def set_history_annotation(self, annotation, clear_text=False):
history_panel = self.components.history_panel
if self.is_beta_history():
toggle = self.beta_history_element("editor toggle")
toggle.wait_for_and_click()
annotation_input = self.beta_history_element("annotation input").wait_for_visible()
if clear_text:
annotation_input.clear()
annotation_input.send_keys(annotation)
self.history_click_editor_save()
else:
self.ensure_history_annotation_area_displayed()
editable = history_panel.annotation_editable_text
edit = history_panel.annotation_edit
editable.wait_for_and_click()
edit_el = edit.wait_for_and_click()
if clear_text:
# previously this was just edit_el.clear() but
# .clear() doesn't work with beta history panel
action_chains = self.action_chains()
for _ in range(40):
action_chains.send_keys(Keys.BACKSPACE)
action_chains.perform()
edit_el.send_keys(annotation)
history_panel.annotation_done.wait_for_and_click()
def ensure_history_annotation_area_displayed(self):
annotation_area = self.components.history_panel.annotation_area
annotation_icon = self.components.history_panel.annotation_icon
if annotation_area.is_absent or not annotation_area.is_displayed:
annotation_icon.wait_for_and_click()
def select2_set_value(self, container_selector_or_elem, value, with_click=True, clear_value=False):
# There are two hacky ways to select things from the select2 widget -
# with_click=True: This simulates the mouse click after the suggestion contains
# only the selected value.
# with_click=False: This presses enter on the selection. Not sure
# why.
# with_click seems to work in all situations - the enter method
# doesn't seem to work with the tool form for some reason.
if hasattr(container_selector_or_elem, "selector"):
container_selector_or_elem = container_selector_or_elem.selector
if not hasattr(container_selector_or_elem, "find_element_by_css_selector"):
container_elem = self.wait_for_selector(container_selector_or_elem)
else:
container_elem = container_selector_or_elem
text_element = container_elem.find_element_by_css_selector("input[type='text']")
if clear_value:
self.send_backspace(text_element)
self.send_backspace(text_element)
text_element.send_keys(value)
# Wait for select2 options to load and then click to add this one.
drop_elem = self.wait_for_selector_visible("#select2-drop")
# Sleep seems to be needed - at least for send_enter.
time.sleep(0.5)
if not with_click:
# Wait for select2 options to load and then click to add this one.
self.send_enter(text_element)
else:
select_elem = drop_elem.find_elements_by_css_selector(".select2-result-label")[0]
action_chains = self.action_chains()
action_chains.move_to_element(select_elem).click().perform()
self.wait_for_selector_absent_or_hidden("#select2-drop")
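# Hedged usage sketch (selector and value are hypothetical): fill a select2-backed
# field, optionally clearing the previous value first.
#
#     self.select2_set_value("#some_field .select2-container", "desired option")
#     self.select2_set_value("#some_field .select2-container", "desired option", clear_value=True)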
def snapshot(self, description):
"""Test case subclass overrides this to provide detailed logging."""
def open_history_editor(self, scope=".history-index"):
if self.is_beta_history():
panel = self.components.history_panel.editor.selector(scope=scope)
if panel.name_input.is_absent:
toggle = panel.toggle
toggle.wait_for_and_click()
editor = panel.form
editor.wait_for_present()
def close_history_editor(self, scope=".history-index"):
if self.is_beta_history():
toggle = self.components.history_panel.edit_toggle
toggle.wait_for_and_click()
editor = self.components.history_panel.editor.selector(scope=scope)
self.assert_absent_or_hidden(editor)
def share_ensure_by_user_available(self, sharing_component):
collapse = sharing_component.share_with_collapse
collapse.wait_for_visible()
if collapse.has_class("collapsed"):
collapse.wait_for_and_click()
sharing_component.share_with_multiselect.wait_for_visible()
def share_unshare_with_user(self, sharing_component, email):
self.share_ensure_by_user_available(sharing_component)
unshare_user_button = self.components.histories.sharing.unshare_with_user_button(email=email)
unshare_user_button.wait_for_and_click()
self.components.histories.sharing.submit_sharing_with.wait_for_and_click()
unshare_user_button.wait_for_absent_or_hidden()
def share_with_user(
self,
sharing_component,
user_id=None,
user_email=None,
screenshot_before_submit=None,
screenshot_after_submit=None,
assert_valid=False,
):
self.share_ensure_by_user_available(sharing_component)
multiselect = sharing_component.share_with_multiselect.wait_for_and_click()
sharing_component.share_with_input.wait_for_and_send_keys(user_id or user_email)
self.send_enter(multiselect)
self.screenshot_if(screenshot_before_submit)
sharing_component.submit_sharing_with.wait_for_and_click()
if assert_valid:
self.assert_no_error_message()
xpath = f'//span[contains(text(), "{user_email}")]'
self.wait_for_xpath_visible(xpath)
self.screenshot_if(screenshot_after_submit)
class NotLoggedInException(TimeoutException):
def __init__(self, timeout_exception, user_info, dom_message):
template = "Waiting for UI to reflect user logged in but it did not occur. API indicates no user is currently logged in. %s API response was [%s]. %s"
msg = template % (dom_message, user_info, timeout_exception.msg)
super().__init__(msg=msg, screen=timeout_exception.screen, stacktrace=timeout_exception.stacktrace)
|
import logging
import os
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict
from os.path import join
from typing import Iterable, List, Optional, Tuple, Union
import torch
from torch import nn
from .composition import AdapterCompositionBlock, Fuse, Stack, parse_composition
from .configuration import AdapterConfig, AdapterConfigBase, AdapterFusionConfig, get_adapter_config_hash
from .context import AdapterSetup, ForwardContext
from .hub_mixin import PushAdapterToHubMixin
from .layer import AdapterLayer, AdapterLayerBase
from .loading import AdapterFusionLoader, AdapterLoader, PredictionHeadLoader, WeightsLoader
from .modeling import Adapter, GLOWCouplingBlock, NICECouplingBlock
from .prefix_tuning import PrefixTuningPool, PrefixTuningShim
from .utils import EMBEDDING_FILE, TOKENIZER_PATH, inherit_doc
from .wrappers.configuration import wrap_config
logger = logging.getLogger(__name__)
class InvertibleAdaptersMixin:
"""Mixin for Transformer models adding invertible adapters."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.invertible_adapters = nn.ModuleDict(dict())
# Make sure config is wrapped
self.config = wrap_config(self.config)
def add_invertible_adapter(self, adapter_name: str):
"""
Adds an invertible adapter module for the adapter with the given name. If the given adapter does not specify an
invertible adapter config, this method does nothing.
Args:
adapter_name (str): The name of the adapter for which to add an invertible adapter module.
"""
if adapter_name in self.invertible_adapters:
raise ValueError(f"Model already contains an adapter module for '{adapter_name}'.")
adapter_config = self.config.adapters.match(
adapter_name,
config_type=AdapterConfig,
location_key="inv_adapter",
)
if adapter_config and adapter_config["inv_adapter"]:
if adapter_config["inv_adapter"] == "nice":
inv_adap = NICECouplingBlock(
[[self.config.hidden_size]],
non_linearity=adapter_config["non_linearity"],
reduction_factor=adapter_config["inv_adapter_reduction_factor"],
)
elif adapter_config["inv_adapter"] == "glow":
inv_adap = GLOWCouplingBlock(
[[self.config.hidden_size]],
non_linearity=adapter_config["non_linearity"],
reduction_factor=adapter_config["inv_adapter_reduction_factor"],
)
else:
raise ValueError(f"Invalid invertible adapter type '{adapter_config["inv_adapter"]}'.")
self.invertible_adapters[adapter_name] = inv_adap
self.invertible_adapters[adapter_name].apply(Adapter.init_bert_weights)
def delete_invertible_adapter(self, adapter_name: str):
if adapter_name in self.invertible_adapters:
del self.invertible_adapters[adapter_name]
def get_invertible_adapter(self):
# TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:
first_adapter = self.config.adapters.active_setup.first()
if first_adapter in self.invertible_adapters:
return self.invertible_adapters[first_adapter]
return None
def enable_invertible_adapters(self, adapter_names):
for adapter_name in adapter_names:
if adapter_name in self.invertible_adapters:
for param in self.invertible_adapters[adapter_name].parameters():
param.requires_grad = True
def invertible_adapters_forward(self, hidden_states, rev=False):
# TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:
first_adapter = self.config.adapters.active_setup.first()
if first_adapter in self.invertible_adapters:
hidden_states = self.invertible_adapters[first_adapter](hidden_states, rev=rev)
return hidden_states
class ModelAdaptersMixin(PushAdapterToHubMixin, ABC):
"""Mixin for transformer models adding support for loading/ saving adapters."""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
if config.name_or_path and not os.path.exists(config.name_or_path):
self.model_name = config.name_or_path
else:
self.model_name = None
self.loaded_embeddings = {}
self.shared_parameters = nn.ModuleDict()
self._active_embedding = "default"
# Make sure config is wrapped
self.config = wrap_config(self.config)
def _link_prefix_to_pool(self, layer):
if isinstance(layer, PrefixTuningShim):
layer.set_pool(self.base_model.prefix_tuning)
def _init_adapter_modules(self, add_prefix_tuning_pool=True):
"""
This method initializes adapter modules and fusion modules from the model config.
"""
# Link all prefix tunings
if add_prefix_tuning_pool:
self.base_model.prefix_tuning = PrefixTuningPool(self.config)
self.apply_to_adapter_layers(lambda i, layer: self._link_prefix_to_pool(layer))
# Initialize adapters from config
for adapter_name in self.config.adapters:
self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))
# Initialize fusion from config
for fusion_name in self.config.adapters.fusions:
self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(fusion_name))
self.loaded_embeddings["default"] = self.get_input_embeddings()
# These methods have to be implemented by every deriving class:
@abstractmethod
def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
"""
Iterates over all layers of the model.
This abstract method has to be implemented by every implementing model.
"""
pass
def apply_to_adapter_layers(self, fn):
"""
Applies a function to all adapter layers of the model.
"""
for i, layer in self.iter_layers():
for module in layer.modules():
if isinstance(module, AdapterLayerBase):
fn(i, module)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):
"""Sets the model into mode for training the given adapters."""
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, True, False))
for adapter_name in adapter_setup:
if adapter_name in self.shared_parameters:
for param in self.shared_parameters[adapter_name].values():
param.requires_grad = True
if isinstance(self, InvertibleAdaptersMixin):
self.enable_invertible_adapters(adapter_setup.flatten())
# use the adapters to be trained by default in every forward pass
self.set_active_adapters(adapter_setup)
if train_embeddings:
self.get_input_embeddings().train()
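# Hedged usage sketch (adapter name is hypothetical): add an adapter, then switch the
# model into adapter-training mode, which freezes the base model and activates the
# adapter for every forward pass.
#
#     model.add_adapter("task_a")
#     model.train_adapter("task_a")
#     assert model.active_adapters is not None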
def train_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
warnings.warn(
"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.",
FutureWarning,
)
self.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, unfreeze_adapters, True))
# use the adapters to be trained by default in every forward pass
self.set_active_adapters(adapter_setup)
# TODO implement fusion for invertible adapters
def has_adapters(self):
if not getattr(self.config, "is_adaptable", None):
return False
return len(self.config.adapters.adapters) > 0
@property
def has_parallel_adapters(self) -> bool:
if self.config.adapters.active_setup:
return self.config.adapters.active_setup.parallel_channels > 1
else:
return False
@property
def active_adapters(self) -> AdapterCompositionBlock:
return self.config.adapters.active_setup
@active_adapters.setter
def active_adapters(self, adapter_setup: Union[list, AdapterCompositionBlock]):
self.set_active_adapters(adapter_setup)
def set_shared_parameters(self, param):
self.shared_parameters = param
def set_active_adapters(
self, adapter_setup: Union[list, AdapterCompositionBlock], skip_layers: Optional[List[int]] = None
):
"""
Sets the adapter modules to be used by default in every forward pass. If no adapter with the given name is
found, no module of the respective type will be activated.
Args:
adapter_setup (list):
The list of adapters to be activated by default. Can be a fusion or stacking configuration.
"""
adapter_setup = parse_composition(adapter_setup, model_type=self.config.model_type)
if adapter_setup:
for adapter_name in adapter_setup.flatten():
if adapter_name not in self.config.adapters.adapters:
raise ValueError(
f"No adapter with name '{adapter_name}' found. Please make sure that all specified adapters are correctly loaded."
)
self.config.adapters.active_setup = adapter_setup
self.config.adapters.skip_layers = skip_layers
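# Hedged usage sketch (adapter names are hypothetical): activate a single adapter or a
# composition block for all subsequent forward passes.
#
#     model.set_active_adapters("lang_en")
#     model.set_active_adapters(Stack("lang_en", "task_qa"))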
def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):
"""
Adds a new adapter module of the specified type to the model.
Args:
adapter_name (str): The name of the adapter module to be added.
config (str or dict or AdapterConfigBase, optional): The adapter configuration, can be either:
- the string identifier of a pre-defined configuration dictionary
- a configuration dictionary specifying the full config
- if not given, the default configuration for this adapter type will be used
overwrite_ok (bool, optional): Overwrite an adapter with the same name if it exists. By default (False), an
exception is thrown.
set_active (bool, optional): Set the adapter to be the active one. By default (False), the adapter is added
but not activated.
"""
if isinstance(config, dict):
config = AdapterConfigBase.load(config) # ensure config is ok and up-to-date
# In case adapter already exists and we allow overwriting, explicitly delete the existing one first
if overwrite_ok and adapter_name in self.config.adapters:
self.delete_adapter(adapter_name)
self.config.adapters.add(adapter_name, config=config)
try:
self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))
# PHM Layer
if self.config.adapters.match(adapter_name, AdapterConfig, location_key="phm_layer"):
self._add_shared_parameters(adapter_name, config)
# Prefix Tuning
for module in self.modules():
if isinstance(module, PrefixTuningPool):
module.confirm_prefix(adapter_name)
if isinstance(self, InvertibleAdaptersMixin):
self.add_invertible_adapter(adapter_name)
except ValueError as ex:
self.delete_adapter(adapter_name)
raise ex
if set_active:
self.set_active_adapters(adapter_name)
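# Hedged usage sketch (adapter names are hypothetical; "pfeiffer" is assumed to be an
# available pre-defined configuration identifier): add adapters with the default or a
# named configuration, optionally replacing and activating them.
#
#     model.add_adapter("task_a")
#     model.add_adapter("task_b", config="pfeiffer")
#     model.add_adapter("task_a", overwrite_ok=True, set_active=True)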
def _add_shared_parameters(self, adapter_name, adapter_config: AdapterConfig):
self.shared_parameters[adapter_name] = (
list(self.get_adapter(adapter_name)[0].values())[0].adapter_down[0].init_shared_parameters()
)
def add_fusion(self, adapter_names: Union[Fuse, list], adapter_fusion_config=None, override_kwargs=None):
warnings.warn(
"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.",
FutureWarning,
)
adapter_fusion_config = AdapterFusionConfig.from_dict(adapter_fusion_config).replace(**override_kwargs)
self.add_adapter_fusion(adapter_names, adapter_fusion_config)
def add_adapter_fusion(
self,
adapter_names: Union[Fuse, list, str],
config=None,
overwrite_ok: bool = False,
set_active: bool = False,
):
"""
Adds AdapterFusion to the model with all the necessary configurations and weight initializations.
Args:
adapter_names (Fuse or list or str): AdapterFusion layer to add. Can be either:
- a ``Fuse`` composition block
- a list of adapter names to fuse
- a comma-separated string of adapter names to fuse
config (str or dict): adapter fusion configuration, can be either:
- a string identifying a pre-defined adapter fusion configuration
- a dictionary representing the adapter fusion configuration
- the path to a file containing the adapter fusion configuration
overwrite_ok (bool, optional):
Overwrite an AdapterFusion layer with the same name if it exists. By default (False), an exception is
thrown.
set_active (bool, optional):
Activate the added AdapterFusion. By default (False), the AdapterFusion is added but not activated.
"""
if isinstance(adapter_names, Fuse):
adapter_names = adapter_names.children
elif isinstance(adapter_names, str):
adapter_names = adapter_names.split(",")
if isinstance(config, dict):
config = AdapterFusionConfig.from_dict(config) # ensure config is ok and up-to-date
# In case adapter already exists and we allow overwriting, explicitly delete the existing one first
if overwrite_ok and self.config.adapters.get_fusion(adapter_names) is not None:
self.delete_adapter_fusion(adapter_names)
self.config.adapters.add_fusion(adapter_names, config=config)
self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(adapter_names))
if set_active:
if not isinstance(adapter_names, list):
adapter_names = adapter_names.split(",")
self.set_active_adapters(Fuse(*adapter_names))
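# Hedged usage sketch (adapter names are hypothetical): fuse two previously added
# adapters and activate the resulting AdapterFusion setup.
#
#     model.add_adapter("task_a")
#     model.add_adapter("task_b")
#     model.add_adapter_fusion(Fuse("task_a", "task_b"), set_active=True)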
def delete_adapter(self, adapter_name: str):
"""
Deletes the adapter with the specified name from the model.
Args:
adapter_name (str): The name of the adapter.
"""
if adapter_name not in self.config.adapters:
logger.info("No adapter '%s' found for deletion. Skipping.", adapter_name)
return
del self.config.adapters.adapters[adapter_name]
self.apply_to_adapter_layers(lambda i, layer: layer.delete_adapter(adapter_name))
if isinstance(self, InvertibleAdaptersMixin):
self.delete_invertible_adapter(adapter_name)
# Reset active adapters if this was the only active adapter
if self.active_adapters == Stack(adapter_name):
self.active_adapters = None
def delete_adapter_fusion(self, adapter_names: Union[Fuse, list, str]):
"""
Deletes the AdapterFusion layer of the specified adapters.
Args:
adapter_names (Union[Fuse, list, str]): AdapterFusion layer to delete.
"""
if isinstance(adapter_names, Fuse):
adapter_fusion_name = ",".join(adapter_names.children)
elif isinstance(adapter_names, list):
adapter_fusion_name = ",".join(adapter_names)
elif isinstance(adapter_names, str):
adapter_fusion_name = adapter_names
else:
raise ValueError("Invalid AdapterFusion definition: {}".format(adapter_names))
if adapter_fusion_name not in self.config.adapters.fusions:
logger.info("No AdapterFusion '%s' found for deletion. Skipping.", adapter_fusion_name)
return
del self.config.adapters.fusions[adapter_fusion_name]
self.apply_to_adapter_layers(lambda i, layer: layer.delete_fusion_layer(adapter_fusion_name))
# Reset active adapters if this was the active setup
if self.active_adapters == adapter_names:
self.active_adapters = None
def save_adapter(
self,
save_directory: str,
adapter_name: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves an adapter and its configuration file to a directory so that it can be shared or reloaded using
`load_adapter()`.
Args:
save_directory (str): Path to a directory where the adapter should be saved.
adapter_name (str): Name of the adapter to be saved.
Raises:
ValueError: If the given adapter name is invalid.
"""
loader = AdapterLoader(self)
loader.save(save_directory, adapter_name, meta_dict)
# save additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.save(save_directory, adapter_name)
def save_adapter_fusion(
self,
save_directory: str,
adapter_names: Union[Fuse, list, str],
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded
using `load_adapter_fusion()`.
Args:
save_directory (str): Path to a directory where the AdapterFusion should be saved.
adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.
Raises:
ValueError: If the given AdapterFusion name is invalid.
"""
if isinstance(adapter_names, Fuse):
adapter_fusion_name = ",".join(adapter_names.children)
elif isinstance(adapter_names, list):
adapter_fusion_name = ",".join(adapter_names)
elif isinstance(adapter_names, str):
adapter_fusion_name = adapter_names
else:
raise ValueError("Invalid AdapterFusion definition: {}".format(adapter_names))
loader = AdapterFusionLoader(self)
loader.save(save_directory, adapter_fusion_name, meta_dict)
# save additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.save(save_directory, adapter_fusion_name)
def load_adapter(
self,
adapter_name_or_path: str,
config: Union[dict, str] = None,
version: str = None,
model_name: str = None,
load_as: str = None,
source: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
leave_out: Optional[List[int]] = None,
id2label=None,
set_active: bool = False,
**kwargs
) -> str:
"""
Loads a pre-trained pytorch adapter module from the local file system or a remote location.
Args:
adapter_name_or_path (str): can be either:
- the identifier of a pre-trained task adapter to be loaded from Adapter Hub
- a path to a directory containing adapter weights saved using `model.saved_adapter()`
- a URL pointing to a zip folder containing a saved adapter module
config (dict or str, optional): The requested configuration of the adapter. If not specified, will be either:
- the default adapter config for the requested adapter if specified
- the global default adapter config
version (str, optional): The version of the adapter to be loaded.
model_name (str, optional): The string identifier of the pre-trained model.
load_as (str, optional): Load the adapter using this name. By default, the name with which the adapter was
saved will be used.
source (str, optional): Identifier of the source(s) from where to load the adapter. Can be:
- "ah" (default): search on AdapterHub.
- "hf": search on HuggingFace model hub.
- None: search on all sources
leave_out: Dynamically drop adapter modules in the specified Transformer layers when loading the adapter.
set_active (bool, optional):
Set the loaded adapter to be the active one. By default (False), the adapter is loaded but not
activated.
Returns:
str: The name with which the adapter was added to the model.
"""
loader = AdapterLoader(self)
load_dir, load_name = loader.load(
adapter_name_or_path,
config,
version,
model_name,
load_as,
source=source,
leave_out=leave_out,
set_active=set_active,
**kwargs,
)
# load additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.load(
load_dir,
load_as=load_as,
loading_info=kwargs.get("loading_info", None),
main_load_name=load_name,
id2label=id2label,
set_active=set_active,
)
return load_name
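# Hedged usage sketch (the identifier is a hypothetical Hub/path reference): load a
# pre-trained adapter under a custom name and activate it. The return value is the
# name under which the adapter was registered.
#
#     name = model.load_adapter("user/some-adapter", load_as="task_a", set_active=True)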
def load_adapter_fusion(
self,
adapter_fusion_name_or_path: str,
load_as: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
set_active: bool = False,
**kwargs
) -> str:
"""
Loads a pre-trained AdapterFusion layer from the local file system.
Args:
adapter_fusion_name_or_path (str):
a path to a directory containing AdapterFusion weights saved using `model.save_adapter_fusion()`.
load_as (str, optional): Load the AdapterFusion using this name.
By default, the name with which the AdapterFusion layer was saved will be used.
set_active (bool, optional):
Activate the loaded AdapterFusion. By default (False), the AdapterFusion is loaded but not activated.
Returns:
str: The name with which the AdapterFusion was added to the model.
"""
loader = AdapterFusionLoader(self)
load_dir, load_name = loader.load(adapter_fusion_name_or_path, load_as, set_active=set_active)
# load additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.load(
load_dir,
load_as=load_as,
loading_info=kwargs.get("loading_info", None),
main_load_name=load_name,
set_active=set_active,
)
return load_name
def save_all_adapters(
self,
save_directory: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves all adapters of this model together with their configuration to subfolders of the given location.
Args:
save_directory (str): Path to a directory where the adapters should be saved.
"""
for name in self.config.adapters:
adapter_config = self.config.adapters.get(name)
h = get_adapter_config_hash(adapter_config)
save_path = join(save_directory, name)
if meta_dict:
meta_dict.update({"config_id": h})
else:
meta_dict = {"config_id": h}
self.save_adapter(save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders)
def save_all_adapter_fusions(
self,
save_directory: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves all AdapterFusion layers of this model together with their configuration to subfolders of the given
location.
Args:
save_directory (str): Path to a directory where the AdapterFusion layers should be saved.
"""
for name in self.config.adapters.fusions:
adapter_fusion_config = self.config.adapters.get_fusion(name)
h = get_adapter_config_hash(adapter_fusion_config)
save_path = join(save_directory, name)
if meta_dict:
meta_dict.update({"config_id": h})
else:
meta_dict = {"config_id": h}
self.save_adapter_fusion(
save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders
)
def freeze_model(self, freeze=True):
"""Freezes all weights of the model."""
# first freeze/ unfreeze all model weights
for param in self.base_model.parameters():
param.requires_grad = not freeze
self.model_frozen = freeze
def forward_context(self, context: ForwardContext, *args, **kwargs):
"""
This method is called by the ``ForwardContext`` at the beginning of the forward pass.
"""
# some warnings if we don't use available adapters
active_adapters = getattr(self, "active_adapters", None) or AdapterSetup.get_context()
if not active_adapters:
if self.has_adapters():
logger.warning("There are adapters available but none are activated for the forward pass.")
return
context.adapters_parallelized = False
# Add the shared parameters for the active adapters to the context
context.shared_parameters = {
name: param for name, param in self.shared_parameters.items() if name in active_adapters.flatten()
}
# Prefix tuning
input_tensor = kwargs.get("input_ids", None)
if input_tensor is None:
input_tensor = kwargs.get("decoder_input_ids", None)
if input_tensor is None:
input_tensor = kwargs.get("attention_mask", None)
if input_tensor is None:
input_tensor = args[0]
context.prefix_states = self.base_model.prefix_tuning(input_tensor.shape[0])
def load_embeddings(self, path: str, name: str):
"""
Load a saved embedding from the given path. If the embedding was saved with a tokenizer it is returned
Args:
path: the path to the saved embedding
name: the name the embedding should be loaded as
Returns: a tokenizer if it was saved with the embedding, otherwise None
"""
from ..models.auto.tokenization_auto import AutoTokenizer
if name in self.loaded_embeddings:
raise ValueError("An embedding with the name {} already exists".format(name))
tokenizer = None
tokenizer_path = os.path.join(path, TOKENIZER_PATH)
if os.path.isdir(tokenizer_path):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
embedding_path = os.path.join(path, EMBEDDING_FILE)
if not os.path.isfile(embedding_path):
raise FileNotFoundError("No embeddings found at {}".format(embedding_path))
weights = torch.load(embedding_path)
self.loaded_embeddings[name] = nn.Embedding.from_pretrained(weights)
self.set_active_embeddings(name)
return tokenizer
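# Hedged usage sketch (path and name are hypothetical): load an embedding saved earlier
# with save_embeddings(); a tokenizer is returned only if one was stored alongside the
# weights.
#
#     tokenizer = model.load_embeddings("./saved/custom_embedding", "custom")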
def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):
"""
Add a new embedding to the model. If a reference embedding and reference tokenizer are provided, tokens
present in both tokenizers are initialized from the corresponding embedding in the reference_embedding.
Args:
name: the name of the embedding
tokenizer: the tokenizer determining the vocab of the embedding
reference_embedding:
the reference embedding to use for initializing the embeddings of tokens present in the newly created
embedding
reference_tokenizer: the tokenizer providing the vocab for the reference embedding
embedding_dim: the dimension of the embeddings (if None the hidden_size from the config is used)
"""
if name in self.loaded_embeddings:
raise ValueError("An embedding with the name {} already exists".format(name))
if embedding_dim is None:
embedding_dim = self.config.hidden_size
embedding = nn.Embedding(tokenizer.vocab_size, embedding_dim)
embedding.requires_grad_(False)
if (reference_embedding is not None and reference_tokenizer is None) or (
reference_tokenizer is not None and reference_embedding is None
):
raise KeyError(
"Reference embedding and reference tokenizer are required to use initialize embeddings from reference embedding"
)
if reference_embedding is not None and reference_tokenizer is not None:
tokens = set(tokenizer.get_vocab().keys()) & set(reference_tokenizer.get_vocab().keys())
reference_vocab = reference_tokenizer.get_vocab()
vocab = tokenizer.get_vocab()
for t in tokens:
idx_reference = reference_vocab[t]
idx = vocab[t]
embedding.weight[idx] = self.loaded_embeddings[reference_embedding].weight[idx_reference].clone()
embedding.train(False)
self.loaded_embeddings[name] = embedding
self.set_active_embeddings(name)
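# Hedged usage sketch (names are hypothetical): register an embedding matrix for a new
# tokenizer, copying rows for tokens shared with the reference vocabulary; the new
# embedding becomes active immediately.
#
#     model.add_embeddings(
#         "domain_vocab",
#         new_tokenizer,
#         reference_embedding="default",
#         reference_tokenizer=original_tokenizer,
#     )
#     assert model.active_embeddings == "domain_vocab"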
def delete_embeddings(self, name):
"""
Deletes the embedding with the given name
Args:
name: The name of the embedding that should be deleted
"""
if name not in self.loaded_embeddings:
raise ValueError("No embedding with name {}".format(name))
if self.active_embeddings == name:
logger.warning("The active embedding is deleted. Setting the default embedding as active.")
self.set_active_embeddings("default")
del self.loaded_embeddings[name]
def save_embeddings(self, path, name, tokenizer=None):
"""
Saves the embedding with the given name. If a tokenizer is passed as well the tokenizer is saved together with
the embedding.
Args:
path: The path where the embedding should be saved
name: The name of the embedding that should be saved
tokenizer: optionally a tokenizer to save with the embedding (default is None)
"""
if self.active_embeddings == name:
self.loaded_embeddings[name] = self.get_input_embeddings()
os.makedirs(path, exist_ok=True)
embedding_path = os.path.join(path, EMBEDDING_FILE)
torch.save(self.loaded_embeddings[name].weight, embedding_path)
if tokenizer:
tokenizer_path = os.path.join(path, TOKENIZER_PATH)
tokenizer.save_pretrained(tokenizer_path)
def set_active_embeddings(self, name):
"""
Sets the active embedding for the forward pass of the model
Args:
name: The name of the embedding that should be used
"""
self.loaded_embeddings[self.active_embeddings] = self.get_input_embeddings()
self.set_input_embeddings(self.loaded_embeddings[name])
self._active_embedding = name
@property
def active_embeddings(self):
return self._active_embedding
def get_fusion_regularization_loss(self):
reg_loss = 0.0
target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
for i, layer in self.iter_layers():
for module in layer.modules():
if isinstance(module, AdapterLayer):
for _, layer_fusion in module.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
return reg_loss
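# Hedged usage sketch: when training an AdapterFusion setup, the regularization term
# computed above can be added to the task loss before the backward pass (assumes
# `outputs.loss` is available, i.e. labels were passed to the model).
#
#     outputs = model(**batch)
#     loss = outputs.loss + model.get_fusion_regularization_loss()
#     loss.backward()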
def get_adapter(self, name) -> dict:
"""
Returns a dictionary with all weights of the adapter with the specified name.
Args:
name (str): The adapter name.
Returns:
dict: A nested dictionary containing the weights of the adapter. The dictionary is structured as follows:
{<layer id>: {<module location>: <nn.Module>}}.
"""
destination = defaultdict(dict)
# use a custom index to ensure numbering is from 0 to N layers
for i, (_, layer) in enumerate(self.iter_layers()):
for module in layer.modules():
if isinstance(module, AdapterLayerBase):
adapter_module = module.get_adapter(name)
if adapter_module is not None:
destination[i][module.location_key] = adapter_module
return dict(destination)
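# Hedged usage sketch (adapter name is hypothetical): inspect the weights of a single
# adapter; the result maps layer index -> location key -> nn.Module, as described in
# the docstring above.
#
#     modules_per_layer = model.get_adapter("task_a")
#     first_layer_modules = modules_per_layer.get(0, {})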
def eject_prefix_tuning(self, name: str):
"""
Converts the prefix tuning with the given name from the reparameterized form into the flat form.
Args:
name (str): The name of the prefix tuning.
"""
for module in self.modules():
if isinstance(module, PrefixTuningPool):
if name in module.prefix_tunings:
module.prefix_tunings[name].eject()
@inherit_doc
class ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):
"""
Mixin adding support for loading/ saving adapters to transformer models with head(s).
"""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._convert_to_flex_head = False
def set_shared_parameters(self, param):
self.shared_parameters = param
if self.base_model is not self:
self.base_model.shared_parameters = self.shared_parameters
def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
"""
Iterates over all layers of the model.
"""
if self.base_model is self:
return super().iter_layers()
else:
return self.base_model.iter_layers()
def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):
"""
Adds a new adapter module of the specified type to the model.
Args:
adapter_name (str): The name of the adapter module to be added.
config (str or dict, optional): The adapter configuration, can be either:
- the string identifier of a pre-defined configuration dictionary
- a configuration dictionary specifying the full config
- if not given, the default configuration for this adapter type will be used
overwrite_ok (bool, optional):
Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.
set_active (bool, optional):
Set the adapter to be the active one. By default (False), the adapter is added but not activated.
If self.base_model is self, must inherit from a class that implements this method, to preclude infinite
recursion
"""
if self.base_model is self:
super().add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)
else:
self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):
"""
Sets the model into mode for training the given adapters. If self.base_model is self, must inherit from a class
that implements this method, to preclude infinite recursion
"""
if self.base_model is self:
super().train_adapter(adapter_setup, train_embeddings)
else:
self.base_model.train_adapter(adapter_setup, train_embeddings)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""
Sets the model into mode for training of adapter fusion determined by a list of adapter names. If
self.base_model is self, must inherit from a class that implements this method, to preclude infinite recursion
"""
if self.base_model is self:
super().train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
else:
self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
def save_head(self, save_directory: str, head_name: str = None):
loader = PredictionHeadLoader(self)
loader.save(save_directory, name=head_name)
def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):
loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)
return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)
def save_adapter(
self,
save_directory: str,
adapter_name: str,
with_head: bool = True,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().save_adapter(
save_directory,
adapter_name,
meta_dict=meta_dict,
custom_weights_loaders=custom_weights_loaders,
)
def load_adapter(
self,
adapter_name_or_path: str,
config: Union[dict, str] = None,
version: str = None,
model_name: str = None,
load_as: str = None,
source: str = None,
with_head: bool = True,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
leave_out: Optional[List[int]] = None,
id2label=None,
set_active: bool = False,
**kwargs
) -> str:
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(
PredictionHeadLoader(
self,
error_on_missing=False,
convert_to_flex_head=self._convert_to_flex_head,
)
)
# Support passing a num_labels for compatibility reasons. Convert to label map here.
num_labels = kwargs.pop("num_labels", None)
if num_labels is not None:
id2label = {i: "LABEL_" + str(i) for i in range(num_labels)}
return super().load_adapter(
adapter_name_or_path,
config=config,
version=version,
model_name=model_name,
load_as=load_as,
source=source,
custom_weights_loaders=custom_weights_loaders,
leave_out=leave_out,
id2label=id2label,
set_active=set_active,
**kwargs,
)
def save_all_adapters(
self,
save_directory: str,
with_head: bool = True,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().save_all_adapters(
save_directory,
meta_dict=meta_dict,
custom_weights_loaders=custom_weights_loaders,
)
def save_adapter_fusion(
self,
save_directory: str,
adapter_names: Union[Fuse, list, str],
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
with_head: Union[bool, str] = False,
):
"""
Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded
using `load_adapter_fusion()`.
Args:
save_directory (str): Path to a directory where the AdapterFusion should be saved.
adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.
with_head (Union[bool, str]):
If True, will save a head with the same name as the AdapterFusionLayer. If a string, this will be used
as the name of the head to be saved.
Raises:
ValueError: If the given AdapterFusion name is invalid.
"""
super().save_adapter_fusion(save_directory, adapter_names, meta_dict, custom_weights_loaders)
if with_head:
# Make sure to cover the different options for adapter_names
if isinstance(with_head, str):
head_name = with_head
elif isinstance(adapter_names, Fuse):
head_name = adapter_names.name
elif isinstance(adapter_names, list):
head_name = ",".join(adapter_names)
else:
head_name = adapter_names
if head_name not in self.heads:
raise ValueError("No head with name {} found".format(head_name))
loader = PredictionHeadLoader(self)
loader.save(save_directory, head_name)
def load_adapter_fusion(
self,
adapter_fusion_name_or_path: str,
load_as: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
set_active: bool = False,
with_head: bool = True,
**kwargs
) -> str:
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
return super().load_adapter_fusion(adapter_fusion_name_or_path, load_as, custom_weights_loaders, set_active)
def save_all_heads(self, save_directory):
for head_name in self.heads:
save_path = join(save_directory, head_name)
self.save_head(save_path, head_name)
def get_labels(self):
return list(self.config.id2label.values())
def get_labels_dict(self):
return self.config.id2label
def get_adapter(self, name):
"""
If self.base_model is self, must inherit from a class that implements this method, to preclude infinite
recursion
"""
if self.base_model is self:
return super().get_adapter(name)
else:
return self.base_model.get_adapter(name)
def load_embeddings(self, path: str, name: str):
if self.base_model is self:
return super().load_embeddings(path, name)
else:
return self.base_model.load_embeddings(path, name)
def save_embeddings(self, path, name, tokenizer=None):
if self.base_model is self:
return super().save_embeddings(path, name, tokenizer)
else:
return self.base_model.save_embeddings(path, name, tokenizer)
def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):
if self.base_model is self:
return super().add_embeddings(name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim)
else:
return self.base_model.add_embeddings(
name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim
)
def set_active_embeddings(self, name):
if self.base_model is self:
return super().set_active_embeddings(name)
else:
return self.base_model.set_active_embeddings(name)
def delete_embeddings(self, name):
if self.base_model is self:
return super().delete_embeddings(name)
else:
return self.base_model.delete_embeddings(name)
|
import logging
import os
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict
from os.path import join
from typing import Iterable, List, Optional, Tuple, Union
import torch
from torch import nn
from .composition import AdapterCompositionBlock, Fuse, Stack, parse_composition
from .configuration import AdapterConfig, AdapterConfigBase, AdapterFusionConfig, get_adapter_config_hash
from .context import AdapterSetup, ForwardContext
from .hub_mixin import PushAdapterToHubMixin
from .layer import AdapterLayer, AdapterLayerBase
from .loading import AdapterFusionLoader, AdapterLoader, PredictionHeadLoader, WeightsLoader
from .modeling import Adapter, GLOWCouplingBlock, NICECouplingBlock
from .prefix_tuning import PrefixTuningPool, PrefixTuningShim
from .utils import EMBEDDING_FILE, TOKENIZER_PATH, inherit_doc
from .wrappers.configuration import wrap_config
logger = logging.getLogger(__name__)
class InvertibleAdaptersMixin:
"""Mixin for Transformer models adding invertible adapters."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.invertible_adapters = nn.ModuleDict(dict())
# Make sure config is wrapped
self.config = wrap_config(self.config)
def add_invertible_adapter(self, adapter_name: str):
"""
Adds an invertible adapter module for the adapter with the given name. If the given adapter does not specify an
invertible adapter config, this method does nothing.
Args:
adapter_name (str): The name of the adapter for which to add an invertible adapter module.
"""
if adapter_name in self.invertible_adapters:
raise ValueError(f"Model already contains an adapter module for '{adapter_name}'.")
adapter_config = self.config.adapters.match(
adapter_name,
config_type=AdapterConfig,
location_key="inv_adapter",
)
if adapter_config and adapter_config["inv_adapter"]:
if adapter_config["inv_adapter"] == "nice":
inv_adap = NICECouplingBlock(
[[self.config.hidden_size]],
non_linearity=adapter_config["non_linearity"],
reduction_factor=adapter_config["inv_adapter_reduction_factor"],
)
elif adapter_config["inv_adapter"] == "glow":
inv_adap = GLOWCouplingBlock(
[[self.config.hidden_size]],
non_linearity=adapter_config["non_linearity"],
reduction_factor=adapter_config["inv_adapter_reduction_factor"],
)
else:
raise ValueError(f"Invalid invertible adapter type '{adapter_config['inv_adapter']}'.")
self.invertible_adapters[adapter_name] = inv_adap
self.invertible_adapters[adapter_name].apply(Adapter.init_bert_weights)
def delete_invertible_adapter(self, adapter_name: str):
if adapter_name in self.invertible_adapters:
del self.invertible_adapters[adapter_name]
def get_invertible_adapter(self):
# TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:
first_adapter = self.config.adapters.active_setup.first()
if first_adapter in self.invertible_adapters:
return self.invertible_adapters[first_adapter]
return None
def enable_invertible_adapters(self, adapter_names):
for adapter_name in adapter_names:
if adapter_name in self.invertible_adapters:
for param in self.invertible_adapters[adapter_name].parameters():
param.requires_grad = True
def invertible_adapters_forward(self, hidden_states, rev=False):
# TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:
first_adapter = self.config.adapters.active_setup.first()
if first_adapter in self.invertible_adapters:
hidden_states = self.invertible_adapters[first_adapter](hidden_states, rev=rev)
return hidden_states
class ModelAdaptersMixin(PushAdapterToHubMixin, ABC):
"""Mixin for transformer models adding support for loading/ saving adapters."""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
if config.name_or_path and not os.path.exists(config.name_or_path):
self.model_name = config.name_or_path
else:
self.model_name = None
self.loaded_embeddings = {}
self.shared_parameters = nn.ModuleDict()
self._active_embedding = "default"
# Make sure config is wrapped
self.config = wrap_config(self.config)
def _link_prefix_to_pool(self, layer):
if isinstance(layer, PrefixTuningShim):
layer.set_pool(self.base_model.prefix_tuning)
def _init_adapter_modules(self, add_prefix_tuning_pool=True):
"""
This method initializes adapter modules and fusion modules from the model config.
"""
# Link all prefix tunings
if add_prefix_tuning_pool:
self.base_model.prefix_tuning = PrefixTuningPool(self.config)
self.apply_to_adapter_layers(lambda i, layer: self._link_prefix_to_pool(layer))
# Initialize adapters from config
for adapter_name in self.config.adapters:
self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))
# Initialize fusion from config
for fusion_name in self.config.adapters.fusions:
self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(fusion_name))
self.loaded_embeddings["default"] = self.get_input_embeddings()
# These methods have to be implemented by every deriving class:
@abstractmethod
def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
"""
Iterates over all layers of the model.
This abstract method has to ne implemented by every implementing model.
"""
pass
def apply_to_adapter_layers(self, fn):
"""
Applies a function to all adapter layers of the model.
"""
for i, layer in self.iter_layers():
for module in layer.modules():
if isinstance(module, AdapterLayerBase):
fn(i, module)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):
"""Sets the model into mode for training the given adapters."""
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, True, False))
for adapter_name in adapter_setup:
if adapter_name in self.shared_parameters:
for param in self.shared_parameters[adapter_name].values():
param.requires_grad = True
if isinstance(self, InvertibleAdaptersMixin):
self.enable_invertible_adapters(adapter_setup.flatten())
# use the adapters to be trained by default in every forward pass
self.set_active_adapters(adapter_setup)
if train_embeddings:
self.get_input_embeddings().train()
def train_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
warnings.warn(
"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.",
FutureWarning,
)
self.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, unfreeze_adapters, True))
# use the adapters to be trained by default in every forward pass
self.set_active_adapters(adapter_setup)
# TODO implement fusion for invertible adapters
def has_adapters(self):
if not getattr(self.config, "is_adaptable", None):
return False
return len(self.config.adapters.adapters) > 0
@property
def has_parallel_adapters(self) -> bool:
if self.config.adapters.active_setup:
return self.config.adapters.active_setup.parallel_channels > 1
else:
return False
@property
def active_adapters(self) -> AdapterCompositionBlock:
return self.config.adapters.active_setup
@active_adapters.setter
def active_adapters(self, adapter_setup: Union[list, AdapterCompositionBlock]):
self.set_active_adapters(adapter_setup)
def set_shared_parameters(self, param):
self.shared_parameters = param
def set_active_adapters(
self, adapter_setup: Union[list, AdapterCompositionBlock], skip_layers: Optional[List[int]] = None
):
"""
Sets the adapter modules to be used by default in every forward pass. If no adapter with the given name is
found, no module of the respective type will be activated.
Args:
adapter_setup (list):
The list of adapters to be activated by default. Can be a fusion or stacking configuration.
"""
adapter_setup = parse_composition(adapter_setup, model_type=self.config.model_type)
if adapter_setup:
for adapter_name in adapter_setup.flatten():
if adapter_name not in self.config.adapters.adapters:
raise ValueError(
f"No adapter with name '{adapter_name}' found. Please make sure that all specified adapters are correctly loaded."
)
self.config.adapters.active_setup = adapter_setup
self.config.adapters.skip_layers = skip_layers
def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):
"""
Adds a new adapter module of the specified type to the model.
Args:
            adapter_name (str): The name of the adapter module to be added.
            config (str or dict or AdapterConfigBase, optional): The adapter configuration, can be either:
                - the string identifier of a pre-defined configuration dictionary
                - a configuration dictionary specifying the full config
                - if not given, the default configuration for this adapter type will be used
            overwrite_ok (bool, optional):
                Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.
            set_active (bool, optional):
                Set the adapter to be the active one. By default (False), the adapter is added but not activated.
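        Example (illustrative sketch; ``model`` and the adapter name are placeholders, the default configuration is used):
            >>> model.add_adapter("my_adapter", set_active=True)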
"""
if isinstance(config, dict):
config = AdapterConfigBase.load(config) # ensure config is ok and up-to-date
# In case adapter already exists and we allow overwriting, explicitly delete the existing one first
if overwrite_ok and adapter_name in self.config.adapters:
self.delete_adapter(adapter_name)
self.config.adapters.add(adapter_name, config=config)
try:
self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))
# PHM Layer
if self.config.adapters.match(adapter_name, AdapterConfig, location_key="phm_layer"):
self._add_shared_parameters(adapter_name, config)
# Prefix Tuning
for module in self.modules():
if isinstance(module, PrefixTuningPool):
module.confirm_prefix(adapter_name)
if isinstance(self, InvertibleAdaptersMixin):
self.add_invertible_adapter(adapter_name)
except ValueError as ex:
self.delete_adapter(adapter_name)
raise ex
if set_active:
self.set_active_adapters(adapter_name)
def _add_shared_parameters(self, adapter_name, adapter_config: AdapterConfig):
self.shared_parameters[adapter_name] = (
list(self.get_adapter(adapter_name)[0].values())[0].adapter_down[0].init_shared_parameters()
)
def add_fusion(self, adapter_names: Union[Fuse, list], adapter_fusion_config=None, override_kwargs=None):
warnings.warn(
"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.",
FutureWarning,
)
        if override_kwargs is None:
            override_kwargs = {}
        adapter_fusion_config = AdapterFusionConfig.from_dict(adapter_fusion_config).replace(**override_kwargs)
self.add_adapter_fusion(adapter_names, adapter_fusion_config)
def add_adapter_fusion(
self,
adapter_names: Union[Fuse, list, str],
config=None,
overwrite_ok: bool = False,
set_active: bool = False,
):
"""
        Adds AdapterFusion to the model with all the necessary configurations and weight initializations
Args:
adapter_names (Fuse or list or str): AdapterFusion layer to add. Can be either:
- a ``Fuse`` composition block
- a list of adapter names to fuse
- a comma-separated string of adapter names to fuse
config (str or dict): adapter fusion configuration, can be either:
- a string identifying a pre-defined adapter fusion configuration
- a dictionary representing the adapter fusion configuration
- the path to a file containing the adapter fusion configuration
overwrite_ok (bool, optional):
Overwrite an AdapterFusion layer with the same name if it exists. By default (False), an exception is
thrown.
set_active (bool, optional):
Activate the added AdapterFusion. By default (False), the AdapterFusion is added but not activated.
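        Example (illustrative sketch; ``model``, "a" and "b" are placeholders for a model and two previously added adapters):
            >>> model.add_adapter_fusion(Fuse("a", "b"), set_active=True)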
"""
if isinstance(adapter_names, Fuse):
adapter_names = adapter_names.children
elif isinstance(adapter_names, str):
adapter_names = adapter_names.split(",")
if isinstance(config, dict):
config = AdapterFusionConfig.from_dict(config) # ensure config is ok and up-to-date
# In case adapter already exists and we allow overwriting, explicitly delete the existing one first
if overwrite_ok and self.config.adapters.get_fusion(adapter_names) is not None:
self.delete_adapter_fusion(adapter_names)
self.config.adapters.add_fusion(adapter_names, config=config)
self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(adapter_names))
if set_active:
if not isinstance(adapter_names, list):
adapter_names = adapter_names.split(",")
self.set_active_adapters(Fuse(*adapter_names))
def delete_adapter(self, adapter_name: str):
"""
Deletes the adapter with the specified name from the model.
Args:
adapter_name (str): The name of the adapter.
"""
if adapter_name not in self.config.adapters:
logger.info("No adapter '%s' found for deletion. Skipping.", adapter_name)
return
del self.config.adapters.adapters[adapter_name]
self.apply_to_adapter_layers(lambda i, layer: layer.delete_adapter(adapter_name))
if isinstance(self, InvertibleAdaptersMixin):
self.delete_invertible_adapter(adapter_name)
# Reset active adapters if this was the only active adapter
if self.active_adapters == Stack(adapter_name):
self.active_adapters = None
def delete_adapter_fusion(self, adapter_names: Union[Fuse, list, str]):
"""
Deletes the AdapterFusion layer of the specified adapters.
Args:
adapter_names (Union[Fuse, list, str]): AdapterFusion layer to delete.
"""
if isinstance(adapter_names, Fuse):
adapter_fusion_name = ",".join(adapter_names.children)
elif isinstance(adapter_names, list):
adapter_fusion_name = ",".join(adapter_names)
elif isinstance(adapter_names, str):
adapter_fusion_name = adapter_names
else:
raise ValueError("Invalid AdapterFusion definition: {}".format(adapter_names))
if adapter_fusion_name not in self.config.adapters.fusions:
logger.info("No AdapterFusion '%s' found for deletion. Skipping.", adapter_fusion_name)
return
del self.config.adapters.fusions[adapter_fusion_name]
self.apply_to_adapter_layers(lambda i, layer: layer.delete_fusion_layer(adapter_fusion_name))
# Reset active adapters if this was the active setup
if self.active_adapters == adapter_names:
self.active_adapters = None
def save_adapter(
self,
save_directory: str,
adapter_name: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves an adapter and its configuration file to a directory so that it can be shared or reloaded using
`load_adapter()`.
Args:
save_directory (str): Path to a directory where the adapter should be saved.
adapter_name (str): Name of the adapter to be saved.
Raises:
ValueError: If the given adapter name is invalid.
"""
loader = AdapterLoader(self)
loader.save(save_directory, adapter_name, meta_dict)
# save additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.save(save_directory, adapter_name)
def save_adapter_fusion(
self,
save_directory: str,
adapter_names: Union[Fuse, list, str],
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded
using `load_adapter_fusion()`.
Args:
save_directory (str): Path to a directory where the AdapterFusion should be saved.
adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.
Raises:
ValueError: If the given AdapterFusion name is invalid.
"""
if isinstance(adapter_names, Fuse):
adapter_fusion_name = ",".join(adapter_names.children)
elif isinstance(adapter_names, list):
adapter_fusion_name = ",".join(adapter_names)
elif isinstance(adapter_names, str):
adapter_fusion_name = adapter_names
else:
raise ValueError("Invalid AdapterFusion definition: {}".format(adapter_names))
loader = AdapterFusionLoader(self)
loader.save(save_directory, adapter_fusion_name, meta_dict)
# save additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.save(save_directory, adapter_fusion_name)
def load_adapter(
self,
adapter_name_or_path: str,
config: Union[dict, str] = None,
version: str = None,
model_name: str = None,
load_as: str = None,
source: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
leave_out: Optional[List[int]] = None,
id2label=None,
set_active: bool = False,
**kwargs
) -> str:
"""
Loads a pre-trained pytorch adapter module from the local file system or a remote location.
Args:
adapter_name_or_path (str): can be either:
- the identifier of a pre-trained task adapter to be loaded from Adapter Hub
                - a path to a directory containing adapter weights saved using `model.save_adapter()`
- a URL pointing to a zip folder containing a saved adapter module
            config (dict or str, optional): The requested configuration of the adapter. If not specified, will be either:
                - the default adapter config for the requested adapter if specified
                - the global default adapter config
version (str, optional): The version of the adapter to be loaded.
model_name (str, optional): The string identifier of the pre-trained model.
load_as (str, optional): Load the adapter using this name. By default, the name with which the adapter was
saved will be used.
source (str, optional): Identifier of the source(s) from where to load the adapter. Can be:
- "ah" (default): search on AdapterHub.
- "hf": search on HuggingFace model hub.
- None: search on all sources
leave_out: Dynamically drop adapter modules in the specified Transformer layers when loading the adapter.
set_active (bool, optional):
Set the loaded adapter to be the active one. By default (False), the adapter is loaded but not
activated.
Returns:
str: The name with which the adapter was added to the model.
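        Example (illustrative sketch; the path is a placeholder for a directory previously written by ``save_adapter()``):
            >>> name = model.load_adapter("./saved/my_adapter", load_as="my_adapter", set_active=True)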
"""
loader = AdapterLoader(self)
load_dir, load_name = loader.load(
adapter_name_or_path,
config,
version,
model_name,
load_as,
source=source,
leave_out=leave_out,
set_active=set_active,
**kwargs,
)
# load additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.load(
load_dir,
load_as=load_as,
loading_info=kwargs.get("loading_info", None),
main_load_name=load_name,
id2label=id2label,
set_active=set_active,
)
return load_name
def load_adapter_fusion(
self,
adapter_fusion_name_or_path: str,
load_as: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
set_active: bool = False,
**kwargs
) -> str:
"""
Loads a pre-trained AdapterFusion layer from the local file system.
Args:
adapter_fusion_name_or_path (str):
a path to a directory containing AdapterFusion weights saved using `model.save_adapter_fusion()`.
load_as (str, optional): Load the AdapterFusion using this name.
By default, the name with which the AdapterFusion layer was saved will be used.
set_active (bool, optional):
Activate the loaded AdapterFusion. By default (False), the AdapterFusion is loaded but not activated.
Returns:
str: The name with which the AdapterFusion was added to the model.
"""
loader = AdapterFusionLoader(self)
load_dir, load_name = loader.load(adapter_fusion_name_or_path, load_as, set_active=set_active)
# load additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.load(
load_dir,
load_as=load_as,
loading_info=kwargs.get("loading_info", None),
main_load_name=load_name,
set_active=set_active,
)
return load_name
def save_all_adapters(
self,
save_directory: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves all adapters of this model together with their configuration to subfolders of the given location.
Args:
save_directory (str): Path to a directory where the adapters should be saved.
"""
for name in self.config.adapters:
adapter_config = self.config.adapters.get(name)
h = get_adapter_config_hash(adapter_config)
save_path = join(save_directory, name)
if meta_dict:
meta_dict.update({"config_id": h})
else:
meta_dict = {"config_id": h}
self.save_adapter(save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders)
def save_all_adapter_fusions(
self,
save_directory: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves all AdapterFusion layers of this model together with their configuration to subfolders of the given
location.
Args:
save_directory (str): Path to a directory where the AdapterFusion layers should be saved.
"""
for name in self.config.adapters.fusions:
adapter_fusion_config = self.config.adapters.get_fusion(name)
h = get_adapter_config_hash(adapter_fusion_config)
save_path = join(save_directory, name)
if meta_dict:
meta_dict.update({"config_id": h})
else:
meta_dict = {"config_id": h}
self.save_adapter_fusion(
save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders
)
def freeze_model(self, freeze=True):
"""Freezes all weights of the model."""
# first freeze/ unfreeze all model weights
for param in self.base_model.parameters():
param.requires_grad = not freeze
self.model_frozen = freeze
def forward_context(self, context: ForwardContext, *args, **kwargs):
"""
This method is called by the ``ForwardContext`` at the beginning of the forward pass.
"""
# some warnings if we don't use available adapters
active_adapters = getattr(self, "active_adapters", None) or AdapterSetup.get_context()
if not active_adapters:
if self.has_adapters():
logger.warning("There are adapters available but none are activated for the forward pass.")
return
context.adapters_parallelized = False
# Add the shared parameters for the active adapters to the context
context.shared_parameters = {
name: param for name, param in self.shared_parameters.items() if name in active_adapters.flatten()
}
# Prefix tuning
input_tensor = kwargs.get("input_ids", None)
if input_tensor is None:
input_tensor = kwargs.get("decoder_input_ids", None)
if input_tensor is None:
input_tensor = kwargs.get("attention_mask", None)
if input_tensor is None:
input_tensor = args[0]
context.prefix_states = self.base_model.prefix_tuning(input_tensor.shape[0])
def load_embeddings(self, path: str, name: str):
"""
        Loads a saved embedding from the given path. If the embedding was saved together with a tokenizer, the
        tokenizer is returned as well.
Args:
path: the path to the saved embedding
name: the name the embedding should be loaded as
        Returns: the tokenizer if one was saved with the embedding, otherwise None
"""
from ..models.auto.tokenization_auto import AutoTokenizer
if name in self.loaded_embeddings:
raise ValueError("An embedding with the name {} already exists".format(name))
tokenizer = None
tokenizer_path = os.path.join(path, TOKENIZER_PATH)
if os.path.isdir(tokenizer_path):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
embedding_path = os.path.join(path, EMBEDDING_FILE)
if not os.path.isfile(embedding_path):
raise FileNotFoundError("No embeddings found at {}".format(embedding_path))
weights = torch.load(embedding_path)
self.loaded_embeddings[name] = nn.Embedding.from_pretrained(weights)
self.set_active_embeddings(name)
return tokenizer
def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):
"""
        Adds a new embedding to the model. If a reference embedding and reference tokenizer are provided, tokens
        present in both tokenizers are initialized from the corresponding entries of the reference embedding.
Args:
name: the name of the embedding
tokenizer: the tokenizer determining the vocab of the embedding
reference_embedding:
the reference embedding to use for initializing the embeddings of tokens present in the newly created
embedding
reference_tokenizer: the tokenizer providing the vocab for the reference embedding
embedding_dim: the dimension of the embeddings (if None the hidden_size from the config is used)
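        Example (illustrative sketch; the tokenizer objects are placeholders and the model's original embedding is
        assumed to be registered under the name "default"):
            >>> model.add_embeddings("de", de_tokenizer,
            ...                      reference_embedding="default",
            ...                      reference_tokenizer=en_tokenizer)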
"""
if name in self.loaded_embeddings:
raise ValueError("An embedding with the name {} already exists".format(name))
if embedding_dim is None:
embedding_dim = self.config.hidden_size
embedding = nn.Embedding(tokenizer.vocab_size, embedding_dim)
embedding.requires_grad_(False)
if (reference_embedding is not None and reference_tokenizer is None) or (
reference_tokenizer is not None and reference_embedding is None
):
raise KeyError(
"Reference embedding and reference tokenizer are required to use initialize embeddings from reference embedding"
)
if reference_embedding is not None and reference_tokenizer is not None:
tokens = set(tokenizer.get_vocab().keys()) & set(reference_tokenizer.get_vocab().keys())
reference_vocab = reference_tokenizer.get_vocab()
vocab = tokenizer.get_vocab()
for t in tokens:
idx_reference = reference_vocab[t]
idx = vocab[t]
embedding.weight[idx] = self.loaded_embeddings[reference_embedding].weight[idx_reference].clone()
embedding.train(False)
self.loaded_embeddings[name] = embedding
self.set_active_embeddings(name)
def delete_embeddings(self, name):
"""
Deletes the embedding with the given name
Args:
name: The name of the embedding that should be deleted
"""
if name not in self.loaded_embeddings:
raise ValueError("No embedding with name {}".format(name))
if self.active_embeddings == name:
logger.warning("The active embedding is deleted. Setting the default embedding as active.")
self.set_active_embeddings("default")
del self.loaded_embeddings[name]
def save_embeddings(self, path, name, tokenizer=None):
"""
Saves the embedding with the given name. If a tokenizer is passed as well the tokenizer is saved together with
the embedding.
Args:
path: The path where the embedding should be saved
name: The name of the embedding that should be saved
tokenizer: optionally a tokenizer to save with the embedding (default is None)
"""
if self.active_embeddings == name:
self.loaded_embeddings[name] = self.get_input_embeddings()
os.makedirs(path, exist_ok=True)
embedding_path = os.path.join(path, EMBEDDING_FILE)
torch.save(self.loaded_embeddings[name].weight, embedding_path)
if tokenizer:
tokenizer_path = os.path.join(path, TOKENIZER_PATH)
tokenizer.save_pretrained(tokenizer_path)
def set_active_embeddings(self, name):
"""
Sets the active embedding for the forward pass of the model
Args:
name: The name of the embedding that should be used
"""
self.loaded_embeddings[self.active_embeddings] = self.get_input_embeddings()
self.set_input_embeddings(self.loaded_embeddings[name])
self._active_embedding = name
@property
def active_embeddings(self):
return self._active_embedding
def get_fusion_regularization_loss(self):
reg_loss = 0.0
target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
for i, layer in self.iter_layers():
for module in layer.modules():
if isinstance(module, AdapterLayer):
for _, layer_fusion in module.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
return reg_loss
def get_adapter(self, name) -> dict:
"""
Returns a dictionary with all weights of the adapter with the specified name.
Args:
name (str): The adapter name.
Returns:
            dict: A nested dictionary containing the weights of the adapter. The dictionary is structured as follows:
{<layer id>: {<module location>: <nn.Module>}}.
"""
destination = defaultdict(dict)
# use a custom index to ensure numbering is from 0 to N layers
for i, (_, layer) in enumerate(self.iter_layers()):
for module in layer.modules():
if isinstance(module, AdapterLayerBase):
adapter_module = module.get_adapter(name)
if adapter_module is not None:
destination[i][module.location_key] = adapter_module
return dict(destination)
def eject_prefix_tuning(self, name: str):
"""
Converts the prefix tuning with the given name from the reparameterized form into the flat form.
Args:
name (str): The name of the prefix tuning.
"""
for module in self.modules():
if isinstance(module, PrefixTuningPool):
if name in module.prefix_tunings:
module.prefix_tunings[name].eject()
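# --- Illustrative usage sketch (not part of the mixin above) -----------------
# A minimal add -> train -> save -> load round trip over the methods defined in
# ModelAdaptersMixin. `model` is a placeholder for a transformers model instance
# that mixes in ModelAdaptersMixin; the adapter name and save path are arbitrary.
def _example_adapter_round_trip(model, save_dir="./saved/my_adapter"):
    model.add_adapter("my_adapter")        # register a new adapter with the default config
    model.train_adapter("my_adapter")      # freeze the base model, unfreeze the adapter
    # ... run a task-specific training loop here ...
    model.save_adapter(save_dir, "my_adapter")
    return model.load_adapter(save_dir, load_as="my_adapter", set_active=True)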
@inherit_doc
class ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):
"""
Mixin adding support for loading/ saving adapters to transformer models with head(s).
"""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._convert_to_flex_head = False
def set_shared_parameters(self, param):
self.shared_parameters = param
if self.base_model is not self:
self.base_model.shared_parameters = self.shared_parameters
def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
"""
Iterates over all layers of the model.
"""
if self.base_model is self:
return super().iter_layers()
else:
return self.base_model.iter_layers()
def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):
"""
Adds a new adapter module of the specified type to the model.
Args:
adapter_name (str): The name of the adapter module to be added.
config (str or dict, optional): The adapter configuration, can be either:
- the string identifier of a pre-defined configuration dictionary
- a configuration dictionary specifying the full config
- if not given, the default configuration for this adapter type will be used
overwrite_ok (bool, optional):
Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.
set_active (bool, optional):
Set the adapter to be the active one. By default (False), the adapter is added but not activated.
                If self.base_model is self, the class must inherit from a class that implements this method to
                preclude infinite recursion.
"""
if self.base_model is self:
super().add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)
else:
self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):
"""
        Sets the model into mode for training the given adapters. If self.base_model is self, the class must inherit
        from a class that implements this method to preclude infinite recursion.
"""
if self.base_model is self:
super().train_adapter(adapter_setup, train_embeddings)
else:
self.base_model.train_adapter(adapter_setup, train_embeddings)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""
        Sets the model into mode for training of adapter fusion determined by a list of adapter names. If
        self.base_model is self, the class must inherit from a class that implements this method to preclude infinite
        recursion.
"""
if self.base_model is self:
super().train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
else:
self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
def save_head(self, save_directory: str, head_name: str = None):
loader = PredictionHeadLoader(self)
loader.save(save_directory, name=head_name)
def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):
loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)
return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)
def save_adapter(
self,
save_directory: str,
adapter_name: str,
with_head: bool = True,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().save_adapter(
save_directory,
adapter_name,
meta_dict=meta_dict,
custom_weights_loaders=custom_weights_loaders,
)
def load_adapter(
self,
adapter_name_or_path: str,
config: Union[dict, str] = None,
version: str = None,
model_name: str = None,
load_as: str = None,
source: str = None,
with_head: bool = True,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
leave_out: Optional[List[int]] = None,
id2label=None,
set_active: bool = False,
**kwargs
) -> str:
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(
PredictionHeadLoader(
self,
error_on_missing=False,
convert_to_flex_head=self._convert_to_flex_head,
)
)
# Support passing a num_labels for compatibility reasons. Convert to label map here.
num_labels = kwargs.pop("num_labels", None)
if num_labels is not None:
id2label = {i: "LABEL_" + str(i) for i in range(num_labels)}
return super().load_adapter(
adapter_name_or_path,
config=config,
version=version,
model_name=model_name,
load_as=load_as,
source=source,
custom_weights_loaders=custom_weights_loaders,
leave_out=leave_out,
id2label=id2label,
set_active=set_active,
**kwargs,
)
def save_all_adapters(
self,
save_directory: str,
with_head: bool = True,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().save_all_adapters(
save_directory,
meta_dict=meta_dict,
custom_weights_loaders=custom_weights_loaders,
)
def save_adapter_fusion(
self,
save_directory: str,
adapter_names: Union[Fuse, list, str],
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
with_head: Union[bool, str] = False,
):
"""
Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded
using `load_adapter_fusion()`.
Args:
save_directory (str): Path to a directory where the AdapterFusion should be saved.
adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.
with_head (Union[bool, str]):
If True, will save a head with the same name as the AdapterFusionLayer. If a string, this will be used
as the name of the head to be saved.
Raises:
ValueError: If the given AdapterFusion name is invalid.
"""
super().save_adapter_fusion(save_directory, adapter_names, meta_dict, custom_weights_loaders)
if with_head:
# Make sure to cover the different options for adapter_names
if isinstance(with_head, str):
head_name = with_head
elif isinstance(adapter_names, Fuse):
head_name = adapter_names.name
elif isinstance(adapter_names, list):
head_name = ",".join(adapter_names)
else:
head_name = adapter_names
if head_name not in self.heads:
raise ValueError("No head with name {} found".format(head_name))
loader = PredictionHeadLoader(self)
loader.save(save_directory, head_name)
def load_adapter_fusion(
self,
adapter_fusion_name_or_path: str,
load_as: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
set_active: bool = False,
with_head: bool = True,
**kwargs
) -> str:
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
        return super().load_adapter_fusion(adapter_fusion_name_or_path, load_as, custom_weights_loaders, set_active)
def save_all_heads(self, save_directory):
for head_name in self.heads:
save_path = join(save_directory, head_name)
self.save_head(save_path, head_name)
def get_labels(self):
return list(self.config.id2label.values())
def get_labels_dict(self):
return self.config.id2label
def get_adapter(self, name):
"""
        If self.base_model is self, the class must inherit from a class that implements this method to preclude
        infinite recursion.
"""
if self.base_model is self:
return super().get_adapter(name)
else:
return self.base_model.get_adapter(name)
def load_embeddings(self, path: str, name: str):
if self.base_model is self:
return super().load_embeddings(path, name)
else:
return self.base_model.load_embeddings(path, name)
def save_embeddings(self, path, name, tokenizer=None):
if self.base_model is self:
return super().save_embeddings(path, name, tokenizer)
else:
return self.base_model.save_embeddings(path, name, tokenizer)
def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):
        if self.base_model is self:
return super().add_embeddings(name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim)
else:
return self.base_model.add_embeddings(
name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim
)
def set_active_embeddings(self, name):
        if self.base_model is self:
return super().set_active_embeddings(name)
else:
return self.base_model.set_active_embeddings(name)
def delete_embeddings(self, name):
        if self.base_model is self:
return super().delete_embeddings(name)
else:
return self.base_model.delete_embeddings(name)
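# --- Illustrative training-step sketch (not part of the mixins above) --------
# When an AdapterFusion layer is being trained, the regularization term returned
# by get_fusion_regularization_loss() is typically added to the task loss.
# `model`, `batch` and `optimizer` are placeholders; the model is assumed to
# return an output object exposing a `.loss` attribute.
def _example_fusion_training_step(model, batch, optimizer):
    outputs = model(**batch)
    loss = outputs.loss + model.get_fusion_regularization_loss()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()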
|
import numpy as np
import torch
from dataclasses import dataclass
from typing import List
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import double_sentence_featurize, labels_to_bimap
from jiant.utils.python.io import read_jsonl
@dataclass
class Example(BaseExample):
guid: str
input_premise: str
input_hypothesis: str
label: str
def tokenize(self, tokenizer):
return TokenizedExample(
guid=self.guid,
input_premise=tokenizer.tokenize(self.input_premise),
input_hypothesis=tokenizer.tokenize(self.input_hypothesis),
label_id=CounterfactualNliTask.LABEL_TO_ID[self.label],
)
@dataclass
class TokenizedExample(BaseTokenizedExample):
guid: str
input_premise: List
input_hypothesis: List
label_id: int
def featurize(self, tokenizer, feat_spec):
return double_sentence_featurize(
guid=self.guid,
input_tokens_a=self.input_premise,
input_tokens_b=self.input_hypothesis,
label_id=self.label_id,
tokenizer=tokenizer,
feat_spec=feat_spec,
data_row_class=DataRow,
)
@dataclass
class DataRow(BaseDataRow):
guid: str
input_ids: np.ndarray
input_mask: np.ndarray
segment_ids: np.ndarray
label_id: int
tokens: list
@dataclass
class Batch(BatchMixin):
input_ids: torch.LongTensor
input_mask: torch.LongTensor
segment_ids: torch.LongTensor
label_id: torch.LongTensor
tokens: list
class CounterfactualNliTask(Task):
Example = Example
TokenizedExample = Example
DataRow = DataRow
Batch = Batch
TASK_TYPE = TaskTypes.CLASSIFICATION
LABELS = ["contradiction", "entailment", "neutral"]
LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
def get_train_examples(self):
return self._create_examples(lines=read_jsonl(self.train_path), set_type="train")
def get_val_examples(self):
return self._create_examples(lines=read_jsonl(self.val_path), set_type="val")
def get_test_examples(self):
return self._create_examples(lines=read_jsonl(self.test_path), set_type="test")
@classmethod
def _create_examples(cls, lines, set_type):
examples = []
empty_labels = 0
dash_labels = 0
for (i, line) in enumerate(lines):
if set_type != "test":
if line["label"] == "-":
dash_labels+=1
continue
elif line["label"] == "":
empty_labels+=1
continue
else:
assert line["label"] in cls.LABELS, f"Example {i} label not supported: {line["label"]}"
examples.append(
Example(
guid="%s-%s" % (set_type, i),
input_premise=line["premise"],
input_hypothesis=line["hypothesis"],
label=line["label"] if set_type != "test" else cls.LABELS[-1],
)
)
return examples
|
import numpy as np
import torch
from dataclasses import dataclass
from typing import List
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import double_sentence_featurize, labels_to_bimap
from jiant.utils.python.io import read_jsonl
@dataclass
class Example(BaseExample):
guid: str
input_premise: str
input_hypothesis: str
label: str
def tokenize(self, tokenizer):
return TokenizedExample(
guid=self.guid,
input_premise=tokenizer.tokenize(self.input_premise),
input_hypothesis=tokenizer.tokenize(self.input_hypothesis),
label_id=CounterfactualNliTask.LABEL_TO_ID[self.label],
)
@dataclass
class TokenizedExample(BaseTokenizedExample):
guid: str
input_premise: List
input_hypothesis: List
label_id: int
def featurize(self, tokenizer, feat_spec):
return double_sentence_featurize(
guid=self.guid,
input_tokens_a=self.input_premise,
input_tokens_b=self.input_hypothesis,
label_id=self.label_id,
tokenizer=tokenizer,
feat_spec=feat_spec,
data_row_class=DataRow,
)
@dataclass
class DataRow(BaseDataRow):
guid: str
input_ids: np.ndarray
input_mask: np.ndarray
segment_ids: np.ndarray
label_id: int
tokens: list
@dataclass
class Batch(BatchMixin):
input_ids: torch.LongTensor
input_mask: torch.LongTensor
segment_ids: torch.LongTensor
label_id: torch.LongTensor
tokens: list
class CounterfactualNliTask(Task):
Example = Example
TokenizedExample = Example
DataRow = DataRow
Batch = Batch
TASK_TYPE = TaskTypes.CLASSIFICATION
LABELS = ["contradiction", "entailment", "neutral"]
LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
def get_train_examples(self):
return self._create_examples(lines=read_jsonl(self.train_path), set_type="train")
def get_val_examples(self):
return self._create_examples(lines=read_jsonl(self.val_path), set_type="val")
def get_test_examples(self):
return self._create_examples(lines=read_jsonl(self.test_path), set_type="test")
@classmethod
def _create_examples(cls, lines, set_type):
examples = []
empty_labels = 0
dash_labels = 0
for (i, line) in enumerate(lines):
if set_type != "test":
if line["label"] == "-":
dash_labels+=1
continue
elif line["label"] == "":
empty_labels+=1
continue
else:
assert line["label"] in cls.LABELS, f"Example {i} label not supported: {line['label']}"
examples.append(
Example(
guid="%s-%s" % (set_type, i),
input_premise=line["premise"],
input_hypothesis=line["hypothesis"],
label=line["label"] if set_type != "test" else cls.LABELS[-1],
)
)
return examples
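# Illustrative input record (hypothetical values): every JSONL line consumed by
# _create_examples() is expected to provide "premise", "hypothesis" and, for the
# train/val splits, a "label" drawn from CounterfactualNliTask.LABELS, e.g.
#
#   {"premise": "A man is eating.", "hypothesis": "Nobody is eating.", "label": "contradiction"}
#
# The first such line of the train split becomes Example(guid="train-0", ...).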
|
import click
import logging
from os import remove
from os.path import basename, exists
from helper.aws import AwsApiHelper
logging.getLogger().setLevel(logging.DEBUG)
class Helper(AwsApiHelper):
def __init__(self, sql_file):
super().__init__()
self._sql_file = sql_file
with open(sql_file, "r") as f:
self._sql_statement = f.read()
def process_request(self, session, account_id, region, kwargs):
        output_filename = f'{account_id}_{region}_{basename(self._sql_file).replace(".sql", "")}.txt'
if exists(output_filename):
            remove(output_filename)  # delete any stale output file from a previous run
client = session.client("config", region_name=region)
params = {"Expression": self._sql_statement}
while True:
resp = client.select_resource_config(**params)
for item in resp["Results"]:
dump(item, output_filename)
if resp.get("NextToken") is None:
break
params["NextToken"] = resp.get("NextToken")
def dump(data, output_filename, to_console=True):
if to_console is True:
print(data)
with open(output_filename, "a") as f:
f.write(f'{data}\n')
@click.command()
@click.option("--sqlfile", "-s", required=True, help="File containing the sql statement (.sql)")
@click.option("--profile", "-p", help="AWS profile name. Use profiles in ~/.aws if not specified.")
@click.option("--region", "-r", default="ap-southeast-2", show_default=True, help="AWS Region. Use 'all' for all regions.")
def main(sqlfile, profile, region):
Helper(sqlfile).start(profile, region, "config")
if __name__ == "__main__":
main()
|
import click
import logging
from os import remove
from os.path import basename, exists
from helper.aws import AwsApiHelper
logging.getLogger().setLevel(logging.DEBUG)
class Helper(AwsApiHelper):
def __init__(self, sql_file):
super().__init__()
self._sql_file = sql_file
with open(sql_file, "r") as f:
self._sql_statement = f.read()
def process_request(self, session, account_id, region, kwargs):
output_filename = f'{account_id}_{region}_{basename(self._sql_file).replace(".sql", "")}.txt'
if exists(output_filename):
            remove(output_filename)  # delete any stale output file from a previous run
client = session.client("config", region_name=region)
params = {"Expression": self._sql_statement}
while True:
resp = client.select_resource_config(**params)
for item in resp["Results"]:
dump(item, output_filename)
if resp.get("NextToken") is None:
break
params["NextToken"] = resp.get("NextToken")
def dump(data, output_filename, to_console=True):
if to_console is True:
print(data)
with open(output_filename, "a") as f:
f.write(f'{data}\n')
@click.command()
@click.option("--sqlfile", "-s", required=True, help="File containing the sql statement (.sql)")
@click.option("--profile", "-p", help="AWS profile name. Use profiles in ~/.aws if not specified.")
@click.option("--region", "-r", default="ap-southeast-2", show_default=True, help="AWS Region. Use 'all' for all regions.")
def main(sqlfile, profile, region):
Helper(sqlfile).start(profile, region, "config")
if __name__ == "__main__":
main()
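# Illustrative contents for the --sqlfile argument (hypothetical query): any AWS
# Config advanced query accepted by select_resource_config works, for example
#
#   SELECT resourceId, resourceType, awsRegion WHERE resourceType = 'AWS::EC2::Instance'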
|
import os
import csv
import genanki
from gtts import gTTS
from mnemocards import ASSETS_DIR
from mnemocards.utils import get_hash_id, NoteID, generate_furigana
from mnemocards.builders.vocabulary_builder import VocabularyBuilder
from mnemocards.builders.vocabulary_builder import remove_parentheses, remove_spaces
from mnemocards import maketsv as tsv
css = open(f"{ASSETS_DIR}/css/autogenerate.css").read()
CARD_MODEL = genanki.Model(
get_hash_id("725b5570-eb22-4ca5-a2b2-817e04514cde"),
"Autogenerated vocabulary model",
fields=[
# Visible fields.
{"name": "YourLanguageWord"},
{"name": "YourLanguageExplanation"},
{"name": "LanguageYouLearnWord"},
{"name": "LanguageYouLearnPronunciation"},
{"name": "LanguageYouLearnExplanation"},
# Configuration fields.
{"name": "CardColor"},
{"name": "ShowPronunciationInReverse"},
],
templates=[
{
"name": "Vocabulary card",
"qfmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.synonyms .line_2 {
color: #0000;
}
</style>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
''',
"afmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.destination {
color: black;
}
</style>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
<hr>
<div class="destination word">{{LanguageYouLearnWord}}</div>
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
''',
},
{
"name": "Vocabulary card (reversed)",
"qfmt": '''
<style>
.card {
background: {{CardColor}};
}
.destination {
color: black;
}
.definitions .line_1{
color: #0000
}
.definitions .line_2{
color: #0000
}
</style>
<div class="destination word">{{LanguageYouLearnWord}}</div>
{{#ShowPronunciationInReverse}}
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
{{/ShowPronunciationInReverse}}
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
''',
"afmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.destination {
color: black;
}
</style>
<div class="destination word">{{LanguageYouLearnWord}}</div>
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
<hr>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
''',
},
],
css=css,
)
CARD_MODEL_JAPANESE = genanki.Model(
get_hash_id("fcac015f-d9dc-4f62-a8f9-0a1ef7a621e2"),
"Autogenerated vocabulary model (Japanese)",
fields=[
# Visible fields.
{"name": "YourLanguageWord"},
{"name": "YourLanguageExplanation"},
{"name": "LanguageYouLearnWord"},
{"name": "LanguageYouLearnPronunciation"},
{"name": "LanguageYouLearnExplanation"},
# Configuration fields.
{"name": "CardColor"},
{"name": "ShowPronunciationInReverse"},
],
templates=[
{
"name": "Vocabulary card",
"qfmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.synonyms .line_2 {
color: #0000;
}
</style>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
''',
"afmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.destination {
color: black;
}
</style>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
<hr>
<div class="destination word">{{furigana:LanguageYouLearnWord}}</div>
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
''',
},
{
"name": "Vocabulary card (reversed)",
"qfmt": '''
<style>
.card {
background: {{CardColor}};
}
.destination {
color: black;
}
.definitions .line_1{
color: #0000
}
.definitions .line_2{
color: #0000
}
</style>
<div class="destination word">{{kanji:LanguageYouLearnWord}}</div>
{{#ShowPronunciationInReverse}}
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
{{/ShowPronunciationInReverse}}
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
''',
"afmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.destination {
color: black;
}
</style>
<div class="destination word">{{furigana:LanguageYouLearnWord}}</div>
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
<hr>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
''',
},
],
css=css,
)
class AutogenerateBuilder(VocabularyBuilder, object):
def __init__(self):
pass
def parse_src_to_settings(self, data_dir, src):
settings = {}
settings["color"] = src.get("card_color", "#f5f5f5")
settings["show_p"] = "true" if src.get(
"pronunciation_in_reverse", False) else ""
settings["tsv_as_source"] = src.get("use_tsv_for_generation", False)
settings["filename"] = os.path.join(data_dir, src["file"])
settings["generate_audio"] = src.get("audio", False)
settings["lang"] = src.get("lang", {})
if not settings["lang"]:
settings["lang"]["original"] = "en"
settings["lang"]["translation"] = "es"
if settings["generate_audio"]:
settings["media_dir"] = self.prepare_media_dir(data_dir, {})
settings["furigana"] = src.get("furigana", False)
settings["furigana_type"] = src.get("furigana_type", "hira")
settings["card_properties"] = src.get("card_properties", None)
settings["tags"] = []
if src.get("card_properties", None):
settings["tags"] = src["card_properties"].get("tags", [])
return settings
def build_cards_from_words(self, settings):
words = tsv.scrape_words_from_file(settings["filename"])
translations = tsv.get_translation(
words, settings["lang"]["original"], settings["lang"]["translation"])
cards = [tsv.prepare_card_fields(trans) for trans in translations]
for card in cards:
card["tags"] = settings["tags"]
return cards
def build_cards_from_tsv(self, settings):
with open(settings["filename"], "r") as csvfile:
reader = csv.reader(csvfile, delimiter="\t", quotechar='"')
iterator = iter(reader)
cards = []
# Skip header that is present in every autogenerated TSV.
next(iterator)
for i, row in enumerate(iterator):
card_id, ylw, yle, lylw, lylp, lyle, row_tags = row
tags = settings["tags"]
tags.extend(row_tags.split(","))
card = {"card_id": card_id, "ylw": ylw, "yle": yle,
"lylw": lylw, "lylp": lylp, "lyle": lyle, "tags": tags}
cards.append(card)
return cards
def build_notes_and_media(self, settings, cards):
notes, media = [], []
for card in cards:
if settings["furigana"] and (settings["lang"]["original"] == "ja"):
card["lylp"] = ""
card["lylw"] = generate_furigana(
card["lylw"], settings["furigana_type"])
if settings["generate_audio"]:
clean_text = remove_parentheses(card["lylw"])
if settings["furigana"]:
# If you leave those spaces you get wrong
# pronunciations, like in `スペイン 人`.
# Instead of `supein jin` it pronounces it as
# `supein hito` because the kanji `人` alone is
# pronounced as `hito`.
clean_text = remove_spaces(clean_text)
hash_text = get_hash_id(clean_text, bytes=8)
                sound_file = f'{settings["media_dir"]}/{hash_text}.mp3'
if not os.path.exists(sound_file):
print(f"Creating audio file {sound_file}")
lang = settings["lang"]["original"]
tts = gTTS(clean_text, lang=lang)
tts.save(sound_file)
card["lylp"] += f" [sound:{hash_text}.mp3]"
media.append(sound_file)
note = NoteID(
card["card_id"],
model=CARD_MODEL_JAPANESE if settings["furigana"] else CARD_MODEL,
fields=[
card["ylw"], card["yle"], card["lylw"], card["lylp"],
card["lyle"], settings["color"], settings["show_p"]],
tags=card["tags"])
notes.append(note)
return notes, media
def build_cards(self, data_dir, src, deck_config):
# Get data from config.
settings = self.parse_src_to_settings(data_dir, src)
# Choose what builder to use - from words or TSV.
builder = self.build_cards_from_words
if "tsv" in settings["filename"].split('.')[-1]:
builder = self.build_cards_from_tsv
cards = builder(settings)
#
notes, media = self.build_notes_and_media(settings, cards)
return notes, media
|
import os
import csv
import genanki
from gtts import gTTS
from mnemocards import ASSETS_DIR
from mnemocards.utils import get_hash_id, NoteID, generate_furigana
from mnemocards.builders.vocabulary_builder import VocabularyBuilder
from mnemocards.builders.vocabulary_builder import remove_parentheses, remove_spaces
from mnemocards import maketsv as tsv
css = open(f"{ASSETS_DIR}/css/autogenerate.css").read()
CARD_MODEL = genanki.Model(
get_hash_id("725b5570-eb22-4ca5-a2b2-817e04514cde"),
"Autogenerated vocabulary model",
fields=[
# Visible fields.
{"name": "YourLanguageWord"},
{"name": "YourLanguageExplanation"},
{"name": "LanguageYouLearnWord"},
{"name": "LanguageYouLearnPronunciation"},
{"name": "LanguageYouLearnExplanation"},
# Configuration fields.
{"name": "CardColor"},
{"name": "ShowPronunciationInReverse"},
],
templates=[
{
"name": "Vocabulary card",
"qfmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.synonyms .line_2 {
color: #0000;
}
</style>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
''',
"afmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.destination {
color: black;
}
</style>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
<hr>
<div class="destination word">{{LanguageYouLearnWord}}</div>
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
''',
},
{
"name": "Vocabulary card (reversed)",
"qfmt": '''
<style>
.card {
background: {{CardColor}};
}
.destination {
color: black;
}
.definitions .line_1{
color: #0000
}
.definitions .line_2{
color: #0000
}
</style>
<div class="destination word">{{LanguageYouLearnWord}}</div>
{{#ShowPronunciationInReverse}}
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
{{/ShowPronunciationInReverse}}
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
''',
"afmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.destination {
color: black;
}
</style>
<div class="destination word">{{LanguageYouLearnWord}}</div>
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
<hr>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
''',
},
],
css=css,
)
CARD_MODEL_JAPANESE = genanki.Model(
get_hash_id("fcac015f-d9dc-4f62-a8f9-0a1ef7a621e2"),
"Autogenerated vocabulary model (Japanese)",
fields=[
# Visible fields.
{"name": "YourLanguageWord"},
{"name": "YourLanguageExplanation"},
{"name": "LanguageYouLearnWord"},
{"name": "LanguageYouLearnPronunciation"},
{"name": "LanguageYouLearnExplanation"},
# Configuration fields.
{"name": "CardColor"},
{"name": "ShowPronunciationInReverse"},
],
templates=[
{
"name": "Vocabulary card",
"qfmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.synonyms .line_2 {
color: #0000;
}
</style>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
''',
"afmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.destination {
color: black;
}
</style>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
<hr>
<div class="destination word">{{furigana:LanguageYouLearnWord}}</div>
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
''',
},
{
"name": "Vocabulary card (reversed)",
"qfmt": '''
<style>
.card {
background: {{CardColor}};
}
.destination {
color: black;
}
.definitions .line_1{
color: #0000
}
.definitions .line_2{
color: #0000
}
</style>
<div class="destination word">{{kanji:LanguageYouLearnWord}}</div>
{{#ShowPronunciationInReverse}}
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
{{/ShowPronunciationInReverse}}
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
''',
"afmt": '''
<style>
.card {
background: {{CardColor}};
}
.origin {
color: black;
}
.destination {
color: black;
}
</style>
<div class="destination word">{{furigana:LanguageYouLearnWord}}</div>
<div class="destination fonetic">{{LanguageYouLearnPronunciation}}</div>
<div class="destination comment">{{LanguageYouLearnExplanation}}</div>
<hr>
<div class="origin word">{{YourLanguageWord}}</div>
<div class="origin comment">{{YourLanguageExplanation}}</div>
''',
},
],
css=css,
)
class AutogenerateBuilder(VocabularyBuilder, object):
def __init__(self):
pass
def parse_src_to_settings(self, data_dir, src):
settings = {}
settings["color"] = src.get("card_color", "#f5f5f5")
settings["show_p"] = "true" if src.get(
"pronunciation_in_reverse", False) else ""
settings["tsv_as_source"] = src.get("use_tsv_for_generation", False)
settings["filename"] = os.path.join(data_dir, src["file"])
settings["generate_audio"] = src.get("audio", False)
settings["lang"] = src.get("lang", {})
if not settings["lang"]:
settings["lang"]["original"] = "en"
settings["lang"]["translation"] = "es"
if settings["generate_audio"]:
settings["media_dir"] = self.prepare_media_dir(data_dir, {})
settings["furigana"] = src.get("furigana", False)
settings["furigana_type"] = src.get("furigana_type", "hira")
settings["card_properties"] = src.get("card_properties", None)
settings["tags"] = []
if src.get("card_properties", None):
settings["tags"] = src["card_properties"].get("tags", [])
return settings
def build_cards_from_words(self, settings):
words = tsv.scrape_words_from_file(settings["filename"])
translations = tsv.get_translation(
words, settings["lang"]["original"], settings["lang"]["translation"])
cards = [tsv.prepare_card_fields(trans) for trans in translations]
for card in cards:
card["tags"] = settings["tags"]
return cards
def build_cards_from_tsv(self, settings):
with open(settings["filename"], "r") as csvfile:
reader = csv.reader(csvfile, delimiter="\t", quotechar='"')
iterator = iter(reader)
cards = []
# Skip header that is present in every autogenerated TSV.
next(iterator)
for i, row in enumerate(iterator):
card_id, ylw, yle, lylw, lylp, lyle, row_tags = row
tags = settings["tags"]
tags.extend(row_tags.split(","))
card = {"card_id": card_id, "ylw": ylw, "yle": yle,
"lylw": lylw, "lylp": lylp, "lyle": lyle, "tags": tags}
cards.append(card)
return cards
def build_notes_and_media(self, settings, cards):
notes, media = [], []
for card in cards:
if settings["furigana"] and (settings["lang"]["original"] == "ja"):
card["lylp"] = ""
card["lylw"] = generate_furigana(
card["lylw"], settings["furigana_type"])
if settings["generate_audio"]:
clean_text = remove_parentheses(card["lylw"])
if settings["furigana"]:
# If you leave those spaces you get wrong
# pronunciations, like in `スペイン 人`.
# Instead of `supein jin` it pronounces it as
# `supein hito` because the kanji `人` alone is
# pronounced as `hito`.
clean_text = remove_spaces(clean_text)
hash_text = get_hash_id(clean_text, bytes=8)
sound_file = f'{settings["media_dir"]}/{hash_text}.mp3'
if not os.path.exists(sound_file):
print(f"Creating audio file {sound_file}")
lang = settings["lang"]["original"]
tts = gTTS(clean_text, lang=lang)
tts.save(sound_file)
card["lylp"] += f" [sound:{hash_text}.mp3]"
media.append(sound_file)
note = NoteID(
card["card_id"],
model=CARD_MODEL_JAPANESE if settings["furigana"] else CARD_MODEL,
fields=[
card["ylw"], card["yle"], card["lylw"], card["lylp"],
card["lyle"], settings["color"], settings["show_p"]],
tags=card["tags"])
notes.append(note)
return notes, media
def build_cards(self, data_dir, src, deck_config):
# Get data from config.
settings = self.parse_src_to_settings(data_dir, src)
# Choose what builder to use - from words or TSV.
builder = self.build_cards_from_words
if "tsv" in settings["filename"].split('.')[-1]:
builder = self.build_cards_from_tsv
cards = builder(settings)
#
notes, media = self.build_notes_and_media(settings, cards)
return notes, media
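# --- Illustrative source entry (hypothetical values, not part of the package) ---
# These are the keys read by parse_src_to_settings(); only "file" is required,
# every other key falls back to a default.
_EXAMPLE_SRC = {
    "file": "vocabulary.tsv",                        # .tsv -> build_cards_from_tsv is used
    "card_color": "#f5f5f5",
    "audio": False,
    "lang": {"original": "ja", "translation": "en"},
    "furigana": True,
    "furigana_type": "hira",
    "card_properties": {"tags": ["japanese"]},
}
# notes, media = AutogenerateBuilder().build_cards("data/", _EXAMPLE_SRC, deck_config=None)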
|
import time
import logging
from spaceone.inventory.libs.manager import GoogleCloudManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.instance_group import InstanceGroupConnector
from spaceone.inventory.model.instance_group.data import *
from spaceone.inventory.model.instance_group.cloud_service import *
from spaceone.inventory.model.instance_group.cloud_service_type import CLOUD_SERVICE_TYPES
_LOGGER = logging.getLogger(__name__)
class InstanceGroupManager(GoogleCloudManager):
connector_name = 'InstanceGroupConnector'
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse
        """
        _LOGGER.debug('** Instance Group START **')
        start_time = time.time()
collected_cloud_services = []
error_responses = []
instance_group_id = ""
secret_data = params['secret_data']
instance_group_conn: InstanceGroupConnector = self.locator.get_connector(self.connector_name, **params)
# Get all Resources
instance_groups = instance_group_conn.list_instance_groups()
instance_group_managers = instance_group_conn.list_instance_group_managers()
autoscalers = instance_group_conn.list_autoscalers()
instance_templates = instance_group_conn.list_instance_templates()
for instance_group in instance_groups:
try:
instance_group_id = instance_group.get('id')
instance_group.update({
'project': secret_data['project_id']
})
scheduler = {'type': 'zone'} if 'zone' in instance_group else {'type': 'region'}
if match_instance_group_manager := \
self.match_instance_group_manager(instance_group_managers, instance_group.get('selfLink')):
instance_group_type = self.get_instance_group_type(match_instance_group_manager)
scheduler.update({'instance_group_type': instance_group_type})
# Managed
match_instance_group_manager.update({
'statefulPolicy': {
'preservedState': {'disks': self._get_stateful_policy(match_instance_group_manager)}}
})
instance_group.update({
'instance_group_type': instance_group_type,
'instance_group_manager': InstanceGroupManagers(match_instance_group_manager, strict=False)
})
if match_auto_scaler := self.match_auto_scaler(autoscalers, match_instance_group_manager):
self._get_auto_policy_for_scheduler(scheduler, match_auto_scaler)
instance_group.update({
'auto_scaler': AutoScaler(match_auto_scaler, strict=False),
'autoscaling_display':
self._get_autoscaling_display(match_auto_scaler.get('autoscalingPolicy', {}))
})
match_instance_template = \
self.match_instance_template(instance_templates,
match_instance_group_manager.get('instanceTemplate'))
if match_instance_template:
instance_group.update({'template': InstanceTemplate(match_instance_template, strict=False)})
else:
# Unmanaged
instance_group.update({'instance_group_type': 'UNMANAGED'})
scheduler.update({'instance_group_type': 'UNMANAGED'})
loc_type, location = self.get_instance_group_loc(instance_group)
region = self.generate_region_from_zone(location) if loc_type == 'zone' else location
instances = instance_group_conn.list_instances(instance_group.get('name'), location, loc_type)
display_loc = {'region': location, 'zone': ''} if loc_type == 'region' \
else {'region': location[:-2], 'zone': location}
instance_group.update({'display_location': display_loc})
instance_group.update({
'power_scheduler': scheduler,
'instances': self.get_instances(instances),
'instance_counts': len(instances)
})
# No labels
_name = instance_group.get('name', '')
instance_group_data = InstanceGroup(instance_group, strict=False)
instance_group_resource = InstanceGroupResource({
'name': _name,
'data': instance_group_data,
'region_code': region,
'reference': ReferenceModel(instance_group_data.reference())
})
self.set_region_code(region)
collected_cloud_services.append(InstanceGroupResponse({'resource': instance_group_resource}))
except Exception as e:
_LOGGER.error(f'[collect_cloud_service] => {e}', exc_info=True)
error_response = self.generate_resource_error_response(e, 'ComputeEngine', 'InstanceGroup', instance_group_id)
error_responses.append(error_response)
_LOGGER.debug(f'** Instance Group Finished {time.time() - start_time} Seconds **')
return collected_cloud_services, error_responses
def get_instance_group_loc(self, instance_group):
inst_type = 'zone' if 'zone' in instance_group else 'region'
loc = self._get_last_target(instance_group, inst_type)
return inst_type, loc
def get_instances(self, instances):
_instances = []
for instance in instances:
instance.update({'name': self._get_last_target(instance, 'instance')})
_instances.append(instance)
return _instances
@staticmethod
def match_instance_template(instance_templates, instance_template_self_link):
for instance_template in instance_templates:
if instance_template['selfLink'] == instance_template_self_link:
return instance_template
return None
@staticmethod
def match_instance_group_manager(instance_group_managers, instance_group_name):
for instance_group_manager in instance_group_managers:
if instance_group_manager['instanceGroup'] == instance_group_name:
return instance_group_manager
return None
@staticmethod
def match_auto_scaler(auto_scalers, instance_group_manager):
match_auto_scaler_name = instance_group_manager.get('status', {}).get('autoscaler')
if match_auto_scaler_name:
for auto_scaler in auto_scalers:
if match_auto_scaler_name == auto_scaler['selfLink']:
return auto_scaler
return None
@staticmethod
def _get_stateful_policy(match_instance_group_manager):
disks_vos = []
stateful_policy = match_instance_group_manager.get('statefulPolicy')
if stateful_policy:
preserved_state = stateful_policy.get('preservedState')
if preserved_state:
for key, val in preserved_state.get('disks', {}).items():
disks_vos.append({'key': key, 'value': val})
return disks_vos
@staticmethod
def get_instance_group_type(instance_group_manager):
if instance_group_manager.get('status', {}).get('stateful', {}).get('hasStatefulConfig'):
return 'STATEFUL'
else:
return 'STATELESS'
def _get_autoscaling_display(self, autoscaling_policy):
        display_string = f'{autoscaling_policy.get("mode")}: Target '
policy_display_list = []
if 'cpuUtilization' in autoscaling_policy:
policy_display_list.append(
                f'CPU utilization {(autoscaling_policy.get("cpuUtilization", {}).get("utilizationTarget")) * 100}%')
if 'loadBalancingUtilization' in autoscaling_policy:
policy_display_list.append(
                f'LB capacity fraction {(autoscaling_policy.get("loadBalancingUtilization", {}).get("utilizationTarget")) * 100}%')
for custom_metric in autoscaling_policy.get('customMetricUtilizations', []):
policy_display_list.append(
                f'{self._get_custom_metric_target_name(custom_metric.get("metric", ""))} {custom_metric.get("utilizationTarget", "")}{self._get_custom_metric_target_type(custom_metric.get("utilizationTargetType"))}')
if policy_display_list:
policy_join_str = ', '.join(policy_display_list)
return f'{display_string}{policy_join_str}'
else:
return ''
@staticmethod
def _get_custom_metric_target_name(util_target):
try:
target_name = util_target.split('/')[-1]
return target_name
except Exception as e:
return ''
@staticmethod
def _get_custom_metric_target_type(util_target_type):
if util_target_type == 'GAUGE':
return ''
elif util_target_type == 'DELTA_PER_SECOND':
return '/s'
elif util_target_type == 'DELTA_PER_MINUTE':
return '/m'
else:
return ''
@staticmethod
def _get_last_target(target_vo, key):
a = target_vo.get(key, '')
return a[a.rfind('/') + 1:]
@staticmethod
def _get_auto_policy_for_scheduler(scheduler, matched_scheduler):
auto_policy = matched_scheduler.get('autoscalingPolicy', {})
if auto_policy != {}:
scheduler.update({
'recommend_size': matched_scheduler.get('recommendedSize', 1),
'origin_min_size': auto_policy.get('minNumReplicas'),
'origin_max_size': auto_policy.get('maxNumReplicas'),
'mode': auto_policy.get('mode')
})
|
import time
import logging
from spaceone.inventory.libs.manager import GoogleCloudManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.instance_group import InstanceGroupConnector
from spaceone.inventory.model.instance_group.data import *
from spaceone.inventory.model.instance_group.cloud_service import *
from spaceone.inventory.model.instance_group.cloud_service_type import CLOUD_SERVICE_TYPES
_LOGGER = logging.getLogger(__name__)
class InstanceGroupManager(GoogleCloudManager):
connector_name = 'InstanceGroupConnector'
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
_LOGGER.debug(f'** Instance Group START **')
start_time = time.time()
"""
Args:
params:
- options
- schema
- secret_data
- filter
- zones
Response:
CloudServiceResponse
"""
collected_cloud_services = []
error_responses = []
instance_group_id = ""
secret_data = params['secret_data']
instance_group_conn: InstanceGroupConnector = self.locator.get_connector(self.connector_name, **params)
# Get all Resources
instance_groups = instance_group_conn.list_instance_groups()
instance_group_managers = instance_group_conn.list_instance_group_managers()
autoscalers = instance_group_conn.list_autoscalers()
instance_templates = instance_group_conn.list_instance_templates()
for instance_group in instance_groups:
try:
instance_group_id = instance_group.get('id')
instance_group.update({
'project': secret_data['project_id']
})
scheduler = {'type': 'zone'} if 'zone' in instance_group else {'type': 'region'}
if match_instance_group_manager := \
self.match_instance_group_manager(instance_group_managers, instance_group.get('selfLink')):
instance_group_type = self.get_instance_group_type(match_instance_group_manager)
scheduler.update({'instance_group_type': instance_group_type})
# Managed
match_instance_group_manager.update({
'statefulPolicy': {
'preservedState': {'disks': self._get_stateful_policy(match_instance_group_manager)}}
})
instance_group.update({
'instance_group_type': instance_group_type,
'instance_group_manager': InstanceGroupManagers(match_instance_group_manager, strict=False)
})
if match_auto_scaler := self.match_auto_scaler(autoscalers, match_instance_group_manager):
self._get_auto_policy_for_scheduler(scheduler, match_auto_scaler)
instance_group.update({
'auto_scaler': AutoScaler(match_auto_scaler, strict=False),
'autoscaling_display':
self._get_autoscaling_display(match_auto_scaler.get('autoscalingPolicy', {}))
})
match_instance_template = \
self.match_instance_template(instance_templates,
match_instance_group_manager.get('instanceTemplate'))
if match_instance_template:
instance_group.update({'template': InstanceTemplate(match_instance_template, strict=False)})
else:
# Unmanaged
instance_group.update({'instance_group_type': 'UNMANAGED'})
scheduler.update({'instance_group_type': 'UNMANAGED'})
loc_type, location = self.get_instance_group_loc(instance_group)
region = self.generate_region_from_zone(location) if loc_type == 'zone' else location
instances = instance_group_conn.list_instances(instance_group.get('name'), location, loc_type)
display_loc = {'region': location, 'zone': ''} if loc_type == 'region' \
else {'region': location[:-2], 'zone': location}
instance_group.update({'display_location': display_loc})
instance_group.update({
'power_scheduler': scheduler,
'instances': self.get_instances(instances),
'instance_counts': len(instances)
})
# No labels
_name = instance_group.get('name', '')
instance_group_data = InstanceGroup(instance_group, strict=False)
instance_group_resource = InstanceGroupResource({
'name': _name,
'data': instance_group_data,
'region_code': region,
'reference': ReferenceModel(instance_group_data.reference())
})
self.set_region_code(region)
collected_cloud_services.append(InstanceGroupResponse({'resource': instance_group_resource}))
except Exception as e:
_LOGGER.error(f'[collect_cloud_service] => {e}', exc_info=True)
error_response = self.generate_resource_error_response(e, 'ComputeEngine', 'InstanceGroup', instance_group_id)
error_responses.append(error_response)
_LOGGER.debug(f'** Instance Group Finished {time.time() - start_time} Seconds **')
return collected_cloud_services, error_responses
def get_instance_group_loc(self, instance_group):
inst_type = 'zone' if 'zone' in instance_group else 'region'
loc = self._get_last_target(instance_group, inst_type)
return inst_type, loc
def get_instances(self, instances):
_instances = []
for instance in instances:
instance.update({'name': self._get_last_target(instance, 'instance')})
_instances.append(instance)
return _instances
@staticmethod
def match_instance_template(instance_templates, instance_template_self_link):
for instance_template in instance_templates:
if instance_template['selfLink'] == instance_template_self_link:
return instance_template
return None
@staticmethod
def match_instance_group_manager(instance_group_managers, instance_group_name):
for instance_group_manager in instance_group_managers:
if instance_group_manager['instanceGroup'] == instance_group_name:
return instance_group_manager
return None
@staticmethod
def match_auto_scaler(auto_scalers, instance_group_manager):
match_auto_scaler_name = instance_group_manager.get('status', {}).get('autoscaler')
if match_auto_scaler_name:
for auto_scaler in auto_scalers:
if match_auto_scaler_name == auto_scaler['selfLink']:
return auto_scaler
return None
@staticmethod
def _get_stateful_policy(match_instance_group_manager):
disks_vos = []
stateful_policy = match_instance_group_manager.get('statefulPolicy')
if stateful_policy:
preserved_state = stateful_policy.get('preservedState')
if preserved_state:
for key, val in preserved_state.get('disks', {}).items():
disks_vos.append({'key': key, 'value': val})
return disks_vos
@staticmethod
def get_instance_group_type(instance_group_manager):
if instance_group_manager.get('status', {}).get('stateful', {}).get('hasStatefulConfig'):
return 'STATEFUL'
else:
return 'STATELESS'
def _get_autoscaling_display(self, autoscaling_policy):
display_string = f'{autoscaling_policy.get("mode")}: Target '
policy_display_list = []
if 'cpuUtilization' in autoscaling_policy:
policy_display_list.append(
f'CPU utilization {(autoscaling_policy.get("cpuUtilization", {}).get("utilizationTarget")) * 100}%')
if 'loadBalancingUtilization' in autoscaling_policy:
policy_display_list.append(
f'LB capacity fraction {(autoscaling_policy.get("loadBalancingUtilization", {}).get("utilizationTarget")) * 100}%')
for custom_metric in autoscaling_policy.get('customMetricUtilizations', []):
policy_display_list.append(
f'{self._get_custom_metric_target_name(custom_metric.get("metric", ""))} {custom_metric.get("utilizationTarget", "")}{self._get_custom_metric_target_type(custom_metric.get("utilizationTargetType"))}')
if policy_display_list:
policy_join_str = ', '.join(policy_display_list)
return f'{display_string}{policy_join_str}'
else:
return ''
@staticmethod
def _get_custom_metric_target_name(util_target):
try:
target_name = util_target.split('/')[-1]
return target_name
except Exception as e:
return ''
@staticmethod
def _get_custom_metric_target_type(util_target_type):
if util_target_type == 'GAUGE':
return ''
elif util_target_type == 'DELTA_PER_SECOND':
return '/s'
elif util_target_type == 'DELTA_PER_MINUTE':
return '/m'
else:
return ''
@staticmethod
def _get_last_target(target_vo, key):
a = target_vo.get(key, '')
return a[a.rfind('/') + 1:]
@staticmethod
def _get_auto_policy_for_scheduler(scheduler, matched_scheduler):
auto_policy = matched_scheduler.get('autoscalingPolicy', {})
if auto_policy != {}:
scheduler.update({
'recommend_size': matched_scheduler.get('recommendedSize', 1),
'origin_min_size': auto_policy.get('minNumReplicas'),
'origin_max_size': auto_policy.get('maxNumReplicas'),
'mode': auto_policy.get('mode')
})
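# Illustrative sketch (not part of the plugin's runtime path): the static helpers above
# operate on plain dicts, so their behaviour can be shown without the SpaceONE framework.
# The selfLink-style value below is a hypothetical example.
#
#   sample = {'zone': 'https://www.googleapis.com/compute/v1/projects/demo/zones/us-central1-a'}
#   InstanceGroupManager._get_last_target(sample, 'zone')   # -> 'us-central1-a'
#
# _get_autoscaling_display() joins one entry per autoscaling signal, e.g. a policy with
# mode 'ON' and cpuUtilization.utilizationTarget == 0.6 renders as
# 'ON: Target CPU utilization 60.0%'.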
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import csv
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.suite.hooks.sheets import GSheetsHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GoogleSheetsToGCSOperator(BaseOperator):
"""
Writes Google Sheet data into Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSheetsToGCSOperator`
:param spreadsheet_id: The Google Sheet ID to interact with.
:type spreadsheet_id: str
    :param sheet_filter: Defaults to None; if provided, should be a list of the sheet
        titles to pull from.
:type sheet_filter: List[str]
:param destination_bucket: The destination Google cloud storage bucket where the
report should be written to. (templated)
:type destination_bucket: str
:param destination_path: The Google cloud storage URI array for the object created by the operator.
For example: ``path/to/my/files``.
:type destination_path: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"spreadsheet_id",
"destination_bucket",
"destination_path",
"sheet_filter",
"impersonation_chain",
)
def __init__(
self,
*,
spreadsheet_id: str,
destination_bucket: str,
sheet_filter: Optional[List[str]] = None,
destination_path: Optional[str] = None,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.spreadsheet_id = spreadsheet_id
self.sheet_filter = sheet_filter
self.destination_bucket = destination_bucket
self.destination_path = destination_path
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def _upload_data(
self,
gcs_hook: GCSHook,
hook: GSheetsHook,
sheet_range: str,
sheet_values: List[Any],
) -> str:
# Construct destination file path
sheet = hook.get_spreadsheet(self.spreadsheet_id)
file_name = f"{sheet["properties"]["title"]}_{sheet_range}.csv".replace(" ", "_")
dest_file_name = (
f"{self.destination_path.strip("/")}/{file_name}" if self.destination_path else file_name
)
with NamedTemporaryFile("w+") as temp_file:
# Write data
writer = csv.writer(temp_file)
writer.writerows(sheet_values)
temp_file.flush()
# Upload to GCS
gcs_hook.upload(
bucket_name=self.destination_bucket,
object_name=dest_file_name,
filename=temp_file.name,
)
return dest_file_name
def execute(self, context: 'Context'):
sheet_hook = GSheetsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
# Pull data and upload
destination_array: List[str] = []
sheet_titles = sheet_hook.get_sheet_titles(
spreadsheet_id=self.spreadsheet_id, sheet_filter=self.sheet_filter
)
for sheet_range in sheet_titles:
data = sheet_hook.get_values(spreadsheet_id=self.spreadsheet_id, range_=sheet_range)
gcs_path_to_file = self._upload_data(gcs_hook, sheet_hook, sheet_range, data)
destination_array.append(gcs_path_to_file)
self.xcom_push(context, "destination_objects", destination_array)
return destination_array
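# Illustrative usage sketch: within a DAG this operator might be configured roughly as
# below; the task id, spreadsheet id and bucket name are hypothetical placeholders.
#
#   upload_sheet_to_gcs = GoogleSheetsToGCSOperator(
#       task_id="upload_sheet_to_gcs",
#       spreadsheet_id="1A2b3C4d5E6f",           # hypothetical sheet id
#       destination_bucket="my-sheets-bucket",   # hypothetical bucket
#       destination_path="sheets/exports",
#       sheet_filter=["Sheet1"],
#   )
#
# execute() uploads one CSV per matching sheet title and both returns and XCom-pushes
# (key 'destination_objects') the list of created object names.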
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import csv
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.suite.hooks.sheets import GSheetsHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GoogleSheetsToGCSOperator(BaseOperator):
"""
Writes Google Sheet data into Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSheetsToGCSOperator`
:param spreadsheet_id: The Google Sheet ID to interact with.
:type spreadsheet_id: str
    :param sheet_filter: Defaults to None; if provided, should be a list of the sheet
        titles to pull from.
:type sheet_filter: List[str]
:param destination_bucket: The destination Google cloud storage bucket where the
report should be written to. (templated)
:type destination_bucket: str
:param destination_path: The Google cloud storage URI array for the object created by the operator.
For example: ``path/to/my/files``.
:type destination_path: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"spreadsheet_id",
"destination_bucket",
"destination_path",
"sheet_filter",
"impersonation_chain",
)
def __init__(
self,
*,
spreadsheet_id: str,
destination_bucket: str,
sheet_filter: Optional[List[str]] = None,
destination_path: Optional[str] = None,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.spreadsheet_id = spreadsheet_id
self.sheet_filter = sheet_filter
self.destination_bucket = destination_bucket
self.destination_path = destination_path
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def _upload_data(
self,
gcs_hook: GCSHook,
hook: GSheetsHook,
sheet_range: str,
sheet_values: List[Any],
) -> str:
# Construct destination file path
sheet = hook.get_spreadsheet(self.spreadsheet_id)
file_name = f"{sheet['properties']['title']}_{sheet_range}.csv".replace(" ", "_")
dest_file_name = (
f"{self.destination_path.strip('/')}/{file_name}" if self.destination_path else file_name
)
with NamedTemporaryFile("w+") as temp_file:
# Write data
writer = csv.writer(temp_file)
writer.writerows(sheet_values)
temp_file.flush()
# Upload to GCS
gcs_hook.upload(
bucket_name=self.destination_bucket,
object_name=dest_file_name,
filename=temp_file.name,
)
return dest_file_name
def execute(self, context: 'Context'):
sheet_hook = GSheetsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
# Pull data and upload
destination_array: List[str] = []
sheet_titles = sheet_hook.get_sheet_titles(
spreadsheet_id=self.spreadsheet_id, sheet_filter=self.sheet_filter
)
for sheet_range in sheet_titles:
data = sheet_hook.get_values(spreadsheet_id=self.spreadsheet_id, range_=sheet_range)
gcs_path_to_file = self._upload_data(gcs_hook, sheet_hook, sheet_range, data)
destination_array.append(gcs_path_to_file)
self.xcom_push(context, "destination_objects", destination_array)
return destination_array
|
########################################################################################################################
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if sys.version_info[0] < 3:
raise EnvironmentError("Hey, caveman, use Python 3.")
__doc__ = \
"""
PDB minimization for different GNB2 models.
"""
__author__ = "Matteo Ferla. [Github](https://github.com/matteoferla)"
__email__ = "matteo.ferla@gmail.com"
__date__ = "2019 A.D."
__license__ = "MIT"
__version__ = "1"
__citation__ = "TBA"
########################################################################################################################
import pyrosetta
pyrosetta.init()
def relax(pose, cycles:int=2):
scorefxn = pyrosetta.get_fa_scorefxn()
relax = pyrosetta.rosetta.protocols.relax.FastRelax(scorefxn, cycles)
relax.apply(pose)
print('Done')
################### alpha beta gamma ######################################
native_alpha = pyrosetta.rosetta.core.pose.Pose()
pyrosetta.rosetta.core.import_pose.pose_from_file(native_alpha, 'GNB2_alpha.pdb')
relax(native_alpha, 15)#equivalent to -relax:thorough
native_alpha.dump_pdb('GNB2_alpha.r.pdb')
################### GRK2 ######################################
native_alt = pyrosetta.rosetta.core.pose.Pose()
pyrosetta.rosetta.core.import_pose.pose_from_file(native_alt, 'GNB2_GRK.pdb')
relax(native_alt, 15)
native_alt.dump_pdb('GNB2_alt.r.pdb')
################### WITHOUT alpha ######################################
import pymol2
with pymol2.PyMOL() as pymol:
pymol.cmd.load('GNB2_alpha.pdb')
pymol.cmd.remove('chain A')
pymol.cmd.save('GNB2_alone.pdb')
native_alone = pyrosetta.rosetta.core.pose.Pose()
pyrosetta.rosetta.core.import_pose.pose_from_file(native_alone, 'GNB2_alone.pdb')
relax(native_alone, 15)
native_alone.dump_pdb('GNB2_alone.r.pdb')
################### phosphorylated ######################################
from Bio.SeqUtils import seq3
from michelanglo_protein import ProteinAnalyser, global_settings
global_settings.startup(data_folder='/home/matteo/Coding/Michelanglo/protein-data')
p = ProteinAnalyser(taxid=9606, uniprot='P62879').load()
native_phospho = pyrosetta.rosetta.core.pose.Pose()
pyrosetta.rosetta.core.import_pose.pose_from_file(native_phospho, 'GNB2_alone.pdb')
MutateResidue = pyrosetta.rosetta.protocols.simple_moves.MutateResidue
# KinaseMover = pyrosetta.rosetta.protocols.enzymatic_movers.KinaseMover
add_variant_type_to_residue = pyrosetta.rosetta.core.pose.add_variant_type_to_residue
pose2pdb = native_phospho.pdb_info().pdb2pose
for record in p.features['PSP_modified_residues']:
change = record['from_residue'] + '-' + record['ptm']
if record['ptm'] == 'ub':
continue
elif record['ptm'] == 'p':
patch = 'phosphorylated'
elif record['ptm'] == 'ac':
patch = 'acetylated'
elif record['ptm'] == 'm1':
patch = 'monomethylated'
elif record['ptm'] == 'm2':
patch = 'dimethylated'
elif record['ptm'] == 'm3':
patch = 'trimethylated'
else:
raise ValueError
    new_res = f'{seq3(record["from_residue"])}:{patch}'  # 'from_residue' comes from the PSP record, not the ProteinAnalyser
r = pose2pdb(res=int(record['residue_index']), chain='B')
MutateResidue(target=r, new_res=new_res).apply(native_phospho)
relax(native_phospho, 15)
native_phospho.dump_pdb('GNB2_phospho.r.pdb')
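# Worked example (illustrative, hypothetical record values) of the patched-residue name
# built in the loop above:
#
#   record = {'from_residue': 'S', 'ptm': 'p', 'residue_index': 59}
#   seq3(record['from_residue'])                        # -> 'Ser'
#   f"{seq3(record['from_residue'])}:phosphorylated"    # -> 'Ser:phosphorylated'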
|
########################################################################################################################
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if sys.version_info[0] < 3:
raise EnvironmentError("Hey, caveman, use Python 3.")
__doc__ = \
"""
PDB minimization for different GNB2 models.
"""
__author__ = "Matteo Ferla. [Github](https://github.com/matteoferla)"
__email__ = "matteo.ferla@gmail.com"
__date__ = "2019 A.D."
__license__ = "MIT"
__version__ = "1"
__citation__ = "TBA"
########################################################################################################################
import pyrosetta
pyrosetta.init()
def relax(pose, cycles:int=2):
scorefxn = pyrosetta.get_fa_scorefxn()
relax = pyrosetta.rosetta.protocols.relax.FastRelax(scorefxn, cycles)
relax.apply(pose)
print('Done')
################### alpha beta gamma ######################################
native_alpha = pyrosetta.rosetta.core.pose.Pose()
pyrosetta.rosetta.core.import_pose.pose_from_file(native_alpha, 'GNB2_alpha.pdb')
relax(native_alpha, 15)#equivalent to -relax:thorough
native_alpha.dump_pdb('GNB2_alpha.r.pdb')
################### GRK2 ######################################
native_alt = pyrosetta.rosetta.core.pose.Pose()
pyrosetta.rosetta.core.import_pose.pose_from_file(native_alt, 'GNB2_GRK.pdb')
relax(native_alt, 15)
native_alt.dump_pdb('GNB2_alt.r.pdb')
################### WITHOUT alpha ######################################
import pymol2
with pymol2.PyMOL() as pymol:
pymol.cmd.load('GNB2_alpha.pdb')
pymol.cmd.remove('chain A')
pymol.cmd.save('GNB2_alone.pdb')
native_alone = pyrosetta.rosetta.core.pose.Pose()
pyrosetta.rosetta.core.import_pose.pose_from_file(native_alone, 'GNB2_alone.pdb')
relax(native_alone, 15)
native_alone.dump_pdb('GNB2_alone.r.pdb')
################### phosphorylated ######################################
from Bio.SeqUtils import seq3
from michelanglo_protein import ProteinAnalyser, global_settings
global_settings.startup(data_folder='/home/matteo/Coding/Michelanglo/protein-data')
p = ProteinAnalyser(taxid=9606, uniprot='P62879').load()
native_phospho = pyrosetta.rosetta.core.pose.Pose()
pyrosetta.rosetta.core.import_pose.pose_from_file(native_phospho, 'GNB2_alone.pdb')
MutateResidue = pyrosetta.rosetta.protocols.simple_moves.MutateResidue
# KinaseMover = pyrosetta.rosetta.protocols.enzymatic_movers.KinaseMover
add_variant_type_to_residue = pyrosetta.rosetta.core.pose.add_variant_type_to_residue
pose2pdb = native_phospho.pdb_info().pdb2pose
for record in p.features['PSP_modified_residues']:
change = record['from_residue'] + '-' + record['ptm']
if record['ptm'] == 'ub':
continue
elif record['ptm'] == 'p':
patch = 'phosphorylated'
elif record['ptm'] == 'ac':
patch = 'acetylated'
elif record['ptm'] == 'm1':
patch = 'monomethylated'
elif record['ptm'] == 'm2':
patch = 'dimethylated'
elif record['ptm'] == 'm3':
patch = 'trimethylated'
else:
raise ValueError
new_res = f'{seq3(p["from_residue"])}:{patch}'
r = pose2pdb(res=int(record['residue_index']), chain='B')
MutateResidue(target=r, new_res=new_res).apply(native_phospho)
relax(native_phospho, 15)
native_phospho.dump_pdb('GNB2_phospho.r.pdb')
|
import requests
import sys
import re
def get_merged_pull_reqs_since_last_release(token):
"""
Get all the merged pull requests since the last release.
"""
stopPattern = r"^(r|R)elease v"
pull_reqs = []
found_last_release = False
page = 1
print("Getting PRs since last release.")
while not found_last_release:
data = get_merged_pull_reqs(token, page)
# assume we don't encounter it during the loop
last_release_index = 101
for i in range(len(data)):
if re.search(stopPattern, data[i]["title"]):
found_last_release = True
last_release_index = i
break
pull_reqs.extend(data[:last_release_index])
page += 1
# should contain all the PRs since last release
return pull_reqs
def get_merged_pull_reqs(token, page):
"""
Get the merged pull requests based on page. There are
100 results per page. See https://docs.github.com/en/rest/reference/pulls
for more details on the parameters.
:param token, a GitHub API token.
:param page, the page number.
"""
queryPath = "https://api.github.com/repos/devicons/devicon/pulls"
headers = {
"Authorization": f"token {token}"
}
params = {
"accept": "application/vnd.github.v3+json",
"state": "closed",
"per_page": 100,
"page": page
}
print(f"Querying the GitHub API for requests page #{page}")
response = requests.get(queryPath, headers=headers, params=params)
if not response:
print(f"Can't query the GitHub API. Status code is {response.status_code}. Message is {response.text}")
sys.exit(1)
closed_pull_reqs = response.json()
return [merged_pull_req
for merged_pull_req in closed_pull_reqs
if merged_pull_req["merged_at"] is not None]
def is_feature_icon(pull_req_data):
"""
Check whether the pullData is a feature:icon PR.
:param pull_req_data - the data on a specific pull request from GitHub.
:return true if the pullData has a label named "feature:icon"
"""
for label in pull_req_data["labels"]:
if label["name"] == "feature:icon":
return True
return False
def find_all_authors(pull_req_data, token):
"""
Find all the authors of a PR based on its commits.
:param pull_req_data - the data on a specific pull request from GitHub.
:param token - a GitHub API token.
"""
headers = {
"Authorization": f"token {token}"
}
response = requests.get(pull_req_data["commits_url"], headers=headers)
if not response:
print(f"Can't query the GitHub API. Status code is {response.status_code}")
print("Response is: ", response.text)
return
commits = response.json()
authors = set() # want unique authors only
for commit in commits:
try:
# this contains proper referenceable github name
authors.add(commit["author"]["login"])
except TypeError:
# special case
authors.add(commit["commit"]["author"]["name"])
print(f"This URL didn't have an `author` attribute: {pull_req_data["commits_url"]}")
return ", ".join(["@" + author for author in list(authors)])
|
import requests
import sys
import re
def get_merged_pull_reqs_since_last_release(token):
"""
Get all the merged pull requests since the last release.
"""
stopPattern = r"^(r|R)elease v"
pull_reqs = []
found_last_release = False
page = 1
print("Getting PRs since last release.")
while not found_last_release:
data = get_merged_pull_reqs(token, page)
# assume we don't encounter it during the loop
last_release_index = 101
for i in range(len(data)):
if re.search(stopPattern, data[i]["title"]):
found_last_release = True
last_release_index = i
break
pull_reqs.extend(data[:last_release_index])
page += 1
# should contain all the PRs since last release
return pull_reqs
def get_merged_pull_reqs(token, page):
"""
Get the merged pull requests based on page. There are
100 results per page. See https://docs.github.com/en/rest/reference/pulls
for more details on the parameters.
:param token, a GitHub API token.
:param page, the page number.
"""
queryPath = "https://api.github.com/repos/devicons/devicon/pulls"
headers = {
"Authorization": f"token {token}"
}
params = {
"accept": "application/vnd.github.v3+json",
"state": "closed",
"per_page": 100,
"page": page
}
print(f"Querying the GitHub API for requests page #{page}")
response = requests.get(queryPath, headers=headers, params=params)
if not response:
print(f"Can't query the GitHub API. Status code is {response.status_code}. Message is {response.text}")
sys.exit(1)
closed_pull_reqs = response.json()
return [merged_pull_req
for merged_pull_req in closed_pull_reqs
if merged_pull_req["merged_at"] is not None]
def is_feature_icon(pull_req_data):
"""
Check whether the pullData is a feature:icon PR.
:param pull_req_data - the data on a specific pull request from GitHub.
:return true if the pullData has a label named "feature:icon"
"""
for label in pull_req_data["labels"]:
if label["name"] == "feature:icon":
return True
return False
def find_all_authors(pull_req_data, token):
"""
Find all the authors of a PR based on its commits.
:param pull_req_data - the data on a specific pull request from GitHub.
:param token - a GitHub API token.
"""
headers = {
"Authorization": f"token {token}"
}
response = requests.get(pull_req_data["commits_url"], headers=headers)
if not response:
print(f"Can't query the GitHub API. Status code is {response.status_code}")
print("Response is: ", response.text)
return
commits = response.json()
authors = set() # want unique authors only
for commit in commits:
try:
# this contains proper referenceable github name
authors.add(commit["author"]["login"])
except TypeError:
# special case
authors.add(commit["commit"]["author"]["name"])
print(f"This URL didn't have an `author` attribute: {pull_req_data['commits_url']}")
return ", ".join(["@" + author for author in list(authors)])
|
# !/usr/bin/env python
"""
ExcisionFinder identifies allele-specific excision sites. Written in Python version 3.6.1.
Kathleen Keough et al 2017-2018.
Note: This version of the script is intended only for analysis of large cohorts, particularly the
1000 Genomes cohort. There is a more general purpose script for small cohorts and individuals with
more options, such as outputting sgRNAs for pairs identified.
Usage:
ExcisionFinder.py [-vs] <annots> <gene> <targdir> <maxcut> <cas_list> <bcf> <outdir> [--window=<window_in_bp>]
ExcisionFinder.py -h
Arguments:
annots Gene annotations file (gene_annots_wsize) filepath.
gene Gene you would like to analyze.
targdir Directory where the variant targetability HDF5 files are stored.
maxcut Maximum distance between cut position pairs.
cas_list Comma separated (no spaces!) list of Cas varieties to evaluate, options below.
outdir Directory to which you would like to write the output files.
Options:
-h Show this screen.
-v Run as verbose, print to stderr.
-s Only consider sgRNA sites where variant is in a PAM (strict).
--window=<window_in_bp> Window around the gene (in bp) to also consider [default: 0].
available Cas types = cpf1,SpCas9,SpCas9_VRER,SpCas9_EQR,SpCas9_VQR_1,SpCas9_VQR_2,StCas9,StCas9_2,SaCas9,SaCas9_KKH,nmCas9,cjCas9
"""
import pandas as pd
from pandas import HDFStore
import numpy as np
from functools import reduce
from docopt import docopt
import itertools
import regex as re
import logging
import subprocess
from io import StringIO
import os
import time
__version__ = '0.0.0'
# Minimum bcftools version enforced by check_bcftools(); 1.5 matches its error message.
REQUIRED_BCFTOOLS_VER = 1.5
def load_gene_annots(annots_path):
"""
Load gene annotation data (transcript data).
:param annots_path: str, filepath for gene_annots_wsize (Part of ExcisionFinder package).
:return: Refseq gene annotations file.
"""
gene_annots = pd.read_csv(annots_path, sep='\t', header=0, names=['name', 'chrom', 'txStart', 'txEnd', 'cdsStart', 'cdsEnd', 'exonCount',
'exonStarts', 'exonEnds', 'size'])
return gene_annots
def het(genotype):
"""
Determine whether a genotype in format A|G is het.
:param genotype: genotype, str.
:return: bool, True = het, False = hom.
"""
    hap1, hap2 = re.split(r'/|\|', genotype)
return hap1 != hap2
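# Example (illustrative): het('0|1') and het('0/1') are True, het('1|1') is False;
# both '/' and '|' separators are handled by the re.split() pattern above.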
def next_exon(variant_position, coding_exon_starts):
"""
get location of next coding exon after variant
:param coding_exon_starts: coding exon start positions, Pandas Series.
:param variant_position: chromosomal position, int
:return: chromosomal position of start of next coding exon, int
"""
greater_than_var = [x for x in coding_exon_starts if x > variant_position]
if not greater_than_var:
return False
else:
next_coding_exon_pos = min(greater_than_var)
return next_coding_exon_pos
def targ_pair(variant1, variant2, coding_positions, coding_exon_starts):
"""
Determine whether a pair of variants positions is targetable based on whether they might
disrupt an exon.
:param variant1: position of variant 1, int.
:param variant2: position of variant 2, int.
:param coding_positions: coding positions, set.
:param coding_exon_starts: Start positions of coding exons, Pandas Series.
:return: whether targetable or not, bool.
"""
low_var, high_var = sorted([variant1, variant2])
if low_var in coding_positions or high_var in coding_positions:
return True
else:
# checks whether larger variant position occurs in or after next exon
return bool(high_var >= next_exon(low_var, coding_exon_starts))
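# Worked example (illustrative, hypothetical coordinates): with coding exons spanning
# 100-150 and 200-250, i.e. coding_positions = set(range(100, 151)) | set(range(200, 251))
# and coding_exon_starts = [100, 200]:
#
#   targ_pair(160, 230, coding_positions, coding_exon_starts)   # True  (230 is coding)
#   targ_pair(160, 190, coding_positions, coding_exon_starts)   # False (190 < next exon start at 200)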
def translate_gene_name(gene_name):
"""
HDF5 throws all sort of errors when you have weird punctuation in the gene name, so
this translates it to a less offensive form.
"""
repls = ('-', 'dash'), ('.', 'period')
trans_gene_name = reduce(lambda a, kv: a.replace(*kv), repls, str(gene_name))
return trans_gene_name
class Gene:
"""Holds information for the gene"""
def __init__(self, official_gene_symbol, annots, window):
self.official_gene_symbol = official_gene_symbol
self.info = annots.query('index == @self.official_gene_symbol')
self.n_exons = self.info['exonCount'].item()
self.coding_start = self.info['cdsStart'].item()
self.coding_end = self.info['cdsEnd'].item()
self.coding_exons = [x for x in list(zip(list(map(int,self.info['exonStarts'].item().split(',')[:-1])),
list(map(int,self.info['exonEnds'].item().split(',')[:-1])))) if x[0] >= self.coding_start and \
x[1] <= self.coding_end]
self.n_coding_exons = len(self.coding_exons)
self.start = self.info['txStart'].item() - window
self.end = self.info['txEnd'].item() + window
self.chrom = annots.query('index == @self.official_gene_symbol')['chrom'].item()
def get_coding_positions_and_starts(self):
coding_positions = []
coding_exon_starts = []
for start, stop in self.coding_exons:
coding_positions.extend(list(range(start, stop+1)))
coding_exon_starts.append(start)
return coding_positions, coding_exon_starts
def check_bcftools():
"""
Checks bcftools version, and exits the program if the version is incorrect
"""
version = subprocess.run("bcftools -v | head -1 | cut -d ' ' -f2", shell=True,\
stdout=subprocess.PIPE).stdout.decode("utf-8").rstrip()
if float(version) >= REQUIRED_BCFTOOLS_VER:
print(f'bcftools version {version} running')
else:
print(f'Error: bcftools must be >=1.5. Current version: {version}')
exit()
class SafeHDFStore(HDFStore):
# from https://stackoverflow.com/questions/22522551/pandas-hdf5-as-a-database/29014295#29014295
# due to parallel write issues with HDF5
def __init__(self, *args, **kwargs):
probe_interval = kwargs.pop('probe_interval', 1)
self._lock = '%s.lock' % args[0]
while True:
try:
self._flock = os.open(self._lock, os.O_CREAT |
os.O_EXCL |
os.O_WRONLY)
break
except FileExistsError:
time.sleep(probe_interval)
HDFStore.__init__(self, *args, **kwargs)
def __exit__(self, *args, **kwargs):
HDFStore.__exit__(self, *args, **kwargs)
os.close(self._flock)
os.remove(self._lock)
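# Illustrative usage sketch (hypothetical path, key and DataFrame): the lock file taken
# in __init__ serialises concurrent writers, so parallel per-gene jobs can write safely:
#
#   with SafeHDFStore('/tmp/example_results.h5') as store:   # hypothetical path
#       store.put('some_gene', some_dataframe)               # hypothetical key / DataFrame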
def main(args):
annots = load_gene_annots(args['<annots>'])
gene = args['<gene>']
targ_df = args['<targdir>']
out_dir = args['<outdir>']
maxcut = int(args['<maxcut>'])
cas_list_append = args['<cas_list>'].split(',')
bcf = args['<bcf>']
window = int(args['--window'])
cas_list = ['all'] + cas_list_append
# define strictness level, which is whether or not variants near PAMs are considered
# along with those that are in PAMs
if args['-s']:
logging.info('Running as strict.')
strict_level = 'strict'
else:
strict_level = 'relaxed'
logging.info('Running as relaxed.')
logging.info('Now running ExcisionFinder on ' + gene + '.')
# grab info about relevant gene w/ class
MyGene = Gene(gene, annots, window)
# get number of coding exons in gene, must have at least 1 to continue
n_exons = MyGene.n_exons
n_coding_exons = MyGene.n_coding_exons
chrom = MyGene.chrom.replace('chr','')
if n_coding_exons < 1:
logging.error(f'{n_exons} total exons in this gene, {n_coding_exons} of which are coding.\
No coding exons in gene {gene}, exiting.')
with open(f'{out_dir}no_coding_exons.txt','a+') as f:
f.write(gene + '\n')
exit()
else:
logging.info(f'{n_exons} total exons in this gene, {n_coding_exons} of which are coding.')
# load targetability information for each variant
targ_df = pd.read_hdf(f'{targ_df}{chrom}_targ.hdf5', 'all', where=f'pos >= {MyGene.start} and pos <= {MyGene.end}')
# check whether there are annotated variants for this gene, abort otherwise
if targ_df.empty:
logging.error(f'No variants in 1KGP for gene {gene}')
with open(f'{out_dir}not_enough_hets.txt', 'a+') as fout:
fout.write(gene+'\n')
exit()
else:
logging.info(
f'Targetability data loaded, {targ_df.shape[0]} variants annotated in 1KGP for {gene}.')
# import region of interest genotypes
    bcf = f'{bcf}ALL.chr{chrom}_GRCh38.genotypes.20170504.bcf'  # this was for 1kgp
    bcl_v = f'bcftools view -g "het" -r {chrom}:{MyGene.start}-{MyGene.end} -H {bcf}'
samples_cmd = f'bcftools query -l {bcf}'
bcl_samps = subprocess.Popen(samples_cmd, shell=True, stdout=subprocess.PIPE)
samples=bcl_samps.communicate()[0].decode('utf-8').split('\n')[:-1]
col_names = ['chrom','pos','rsid','ref','alt','score','random','info','gt'] + samples
bcl_view = subprocess.Popen(bcl_v, shell=True, stdout=subprocess.PIPE)
gens = pd.read_csv(StringIO(bcl_view.communicate()[0].decode('utf-8')),sep='\t',
header=None, names=col_names, usecols=['chrom','pos','ref','alt']+samples)
logging.info('1KGP genotypes loaded.')
het_gens = gens[samples].applymap(het).copy()
enough_hets = list(het_gens.sum(axis=0).loc[lambda s: s >= 2].index)
logging.info(str(len(enough_hets)) + ' individuals have >= 2 het positions.')
if len(enough_hets) < 1:
logging.info('No individuals have at least 2 het sites, aborting analysis.')
with open(f'{out_dir}not_enough_hets.txt', 'a+') as fout:
fout.write(gene+'\n')
exit()
logging.info('Checking targetability of individuals with sufficient number of hets.')
# get variants in region
variants = sorted(gens.pos)
# set up targetability analyses
het_vars_per_ind = {} # get heterozygous variant positions for each individual
for ind in enough_hets:
het_vars_per_ind[ind] = gens.pos[het_gens[ind]].tolist()
# get variant combinations and extract targetable pairs
logging.info('Getting variant combos.')
variant1 = []
variant2 = []
coding_positions, coding_exon_starts = MyGene.get_coding_positions_and_starts()
for var1, var2 in itertools.product(variants, repeat=2):
        if (var1 != var2) and (max([var1, var2]) <= min([var1, var2]) + maxcut) and (targ_pair(var1, var2, coding_positions, coding_exon_starts)):  # maxcut comes from the <maxcut> CLI argument
variant1.append(var1)
variant2.append(var2)
else:
continue
logging.info('Combos obtained.')
# get rid of dups and make df
targ_pairs_df = pd.DataFrame({'var1':variant1, 'var2':variant2}).query('var1 < var2')
# check that each individual that has enough hets also has at least one of these pairs
# and specify which pairs they have
inds_w_targ_pair = {}
for ind in het_vars_per_ind.keys():
ind_vars = het_vars_per_ind[ind]
ind_targ_pairs = targ_pairs_df.loc[targ_pairs_df.isin(ind_vars).all(axis=1)].reset_index(drop=True).copy()
if ind_targ_pairs.empty:
continue
else:
inds_w_targ_pair[ind] = ind_targ_pairs
logging.info(f'{len(inds_w_targ_pair.keys())} individuals have at least one targetable pair of variants.')
if not inds_w_targ_pair:
logging.info(f'No individuals in 1KGP have at least 1 targetable variant pair for {gene}.')
with open(f'{out_dir}no_targetable_inds.txt', 'a+') as fout:
fout.write(gene+'\n')
exit()
# check targetability for each type of Cas
final_targ = pd.DataFrame({'sample':list(inds_w_targ_pair.keys())})
finaltargcols = [] # keeps track of columns for all cas types for later evaluating 'all' condition
for cas in cas_list[1:]: # skip all because is handled below faster
logging.info(f'Evaluating gene targetability for {cas}')
if args['-s']:
targ_vars_cas = targ_df.query(f'(makes_{cas}) or (breaks_{cas})').pos.tolist()
else:
targ_vars_cas = targ_df.query(f'(var_near_{cas}) or (makes_{cas}) or (breaks_{cas})').pos.tolist()
targ_pairs_cas = targ_pairs_df.loc[targ_pairs_df.isin(targ_vars_cas).all(axis=1)].reset_index(drop=True).copy()
# eliminate individuals that do not have at least one targetable pair for this specific cas
ind_targ_cas = []
for ind in list(inds_w_targ_pair.keys()):
ind_cas_targ_pairs = inds_w_targ_pair[ind].merge(targ_pairs_cas, how='inner').drop_duplicates()
if ind_cas_targ_pairs.empty:
ind_targ_cas.append(False)
continue
else:
# check that at least one pair of allele-specific cut sites is on the same haplotype in individual
if args['-s']:
ind_cas_targ_pairs['both_make'] = ind_cas_targ_pairs[['var1_make_pam','var2_make_pam']].all(axis=1)
ind_cas_targ_pairs['both_break'] = ind_cas_targ_pairs[['var1_break_pam','var2_break_pam']].all(axis=1)
ind_cas_targ_pairs['one_make_one_break_1'] = ind_cas_targ_pairs[['var1_make_pam','var2_break_pam']].all(axis=1)
ind_cas_targ_pairs['one_make_one_break_2'] = ind_cas_targ_pairs[['var2_make_pam','var1_break_pam']].all(axis=1)
gens_replace = {'0|1':'hap2','0|2':'hap2','0|3':'hap2',
'1|0':'hap1','2|0':'hap1','3|0':'hap1',
'0|0':'not_het','1|1':'not_het'}
ind_gens = gens[['pos',ind]].replace(gens_replace).copy()
ind_cas_targ_pairs['var1_hap'] = pd.merge(ind_cas_targ_pairs.copy(), ind_gens.copy(), left_on='var1', right_on='pos',
how='left')[ind]
ind_cas_targ_pairs['var2_hap'] = pd.merge(ind_cas_targ_pairs.copy(), ind_gens.copy(), left_on='var2', right_on='pos',
how='left')[ind]
ind_cas_targ_pairs['same_hap'] = np.where(ind_cas_targ_pairs['var1_hap'] == ind_cas_targ_pairs['var2_hap'],True,False)
ind_cas_targ_pairs['not_same_hap'] = ~ind_cas_targ_pairs['same_hap']
if ind_cas_targ_pairs[['both_make','same_hap']].all(axis=1).any() or ind_cas_targ_pairs[['both_break','same_hap']].all(axis=1).any():
ind_targ_cas.append(True)
continue
# check if pair where one makes, one breaks a PAM, and on different haplotypes
elif ind_cas_targ_pairs[['one_make_one_break_1','not_same_hap']].all(axis=1).any() or ind_cas_targ_pairs[['one_make_one_break_2','not_same_hap']].all(axis=1).any():
ind_targ_cas.append(True)
continue
# all possibilities exhausted, this person just isn't targetable at this gene
else:
ind_targ_cas.append(False)
continue
else:
# if both near PAM, haplotype doesn't have to be the same because both are allele-specific sgRNA sites
ind_cas_targ_pairs['both_near_pam'] = ind_cas_targ_pairs[['var1','var2']].isin(targ_df.query(f'var_near_{cas}').pos.tolist()).all(axis=1)
if ind_cas_targ_pairs['both_near_pam'].any(): # this doesn't work for 'strict' mode
ind_targ_cas.append(True)
continue
else:
# if none have both near a PAM, when both make or break, need to be same hap
ind_cas_targ_pairs['var1_make_pam'] = ind_cas_targ_pairs['var1'].isin(targ_df.query(f'makes_{cas}').pos.tolist())
ind_cas_targ_pairs['var2_make_pam'] = ind_cas_targ_pairs[['var2']].isin(targ_df.query(f'makes_{cas}').pos.tolist())
ind_cas_targ_pairs['var1_near_pam'] = ind_cas_targ_pairs['var1'].isin(targ_df.query(f'var_near_{cas}').pos.tolist())
ind_cas_targ_pairs['var2_near_pam'] = ind_cas_targ_pairs['var2'].isin(targ_df.query(f'var_near_{cas}').pos.tolist())
ind_cas_targ_pairs['var1_break_pam'] = ind_cas_targ_pairs[['var1']].isin(targ_df.query(f'breaks_{cas}').pos.tolist())
ind_cas_targ_pairs['var2_break_pam'] = ind_cas_targ_pairs[['var2']].isin(targ_df.query(f'breaks_{cas}').pos.tolist())
# if one var is near a pam and the other makes/breaks, haplotype doesn't matter
if not ind_cas_targ_pairs.query('(var1_near_pam and var2_make_pam) or (var1_near_pam and var2_break_pam) or (var2_near_pam and var1_make_pam) or (var2_near_pam and var1_break_pam)').empty:
ind_targ_cas.append(True)
continue
else:
ind_cas_targ_pairs['both_make'] = ind_cas_targ_pairs[['var1_make_pam','var2_make_pam']].all(axis=1)
ind_cas_targ_pairs['both_break'] = ind_cas_targ_pairs[['var1_break_pam','var2_break_pam']].all(axis=1)
ind_cas_targ_pairs['one_make_one_break_1'] = ind_cas_targ_pairs[['var1_make_pam','var2_break_pam']].all(axis=1)
ind_cas_targ_pairs['one_make_one_break_2'] = ind_cas_targ_pairs[['var2_make_pam','var1_break_pam']].all(axis=1)
gens_replace = {'0|1':'hap2','0|2':'hap2','0|3':'hap2',
'1|0':'hap1','2|0':'hap1','3|0':'hap1',
'0|0':'not_het','1|1':'not_het'}
ind_gens = gens[['pos',ind]].replace(gens_replace).copy()
ind_cas_targ_pairs['var1_hap'] = pd.merge(ind_cas_targ_pairs.copy(), ind_gens.copy(), left_on='var1', right_on='pos',
how='left')[ind]
ind_cas_targ_pairs['var2_hap'] = pd.merge(ind_cas_targ_pairs.copy(), ind_gens.copy(), left_on='var2', right_on='pos',
how='left')[ind]
ind_cas_targ_pairs['same_hap'] = np.where(ind_cas_targ_pairs['var1_hap'] == ind_cas_targ_pairs['var2_hap'],True,False)
ind_cas_targ_pairs['not_same_hap'] = ~ind_cas_targ_pairs['same_hap']
if ind_cas_targ_pairs[['both_make','same_hap']].all(axis=1).any() or ind_cas_targ_pairs[['both_break','same_hap']].all(axis=1).any():
ind_targ_cas.append(True)
continue
# check if pair where one makes, one breaks a PAM, and on different haplotypes
elif ind_cas_targ_pairs[['one_make_one_break_1','not_same_hap']].all(axis=1).any() or ind_cas_targ_pairs[['one_make_one_break_2','not_same_hap']].all(axis=1).any():
ind_targ_cas.append(True)
continue
# all possibilities exhausted, this person just isn't targetable at this gene
else:
ind_targ_cas.append(False)
continue
finaltargcols.append(f'targ_{cas}')
final_targ[f'targ_{cas}'] = ind_targ_cas
# add column summarizing targetability across assessed Cas varieties
final_targ['targ_all'] = final_targ[finaltargcols].any(axis=1)
# HDF has issues with certain characters
translated_gene_name = translate_gene_name(gene)
# save to HDF
# final_targ.to_hdf(f'{out_dir}chr{chrom}_gene_targ.h5',translated_gene_name, mode='a', comp_level=9, complib='blosc')
# make list of genes that actually get written to HDF5
with open(f'{out_dir}genes_evaluated.txt','a+') as f:
f.write(f'{translated_gene_name}\n')
# write gene dat to file
    final_targ.to_hdf(f'{out_dir}{translated_gene_name}.h5', 'all', complevel=9, complib='blosc')
logging.info('Done!')
if __name__ == '__main__':
arguments = docopt(__doc__, version=__version__)
if arguments['-v']:
logging.basicConfig(level=logging.INFO, format='[%(asctime)s %(name)s:%(levelname)s ]%(message)s')
else:
logging.basicConfig(level=logging.ERROR, format='[%(asctime)s %(name)s:%(levelname)s ]%(message)s')
logging.info(arguments)
main(arguments)
|
# !/usr/bin/env python
"""
ExcisionFinder identifies allele-specific excision sites. Written in Python version 3.6.1.
Kathleen Keough et al 2017-2018.
Note: This version of the script is intended only for analysis of large cohorts, particularly the
1000 Genomes cohort. There is a more general purpose script for small cohorts and individuals with
more options, such as outputting sgRNAs for pairs identified.
Usage:
ExcisionFinder.py [-vs] <annots> <gene> <targdir> <maxcut> <cas_list> <bcf> <outdir> [--window=<window_in_bp>]
ExcisionFinder.py -h
Arguments:
annots Gene annotations file (gene_annots_wsize) filepath.
gene Gene you would like to analyze.
targdir Directory where the variant targetability HDF5 files are stored.
maxcut Maximum distance between cut position pairs.
cas_list Comma separated (no spaces!) list of Cas varieties to evaluate, options below.
outdir Directory to which you would like to write the output files.
Options:
-h Show this screen.
-v Run as verbose, print to stderr.
-s Only consider sgRNA sites where variant is in a PAM (strict).
--window=<window_in_bp> Window around the gene (in bp) to also consider [default: 0].
available Cas types = cpf1,SpCas9,SpCas9_VRER,SpCas9_EQR,SpCas9_VQR_1,SpCas9_VQR_2,StCas9,StCas9_2,SaCas9,SaCas9_KKH,nmCas9,cjCas9
"""
import pandas as pd
from pandas import HDFStore
import numpy as np
from functools import reduce
from docopt import docopt
import itertools
import regex as re
import logging
import subprocess
from io import StringIO
import os
import time
__version__ = '0.0.0'
# Minimum bcftools version enforced by check_bcftools(); 1.5 matches its error message.
REQUIRED_BCFTOOLS_VER = 1.5
def load_gene_annots(annots_path):
"""
Load gene annotation data (transcript data).
:param annots_path: str, filepath for gene_annots_wsize (Part of ExcisionFinder package).
:return: Refseq gene annotations file.
"""
gene_annots = pd.read_csv(annots_path, sep='\t', header=0, names=['name', 'chrom', 'txStart', 'txEnd', 'cdsStart', 'cdsEnd', 'exonCount',
'exonStarts', 'exonEnds', 'size'])
return gene_annots
def het(genotype):
"""
Determine whether a genotype in format A|G is het.
:param genotype: genotype, str.
:return: bool, True = het, False = hom.
"""
    hap1, hap2 = re.split(r'/|\|', genotype)
return hap1 != hap2
def next_exon(variant_position, coding_exon_starts):
"""
get location of next coding exon after variant
:param coding_exon_starts: coding exon start positions, Pandas Series.
:param variant_position: chromosomal position, int
:return: chromosomal position of start of next coding exon, int
"""
greater_than_var = [x for x in coding_exon_starts if x > variant_position]
if not greater_than_var:
return False
else:
next_coding_exon_pos = min(greater_than_var)
return next_coding_exon_pos
def targ_pair(variant1, variant2, coding_positions, coding_exon_starts):
"""
Determine whether a pair of variants positions is targetable based on whether they might
disrupt an exon.
:param variant1: position of variant 1, int.
:param variant2: position of variant 2, int.
:param coding_positions: coding positions, set.
:param coding_exon_starts: Start positions of coding exons, Pandas Series.
:return: whether targetable or not, bool.
"""
low_var, high_var = sorted([variant1, variant2])
if low_var in coding_positions or high_var in coding_positions:
return True
else:
# checks whether larger variant position occurs in or after next exon
return bool(high_var >= next_exon(low_var, coding_exon_starts))
def translate_gene_name(gene_name):
"""
HDF5 throws all sort of errors when you have weird punctuation in the gene name, so
this translates it to a less offensive form.
"""
repls = ('-', 'dash'), ('.', 'period')
trans_gene_name = reduce(lambda a, kv: a.replace(*kv), repls, str(gene_name))
return trans_gene_name
class Gene:
"""Holds information for the gene"""
def __init__(self, official_gene_symbol, annots, window):
self.official_gene_symbol = official_gene_symbol
self.info = annots.query('index == @self.official_gene_symbol')
self.n_exons = self.info['exonCount'].item()
self.coding_start = self.info['cdsStart'].item()
self.coding_end = self.info['cdsEnd'].item()
self.coding_exons = [x for x in list(zip(list(map(int,self.info['exonStarts'].item().split(',')[:-1])),
list(map(int,self.info['exonEnds'].item().split(',')[:-1])))) if x[0] >= self.coding_start and \
x[1] <= self.coding_end]
self.n_coding_exons = len(self.coding_exons)
self.start = self.info['txStart'].item() - window
self.end = self.info['txEnd'].item() + window
self.chrom = annots.query('index == @self.official_gene_symbol')['chrom'].item()
def get_coding_positions_and_starts(self):
coding_positions = []
coding_exon_starts = []
for start, stop in self.coding_exons:
coding_positions.extend(list(range(start, stop+1)))
coding_exon_starts.append(start)
return coding_positions, coding_exon_starts
def check_bcftools():
"""
Checks bcftools version, and exits the program if the version is incorrect
"""
version = subprocess.run("bcftools -v | head -1 | cut -d ' ' -f2", shell=True,\
stdout=subprocess.PIPE).stdout.decode("utf-8").rstrip()
if float(version) >= REQUIRED_BCFTOOLS_VER:
print(f'bcftools version {version} running')
else:
print(f"Error: bcftools must be >=1.5. Current version: {version}")
exit()
class SafeHDFStore(HDFStore):
# from https://stackoverflow.com/questions/22522551/pandas-hdf5-as-a-database/29014295#29014295
# due to parallel write issues with HDF5
def __init__(self, *args, **kwargs):
probe_interval = kwargs.pop("probe_interval", 1)
self._lock = "%s.lock" % args[0]
while True:
try:
self._flock = os.open(self._lock, os.O_CREAT |
os.O_EXCL |
os.O_WRONLY)
break
except FileExistsError:
time.sleep(probe_interval)
HDFStore.__init__(self, *args, **kwargs)
def __exit__(self, *args, **kwargs):
HDFStore.__exit__(self, *args, **kwargs)
os.close(self._flock)
os.remove(self._lock)
def main(args):
annots = load_gene_annots(args['<annots>'])
gene = args['<gene>']
targ_df = args['<targdir>']
out_dir = args['<outdir>']
maxcut = int(args['<maxcut>'])
cas_list_append = args['<cas_list>'].split(',')
bcf = args['<bcf>']
window = int(args['--window'])
cas_list = ['all'] + cas_list_append
# define strictness level, which is whether or not variants near PAMs are considered
# along with those that are in PAMs
if args['-s']:
logging.info('Running as strict.')
strict_level = 'strict'
else:
strict_level = 'relaxed'
logging.info('Running as relaxed.')
logging.info('Now running ExcisionFinder on ' + gene + '.')
# grab info about relevant gene w/ class
MyGene = Gene(gene, annots, window)
# get number of coding exons in gene, must have at least 1 to continue
n_exons = MyGene.n_exons
n_coding_exons = MyGene.n_coding_exons
chrom = MyGene.chrom.replace('chr','')
if n_coding_exons < 1:
logging.error(f'{n_exons} total exons in this gene, {n_coding_exons} of which are coding.\
No coding exons in gene {gene}, exiting.')
with open(f'{out_dir}no_coding_exons.txt','a+') as f:
f.write(gene + '\n')
exit()
else:
logging.info(f'{n_exons} total exons in this gene, {n_coding_exons} of which are coding.')
# load targetability information for each variant
targ_df = pd.read_hdf(f'{targ_df}{chrom}_targ.hdf5', 'all', where=f'pos >= {MyGene.start} and pos <= {MyGene.end}')
# check whether there are annotated variants for this gene, abort otherwise
if targ_df.empty:
logging.error(f'No variants in 1KGP for gene {gene}')
with open(f'{out_dir}not_enough_hets.txt', 'a+') as fout:
fout.write(gene+'\n')
exit()
else:
logging.info(
f"Targetability data loaded, {targ_df.shape[0]} variants annotated in 1KGP for {gene}.")
# import region of interest genotypes
bcf = f'{bcf}ALL.chr{chrom}_GRCh38.genotypes.20170504.bcf'  # this was for 1kgp
bcl_v = f'bcftools view -g "het" -r {chrom}:{MyGene.start}-{MyGene.end} -H {bcf}'
samples_cmd = f'bcftools query -l {bcf}'
bcl_samps = subprocess.Popen(samples_cmd, shell=True, stdout=subprocess.PIPE)
samples=bcl_samps.communicate()[0].decode("utf-8").split('\n')[:-1]
col_names = ['chrom','pos','rsid','ref','alt','score','random','info','gt'] + samples
bcl_view = subprocess.Popen(bcl_v, shell=True, stdout=subprocess.PIPE)
gens = pd.read_csv(StringIO(bcl_view.communicate()[0].decode("utf-8")),sep='\t',
header=None, names=col_names, usecols=['chrom','pos','ref','alt']+samples)
logging.info("1KGP genotypes loaded.")
het_gens = gens[samples].applymap(het).copy()
enough_hets = list(het_gens.sum(axis=0).loc[lambda s: s >= 2].index)
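# het_gens.sum(axis=0) counts heterozygous sites per sample, so enough_hets keeps
# only the individuals with at least two usable positions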
logging.info(str(len(enough_hets)) + ' individuals have >= 2 het positions.')
if len(enough_hets) < 1:
logging.info('No individuals have at least 2 het sites, aborting analysis.')
with open(f'{out_dir}not_enough_hets.txt', 'a+') as fout:
fout.write(gene+'\n')
exit()
logging.info('Checking targetability of individuals with sufficient number of hets.')
# get variants in region
variants = sorted(gens.pos)
# set up targetability analyses
het_vars_per_ind = {} # get heterozygous variant positions for each individual
for ind in enough_hets:
het_vars_per_ind[ind] = gens.pos[het_gens[ind]].tolist()
# get variant combinations and extract targetable pairs
logging.info('Getting variant combos.')
variant1 = []
variant2 = []
coding_positions, coding_exon_starts = MyGene.get_coding_positions_and_starts()
for var1, var2 in itertools.product(variants, repeat=2):
if (var1 != var2) and (max([var1,var2]) <= min([var1,var2])+10000) and (targ_pair(var1, var2, coding_positions, coding_exon_starts)):
variant1.append(var1)
variant2.append(var2)
else:
continue
logging.info('Combos obtained.')
# get rid of dups and make df
targ_pairs_df = pd.DataFrame({'var1':variant1, 'var2':variant2}).query('var1 < var2')
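# itertools.product yields each pair twice ((a, b) and (b, a)); keeping only var1 < var2
# retains every unordered pair exactly once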
# check that each individual that has enough hets also has at least one of these pairs
# and specify which pairs they have
inds_w_targ_pair = {}
for ind in het_vars_per_ind.keys():
ind_vars = het_vars_per_ind[ind]
ind_targ_pairs = targ_pairs_df.loc[targ_pairs_df.isin(ind_vars).all(axis=1)].reset_index(drop=True).copy()
if ind_targ_pairs.empty:
continue
else:
inds_w_targ_pair[ind] = ind_targ_pairs
logging.info(f'{len(inds_w_targ_pair.keys())} individuals have at least one targetable pair of variants.')
if not inds_w_targ_pair:
logging.info(f'No individuals in 1KGP have at least 1 targetable variant pair for {gene}.')
with open(f'{out_dir}no_targetable_inds.txt', 'a+') as fout:
fout.write(gene+'\n')
exit()
# check targetability for each type of Cas
final_targ = pd.DataFrame({'sample':list(inds_w_targ_pair.keys())})
finaltargcols = [] # keeps track of columns for all cas types for later evaluating "all" condition
for cas in cas_list[1:]: # skip 'all'; it is derived from the per-Cas columns after the loop
logging.info(f'Evaluating gene targetability for {cas}')
if args['-s']:
targ_vars_cas = targ_df.query(f'(makes_{cas}) or (breaks_{cas})').pos.tolist()
else:
targ_vars_cas = targ_df.query(f'(var_near_{cas}) or (makes_{cas}) or (breaks_{cas})').pos.tolist()
targ_pairs_cas = targ_pairs_df.loc[targ_pairs_df.isin(targ_vars_cas).all(axis=1)].reset_index(drop=True).copy()
# eliminate individuals that do not have at least one targetable pair for this specific cas
ind_targ_cas = []
for ind in list(inds_w_targ_pair.keys()):
ind_cas_targ_pairs = inds_w_targ_pair[ind].merge(targ_pairs_cas, how='inner').drop_duplicates()
if ind_cas_targ_pairs.empty:
ind_targ_cas.append(False)
continue
else:
# check that at least one pair of allele-specific cut sites is on the same haplotype in individual
if args['-s']:
# build the per-variant make/break PAM flags needed below (strict mode)
ind_cas_targ_pairs['var1_make_pam'] = ind_cas_targ_pairs['var1'].isin(targ_df.query(f'makes_{cas}').pos.tolist())
ind_cas_targ_pairs['var2_make_pam'] = ind_cas_targ_pairs['var2'].isin(targ_df.query(f'makes_{cas}').pos.tolist())
ind_cas_targ_pairs['var1_break_pam'] = ind_cas_targ_pairs['var1'].isin(targ_df.query(f'breaks_{cas}').pos.tolist())
ind_cas_targ_pairs['var2_break_pam'] = ind_cas_targ_pairs['var2'].isin(targ_df.query(f'breaks_{cas}').pos.tolist())
ind_cas_targ_pairs['both_make'] = ind_cas_targ_pairs[['var1_make_pam','var2_make_pam']].all(axis=1)
ind_cas_targ_pairs['both_break'] = ind_cas_targ_pairs[['var1_break_pam','var2_break_pam']].all(axis=1)
ind_cas_targ_pairs['one_make_one_break_1'] = ind_cas_targ_pairs[['var1_make_pam','var2_break_pam']].all(axis=1)
ind_cas_targ_pairs['one_make_one_break_2'] = ind_cas_targ_pairs[['var2_make_pam','var1_break_pam']].all(axis=1)
gens_replace = {'0|1':'hap2','0|2':'hap2','0|3':'hap2',
'1|0':'hap1','2|0':'hap1','3|0':'hap1',
'0|0':'not_het','1|1':'not_het'}
ind_gens = gens[['pos',ind]].replace(gens_replace).copy()
ind_cas_targ_pairs['var1_hap'] = pd.merge(ind_cas_targ_pairs.copy(), ind_gens.copy(), left_on='var1', right_on='pos',
how='left')[ind]
ind_cas_targ_pairs['var2_hap'] = pd.merge(ind_cas_targ_pairs.copy(), ind_gens.copy(), left_on='var2', right_on='pos',
how='left')[ind]
ind_cas_targ_pairs['same_hap'] = np.where(ind_cas_targ_pairs['var1_hap'] == ind_cas_targ_pairs['var2_hap'],True,False)
ind_cas_targ_pairs['not_same_hap'] = ~ind_cas_targ_pairs['same_hap']
if ind_cas_targ_pairs[['both_make','same_hap']].all(axis=1).any() or ind_cas_targ_pairs[['both_break','same_hap']].all(axis=1).any():
ind_targ_cas.append(True)
continue
# check if pair where one makes, one breaks a PAM, and on different haplotypes
elif ind_cas_targ_pairs[['one_make_one_break_1','not_same_hap']].all(axis=1).any() or ind_cas_targ_pairs[['one_make_one_break_2','not_same_hap']].all(axis=1).any():
ind_targ_cas.append(True)
continue
# all possibilities exhausted, this person just isn't targetable at this gene
else:
ind_targ_cas.append(False)
continue
else:
# if both near PAM, haplotype doesn't have to be the same because both are allele-specific sgRNA sites
ind_cas_targ_pairs['both_near_pam'] = ind_cas_targ_pairs[['var1','var2']].isin(targ_df.query(f'var_near_{cas}').pos.tolist()).all(axis=1)
if ind_cas_targ_pairs['both_near_pam'].any(): # this doesn't work for "strict" mode
ind_targ_cas.append(True)
continue
else:
# if none have both near a PAM, when both make or break, need to be same hap
ind_cas_targ_pairs['var1_make_pam'] = ind_cas_targ_pairs['var1'].isin(targ_df.query(f'makes_{cas}').pos.tolist())
ind_cas_targ_pairs['var2_make_pam'] = ind_cas_targ_pairs['var2'].isin(targ_df.query(f'makes_{cas}').pos.tolist())
ind_cas_targ_pairs['var1_near_pam'] = ind_cas_targ_pairs['var1'].isin(targ_df.query(f'var_near_{cas}').pos.tolist())
ind_cas_targ_pairs['var2_near_pam'] = ind_cas_targ_pairs['var2'].isin(targ_df.query(f'var_near_{cas}').pos.tolist())
ind_cas_targ_pairs['var1_break_pam'] = ind_cas_targ_pairs['var1'].isin(targ_df.query(f'breaks_{cas}').pos.tolist())
ind_cas_targ_pairs['var2_break_pam'] = ind_cas_targ_pairs['var2'].isin(targ_df.query(f'breaks_{cas}').pos.tolist())
# if one var is near a pam and the other makes/breaks, haplotype doesn't matter
if not ind_cas_targ_pairs.query('(var1_near_pam and var2_make_pam) or (var1_near_pam and var2_break_pam) or (var2_near_pam and var1_make_pam) or (var2_near_pam and var1_break_pam)').empty:
ind_targ_cas.append(True)
continue
else:
ind_cas_targ_pairs['both_make'] = ind_cas_targ_pairs[['var1_make_pam','var2_make_pam']].all(axis=1)
ind_cas_targ_pairs['both_break'] = ind_cas_targ_pairs[['var1_break_pam','var2_break_pam']].all(axis=1)
ind_cas_targ_pairs['one_make_one_break_1'] = ind_cas_targ_pairs[['var1_make_pam','var2_break_pam']].all(axis=1)
ind_cas_targ_pairs['one_make_one_break_2'] = ind_cas_targ_pairs[['var2_make_pam','var1_break_pam']].all(axis=1)
gens_replace = {'0|1':'hap2','0|2':'hap2','0|3':'hap2',
'1|0':'hap1','2|0':'hap1','3|0':'hap1',
'0|0':'not_het','1|1':'not_het'}
ind_gens = gens[['pos',ind]].replace(gens_replace).copy()
ind_cas_targ_pairs['var1_hap'] = pd.merge(ind_cas_targ_pairs.copy(), ind_gens.copy(), left_on='var1', right_on='pos',
how='left')[ind]
ind_cas_targ_pairs['var2_hap'] = pd.merge(ind_cas_targ_pairs.copy(), ind_gens.copy(), left_on='var2', right_on='pos',
how='left')[ind]
ind_cas_targ_pairs['same_hap'] = np.where(ind_cas_targ_pairs['var1_hap'] == ind_cas_targ_pairs['var2_hap'],True,False)
ind_cas_targ_pairs['not_same_hap'] = ~ind_cas_targ_pairs['same_hap']
if ind_cas_targ_pairs[['both_make','same_hap']].all(axis=1).any() or ind_cas_targ_pairs[['both_break','same_hap']].all(axis=1).any():
ind_targ_cas.append(True)
continue
# check if pair where one makes, one breaks a PAM, and on different haplotypes
elif ind_cas_targ_pairs[['one_make_one_break_1','not_same_hap']].all(axis=1).any() or ind_cas_targ_pairs[['one_make_one_break_2','not_same_hap']].all(axis=1).any():
ind_targ_cas.append(True)
continue
# all possibilities exhausted, this person just isn't targetable at this gene
else:
ind_targ_cas.append(False)
continue
finaltargcols.append(f'targ_{cas}')
final_targ[f'targ_{cas}'] = ind_targ_cas
# add column summarizing targetability across assessed Cas varieties
final_targ['targ_all'] = final_targ[finaltargcols].any(axis=1)
# HDF has issues with certain characters
translated_gene_name = translate_gene_name(gene)
# save to HDF
# final_targ.to_hdf(f'{out_dir}chr{chrom}_gene_targ.h5', translated_gene_name, mode='a', complevel=9, complib='blosc')
# make list of genes that actually get written to HDF5
with open(f'{out_dir}genes_evaluated.txt','a+') as f:
f.write(f'{translated_gene_name}\n')
# write gene data to file
final_targ.to_hdf(f'{out_dir}{translated_gene_name}.h5', 'all', complevel=9, complib='blosc')
logging.info('Done!')
if __name__ == '__main__':
arguments = docopt(__doc__, version=__version__)
if arguments['-v']:
logging.basicConfig(level=logging.INFO, format='[%(asctime)s %(name)s:%(levelname)s ]%(message)s')
else:
logging.basicConfig(level=logging.ERROR, format='[%(asctime)s %(name)s:%(levelname)s ]%(message)s')
logging.info(arguments)
main(arguments)
|
import sys
from pathlib import Path
import h5py
import numpy as np
import matplotlib.pyplot as plt
TWOTHETA_KEYS = ["2th", "2theta", "twotheta"]
Q_KEYS = ["q"]
INTENSITY_KEYS = ["i", "intensity", "int"]
STACK_INDICES_KEY = "stack_indices"
DPI = 300
FIGSIZE = (12,4)
FONTSIZE_LABELS = 20
FONTSIZE_TICKS = 14
LINEWIDTH = 1
COLORS = dict(bg_blue='#0B3C5D', bg_red='#B82601', bg_green='#1c6b0a',
bg_lightblue='#328CC1', bg_darkblue='#062F4F',
bg_yellow='#D9B310', bg_darkred='#984B43', bg_bordeaux='#76323F',
bg_olivegreen='#626E60', bg_yellowgrey='#AB987A',
bg_brownorange='#C09F80')
COLOR = COLORS["bg_blue"]
def h5_extract_to_dict(h5_file):
d = {}
with h5py.File(h5_file, mode="r") as f:
    # read datasets from the 'entry' group when present, otherwise from the file root
    grp = f["entry"] if "entry" in f.keys() else f
    for k in grp.keys():
        d[k.lower()] = np.array(grp[k])
return d
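# Illustrative sketch of the returned layout (assumption based on the keys used below;
# the file name is hypothetical): dataset names are lower-cased and mapped to numpy arrays.
#
#     d = h5_extract_to_dict("h5/scan_0001.h5")
#     # e.g. {"2th": array([...]), "i": array([[...], [...]])}  -- one intensity row per scan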
def dict_to_xy_write(d, fname):
twotheta, q, intensity = None, None, None
dkeys = d.keys()
for k in TWOTHETA_KEYS:
if k in dkeys:
twotheta = d[k]
for k in Q_KEYS:
if k in dkeys:
q = d[k]
for k in INTENSITY_KEYS:
if k in dkeys:
intensity = d[k]
if STACK_INDICES_KEY in dkeys:
stack_indices = d[STACK_INDICES_KEY]
if isinstance(twotheta, np.ndarray) and isinstance(intensity, np.ndarray):
if intensity.ndim > 1:
zfill = len(str(intensity.shape[0]))
scans_index = intensity.shape[0]
else:
    zfill = 1  # single scan: zfill is still needed for the fallback file name below
    scans_index = 1
for i in range(scans_index):
if STACK_INDICES_KEY in dkeys:
print(f"\t\t\t{stack_indices[i]}")
else:
print(f"\t\t\t{i}")
if intensity.ndim > 1:
x, y = twotheta, intensity[i,:]
else:
x, y = twotheta, intensity
xy = np.column_stack((x,y))
h = "2theta\tintensity"
if STACK_INDICES_KEY in dkeys:
np.savetxt(f"xy/{fname}_{stack_indices[i]}.xy", xy,
encoding="utf-8", header=h)
else:
np.savetxt(f"xy/{fname}_{str(i).zfill(zfill)}.xy", xy,
encoding="utf-8", header=h)
elif isinstance(q, np.ndarray) and isinstance(intensity, np.ndarray):
if intensity.ndim > 1:
zfill = len(str(intensity.shape[0]))
scans_index = intensity.shape[0]
else:
    zfill = 1  # single scan: zfill is still needed for the fallback file name below
    scans_index = 1
for i in range(scans_index):
if STACK_INDICES_KEY in dkeys:
print(f"\t\t\t{stack_indices[i]}")
else:
print(f"\t\t\t{i}")
if intensity.ndim > 1:
x, y = q, intensity[i,:]
else:
x, y = q, intensity
xy = np.column_stack((x,y))
h = "q\tintensity"
if STACK_INDICES_KEY in dkeys:
np.savetxt(f"xy/{fname}_{stack_indices[i]}.xy", xy,
encoding="utf-8", header=h)
else:
np.savetxt(f"xy/{fname}_{str(i).zfill(zfill)}.xy", xy,
encoding="utf-8", header=h)
return None
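# Illustrative example (values are made up): each .xy file written above is a plain
# two-column text file whose header line np.savetxt prefixes with '# ':
#
#     # 2theta	intensity
#     5.000000000000000000e+00 1.234000000000000000e+02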
def dict_to_plot(d, fname):
twotheta, q, intensity = None, None, None
dkeys = d.keys()
for k in TWOTHETA_KEYS:
if k in dkeys:
twotheta = d[k]
for k in Q_KEYS:
if k in dkeys:
q = d[k]
for k in INTENSITY_KEYS:
if k in dkeys:
intensity = d[k]
if STACK_INDICES_KEY in dkeys:
stack_indices = d[STACK_INDICES_KEY]
if isinstance(twotheta, np.ndarray) and isinstance(intensity, np.ndarray):
if intensity.ndim > 1:
zfill = len(str(intensity.shape[0]))
scans_index = intensity.shape[0]
else:
    zfill = 1  # single scan: zfill is still needed for the fallback plot name below
    scans_index = 1
for i in range(scans_index):
if STACK_INDICES_KEY in dkeys:
print(f"\t\t\t{stack_indices[i]}")
else:
print(f"\t\t\t{i}")
if intensity.ndim > 1:
x, y = twotheta, intensity[i,:]
else:
x, y = twotheta, intensity
plt.figure(dpi=DPI, figsize=FIGSIZE)
plt.plot(x, y, c=COLOR, lw=LINEWIDTH)
plt.xlim(np.amin(x), np.amax(x))
plt.xlabel(r"$2\theta$ $[\degree]$", fontsize=FONTSIZE_LABELS)
plt.ylabel(r"$I$ $[\mathrm{arb. u.}]$", fontsize=FONTSIZE_LABELS)
plt.tick_params(axis='both', which='major',
labelsize=FONTSIZE_LABELS)
plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
if STACK_INDICES_KEY in dkeys:
plt.savefig(f"png/{fname}_{stack_indices[i]}.png",
bbox_inches="tight")
plt.savefig(f"pdf/{fname}_{stack_indices[i]}.pdf",
bbox_inches="tight")
else:
plt.savefig(f"png/{fname}_{str(i).zfill(zfill)}.png",
bbox_inches="tight")
plt.savefig(f"pdf/{fname}_{str(i).zfill(zfill)}.pdf",
bbox_inches="tight")
plt.close()
if isinstance(q, np.ndarray) and isinstance(intensity, np.ndarray):
if intensity.ndim > 1:
zfill = len(str(intensity.shape[0]))
scans_index = intensity.shape[0]
else:
    zfill = 1  # single scan: zfill is still needed for the fallback plot name below
    scans_index = 1
for i in range(scans_index):
if STACK_INDICES_KEY in dkeys:
print(f"\t\t\t{stack_indices[i]}")
else:
print(f"\t\t\t{i}")
if intensity.ndim > 1:
x, y = q, intensity[i,:]
else:
x, y = q, intensity
plt.figure(dpi=DPI, figsize=FIGSIZE)
plt.plot(x, y, c=COLOR, lw=LINEWIDTH)
plt.xlim(np.amin(x), np.amax(x))
if np.amax(q) > 40 :
plt.xlabel(r"$Q$ $[\mathrm{nm}^{-1}]$",
fontsize=FONTSIZE_LABELS)
else:
plt.xlabel(r"$Q$ $[\mathrm{\AA}^{-1}]$",
fontsize=FONTSIZE_LABELS)
plt.ylabel(r"$I$ $[\mathrm{arb. u.}]$", fontsize=FONTSIZE_LABELS)
plt.tick_params(axis='both', which='major',
labelsize=FONTSIZE_LABELS)
plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
if STACK_INDICES_KEY in dkeys:
plt.savefig(f"png/{fname}_{stack_indices[i]}.png",
bbox_inches="tight")
plt.savefig(f"pdf/{fname}_{stack_indices[i]}.pdf",
bbox_inches="tight")
else:
plt.savefig(f"png/{fname}_{str(i).zfill(zfill)}.png",
bbox_inches="tight")
plt.savefig(f"pdf/{fname}_{str(i).zfill(zfill)}.pdf",
bbox_inches="tight")
plt.close()
return None
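# Each scan is rendered twice, as png/<name>.png and pdf/<name>.pdf, using the
# module-level DPI, FIGSIZE and COLOR settings; the file names follow the same
# zero-padded / stack-index scheme as the .xy files above.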
def merge_dict(d):
twotheta, q, intensity = None, None, None
dkeys = d.keys()
d_merged = {}
for k in TWOTHETA_KEYS:
if k in dkeys:
twotheta = d[k]
d_merged[k] = twotheta
for k in Q_KEYS:
if k in dkeys:
q = d[k]
d_merged[k] = q
for k in INTENSITY_KEYS:
if k in dkeys:
intensity = d[k]
intensity_key = k
if isinstance(intensity, np.ndarray):
zfill = len(str(intensity.shape[0]))
number_of_scans = intensity.shape[0]
scans_to_stack = int(input("\t\t\tHow many scans should be stacked "
"together?: "))
full_stacks = number_of_scans // scans_to_stack
remainder_to_stack = number_of_scans % scans_to_stack
stack_indices = []
for i in range(full_stacks):
stack = intensity[i*scans_to_stack, :]
stack_indices_str = str(i*scans_to_stack).zfill(zfill)
for j in range(1, scans_to_stack):
stack += intensity[i*scans_to_stack+j, :]
stack_indices.append(f"{stack_indices_str}-"
f"{str(i*scans_to_stack+j).zfill(zfill)}")
if i == 0:
d_merged[intensity_key] = stack
else:
d_merged[intensity_key] = np.vstack((d_merged[intensity_key],
stack))
if remainder_to_stack != 0:
stack = intensity[(full_stacks * scans_to_stack),:]
stack_indices_str = str(full_stacks * scans_to_stack).zfill(zfill)
for j in range(1, remainder_to_stack):
    # sum the leftover scans into one partial stack
    stack += intensity[(full_stacks * scans_to_stack) + j, :]
if remainder_to_stack == 1:
    stack_indices.append(f"{stack_indices_str}")
else:
    last_scan = str((full_stacks * scans_to_stack) + remainder_to_stack - 1).zfill(zfill)
    stack_indices.append(f"{stack_indices_str}-{last_scan}")
d_merged[intensity_key] = np.vstack((d_merged[intensity_key],
stack))
d_merged[STACK_INDICES_KEY] = stack_indices
return d_merged
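# Illustrative worked example (assumes the stacking behaves as sketched above): with
# number_of_scans = 10 and scans_to_stack = 4, full_stacks = 2 and remainder_to_stack = 2,
# giving merged rows labelled roughly '00-03', '04-07' and '08-09'.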
def main():
h5_path = Path.cwd() / "h5"
if not h5_path.exists():
h5_path.mkdir()
print(f"{80*'-'}\nA folder called 'h5' has been created. Please "
f"place your .h5 files there and\nrerun the code.\n{80*'-'}")
sys.exit()
h5_files = list(h5_path.glob("*.h5"))
if len(h5_files) == 0:
print(f"{80*'-'}\nNo .h5 files were found in the 'h5' folder. Please "
f"place your .h5 files there\nand rerun the code.\n{80*'-'}")
sys.exit()
output_paths = ["xy", "png", "pdf"]
for e in output_paths:
p = Path.cwd() / e
if not p.exists():
p.mkdir()
print("Working w. files...")
for h5_file in h5_files:
try:
print(f"{80*'-'}\n\tFile: {h5_file.name}")
fname = h5_file.stem
d = h5_extract_to_dict(h5_file)
for k in INTENSITY_KEYS:
if k in d.keys():
print(f"\t\tNumber of scans: {d[k].shape[0]}")
mergereq = input("\t\tDo you want to merge any of the scans? "
"(y/n): ")
while mergereq not in ["y", "n"]:
mergereq = input("\t\tDo you want to merge any of the scans? "
"(y/n): ")
if mergereq == "y":
writereq = input("\t\tDo you want to write .xy files for all "
"merged scans? (y/n): ")
while writereq not in ["y", "n"]:
writereq = input("\t\tDo you want to write .xy files for "
"all merged scans? (y/n): ")
else:
writereq = input("\t\tDo you want to write .xy files for all "
"scans? (y/n): ")
while writereq not in ["y", "n"]:
    writereq = input("\t\tDo you want to write .xy files for "
                     "all scans? (y/n): ")
if mergereq == "y":
plotreq = input("\t\tDo you want to plot all merged scans? "
"(y/n): ")
while plotreq not in ["y", "n"]:
plotreq = input("\t\tDo you want to plot all merged scans? "
"(y/n): ")
else:
plotreq = input("\t\tDo you want to plot all scans? (y/n): ")
while plotreq not in ["y", "n"]:
plotreq = input("\t\tDo you want to plot all scans? "
"(y/n): ")
if mergereq.lower() == "y":
d_merged = merge_dict(d)
if writereq == "y":
print("\t\tWriting to two-column files of merged scans...")
dict_to_xy_write(d_merged, fname)
print("\t\tPlotting merged scans...")
if plotreq == "y":
dict_to_plot(d_merged, fname)
else:
if writereq == "y":
print("\t\tWriting to two-column files for each scan...")
dict_to_xy_write(d, fname)
if plotreq == "y":
print("\t\tPlotting each scan...")
dict_to_plot(d, fname)
except KeyError:
print(f"\t\tThis file seems to contain non-integrated data. File "
"skipped.")
print(f"{80*'-'}\nDone working w. files.\n{80*'-'}")
return None
if __name__ == "__main__":
main()
# End of file.
|
import json
import os
import time
import uuid
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from random import randint
from urllib.parse import parse_qs, urlparse, urlsplit
import pystac
from pydantic.datetime_parse import parse_datetime
from pystac.utils import datetime_to_str
from shapely.geometry import Polygon
from stac_fastapi.sqlalchemy.core import CoreCrudClient
from stac_fastapi.types.core import LandingPageMixin
from stac_fastapi.types.rfc3339 import rfc3339_str_to_datetime
def test_create_and_delete_item(app_client, load_test_data):
"""Test creation and deletion of a single item (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
resp = app_client.delete(
f"/collections/{test_item['collection']}/items/{resp.json()['id']}"
)
assert resp.status_code == 200
def test_create_item_conflict(app_client, load_test_data):
"""Test creation of an item which already exists (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 409
def test_create_item_duplicate(app_client, load_test_data):
"""Test creation of an item id which already exists but in a different collection(transactions extension)"""
# add test_item to test-collection
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# add test_item to test-collection again, resource already exists
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 409
# create "test-collection-2"
collection_2 = load_test_data("test_collection.json")
collection_2["id"] = "test-collection-2"
resp = app_client.post("/collections", json=collection_2)
assert resp.status_code == 200
# add test_item to test-collection-2, posts successfully
test_item["collection"] = "test-collection-2"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
def test_delete_item_duplicate(app_client, load_test_data):
"""Test creation of an item id which already exists but in a different collection(transactions extension)"""
# add test_item to test-collection
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# create "test-collection-2"
collection_2 = load_test_data("test_collection.json")
collection_2["id"] = "test-collection-2"
resp = app_client.post("/collections", json=collection_2)
assert resp.status_code == 200
# add test_item to test-collection-2
test_item["collection"] = "test-collection-2"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# delete test_item from test-collection
test_item["collection"] = "test-collection"
resp = app_client.delete(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 200
# test-item in test-collection has already been deleted
resp = app_client.delete(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 404
# test-item in test-collection-2 still exists, was not deleted
test_item["collection"] = "test-collection-2"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 409
def test_update_item_duplicate(app_client, load_test_data):
"""Test creation of an item id which already exists but in a different collection(transactions extension)"""
# add test_item to test-collection
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# create "test-collection-2"
collection_2 = load_test_data("test_collection.json")
collection_2["id"] = "test-collection-2"
resp = app_client.post("/collections", json=collection_2)
assert resp.status_code == 200
# add test_item to test-collection-2
test_item["collection"] = "test-collection-2"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# update gsd in test_item, test-collection-2
test_item["properties"]["gsd"] = 16
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
updated_item = resp.json()
assert updated_item["properties"]["gsd"] == 16
# update gsd in test_item, test-collection
test_item["collection"] = "test-collection"
test_item["properties"]["gsd"] = 17
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
updated_item = resp.json()
assert updated_item["properties"]["gsd"] == 17
# test_item in test-collection, updated gsd = 17
resp = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 200
item = resp.json()
assert item["properties"]["gsd"] == 17
# test_item in test-collection-2, updated gsd = 16
test_item["collection"] = "test-collection-2"
resp = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 200
item = resp.json()
assert item["properties"]["gsd"] == 16
def test_delete_missing_item(app_client, load_test_data):
"""Test deletion of an item which does not exist (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.delete(f"/collections/{test_item['collection']}/items/hijosh")
assert resp.status_code == 404
def test_create_item_missing_collection(app_client, load_test_data):
"""Test creation of an item without a parent collection (transactions extension)"""
test_item = load_test_data("test_item.json")
test_item["collection"] = "stac is cool"
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 422
def test_update_item_already_exists(app_client, load_test_data):
"""Test updating an item which already exists (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
assert test_item["properties"]["gsd"] != 16
test_item["properties"]["gsd"] = 16
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
updated_item = resp.json()
assert updated_item["properties"]["gsd"] == 16
def test_update_new_item(app_client, load_test_data):
"""Test updating an item which does not exist (transactions extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 404
def test_update_item_missing_collection(app_client, load_test_data):
"""Test updating an item without a parent collection (transactions extension)"""
test_item = load_test_data("test_item.json")
# Create the item
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# Try to update collection of the item
test_item["collection"] = "stac is cool"
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 404
def test_update_item_geometry(app_client, load_test_data):
test_item = load_test_data("test_item.json")
# Create the item
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# Update the geometry of the item
test_item["geometry"]["coordinates"] = [[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]]
resp = app_client.put(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# Fetch the updated item
resp = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert resp.status_code == 200
assert resp.json()["geometry"]["coordinates"] == [
[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
]
def test_get_item(app_client, load_test_data):
"""Test read an item by id (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
get_item = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert get_item.status_code == 200
def test_returns_valid_item(app_client, load_test_data):
"""Test validates fetched item with jsonschema"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
get_item = app_client.get(
f"/collections/{test_item['collection']}/items/{test_item['id']}"
)
assert get_item.status_code == 200
item_dict = get_item.json()
# Mock root to allow validation
mock_root = pystac.Catalog(
id="test", description="test desc", href="https://example.com"
)
item = pystac.Item.from_dict(item_dict, preserve_dict=False, root=mock_root)
item.validate()
def test_get_item_collection(app_client, load_test_data):
"""Test read an item collection (core)"""
item_count = randint(1, 4)
test_item = load_test_data("test_item.json")
for idx in range(item_count):
_test_item = deepcopy(test_item)
_test_item["id"] = test_item["id"] + str(idx)
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=_test_item
)
assert resp.status_code == 200
resp = app_client.get(f"/collections/{test_item['collection']}/items")
assert resp.status_code == 200
item_collection = resp.json()
assert item_collection["context"]["matched"] == len(range(item_count))
def test_pagination(app_client, load_test_data):
"""Test item collection pagination (paging extension)"""
item_count = 10
test_item = load_test_data("test_item.json")
for idx in range(item_count):
_test_item = deepcopy(test_item)
_test_item["id"] = test_item["id"] + str(idx)
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=_test_item
)
assert resp.status_code == 200
resp = app_client.get(
f"/collections/{test_item['collection']}/items", params={"limit": 3}
)
assert resp.status_code == 200
first_page = resp.json()
assert first_page["context"]["returned"] == 3
url_components = urlsplit(first_page["links"][0]["href"])
resp = app_client.get(f"{url_components.path}?{url_components.query}")
assert resp.status_code == 200
second_page = resp.json()
assert second_page["context"]["returned"] == 3
def test_item_timestamps(app_client, load_test_data):
"""Test created and updated timestamps (common metadata)"""
test_item = load_test_data("test_item.json")
start_time = datetime.now(timezone.utc)
time.sleep(2)
# Confirm `created` timestamp
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
item = resp.json()
created_dt = parse_datetime(item["properties"]["created"])
assert resp.status_code == 200
assert start_time < created_dt < datetime.now(timezone.utc)
time.sleep(2)
# Confirm `updated` timestamp
item["properties"]["proj:epsg"] = 4326
resp = app_client.put(f"/collections/{test_item['collection']}/items", json=item)
assert resp.status_code == 200
updated_item = resp.json()
# Created shouldn't change on update
assert item["properties"]["created"] == updated_item["properties"]["created"]
assert parse_datetime(updated_item["properties"]["updated"]) > created_dt
def test_item_search_by_id_post(app_client, load_test_data):
"""Test POST search by item id (core)"""
ids = ["test1", "test2", "test3"]
for id in ids:
test_item = load_test_data("test_item.json")
test_item["id"] = id
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {"collections": [test_item["collection"]], "ids": ids}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == len(ids)
assert set([feat["id"] for feat in resp_json["features"]]) == set(ids)
def test_item_search_spatial_query_post(app_client, load_test_data):
"""Test POST search with spatial query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {
"collections": [test_item["collection"]],
"intersects": test_item["geometry"],
}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_temporal_query_post(app_client, load_test_data):
"""Test POST search with single-tailed spatio-temporal query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
item_date = rfc3339_str_to_datetime(test_item["properties"]["datetime"])
item_date = item_date + timedelta(seconds=1)
params = {
"collections": [test_item["collection"]],
"intersects": test_item["geometry"],
"datetime": f"../{datetime_to_str(item_date)}",
}
resp = app_client.post("/search", json=params)
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_temporal_window_post(app_client, load_test_data):
"""Test POST search with two-tailed spatio-temporal query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
item_date = rfc3339_str_to_datetime(test_item["properties"]["datetime"])
item_date_before = item_date - timedelta(seconds=1)
item_date_after = item_date + timedelta(seconds=1)
params = {
"collections": [test_item["collection"]],
"intersects": test_item["geometry"],
"datetime": f"{datetime_to_str(item_date_before)}/{datetime_to_str(item_date_after)}",
}
resp = app_client.post("/search", json=params)
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_temporal_open_window(app_client, load_test_data):
"""Test POST search with open spatio-temporal query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
for dt in ["/", "../", "/..", "../.."]:
resp = app_client.post("/search", json={"datetime": dt})
assert resp.status_code == 400
def test_item_search_sort_post(app_client, load_test_data):
"""Test POST search with sorting (sort extension)"""
first_item = load_test_data("test_item.json")
item_date = rfc3339_str_to_datetime(first_item["properties"]["datetime"])
resp = app_client.post(
f"/collections/{first_item['collection']}/items", json=first_item
)
assert resp.status_code == 200
second_item = load_test_data("test_item.json")
second_item["id"] = "another-item"
another_item_date = item_date - timedelta(days=1)
second_item["properties"]["datetime"] = datetime_to_str(another_item_date)
resp = app_client.post(
f"/collections/{second_item['collection']}/items", json=second_item
)
assert resp.status_code == 200
params = {
"collections": [first_item["collection"]],
"sortby": [{"field": "datetime", "direction": "desc"}],
}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == first_item["id"]
assert resp_json["features"][1]["id"] == second_item["id"]
def test_item_search_by_id_get(app_client, load_test_data):
"""Test GET search by item id (core)"""
ids = ["test1", "test2", "test3"]
for id in ids:
test_item = load_test_data("test_item.json")
test_item["id"] = id
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {"collections": test_item["collection"], "ids": ",".join(ids)}
resp = app_client.get("/search", params=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == len(ids)
assert set([feat["id"] for feat in resp_json["features"]]) == set(ids)
def test_item_search_bbox_get(app_client, load_test_data):
"""Test GET search with spatial query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {
"collections": test_item["collection"],
"bbox": ",".join([str(coord) for coord in test_item["bbox"]]),
}
resp = app_client.get("/search", params=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_get_without_collections(app_client, load_test_data):
"""Test GET search without specifying collections"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {
"bbox": ",".join([str(coord) for coord in test_item["bbox"]]),
}
resp = app_client.get("/search", params=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_temporal_window_get(app_client, load_test_data):
"""Test GET search with spatio-temporal query (core)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
item_date = rfc3339_str_to_datetime(test_item["properties"]["datetime"])
item_date_before = item_date - timedelta(seconds=1)
item_date_after = item_date + timedelta(seconds=1)
params = {
"collections": test_item["collection"],
"bbox": ",".join([str(coord) for coord in test_item["bbox"]]),
"datetime": f"{datetime_to_str(item_date_before)}/{datetime_to_str(item_date_after)}",
}
resp = app_client.get("/search", params=params)
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_sort_get(app_client, load_test_data):
"""Test GET search with sorting (sort extension)"""
first_item = load_test_data("test_item.json")
item_date = rfc3339_str_to_datetime(first_item["properties"]["datetime"])
resp = app_client.post(
f"/collections/{first_item['collection']}/items", json=first_item
)
assert resp.status_code == 200
second_item = load_test_data("test_item.json")
second_item["id"] = "another-item"
another_item_date = item_date - timedelta(days=1)
second_item["properties"]["datetime"] = datetime_to_str(another_item_date)
resp = app_client.post(
f"/collections/{second_item['collection']}/items", json=second_item
)
assert resp.status_code == 200
params = {"collections": [first_item["collection"]], "sortby": "-datetime"}
resp = app_client.get("/search", params=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == first_item["id"]
assert resp_json["features"][1]["id"] == second_item["id"]
def test_item_search_post_without_collection(app_client, load_test_data):
"""Test POST search without specifying a collection"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {
"bbox": test_item["bbox"],
}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == test_item["id"]
def test_item_search_properties_jsonb(app_client, load_test_data):
"""Test POST search with JSONB query (query extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# EPSG is a JSONB key
params = {"query": {"proj:epsg": {"gt": test_item["properties"]["proj:epsg"] + 1}}}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 0
def test_item_search_properties_field(app_client, load_test_data):
"""Test POST search indexed field with query (query extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# Orientation is an indexed field
params = {"query": {"orientation": {"eq": "south"}}}
resp = app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 0
def test_item_search_get_query_extension(app_client, load_test_data):
"""Test GET search with JSONB query (query extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
# EPSG is a JSONB key
params = {
"collections": [test_item["collection"]],
"query": json.dumps(
{"proj:epsg": {"gt": test_item["properties"]["proj:epsg"] + 1}}
),
}
resp = app_client.get("/search", params=params)
assert resp.json()["context"]["returned"] == 0
params["query"] = json.dumps(
{"proj:epsg": {"eq": test_item["properties"]["proj:epsg"]}}
)
resp = app_client.get("/search", params=params)
resp_json = resp.json()
assert resp_json["context"]["returned"] == 1
assert (
resp_json["features"][0]["properties"]["proj:epsg"]
== test_item["properties"]["proj:epsg"]
)
def test_get_missing_item_collection(app_client):
"""Test reading a collection which does not exist"""
resp = app_client.get("/collections/invalid-collection/items")
assert resp.status_code == 200
def test_pagination_item_collection(app_client, load_test_data):
"""Test item collection pagination links (paging extension)"""
test_item = load_test_data("test_item.json")
ids = []
# Ingest 5 items
for idx in range(5):
uid = str(uuid.uuid4())
test_item["id"] = uid
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
ids.append(uid)
# Paginate through all 5 items with a limit of 1 (expecting 5 requests)
page = app_client.get(
f"/collections/{test_item['collection']}/items", params={"limit": 1}
)
idx = 0
item_ids = []
while True:
idx += 1
page_data = page.json()
item_ids.append(page_data["features"][0]["id"])
next_link = list(filter(lambda l: l["rel"] == "next", page_data["links"]))
if not next_link:
break
query_params = parse_qs(urlparse(next_link[0]["href"]).query)
page = app_client.get(
f"/collections/{test_item['collection']}/items",
params=query_params,
)
# Our limit is 1 so we expect len(ids) number of requests before we run out of pages
assert idx == len(ids)
# Confirm we have paginated through all items
assert not set(item_ids) - set(ids)
def test_pagination_post(app_client, load_test_data):
"""Test POST pagination (paging extension)"""
test_item = load_test_data("test_item.json")
ids = []
# Ingest 5 items
for idx in range(5):
uid = str(uuid.uuid4())
test_item["id"] = uid
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
ids.append(uid)
# Paginate through all 5 items with a limit of 1 (expecting 5 requests)
request_body = {"ids": ids, "limit": 1}
page = app_client.post("/search", json=request_body)
idx = 0
item_ids = []
while True:
idx += 1
page_data = page.json()
item_ids.append(page_data["features"][0]["id"])
next_link = list(filter(lambda l: l["rel"] == "next", page_data["links"]))
if not next_link:
break
# Merge request bodies
request_body.update(next_link[0]["body"])
page = app_client.post("/search", json=request_body)
# Our limit is 1 so we expect len(ids) number of requests before we run out of pages
assert idx == len(ids)
# Confirm we have paginated through all items
assert not set(item_ids) - set(ids)
def test_pagination_token_idempotent(app_client, load_test_data):
"""Test that pagination tokens are idempotent (paging extension)"""
test_item = load_test_data("test_item.json")
ids = []
# Ingest 5 items
for idx in range(5):
uid = str(uuid.uuid4())
test_item["id"] = uid
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
ids.append(uid)
page = app_client.get("/search", params={"ids": ",".join(ids), "limit": 3})
page_data = page.json()
next_link = list(filter(lambda l: l["rel"] == "next", page_data["links"]))
# Confirm token is idempotent
resp1 = app_client.get(
"/search", params=parse_qs(urlparse(next_link[0]["href"]).query)
)
resp2 = app_client.get(
"/search", params=parse_qs(urlparse(next_link[0]["href"]).query)
)
resp1_data = resp1.json()
resp2_data = resp2.json()
# Two different requests with the same pagination token should return the same items
assert [item["id"] for item in resp1_data["features"]] == [
item["id"] for item in resp2_data["features"]
]
def test_field_extension_get(app_client, load_test_data):
"""Test GET search with included fields (fields extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
params = {"fields": "+properties.proj:epsg,+properties.gsd"}
resp = app_client.get("/search", params=params)
feat_properties = resp.json()["features"][0]["properties"]
assert not set(feat_properties) - {"proj:epsg", "gsd", "datetime"}
def test_field_extension_post(app_client, load_test_data):
"""Test POST search with included and excluded fields (fields extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
body = {
"fields": {
"exclude": ["assets.B1"],
"include": ["properties.eo:cloud_cover", "properties.orientation"],
}
}
resp = app_client.post("/search", json=body)
resp_json = resp.json()
assert "B1" not in resp_json["features"][0]["assets"].keys()
assert not set(resp_json["features"][0]["properties"]) - {
"orientation",
"eo:cloud_cover",
"datetime",
}
def test_field_extension_exclude_and_include(app_client, load_test_data):
"""Test POST search including/excluding same field (fields extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
body = {
"fields": {
"exclude": ["properties.eo:cloud_cover"],
"include": ["properties.eo:cloud_cover"],
}
}
resp = app_client.post("/search", json=body)
resp_json = resp.json()
assert "eo:cloud_cover" not in resp_json["features"][0]["properties"]
def test_field_extension_exclude_default_includes(app_client, load_test_data):
"""Test POST search excluding a forbidden field (fields extension)"""
test_item = load_test_data("test_item.json")
resp = app_client.post(
f"/collections/{test_item['collection']}/items", json=test_item
)
assert resp.status_code == 200
body = {"fields": {"exclude": ["geometry"]}}
resp = app_client.post("/search", json=body)
resp_json = resp.json()
assert "geometry" not in resp_json["features"][0]
def test_search_intersects_and_bbox(app_client):
"""Test POST search intersects and bbox are mutually exclusive (core)"""
bbox = [-118, 34, -117, 35]
geoj = Polygon.from_bounds(*bbox).__geo_interface__
params = {"bbox": bbox, "intersects": geoj}
resp = app_client.post("/search", json=params)
assert resp.status_code == 400
def test_get_missing_item(app_client, load_test_data):
"""Test read item which does not exist (transactions extension)"""
test_coll = load_test_data("test_collection.json")
resp = app_client.get(f"/collections/{test_coll['id']}/items/invalid-item")
assert resp.status_code == 404
def test_search_invalid_query_field(app_client):
body = {"query": {"gsd": {"lt": 100}, "invalid-field": {"eq": 50}}}
resp = app_client.post("/search", json=body)
assert resp.status_code == 400
def test_search_bbox_errors(app_client):
body = {"query": {"bbox": [0]}}
resp = app_client.post("/search", json=body)
assert resp.status_code == 400
body = {"query": {"bbox": [100.0, 0.0, 0.0, 105.0, 1.0, 1.0]}}
resp = app_client.post("/search", json=body)
assert resp.status_code == 400
params = {"bbox": "100.0,0.0,0.0,105.0"}
resp = app_client.get("/search", params=params)
assert resp.status_code == 400
def test_conformance_classes_configurable():
"""Test conformance class configurability"""
landing = LandingPageMixin()
landing_page = landing._landing_page(
base_url="http://test/test",
conformance_classes=["this is a test"],
extension_schemas=[],
)
assert landing_page["conformsTo"][0] == "this is a test"
# Update environment to avoid key error on client instantiation
os.environ["READER_CONN_STRING"] = "testing"
os.environ["WRITER_CONN_STRING"] = "testing"
client = CoreCrudClient(base_conformance_classes=["this is a test"])
assert client.conformance_classes()[0] == "this is a test"
def test_search_datetime_validation_errors(app_client):
bad_datetimes = [
"37-01-01T12:00:27.87Z",
"1985-13-12T23:20:50.52Z",
"1985-12-32T23:20:50.52Z",
"1985-12-01T25:20:50.52Z",
"1985-12-01T00:60:50.52Z",
"1985-12-01T00:06:61.52Z",
"1990-12-31T23:59:61Z",
"1986-04-12T23:20:50.52Z/1985-04-12T23:20:50.52Z",
]
for dt in bad_datetimes:
body = {"query": {"datetime": dt}}
resp = app_client.post("/search", json=body)
assert resp.status_code == 400
resp = app_client.get("/search?datetime={}".format(dt))
assert resp.status_code == 400
|
import copy
import time
from collections import OrderedDict
import torch
from data.dataloader import local_client_dataset, test_dataset
from models.utils import *
from utils.train_helper import validate_one_model
from utils.sampling import *
import numpy as np
from multiprocessing import Process
import time
def return_state_dict(network):
"""
save model to state_dict
"""
feat_model = {k: v.cpu() for k, v in network["feat_model"].state_dict().items()}
classifier = {k: v.cpu() for k, v in network["classifier"].state_dict().items()}
return {"feat_model": feat_model, "classifier": classifier}
def load_state_dict(network, state_dict):
"""
restore model from state_dict
"""
network["feat_model"].load_state_dict(state_dict["feat_model"])
network["classifier"].load_state_dict(state_dict["classifier"])
# for name, param in state_dict["feat_model"].items():
# print(name, "\t", param.size())
return network
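# Round-trip sketch of how these two helpers are used for inter-process model transfer
# via the shared state_dict_list (see Fed_server.local_train and Fed_client.run below);
# tensors are moved to CPU first so the dicts can be shared safely between processes:
#   state_dict_list[i] = return_state_dict(server_network)       # producer side
#   network = load_state_dict(network, state_dict_list[i])       # consumer side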
def check_status(status_list, selected_idx, target_status):
"""
0. original status (1st FL round)
1. server finished sending: server_network --> mp_list
2. client received, and returned the model: mp_list --> networks[i] --> local_update --> mp_list
3. server received: mp_list --> networks[i]
--> 1. aggregation finished. networks[i] --> aggregate --> server_network --> mp_list, the status changes to 1
---
Return True: when all clients meet conditions, else False
"""
tmp = np.array(status_list)
return bool((tmp[selected_idx] == target_status).all())
def set_status(status_list, selected_idx, target_status):
"""
see function: check_status
"""
if type(selected_idx) is int:
selected_idx = [selected_idx]
for i in selected_idx:
status_list[i] = target_status
# print(f"set_status {target_status}")
def difference_models_norm_2(model_1, model_2):
"""
Return the norm 2 difference between the two model parameters. Used in FedProx.
"""
tensor_1_backbone = list(model_1["feat_model"].parameters())
tensor_1_classifier = list(model_1["classifier"].parameters())
tensor_2_backbone = list(model_2["feat_model"].parameters())
tensor_2_classifier = list(model_2["classifier"].parameters())
diff_list = [
torch.sum((tensor_1_backbone[i] - tensor_2_backbone[i]) ** 2)
for i in range(len(tensor_1_backbone))
]
diff_list.extend(
[
torch.sum((tensor_1_classifier[i] - tensor_2_classifier[i]) ** 2)
for i in range(len(tensor_1_classifier))
]
)
norm = sum(diff_list)
return norm
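# Sketch of how this norm enters the FedProx objective (mirrors the usage in
# Fed_client.train_lt below; `mu` is the proximal coefficient, 0.05 in this code):
#   prox = difference_models_norm_2(local_networks, global_networks)
#   loss = task_loss + (mu / 2) * prox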
class Fed_server(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self,
init_network,
criterion,
config,
per_client_data,
per_client_label,
idx_per_client_train,
test_data,
test_label,
state_list=None,
state_dict_list=None,
idx=None,
):
super(Fed_server, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = (
[],
[],
[],
[],
)
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights / self.client_weights.sum()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.server_network = copy.deepcopy(init_network)
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Server (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
""" Server does not need
# list of optimizer_dict. One optimizer for one network
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = \
local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, shuffle=True,
num_workers=num_workers, pin_memory=False)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers, pin_memory=False)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
"""
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(
train_data_all, train_label_all, config
)
self.test_dataset = test_dataset(test_data, test_label, config)
def local_train(self, selected_idx):
"""
server-side code
"""
# self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(
self.server_network
) # model transfer
set_status(self.status_list, selected_idx, 1)
# wait until all clients returning the model
while check_status(self.status_list, selected_idx, 2) is False:
time.sleep(0.1)
# mp_list --> self.networks (copys of client models on the server). Prepare for aggregation.
for i in selected_idx:
load_state_dict(self.networks[i], self.state_dict_list[i]) # model transfer
print("===> Local training finished")
def aggregation(self, selected_idx, mode):
"""
server-side code: aggregation
"""
if mode in ["fedavg", "fedavgm", "fedbn", "fedprox"]:
self.aggregate_layers(selected_idx, mode, backbone_only=False)
elif mode == "fedavg_fs":
opt = self.config["fl_opt"]
backbone_only, imprint, spread_out = (
opt["backbone_only"],
opt["imprint"],
opt["spread_out"],
)
self.aggregate_layers(selected_idx, "fedavg", backbone_only=backbone_only)
if imprint:
self.imprint(selected_idx)
if spread_out:
self.spread_out()
# model: self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(
self.server_network
) # model transfer
set_status(self.status_list, selected_idx, 0) # back to original
print("===> Aggregation finished")
def aggregate_layers(self, selected_idx, mode, backbone_only):
"""
backbone_only: choose to only aggregate backbone
"""
weights_sum = self.client_weights[selected_idx].sum()
with torch.no_grad():
if mode in ["fedavg", "fedprox"]:
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if "num_batches_tracked" in key:
# num_batches_tracked is a non trainable LongTensor
# and num_batches_tracked are the same for
# all clients for the given datasets
layer.data.copy_(
self.networks[0][net_name].state_dict()[key]
)
else:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx] / weights_sum
temp += (
weight
* self.networks[idx][net_name].state_dict()[key]
)
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedbn": # https://openreview.net/pdf?id=6YEQUn0QICG
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if "bn" not in key:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx] / weights_sum
temp += (
weight
* self.networks[idx][net_name].state_dict()[key]
)
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedavgm":
raise NotImplementedError
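# The fedavg/fedprox and fedbn branches above compute the same weighted average
# (FedBN simply skips parameters whose name contains "bn"):
#   w_global[key] = sum_i (client_weights[i] / weights_sum) * w_i[key]
# where client_weights is derived from idx_per_client_train (normalized over all clients)
# and weights_sum is the total weight of the currently selected clients.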
def evaluate_global(self, train_dataset=None, test_dataset=None):
"""
Accuracy of the global model and all classes
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.server_network, train_dataset, self.device, per_cls_acc=True
)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.server_network, test_dataset, self.device, per_cls_acc=True
)
print("===> Evaluation finished\n")
return (
train_loss_per_cls,
train_acc_per_cls,
test_loss_per_cls,
test_acc_per_cls,
)
def evaluate_global_all(self, train_dataset=None, test_dataset=None):
"""
Accuracy of models of all nodes and all classes
Return: all_results
shape: (4, num_client, num_cls), 4 for (train_loss, train_acc, test_loss, test_acc)
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
all_results = [None for i in range(self.num_clients)]
for idx in range(self.num_clients):
# evaluate on test set: per-class loss/acc
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.networks[idx], train_dataset, self.device, per_cls_acc=True
)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.networks[idx], test_dataset, self.device, per_cls_acc=True
)
all_results[idx] = (
train_loss_per_cls,
train_acc_per_cls,
test_loss_per_cls,
test_acc_per_cls,
)
print(f"===> Evaluation finished{idx}\n")
all_results = np.array(all_results).transpose(1, 0, 2)
return all_results
class Fed_client(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self,
init_network,
criterion,
config,
per_client_data,
per_client_label,
idx_per_client_train,
test_data,
test_label,
state_list=None,
state_dict_list=None,
idx=None,
):
super(Fed_client, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = (
[],
[],
[],
[],
)
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.device = config["device_client"][idx]
self.server_network = copy.deepcopy(init_network)
self.balanced_loader = config["fl_opt"]["balanced_loader"]
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
if config["fl_opt"]["aggregation"] == "fedprox":
self.fedprox = True
else:
self.fedprox = False
self.mu = 0.05
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights / self.client_weights.sum()
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Client {idx} (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
# list of network and optimizer_dict. One optimizer for one network.
if client_i != self.client_idx:
self.networks.append(None)
self.optimizers.append(None)
self.optimizers_stage2.append(None)
else:
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {
"params": self.networks[client_i]["classifier"].parameters(),
"lr": 0.001,
"momentum": 0.9,
"weight_decay": 0,
}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = local_client_dataset(
per_client_data[client_i], per_client_label[client_i], config
)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset,
batch_size=self.local_bs,
shuffle=True,
num_workers=num_workers,
pin_memory=False,
)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset,
batch_size=self.local_bs,
sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers,
pin_memory=False,
)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
""" clients do not need
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)
self.test_dataset = test_dataset(test_data, test_label, config)
"""
def run(self):
"""
client-side code
"""
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
self.networks[self.client_idx]["feat_model"].to(self.device)
self.networks[self.client_idx]["classifier"].to(self.device)
while 1:
while check_status(self.status_list, self.client_idx, 1) is False:
time.sleep(0.1)
# model: mp_list --> server_network
load_state_dict(
self.server_network, self.state_dict_list[self.client_idx]
) # model transfer
self.train_lt(self.client_idx) # local model updating
# self.networks[i] --> mp_list
self.state_dict_list[self.client_idx] = return_state_dict(
self.networks[self.client_idx]
) # model transfer
set_status(self.status_list, self.client_idx, 2)
def train_lt(self, idx):
"""
client-side code
---
Args:
- idx: the index in all clients (e.g., 50) or selected clients (e.g., 10).
If self.prefetch is True: the index in selected clients;
if self.prefetch is False: the index in all clients.
"""
idx_in_all = idx
# server broadcast the model to clients
"""
# optimizer will not work if use this, because optimizer needs the params from the model
# self.networks[idx_in_all] = copy.deepcopy(self.server_network)
"""
for net_name, net in self.server_network.items(): # feat_model, classifier
state_dict = self.networks[idx_in_all][net_name].state_dict()
for key, layer in net.state_dict().items():
state_dict[key].data.copy_(layer.data)
for net in self.networks[idx_in_all].values():
net.train()
for net in self.server_network.values():
net.train()
teacher = self.server_network
# torch.cuda.empty_cache()
"""
(Per-cls) Covariance Calculation
"""
if self.feat_aug:
# probability for augmentation for every class
max_num = max(self.local_num_per_cls[idx])
prob = torch.tensor(
[1.0 - i / max_num for i in self.local_num_per_cls[idx]]
)
# obtain features and labels under eval mode
feat_list, label_list = [], []
# self.networks[idx_in_all]['feat_model'].eval()
for (imgs, labels, indexs) in self.train_loaders[idx]:
with torch.no_grad():
imgs = imgs.to(self.device)
feat_list.append(teacher["feat_model"](imgs).cpu())
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
# self.networks[idx_in_all]['feat_model'].train()
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor(
[unique_labels.index(i) for i in label_list]
) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feats, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feats)
# calculate the variance
sampled_data, sample_label = [], []
per_cls_cov = []
for feats in feats_per_cls:
if len(feats) > 1:
per_cls_cov.append(np.cov(torch.stack(feats, 1).numpy()))
else:
per_cls_cov.append(np.zeros((feats[0].shape[0], feats[0].shape[0])))
per_cls_cov = np.array(per_cls_cov)
# per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls])
cov = np.average(
per_cls_cov, axis=0, weights=self.local_num_per_cls[idx]
) # covariance for feature dimension, shape: e.g., (128, 128)
# pre-generate deviation
divider = 500
pointer = 0
augs = (
torch.from_numpy(
np.random.multivariate_normal(
mean=np.zeros(cov.shape[0]),
cov=cov, # covariance for feature dimension, shape: e.g., (128, 128)
size=divider,
)
)
.float()
.to(self.device)
)
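# Summary of the feature-space augmentation prepared above (applied in the loop below):
#   p(augment | class c) = 1 - n_c / max_c       (rarer classes are perturbed more often)
#   eps ~ N(0, Sigma), with Sigma the per-class feature covariances averaged with weights n_c
#   f_aug = f + degree * eps                     (degree = 1 here)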
with torch.set_grad_enabled(True):
losses_cls = 0
losses_kd = 0
##########################
#### stage 1 training ####
##########################
for epoch in range(self.local_ep):
"""
model update
"""
if self.local_ep > 10: # local training mode
print(epoch, end=" ")
if self.balanced_loader:
tmp_loader = self.train_loader_balanced[idx]
else:
tmp_loader = self.train_loaders[idx]
for (imgs, labels, indexs) in tmp_loader:
# to device
imgs = imgs.to(self.device)
# forward
feat = self.networks[idx_in_all]["feat_model"](imgs)
logits = self.networks[idx_in_all]["classifier"](feat)
# do feature space augmentation with a likelihood
if self.feat_aug:
# prob = torch.tensor([1.0 for i in self.local_num_per_cls[idx]])
rand_list = torch.rand(len(labels))
mask = (
rand_list
< prob[
torch.tensor([unique_labels.index(i) for i in labels])
]
)
degree = 1
aug_num = sum(mask).item()
if aug_num > 0:
if pointer + aug_num >= divider:
pointer = 0
feat_aug = feat.clone()
feat_aug[mask] = (
feat[mask] + augs[pointer : pointer + aug_num] * degree
)
logits_aug = self.networks[idx_in_all]["classifier"](
feat_aug
)
pointer = pointer + aug_num
# teacher
with torch.no_grad():
feat_teacher = teacher["feat_model"](imgs)
pred_teacher = teacher["classifier"](feat_teacher)
# loss
labels = labels.to(self.device)
if self.config["criterions"]["def_file"].find("LwF") > 0:
if self.feat_aug:
if len(labels) != len(logits_aug):
continue
loss, loss_cls, loss_kd = self.criterion(
labels, pred_teacher, logits, logits_aug
)
else:
loss, loss_cls, loss_kd = self.criterion(
labels, pred_teacher, logits
)
elif self.config["criterions"]["def_file"].find("KDLoss") > 0:
loss, loss_cls, loss_kd = self.criterion(
logits,
labels,
feat,
feat_teacher,
classfier_weight=self.networks[idx_in_all][
"classifier"
].fc.weight,
)
# fedprox loss: https://epione.gitlabpages.inria.fr/flhd/federated_learning/FedAvg_FedProx_MNIST_iid_and_noniid.html#federated-training-with-fedprox
if self.fedprox:
prox_loss = difference_models_norm_2(
self.networks[idx_in_all], teacher
)
# print("FedProx Loss: ", prox_loss, loss)
loss += self.mu / 2 * prox_loss
# backward
for optimizer in self.optimizers[idx_in_all].values():
optimizer.zero_grad()
loss.backward()
for optimizer in self.optimizers[idx_in_all].values():
optimizer.step()
# classifier L2-norm
if self.networks[idx_in_all]["classifier"].l2_norm:
self.networks[idx_in_all]["classifier"].weight_norm()
losses_cls += loss_cls.item()
losses_kd += loss_kd.item()
self.losses_cls[idx_in_all] = (
losses_cls / len(self.train_loaders[idx]) / self.local_ep
)
self.losses_kd[idx_in_all] = (
losses_kd / len(self.train_loaders[idx]) / self.local_ep
)
##########################
#### stage 2 training ####
##########################
if self.crt:
self.networks[idx_in_all]["feat_model"].eval()
if self.feat_aug:
# obtain features and labels
feat_list = []
label_list = []
for (imgs, labels, indexs) in self.train_loaders[idx]:
imgs = imgs.to(self.device)
with torch.no_grad():
feat_list.append(
self.networks[idx_in_all]["feat_model"](imgs).cpu()
)
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor(
[unique_labels.index(i) for i in label_list]
) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feat, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feat)
# determine the extra sample number for every existing samples
num_per_cls = np.array(
[len(np.where(label_list == t)[0]) for t in unique_labels]
) # e.g., size (6, )
max_num = max(num_per_cls)
gen_nums = [
np.array(
[max_num // num_per_cls[i] - 1 for _ in feats_per_cls[i]]
)
for i in range(len(unique_labels))
]
for cls_i, nums in enumerate(gen_nums):
nums[: max_num % num_per_cls[cls_i]] = (
nums[: max_num % num_per_cls[cls_i]] + 1
)
# generate samples
sampled_data, sample_label = [], []
per_cls_cov = np.array(
[
np.cov(torch.stack(feats, 1).numpy())
for feats in feats_per_cls
]
)
cov = np.average(per_cls_cov, axis=0, weights=num_per_cls)
# print([np.mean(i) for i in per_cls_cov])
for cls_i, nums in enumerate(gen_nums):
for sample_i, num in enumerate(nums):
if num > 0:
sampled_data.append(
torch.from_numpy(
np.random.multivariate_normal(
mean=feats_per_cls[cls_i][sample_i],
cov=cov, # covariance for feature dimension, shape: e.g., (128, 128)
size=num,
)
).float()
)
sample_label.append(torch.full((num,), cls_i).long())
# add generated features to training data
feat_list = torch.cat([feat_list, *sampled_data], 0)
label_list = torch.cat([transformed_label_list, *sample_label], 0)
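# Net effect of the resampling above: every class ends up with roughly max_num
# features, the synthetic ones drawn from N(existing_feature, Sigma), so the
# classifier retraining on feats_loader below sees a near class-balanced set.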
# build new dataloader
feats_dataset = local_client_dataset(
feat_list, label_list, self.config
)
feats_loader = torch.utils.data.DataLoader(
feats_dataset,
batch_size=self.local_bs,
shuffle=True,
num_workers=0,
pin_memory=False,
)
# train classifier
for epoch in range(5):
for (feats, labels, indexs) in feats_loader:
feats = feats.to(self.device)
labels = labels.to(self.device)
logits = self.networks[idx_in_all]["classifier"](feats)
loss = torch.nn.CrossEntropyLoss()(
logits[:, unique_labels], labels
)
self.optimizers_stage2[idx_in_all].zero_grad()
loss.backward()
self.optimizers_stage2[idx_in_all].step()
# print(loss)
# re-sampling without feature augmentation
else:
for epoch in range(5):
for (imgs, labels, indexs) in self.train_loader_balanced[idx]:
# to device
imgs = imgs.to(self.device)
# forward
with torch.no_grad():
feat = self.networks[idx_in_all]["feat_model"](imgs)
logits = self.networks[idx_in_all]["classifier"](feat)
pos_cls = torch.unique(labels).tolist()
transformed_labels = torch.tensor(
[pos_cls.index(i) for i in labels]
).to(self.device)
loss = torch.nn.CrossEntropyLoss()(
logits[:, pos_cls], transformed_labels
)
self.optimizers_stage2[idx_in_all].zero_grad()
loss.backward()
self.optimizers_stage2[idx_in_all].step()
# print(loss)
print("=> ", end="")
def fedavg(w):
w_avg = copy.deepcopy(w[0])
for k in w_avg.keys():
for i in range(1, len(w)):
w_avg[k] += w[i][k]
w_avg[k] = torch.div(w_avg[k] * 1.0, len(w))
return w_avg
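# Minimal illustrative usage (the names below are placeholders, not part of this module):
#   w_locals = [model.state_dict() for model in client_models]
#   w_glob = fedavg(w_locals)
#   server_model.load_state_dict(w_glob)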
# See: https://arxiv.org/abs/1909.06335
def fedavgm(new_ws, old_w, vel, args):
"""
fedavg + momentum
- new_ws (list of OrderedDict): The new calculated global model
- old_w (OrderedDict) : Initial state of the global model (which needs to be updated here)
"""
global_lr = 1
beta1 = 0
new_w = fedavg(new_ws)
# For the first round: initialize old_w, create an OrderedDict to store velocity
if old_w is None:
old_w = new_w
new_v = OrderedDict()
for key in old_w.keys():
new_v[key] = torch.zeros(old_w[key].shape, dtype=old_w[key].dtype).to(
args.device
)
else:
new_v = copy.deepcopy(vel)
for key in new_w.keys():
delta_w_tmp = old_w[key] - new_w[key]
new_v[key] = beta1 * new_v[key] + torch.mul(delta_w_tmp, global_lr)
old_w[key] -= new_v[key]
return old_w, new_v
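# Per-key update performed above (server momentum, cf. arXiv:1909.06335):
#   delta = old_w - fedavg(new_ws)
#   v     = beta1 * v + global_lr * delta
#   old_w = old_w - v
# With beta1 = 0 and global_lr = 1 as hard-coded here, this reduces to plain FedAvg.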
def fedavgw(new_ws, old_w, args, round_i):
"""
fedavg + adaptive updating parameter
- new_ws (list of OrderedDict): The new calculated global model
- old_w (OrderedDict) : Initial state of the global model (which needs to be updated here)
"""
new_w = fedavg(new_ws)
# For the first round: initialize old_w
if old_w is None:
old_w = new_w
for key in new_w.keys():
old_w[key] = new_w[key] * (1 / (round_i + 1)) + old_w[key] * (
round_i / (round_i + 1)
)
# for key in new_w.keys():
# if key == "classifier.fc.weight":
# old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1))
# else:
# old_w[key] = new_w[key]
return old_w
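# Running-average view of the update above (assuming round_i counts rounds from 0):
#   old_w = new_w * 1/(round_i + 1) + old_w * round_i/(round_i + 1)
# i.e. the global model is the uniform average of the per-round FedAvg results.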
|
import copy
import time
from collections import OrderedDict
import torch
from data.dataloader import local_client_dataset, test_dataset
from models.utils import *
from utils.train_helper import validate_one_model
from utils.sampling import *
import numpy as np
from multiprocessing import Process
import time
def return_state_dict(network):
"""
save model to state_dict
"""
feat_model = {k: v.cpu() for k, v in network["feat_model"].state_dict().items()}
classifier = {k: v.cpu() for k, v in network["classifier"].state_dict().items()}
return {"feat_model": feat_model, "classifier": classifier}
def load_state_dict(network, state_dict):
"""
restore model from state_dict
"""
network["feat_model"].load_state_dict(state_dict["feat_model"])
network["classifier"].load_state_dict(state_dict["classifier"])
# for name, param in state_dict["feat_model"].items():
# print(name, "\t", param.size())
return network
def check_status(status_list, selected_idx, target_status):
"""
0. original status (1st FL round)
1. server finished sending: server_network --> mp_list
2. client received, and returned the model: mp_list --> networks[i] --> local_update --> mp_list
3. server received: mp_list --> networks[i]
--> 1. aggregation finished. networks[i] --> aggregate --> server_network --> mp_list, the status changes to 1
---
Return True: when all clients meet conditions, else False
"""
tmp = np.array(status_list)
return bool((tmp[selected_idx] == target_status).all())
def set_status(status_list, selected_idx, target_status):
"""
see function: check_status
"""
if type(selected_idx) is int:
selected_idx = [selected_idx]
for i in selected_idx:
status_list[i] = target_status
# print(f"set_status {target_status}")
def difference_models_norm_2(model_1, model_2):
"""
Return the norm 2 difference between the two model parameters. Used in FedProx.
"""
tensor_1_backbone = list(model_1["feat_model"].parameters())
tensor_1_classifier = list(model_1["classifier"].parameters())
tensor_2_backbone = list(model_2["feat_model"].parameters())
tensor_2_classifier = list(model_2["classifier"].parameters())
diff_list = [
torch.sum((tensor_1_backbone[i] - tensor_2_backbone[i]) ** 2)
for i in range(len(tensor_1_backbone))
]
diff_list.extend(
[
torch.sum((tensor_1_classifier[i] - tensor_2_classifier[i]) ** 2)
for i in range(len(tensor_1_classifier))
]
)
norm = sum(diff_list)
return norm
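# Sketch of the FedProx objective: the squared L2 distance above is scaled by
# mu / 2 and added to the ordinary task loss (this mirrors its use with
# self.mu = 0.05 inside Fed_client.train_lt below).
def _fedprox_objective(task_loss, local_net, global_net, mu=0.05):
    return task_loss + (mu / 2) * difference_models_norm_2(local_net, global_net)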
class Fed_server(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self,
init_network,
criterion,
config,
per_client_data,
per_client_label,
idx_per_client_train,
test_data,
test_label,
state_list=None,
state_dict_list=None,
idx=None,
):
super(Fed_server, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = (
[],
[],
[],
[],
)
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights / self.client_weights.sum()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.server_network = copy.deepcopy(init_network)
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Server (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
""" Server does not need
# list of optimizer_dict. One optimizer for one network
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {'params': self.networks[client_i]["classifier"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = \
local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, shuffle=True,
num_workers=num_workers, pin_memory=False)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers, pin_memory=False)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
"""
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(
train_data_all, train_label_all, config
)
self.test_dataset = test_dataset(test_data, test_label, config)
def local_train(self, selected_idx):
"""
server-side code
"""
# self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(
self.server_network
) # model transfer
set_status(self.status_list, selected_idx, 1)
# wait until all clients returning the model
while check_status(self.status_list, selected_idx, 2) is False:
time.sleep(0.1)
        # mp_list --> self.networks (copies of client models on the server). Prepare for aggregation.
for i in selected_idx:
load_state_dict(self.networks[i], self.state_dict_list[i]) # model transfer
print("===> Local training finished")
def aggregation(self, selected_idx, mode):
"""
server-side code: aggregation
"""
if mode in ["fedavg", "fedavgm", "fedbn", "fedprox"]:
self.aggregate_layers(selected_idx, mode, backbone_only=False)
elif mode == "fedavg_fs":
opt = self.config["fl_opt"]
backbone_only, imprint, spread_out = (
opt["backbone_only"],
opt["imprint"],
opt["spread_out"],
)
self.aggregate_layers(selected_idx, "fedavg", backbone_only=backbone_only)
if imprint:
self.imprint(selected_idx)
if spread_out:
self.spread_out()
# model: self.server_network --> mp_list
for i in selected_idx:
self.state_dict_list[i] = return_state_dict(
self.server_network
) # model transfer
set_status(self.status_list, selected_idx, 0) # back to original
print("===> Aggregation finished")
def aggregate_layers(self, selected_idx, mode, backbone_only):
"""
backbone_only: choose to only aggregate backbone
"""
weights_sum = self.client_weights[selected_idx].sum()
with torch.no_grad():
if mode in ["fedavg", "fedprox"]:
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if "num_batches_tracked" in key:
# num_batches_tracked is a non trainable LongTensor
# and num_batches_tracked are the same for
# all clients for the given datasets
layer.data.copy_(
self.networks[0][net_name].state_dict()[key]
)
else:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx] / weights_sum
temp += (
weight
* self.networks[idx][net_name].state_dict()[key]
)
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedbn": # https://openreview.net/pdf?id=6YEQUn0QICG
for net_name, net in self.server_network.items():
if net_name == "classifier" and backbone_only:
pass
else:
for key, layer in net.state_dict().items():
if "bn" not in key:
temp = torch.zeros_like(layer)
# Fedavg
for idx in selected_idx:
weight = self.client_weights[idx] / weights_sum
temp += (
weight
* self.networks[idx][net_name].state_dict()[key]
)
layer.data.copy_(temp)
# update client models
# for idx in selected_idx:
# self.networks[idx][net_name].state_dict()[key].data.copy_(layer)
elif mode == "fedavgm":
raise NotImplementedError
def evaluate_global(self, train_dataset=None, test_dataset=None):
"""
Accuracy of the global model and all classes
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.server_network, train_dataset, self.device, per_cls_acc=True
)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.server_network, test_dataset, self.device, per_cls_acc=True
)
print("===> Evaluation finished\n")
return (
train_loss_per_cls,
train_acc_per_cls,
test_loss_per_cls,
test_acc_per_cls,
)
def evaluate_global_all(self, train_dataset=None, test_dataset=None):
"""
Accuracy of models of all nodes and all classes
Return: all_results
shape: (4, num_client, num_cls), 4 for (train_loss, train_acc, test_loss, test_acc)
"""
# evaluate on training set
if train_dataset is None:
train_dataset = self.train_dataset
if test_dataset is None:
test_dataset = self.test_dataset
all_results = [None for i in range(self.num_clients)]
for idx in range(self.num_clients):
# evaluate on test set: per-class loss/acc
train_loss_per_cls, train_acc_per_cls = validate_one_model(
self.networks[idx], train_dataset, self.device, per_cls_acc=True
)
# evaluate on test set: per-class loss/acc
test_loss_per_cls, test_acc_per_cls = validate_one_model(
self.networks[idx], test_dataset, self.device, per_cls_acc=True
)
all_results[idx] = (
train_loss_per_cls,
train_acc_per_cls,
test_loss_per_cls,
test_acc_per_cls,
)
print(f"===> Evaluation finished{idx}\n")
all_results = np.array(all_results).transpose(1, 0, 2)
return all_results
class Fed_client(Process):
"""
Class for client updating and model aggregation
"""
def __init__(
self,
init_network,
criterion,
config,
per_client_data,
per_client_label,
idx_per_client_train,
test_data,
test_label,
state_list=None,
state_dict_list=None,
idx=None,
):
super(Fed_client, self).__init__()
self.local_bs = config["fl_opt"]["local_bs"]
self.local_ep = config["fl_opt"]["local_ep"]
self.num_clients = config["fl_opt"]["num_clients"]
self.criterion = criterion
self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = (
[],
[],
[],
[],
)
self.train_loaders = [] # include dataloader or pre-loaded dataset
self.train_loader_balanced = [] # balanced-sampling dataloader
self.local_num_per_cls = [] # list to store local data number per class
self.test_loaders = []
self.status_list = state_list
self.state_dict_list = state_dict_list
self.client_idx = idx # physical idx of clients (hardcoded)
self.config = config
self.device = config["device_client"][idx]
self.server_network = copy.deepcopy(init_network)
self.balanced_loader = config["fl_opt"]["balanced_loader"]
self.prefetch = False
self.feat_aug = config["fl_opt"]["feat_aug"]
self.crt = config["fl_opt"]["crt"]
if config["fl_opt"]["aggregation"] == "fedprox":
self.fedprox = True
else:
self.fedprox = False
self.mu = 0.05
self.client_weights = np.array([i for i in idx_per_client_train])
self.client_weights = self.client_weights / self.client_weights.sum()
# per-client accuracy and loss
self.acc = [0 for i in range(self.num_clients)]
self.losses_cls = [-1 for i in range(self.num_clients)]
self.losses_kd = [-1 for i in range(self.num_clients)]
print(f'=====> {config["metainfo"]["optimizer"]}, Client {idx} (fed.py)\n ')
######## init backbone, classifier, optimizer and dataloader ########
for client_i in range(self.num_clients):
# list of network and optimizer_dict. One optimizer for one network.
if client_i != self.client_idx:
self.networks.append(None)
self.optimizers.append(None)
self.optimizers_stage2.append(None)
else:
backbone = copy.deepcopy(self.server_network["feat_model"])
classifier = copy.deepcopy(self.server_network["classifier"])
self.networks.append({"feat_model": backbone, "classifier": classifier})
self.optimizers.append(init_optimizers(self.networks[client_i], config))
optim_params_dict = {
"params": self.networks[client_i]["classifier"].parameters(),
"lr": 0.001,
"momentum": 0.9,
"weight_decay": 0,
}
self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))
# dataloader
num_workers = 0
local_dataset = local_client_dataset(
per_client_data[client_i], per_client_label[client_i], config
)
self.train_loaders.append(
torch.utils.data.DataLoader(
local_dataset,
batch_size=self.local_bs,
shuffle=True,
num_workers=num_workers,
pin_memory=False,
)
)
self.train_loader_balanced.append(
torch.utils.data.DataLoader(
local_dataset,
batch_size=self.local_bs,
sampler=local_dataset.get_balanced_sampler(),
num_workers=num_workers,
pin_memory=False,
)
)
self.local_num_per_cls.append(local_dataset.class_sample_count)
""" clients do not need
# centralized train dataset
train_data_all, train_label_all = [], []
for client_i in range(len(per_client_label)):
train_data_all = train_data_all + per_client_data[client_i]
train_label_all = train_label_all + per_client_label[client_i]
self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)
self.test_dataset = test_dataset(test_data, test_label, config)
"""
def run(self):
"""
client-side code
"""
self.server_network["feat_model"].to(self.device)
self.server_network["classifier"].to(self.device)
self.networks[self.client_idx]["feat_model"].to(self.device)
self.networks[self.client_idx]["classifier"].to(self.device)
while 1:
while check_status(self.status_list, self.client_idx, 1) is False:
time.sleep(0.1)
# model: mp_list --> server_network
load_state_dict(
self.server_network, self.state_dict_list[self.client_idx]
) # model transfer
self.train_lt(self.client_idx) # local model updating
# self.networks[i] --> mp_list
self.state_dict_list[self.client_idx] = return_state_dict(
self.networks[self.client_idx]
) # model transfer
set_status(self.status_list, self.client_idx, 2)
def train_lt(self, idx):
"""
client-side code
---
        Args:
        - idx: the index in all clients (e.g., 50) or selected clients (e.g., 10).
            If self.prefetch is true: the index in selected clients;
            if self.prefetch is false: the index in all clients.
"""
idx_in_all = idx
# server broadcast the model to clients
"""
# optimizer will not work if use this, because optimizer needs the params from the model
# self.networks[idx_in_all] = copy.deepcopy(self.server_network)
"""
for net_name, net in self.server_network.items(): # feat_model, classifier
state_dict = self.networks[idx_in_all][net_name].state_dict()
for key, layer in net.state_dict().items():
state_dict[key].data.copy_(layer.data)
for net in self.networks[idx_in_all].values():
net.train()
for net in self.server_network.values():
net.train()
teacher = self.server_network
# torch.cuda.empty_cache()
"""
(Per-cls) Covariance Calculation
"""
if self.feat_aug:
# probability for augmentation for every class
max_num = max(self.local_num_per_cls[idx])
prob = torch.tensor(
[1.0 - i / max_num for i in self.local_num_per_cls[idx]]
)
# obtain features and labels under eval mode
feat_list, label_list = [], []
# self.networks[idx_in_all]['feat_model'].eval()
for (imgs, labels, indexs) in self.train_loaders[idx]:
with torch.no_grad():
imgs = imgs.to(self.device)
feat_list.append(teacher["feat_model"](imgs).cpu())
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
# self.networks[idx_in_all]['feat_model'].train()
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor(
[unique_labels.index(i) for i in label_list]
) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feats, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feats)
# calculate the variance
sampled_data, sample_label = [], []
per_cls_cov = []
for feats in feats_per_cls:
if len(feats) > 1:
per_cls_cov.append(np.cov(torch.stack(feats, 1).numpy()))
else:
per_cls_cov.append(np.zeros((feats[0].shape[0], feats[0].shape[0])))
per_cls_cov = np.array(per_cls_cov)
# per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls])
cov = np.average(
per_cls_cov, axis=0, weights=self.local_num_per_cls[idx]
) # covariance for feature dimension, shape: e.g., (128, 128)
# pre-generate deviation
divider = 500
pointer = 0
augs = (
torch.from_numpy(
np.random.multivariate_normal(
mean=np.zeros(cov.shape[0]),
cov=cov, # covariance for feature dimension, shape: e.g., (128, 128)
size=divider,
)
)
.float()
.to(self.device)
)
with torch.set_grad_enabled(True):
losses_cls = 0
losses_kd = 0
##########################
#### stage 1 training ####
##########################
for epoch in range(self.local_ep):
"""
model update
"""
                if self.local_ep > 10:  # local training mode
print(epoch, end=" ")
if self.balanced_loader:
tmp_loader = self.train_loader_balanced[idx]
else:
tmp_loader = self.train_loaders[idx]
for (imgs, labels, indexs) in tmp_loader:
# to device
imgs = imgs.to(self.device)
# forward
feat = self.networks[idx_in_all]["feat_model"](imgs)
logits = self.networks[idx_in_all]["classifier"](feat)
# do feature space augmentation with a likelihood
if self.feat_aug:
# prob = torch.tensor([1.0 for i in self.local_num_per_cls[idx]])
rand_list = torch.rand(len(labels))
mask = (
rand_list
< prob[
torch.tensor([unique_labels.index(i) for i in labels])
]
)
degree = 1
aug_num = sum(mask).item()
if aug_num > 0:
if pointer + aug_num >= divider:
pointer = 0
feat_aug = feat.clone()
feat_aug[mask] = (
feat[mask] + augs[pointer : pointer + aug_num] * degree
)
logits_aug = self.networks[idx_in_all]["classifier"](
feat_aug
)
pointer = pointer + aug_num
# teacher
with torch.no_grad():
feat_teacher = teacher["feat_model"](imgs)
pred_teacher = teacher["classifier"](feat_teacher)
# loss
labels = labels.to(self.device)
if self.config["criterions"]["def_file"].find("LwF") > 0:
if self.feat_aug:
if len(labels) != len(logits_aug):
continue
loss, loss_cls, loss_kd = self.criterion(
labels, pred_teacher, logits, logits_aug
)
else:
loss, loss_cls, loss_kd = self.criterion(
labels, pred_teacher, logits
)
elif self.config["criterions"]["def_file"].find("KDLoss") > 0:
loss, loss_cls, loss_kd = self.criterion(
logits,
labels,
feat,
feat_teacher,
classfier_weight=self.networks[idx_in_all][
"classifier"
].fc.weight,
)
# fedprox loss: https://epione.gitlabpages.inria.fr/flhd/federated_learning/FedAvg_FedProx_MNIST_iid_and_noniid.html#federated-training-with-fedprox
if self.fedprox:
prox_loss = difference_models_norm_2(
self.networks[idx_in_all], teacher
)
# print("FedProx Loss: ", prox_loss, loss)
loss += self.mu / 2 * prox_loss
# backward
for optimizer in self.optimizers[idx_in_all].values():
optimizer.zero_grad()
loss.backward()
for optimizer in self.optimizers[idx_in_all].values():
optimizer.step()
# classifier L2-norm
if self.networks[idx_in_all]["classifier"].l2_norm:
self.networks[idx_in_all]["classifier"].weight_norm()
losses_cls += loss_cls.item()
losses_kd += loss_kd.item()
self.losses_cls[idx_in_all] = (
losses_cls / len(self.train_loaders[idx]) / self.local_ep
)
self.losses_kd[idx_in_all] = (
losses_kd / len(self.train_loaders[idx]) / self.local_ep
)
##########################
#### stage 2 training ####
##########################
if self.crt:
self.networks[idx_in_all]["feat_model"].eval()
if self.feat_aug:
# obtain features and labels
feat_list = []
label_list = []
for (imgs, labels, indexs) in self.train_loaders[idx]:
imgs = imgs.to(self.device)
with torch.no_grad():
feat_list.append(
self.networks[idx_in_all]["feat_model"](imgs).cpu()
)
label_list.append(labels)
feat_list = torch.cat(feat_list, 0)
label_list = torch.cat(label_list, 0)
unique_labels = list(np.unique(label_list)) # e.g., size (6, )
transformed_label_list = torch.tensor(
[unique_labels.index(i) for i in label_list]
) # e.g., size (n, )
# per-cls features
feats_per_cls = [[] for i in range(len(unique_labels))]
for feat, label in zip(feat_list, transformed_label_list):
feats_per_cls[label].append(feat)
# determine the extra sample number for every existing samples
num_per_cls = np.array(
[len(np.where(label_list == t)[0]) for t in unique_labels]
) # e.g., size (6, )
max_num = max(num_per_cls)
gen_nums = [
np.array(
[max_num // num_per_cls[i] - 1 for _ in feats_per_cls[i]]
)
for i in range(len(unique_labels))
]
for cls_i, nums in enumerate(gen_nums):
nums[: max_num % num_per_cls[cls_i]] = (
nums[: max_num % num_per_cls[cls_i]] + 1
)
# generate samples
sampled_data, sample_label = [], []
per_cls_cov = np.array(
[
np.cov(torch.stack(feats, 1).numpy())
for feats in feats_per_cls
]
)
cov = np.average(per_cls_cov, axis=0, weights=num_per_cls)
# print([np.mean(i) for i in per_cls_cov])
for cls_i, nums in enumerate(gen_nums):
for sample_i, num in enumerate(nums):
if num > 0:
sampled_data.append(
torch.from_numpy(
np.random.multivariate_normal(
mean=feats_per_cls[cls_i][sample_i],
cov=cov, # covariance for feature dimension, shape: e.g., (128, 128)
size=num,
)
).float()
)
sample_label.append(torch.full((num,), cls_i).long())
                # add generated features to the training data
feat_list = torch.cat([feat_list, *sampled_data], 0)
label_list = torch.cat([transformed_label_list, *sample_label], 0)
# build new dataloader
feats_dataset = local_client_dataset(
feat_list, label_list, self.config
)
feats_loader = torch.utils.data.DataLoader(
feats_dataset,
batch_size=self.local_bs,
shuffle=True,
num_workers=0,
pin_memory=False,
)
# train classifier
for epoch in range(5):
for (feats, labels, indexs) in feats_loader:
feats = feats.to(self.device)
labels = labels.to(self.device)
logits = self.networks[idx_in_all]["classifier"](feats)
loss = torch.nn.CrossEntropyLoss()(
logits[:, unique_labels], labels
)
self.optimizers_stage2[idx_in_all].zero_grad()
loss.backward()
self.optimizers_stage2[idx_in_all].step()
# print(loss)
# re-sampling without feature augmentation
else:
for epoch in range(5):
for (imgs, labels, indexs) in self.train_loader_balanced[idx]:
# to device
imgs = imgs.to(self.device)
# forward
with torch.no_grad():
feat = self.networks[idx_in_all]["feat_model"](imgs)
logits = self.networks[idx_in_all]["classifier"](feat)
pos_cls = torch.unique(labels).tolist()
transformed_labels = torch.tensor(
[pos_cls.index(i) for i in labels]
).to(self.device)
loss = torch.nn.CrossEntropyLoss()(
logits[:, pos_cls], transformed_labels
)
self.optimizers_stage2[idx_in_all].zero_grad()
loss.backward()
self.optimizers_stage2[idx_in_all].step()
# print(loss)
print("=> ", end="")
def fedavg(w):
w_avg = copy.deepcopy(w[0])
for k in w_avg.keys():
for i in range(1, len(w)):
w_avg[k] += w[i][k]
w_avg[k] = torch.div(w_avg[k] * 1.0, len(w))
return w_avg
# See: https://arxiv.org/abs/1909.06335
def fedavgm(new_ws, old_w, vel, args):
"""
fedavg + momentum
- new_ws (list of OrderedDict): The new calculated global model
- old_w (OrderedDict) : Initial state of the global model (which needs to be updated here)
"""
global_lr = 1
beta1 = 0
new_w = fedavg(new_ws)
    # For the first round: initialize old_w and create an OrderedDict to store the velocity
if old_w is None:
old_w = new_w
new_v = OrderedDict()
for key in old_w.keys():
new_v[key] = torch.zeros(old_w[key].shape, dtype=old_w[key].dtype).to(
args.device
)
else:
new_v = copy.deepcopy(vel)
for key in new_w.keys():
delta_w_tmp = old_w[key] - new_w[key]
new_v[key] = beta1 * new_v[key] + torch.mul(delta_w_tmp, global_lr)
old_w[key] -= new_v[key]
return old_w, new_v
def fedavgw(new_ws, old_w, args, round_i):
"""
fedavg + adaptive updating parameter
- new_ws (list of OrderedDict): The new calculated global model
- old_w (OrderedDict) : Initial state of the global model (which needs to be updated here)
"""
new_w = fedavg(new_ws)
# For the first round: initialize old_w
if old_w is None:
old_w = new_w
for key in new_w.keys():
old_w[key] = new_w[key] * (1 / (round_i + 1)) + old_w[key] * (
round_i / (round_i + 1)
)
# for key in new_w.keys():
# if key == "classifier.fc.weight":
# old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1))
# else:
# old_w[key] = new_w[key]
return old_w
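# Usage sketch for the aggregation helpers above: a FedAvg-M training loop.
# `collect_client_weights(round_i)` is a hypothetical stand-in for gathering
# the per-client state_dicts of one communication round; `args` only needs a
# `.device` attribute for the first-round velocity initialization.
def _demo_fedavgm_loop(collect_client_weights, args, num_rounds=10):
    global_w, vel = None, None
    for round_i in range(num_rounds):
        client_ws = collect_client_weights(round_i)  # list of OrderedDicts
        global_w, vel = fedavgm(client_ws, global_w, vel, args)
    return global_w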
|
class BatmanQuotes(object):
def get_quote(quotes, hero):
return f"{("Batman", "Joker", "Robin")["BJR".index(hero[0])]}: {quotes[int(min(hero))]}"
|
class BatmanQuotes(object):
def get_quote(quotes, hero):
return f"{('Batman', 'Joker', 'Robin')['BJR'.index(hero[0])]}: {quotes[int(min(hero))]}"
|
# Concord
#
# Copyright (c) 2020-2021 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
from visitor import Visitor
def struct_start(name, id):
return f"""
struct {name} {{
static constexpr uint32_t id = {id};
"""
serialize_byte_buffer_fn = "void serialize(std::vector<uint8_t>& output, const {name}& t)"
def serialize_byte_buffer_declaration(name):
return serialize_byte_buffer_fn.format(name=name) + ";\n"
def serialize_byte_buffer_start(name):
return serialize_byte_buffer_fn.format(name=name) + " {\n"
serialize_string_fn = "void serialize(std::string& output, const {name}& t)"
def serialize_string_declaration(name):
return serialize_string_fn.format(name=name) + ";\n"
def serialize_string_start(name):
return serialize_string_fn.format(name=name) + " {\n"
deserialize_fn = "void deserialize(const uint8_t*& input, const uint8_t* end, {name}& t)"
def deserialize_declaration(name):
return deserialize_fn.format(name=name) + ";\n"
def deserialize_start(name):
return deserialize_fn.format(name=name) + " {\n"
deserialize_byte_buffer_fn = "void deserialize(const std::vector<uint8_t>& input, {name}& t)"
def deserialize_byte_buffer_declaration(name):
return deserialize_byte_buffer_fn.format(name=name) + ";\n"
def deserialize_byte_buffer(name):
return deserialize_byte_buffer_fn.format(name=name) + f""" {{
auto begin = input.data();
deserialize(begin, begin + input.size(), t);
}}
"""
deserialize_string_fn = "void deserialize(const std::string& input, {name}& t)"
def deserialize_string_declaration(name):
return deserialize_string_fn.format(name=name) + ";\n"
def deserialize_string(name):
return deserialize_string_fn.format(name=name) + f""" {{
auto begin = reinterpret_cast<const unsigned char*>(input.data());
deserialize(begin, begin + input.size(), t);
}}
"""
def serialize_field(name, type):
# All messages except oneofs and messages exist in the cmf namespace, and are provided in
# serialize.h
if type in ["oneof", "msg"]:
return f" serialize(output, t.{name});\n"
return f" cmf::serialize(output, t.{name});\n"
def deserialize_field(name, type):
# All messages except oneofs and messages exist in the cmf namespace, and are provided in
# serialize.h
if type in ["oneof", "msg"]:
return f" deserialize(input, end, t.{name});\n"
return f" cmf::deserialize(input, end, t.{name});\n"
variant_serialize_byte_buffer_fn = "void serialize(std::vector<uint8_t>& output, const {variant}& val)"
def variant_serialize_byte_buffer_declaration(variant):
return variant_serialize_byte_buffer_fn.format(variant=variant) + ";\n"
def variant_serialize_byte_buffer(variant):
return variant_serialize_byte_buffer_fn.format(variant=variant) + """ {
std::visit([&output](auto&& arg){
cmf::serialize(output, arg.id);
serialize(output, arg);
}, val);
}"""
variant_serialize_string_fn = "void serialize(std::string& output, const {variant}& val)"
def variant_serialize_string_declaration(variant):
return variant_serialize_string_fn.format(variant=variant) + ";\n"
def variant_serialize_string(variant):
return variant_serialize_string_fn.format(variant=variant) + """ {
std::visit([&output](auto&& arg){
cmf::serialize(output, arg.id);
serialize(output, arg);
}, val);
}"""
variant_deserialize_fn = "void deserialize(const uint8_t*& start, const uint8_t* end, {variant}& val)"
def variant_deserialize_declaration(variant):
return variant_deserialize_fn.format(variant=variant) + ";\n"
def variant_deserialize(variant, msgs):
s = variant_deserialize_fn.format(variant=variant) + """ {
uint32_t id;
cmf::deserialize(start, end, id);
"""
for (name, id) in msgs.items():
s += f"""
if (id == {id}) {{
{name} value;
deserialize(start, end, value);
val = value;
return;
}}
"""
s += """
throw cmf::DeserializeError(std::string("Invalid Message id in variant: ") + std::to_string(id));
}
"""
return s
equalop_str_fn = "bool operator==(const {msg_name}& l, const {msg_name}& r)"
def equalop_str_declaration(msg_name):
return equalop_str_fn.format(msg_name=msg_name) + ";\n"
def equalop_str(msg_name, fields):
""" Create an 'operator==' function for the current message struct """
comparison = "true"
if fields:
comparison = " && ".join([f"l.{f} == r.{f}" for f in fields])
return equalop_str_fn.format(msg_name=msg_name) + f""" {{
return {comparison};
}}"""
class CppVisitor(Visitor):
""" A visitor that generates C++ code. """
def __init__(self):
# All output currently constructed
self.output = ""
# All output declarations currently constructed
self.output_declaration = ""
# The current message being processed
self.msg_name = ''
# The current field being processed for a message
self.field = {'type': '', 'name': ''}
# All fields currently seen for the given message
self.fields_seen = []
# The struct being created for the current message. This includes the fields of the struct.
self.struct = ""
# The 'serialize' function for the current message
self.serialize_byte_buffer = ""
self.serialize_string = ""
# The 'deserialize' function for the current message
self.deserialize = ""
# Each oneof in a message corresponds to a variant. Since we don't need duplicate
# serialization functions, in case there are multiple messages or fields with the same
# variants, we only generate a single serialization and deserialization function for each
# variant.
#
# This set keeps track of which variants already had their serialization and
# deserialization functions generated.
self.oneofs_seen = set()
# The `serialize` member functions for all oneofs in the current message
self.oneof_serialize_byte_buffer = ""
self.oneof_serialize_byte_buffer_declaration = ""
self.oneof_serialize_string = ""
self.oneof_serialize_string_declaration = ""
# The `deserialize` member functions for all oneofs in the current message
self.oneof_deserialize = ""
self.oneof_deserialize_declaration = ""
def _reset(self):
# output and oneofs_seen accumulate across messages
output = self.output
output_declaration = self.output_declaration
oneofs = self.oneofs_seen
self.__init__()
self.output = output
self.output_declaration = output_declaration
self.oneofs_seen = oneofs
def create_enum(self, name, tags):
enumstr = 'enum class {name} : uint8_t {{ {tagstr} }};\n'
enumsize_decl = 'uint8_t enumSize({name} _);\n'
enumsize_def = 'uint8_t enumSize({name} _) {{ (void)_; return {num_tags}; }}\n'
self.output_declaration += enumstr.format(name=name, tagstr=", ".join(tags))
self.output_declaration += enumsize_decl.format(name=name)
self.output += enumsize_def.format(name=name, num_tags = len(tags))
def msg_start(self, name, id):
self.msg_name = name
self.struct = struct_start(name, id)
self.serialize_byte_buffer = serialize_byte_buffer_start(name)
self.serialize_string = serialize_string_start(name)
self.deserialize = deserialize_start(name)
def msg_end(self):
self.struct += "};\n"
self.serialize_byte_buffer += "}"
self.serialize_string += "}"
self.deserialize += "}\n"
self.deserialize += deserialize_byte_buffer(self.msg_name)
self.deserialize += "\n"
self.deserialize += deserialize_string(self.msg_name)
self.output += "\n".join([
s for s in [
self.oneof_serialize_byte_buffer,
self.oneof_serialize_string,
self.oneof_deserialize,
equalop_str(self.msg_name, self.fields_seen),
self.serialize_byte_buffer,
self.serialize_string,
self.deserialize,
] if s != ''
]) + "\n"
self.output_declaration += "".join([
s for s in [
self.struct,
"\n",
serialize_byte_buffer_declaration(self.msg_name),
serialize_string_declaration(self.msg_name),
deserialize_declaration(self.msg_name),
deserialize_byte_buffer_declaration(self.msg_name),
deserialize_string_declaration(self.msg_name),
self.oneof_serialize_byte_buffer_declaration,
self.oneof_serialize_string_declaration,
self.oneof_deserialize_declaration,
equalop_str_declaration(self.msg_name),
] if s != ''
]) + "\n"
self._reset()
def field_start(self, name, type):
self.struct += " " # Indent fields
self.field['name'] = name
self.fields_seen.append(name)
self.serialize_byte_buffer += serialize_field(name, type)
self.serialize_string += serialize_field(name, type)
self.deserialize += deserialize_field(name, type)
def field_end(self):
        # The field is preceded by its type in the struct definition. Close it with the
        # field name and the necessary syntax.
self.struct += f" {self.field["name"]}{{}};\n"
### The following callbacks generate types for struct fields, recursively when necessary.
def bool(self):
self.struct += "bool"
def uint8(self):
self.struct += "uint8_t"
def uint16(self):
self.struct += "uint16_t"
def uint32(self):
self.struct += "uint32_t"
def uint64(self):
self.struct += "uint64_t"
def int8(self):
self.struct += "int8_t"
def int16(self):
self.struct += "int16_t"
def int32(self):
self.struct += "int32_t"
def int64(self):
self.struct += "int64_t"
def string(self):
self.struct += "std::string"
def bytes(self):
self.struct += "std::vector<uint8_t>"
def msgname_ref(self, name):
self.struct += name
def kvpair_start(self):
self.struct += "std::pair<"
def kvpair_key_end(self):
self.struct += ", "
def kvpair_end(self):
self.struct += ">"
def list_start(self):
self.struct += "std::vector<"
def list_end(self):
self.struct += ">"
def fixedlist_start(self):
self.struct += "std::array<"
def fixedlist_type_end(self):
self.struct += ", "
def fixedlist_end(self, size):
self.struct += f"{size}>"
def map_start(self):
self.struct += "std::map<"
def map_key_end(self):
self.struct += ", "
def map_end(self):
self.struct += ">"
def optional_start(self):
self.struct += "std::optional<"
def optional_end(self):
self.struct += ">"
def oneof(self, msgs):
variant = "std::variant<" + ", ".join(msgs.keys()) + ">"
self.struct += variant
oneof = frozenset(msgs.keys())
if oneof in self.oneofs_seen:
return
self.oneofs_seen.add(oneof)
self.oneof_serialize_byte_buffer += variant_serialize_byte_buffer(variant)
self.oneof_serialize_string += variant_serialize_string(variant)
self.oneof_serialize_byte_buffer_declaration += \
variant_serialize_byte_buffer_declaration(variant)
self.oneof_serialize_string_declaration += \
variant_serialize_string_declaration(variant)
self.oneof_deserialize += variant_deserialize(variant, msgs)
self.oneof_deserialize_declaration += variant_deserialize_declaration(variant)
def enum(self, type_name):
self.struct += type_name
|
# Concord
#
# Copyright (c) 2020-2021 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
from visitor import Visitor
def struct_start(name, id):
return f"""
struct {name} {{
static constexpr uint32_t id = {id};
"""
serialize_byte_buffer_fn = "void serialize(std::vector<uint8_t>& output, const {name}& t)"
def serialize_byte_buffer_declaration(name):
return serialize_byte_buffer_fn.format(name=name) + ";\n"
def serialize_byte_buffer_start(name):
return serialize_byte_buffer_fn.format(name=name) + " {\n"
serialize_string_fn = "void serialize(std::string& output, const {name}& t)"
def serialize_string_declaration(name):
return serialize_string_fn.format(name=name) + ";\n"
def serialize_string_start(name):
return serialize_string_fn.format(name=name) + " {\n"
deserialize_fn = "void deserialize(const uint8_t*& input, const uint8_t* end, {name}& t)"
def deserialize_declaration(name):
return deserialize_fn.format(name=name) + ";\n"
def deserialize_start(name):
return deserialize_fn.format(name=name) + " {\n"
deserialize_byte_buffer_fn = "void deserialize(const std::vector<uint8_t>& input, {name}& t)"
def deserialize_byte_buffer_declaration(name):
return deserialize_byte_buffer_fn.format(name=name) + ";\n"
def deserialize_byte_buffer(name):
return deserialize_byte_buffer_fn.format(name=name) + f""" {{
auto begin = input.data();
deserialize(begin, begin + input.size(), t);
}}
"""
deserialize_string_fn = "void deserialize(const std::string& input, {name}& t)"
def deserialize_string_declaration(name):
return deserialize_string_fn.format(name=name) + ";\n"
def deserialize_string(name):
return deserialize_string_fn.format(name=name) + f""" {{
auto begin = reinterpret_cast<const unsigned char*>(input.data());
deserialize(begin, begin + input.size(), t);
}}
"""
def serialize_field(name, type):
# All messages except oneofs and messages exist in the cmf namespace, and are provided in
# serialize.h
if type in ["oneof", "msg"]:
return f" serialize(output, t.{name});\n"
return f" cmf::serialize(output, t.{name});\n"
def deserialize_field(name, type):
# All messages except oneofs and messages exist in the cmf namespace, and are provided in
# serialize.h
if type in ["oneof", "msg"]:
return f" deserialize(input, end, t.{name});\n"
return f" cmf::deserialize(input, end, t.{name});\n"
variant_serialize_byte_buffer_fn = "void serialize(std::vector<uint8_t>& output, const {variant}& val)"
def variant_serialize_byte_buffer_declaration(variant):
return variant_serialize_byte_buffer_fn.format(variant=variant) + ";\n"
def variant_serialize_byte_buffer(variant):
return variant_serialize_byte_buffer_fn.format(variant=variant) + """ {
std::visit([&output](auto&& arg){
cmf::serialize(output, arg.id);
serialize(output, arg);
}, val);
}"""
variant_serialize_string_fn = "void serialize(std::string& output, const {variant}& val)"
def variant_serialize_string_declaration(variant):
return variant_serialize_string_fn.format(variant=variant) + ";\n"
def variant_serialize_string(variant):
return variant_serialize_string_fn.format(variant=variant) + """ {
std::visit([&output](auto&& arg){
cmf::serialize(output, arg.id);
serialize(output, arg);
}, val);
}"""
variant_deserialize_fn = "void deserialize(const uint8_t*& start, const uint8_t* end, {variant}& val)"
def variant_deserialize_declaration(variant):
return variant_deserialize_fn.format(variant=variant) + ";\n"
def variant_deserialize(variant, msgs):
s = variant_deserialize_fn.format(variant=variant) + """ {
uint32_t id;
cmf::deserialize(start, end, id);
"""
for (name, id) in msgs.items():
s += f"""
if (id == {id}) {{
{name} value;
deserialize(start, end, value);
val = value;
return;
}}
"""
s += """
throw cmf::DeserializeError(std::string("Invalid Message id in variant: ") + std::to_string(id));
}
"""
return s
equalop_str_fn = "bool operator==(const {msg_name}& l, const {msg_name}& r)"
def equalop_str_declaration(msg_name):
return equalop_str_fn.format(msg_name=msg_name) + ";\n"
def equalop_str(msg_name, fields):
""" Create an 'operator==' function for the current message struct """
comparison = "true"
if fields:
comparison = " && ".join([f"l.{f} == r.{f}" for f in fields])
return equalop_str_fn.format(msg_name=msg_name) + f""" {{
return {comparison};
}}"""
class CppVisitor(Visitor):
""" A visitor that generates C++ code. """
def __init__(self):
# All output currently constructed
self.output = ""
# All output declarations currently constructed
self.output_declaration = ""
# The current message being processed
self.msg_name = ''
# The current field being processed for a message
self.field = {'type': '', 'name': ''}
# All fields currently seen for the given message
self.fields_seen = []
# The struct being created for the current message. This includes the fields of the struct.
self.struct = ""
# The 'serialize' function for the current message
self.serialize_byte_buffer = ""
self.serialize_string = ""
# The 'deserialize' function for the current message
self.deserialize = ""
# Each oneof in a message corresponds to a variant. Since we don't need duplicate
# serialization functions, in case there are multiple messages or fields with the same
# variants, we only generate a single serialization and deserialization function for each
# variant.
#
# This set keeps track of which variants already had their serialization and
# deserialization functions generated.
self.oneofs_seen = set()
# The `serialize` member functions for all oneofs in the current message
self.oneof_serialize_byte_buffer = ""
self.oneof_serialize_byte_buffer_declaration = ""
self.oneof_serialize_string = ""
self.oneof_serialize_string_declaration = ""
# The `deserialize` member functions for all oneofs in the current message
self.oneof_deserialize = ""
self.oneof_deserialize_declaration = ""
def _reset(self):
# output and oneofs_seen accumulate across messages
output = self.output
output_declaration = self.output_declaration
oneofs = self.oneofs_seen
self.__init__()
self.output = output
self.output_declaration = output_declaration
self.oneofs_seen = oneofs
def create_enum(self, name, tags):
enumstr = 'enum class {name} : uint8_t {{ {tagstr} }};\n'
enumsize_decl = 'uint8_t enumSize({name} _);\n'
enumsize_def = 'uint8_t enumSize({name} _) {{ (void)_; return {num_tags}; }}\n'
self.output_declaration += enumstr.format(name=name, tagstr=", ".join(tags))
self.output_declaration += enumsize_decl.format(name=name)
self.output += enumsize_def.format(name=name, num_tags = len(tags))
def msg_start(self, name, id):
self.msg_name = name
self.struct = struct_start(name, id)
self.serialize_byte_buffer = serialize_byte_buffer_start(name)
self.serialize_string = serialize_string_start(name)
self.deserialize = deserialize_start(name)
def msg_end(self):
self.struct += "};\n"
self.serialize_byte_buffer += "}"
self.serialize_string += "}"
self.deserialize += "}\n"
self.deserialize += deserialize_byte_buffer(self.msg_name)
self.deserialize += "\n"
self.deserialize += deserialize_string(self.msg_name)
self.output += "\n".join([
s for s in [
self.oneof_serialize_byte_buffer,
self.oneof_serialize_string,
self.oneof_deserialize,
equalop_str(self.msg_name, self.fields_seen),
self.serialize_byte_buffer,
self.serialize_string,
self.deserialize,
] if s != ''
]) + "\n"
self.output_declaration += "".join([
s for s in [
self.struct,
"\n",
serialize_byte_buffer_declaration(self.msg_name),
serialize_string_declaration(self.msg_name),
deserialize_declaration(self.msg_name),
deserialize_byte_buffer_declaration(self.msg_name),
deserialize_string_declaration(self.msg_name),
self.oneof_serialize_byte_buffer_declaration,
self.oneof_serialize_string_declaration,
self.oneof_deserialize_declaration,
equalop_str_declaration(self.msg_name),
] if s != ''
]) + "\n"
self._reset()
def field_start(self, name, type):
self.struct += " " # Indent fields
self.field['name'] = name
self.fields_seen.append(name)
self.serialize_byte_buffer += serialize_field(name, type)
self.serialize_string += serialize_field(name, type)
self.deserialize += deserialize_field(name, type)
def field_end(self):
        # The field is preceded by its type in the struct definition. Close it with the
        # field name and the necessary syntax.
self.struct += f" {self.field['name']}{{}};\n"
### The following callbacks generate types for struct fields, recursively when necessary.
def bool(self):
self.struct += "bool"
def uint8(self):
self.struct += "uint8_t"
def uint16(self):
self.struct += "uint16_t"
def uint32(self):
self.struct += "uint32_t"
def uint64(self):
self.struct += "uint64_t"
def int8(self):
self.struct += "int8_t"
def int16(self):
self.struct += "int16_t"
def int32(self):
self.struct += "int32_t"
def int64(self):
self.struct += "int64_t"
def string(self):
self.struct += "std::string"
def bytes(self):
self.struct += "std::vector<uint8_t>"
def msgname_ref(self, name):
self.struct += name
def kvpair_start(self):
self.struct += "std::pair<"
def kvpair_key_end(self):
self.struct += ", "
def kvpair_end(self):
self.struct += ">"
def list_start(self):
self.struct += "std::vector<"
def list_end(self):
self.struct += ">"
def fixedlist_start(self):
self.struct += "std::array<"
def fixedlist_type_end(self):
self.struct += ", "
def fixedlist_end(self, size):
self.struct += f"{size}>"
def map_start(self):
self.struct += "std::map<"
def map_key_end(self):
self.struct += ", "
def map_end(self):
self.struct += ">"
def optional_start(self):
self.struct += "std::optional<"
def optional_end(self):
self.struct += ">"
def oneof(self, msgs):
variant = "std::variant<" + ", ".join(msgs.keys()) + ">"
self.struct += variant
oneof = frozenset(msgs.keys())
if oneof in self.oneofs_seen:
return
self.oneofs_seen.add(oneof)
self.oneof_serialize_byte_buffer += variant_serialize_byte_buffer(variant)
self.oneof_serialize_string += variant_serialize_string(variant)
self.oneof_serialize_byte_buffer_declaration += \
variant_serialize_byte_buffer_declaration(variant)
self.oneof_serialize_string_declaration += \
variant_serialize_string_declaration(variant)
self.oneof_deserialize += variant_deserialize(variant, msgs)
self.oneof_deserialize_declaration += variant_deserialize_declaration(variant)
def enum(self, type_name):
self.struct += type_name
|
import os
import shutil
import dataset
from stuf import stuf
'''HELPER FUNCTIONS'''
def init_local_db(local_db = os.path.expanduser(r'~/scripts/leavesdb.db'), src_db = r'/media/data_cifs/irodri15/data/db/leavesdb.db'):
'''
Whenever working on a new machine, run this function in order to make sure the main leavesdb.db file is stored locally to avoid CIFS permissions issues.
usage: init_local_db()
'''
if not os.path.isfile(local_db):
print(f'Copying sql db file from {src_db} to {local_db}')
shutil.copyfile(src_db, local_db)
print(f'Proceeding with sql db at location {local_db}')
return local_db
def __get_family_names_per_dataset(db):
'''
Helper function that returns dataset_families, a list of tuples: [(,),(,),...]
db = dataset.connect(f'sqlite:///{db_path}', row_type=stuf)
dataset_families contains tuples of len == 2, where item 0 is a dataset name, and item 1 is a list of strings, one for each family name in the dataset.
e.g. [('Fossil',['Adoxaceae', 'Anacardiaceae',...]),
('PNAS',['Apocynaceae','Betulaceae',...]),
...]
'''
dataset_families = []
for dataset in db['dataset'].distinct('dataset'):
dataset_name = dataset.dataset
distinct_families = db['dataset'].distinct('family', dataset=dataset_name)
dataset_families.append((dataset_name, [fam.family for fam in distinct_families]))
return dataset_families
def __get_num_families_per_dataset(db):
'''
Helper function similar to __get_family_names_per_dataset, but instead of tuple containing (dataset_name, list(family names)),
returns the total number of unique families for each dataset.
Arguments:
db : open connection to database
e.g. db = dataset.connect(f'sqlite:///{db_path}', row_type=stuf)
Return:
num_families_per_dataset : list(tuples(str,int))
e.g. [('Fossil',27),
('PNAS',19),
...]
'''
num_families_per_dataset = []
dataset_families = __get_family_names_per_dataset(db)
for dataset in dataset_families:
num_families_per_dataset.append((dataset[0], len(dataset[1])))
return num_families_per_dataset
def summarize_db(db):
'''
Combines helper functions to summarize key info about the data in opened database, db.
'''
print('Database column keys:\n', db['dataset'].columns)
print('Number of distinct families:\n', __get_num_families_per_dataset(db))
print(f"Number of rows in db:\n {len(db["dataset"])}")
|
import os
import shutil
import dataset
from stuf import stuf
'''HELPER FUNCTIONS'''
def init_local_db(local_db = os.path.expanduser(r'~/scripts/leavesdb.db'), src_db = r'/media/data_cifs/irodri15/data/db/leavesdb.db'):
'''
Whenever working on a new machine, run this function in order to make sure the main leavesdb.db file is stored locally to avoid CIFS permissions issues.
usage: init_local_db()
'''
if not os.path.isfile(local_db):
print(f'Copying sql db file from {src_db} to {local_db}')
shutil.copyfile(src_db, local_db)
print(f'Proceeding with sql db at location {local_db}')
return local_db
def __get_family_names_per_dataset(db):
'''
Helper function that returns dataset_families, a list of tuples: [(,),(,),...]
db = dataset.connect(f'sqlite:///{db_path}', row_type=stuf)
dataset_families contains tuples of len == 2, where item 0 is a dataset name, and item 1 is a list of strings, one for each family name in the dataset.
e.g. [('Fossil',['Adoxaceae', 'Anacardiaceae',...]),
('PNAS',['Apocynaceae','Betulaceae',...]),
...]
'''
dataset_families = []
for dataset in db['dataset'].distinct('dataset'):
dataset_name = dataset.dataset
distinct_families = db['dataset'].distinct('family', dataset=dataset_name)
dataset_families.append((dataset_name, [fam.family for fam in distinct_families]))
return dataset_families
def __get_num_families_per_dataset(db):
'''
Helper function similar to __get_family_names_per_dataset, but instead of tuple containing (dataset_name, list(family names)),
returns the total number of unique families for each dataset.
Arguments:
db : open connection to database
e.g. db = dataset.connect(f'sqlite:///{db_path}', row_type=stuf)
Return:
num_families_per_dataset : list(tuples(str,int))
e.g. [('Fossil',27),
('PNAS',19),
...]
'''
num_families_per_dataset = []
dataset_families = __get_family_names_per_dataset(db)
for dataset in dataset_families:
num_families_per_dataset.append((dataset[0], len(dataset[1])))
return num_families_per_dataset
def summarize_db(db):
'''
Combines helper functions to summarize key info about the data in opened database, db.
'''
print('Database column keys:\n', db['dataset'].columns)
print('Number of distinct families:\n', __get_num_families_per_dataset(db))
print(f"Number of rows in db:\n {len(db['dataset'])}")
|
from functools import reduce
import io
import json
import logging
import os
import platform
import random
import re
import shlex
import smtplib
import string
import subprocess
import time
import traceback
import stat
from copy import deepcopy
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from scipy.stats import tmean, scoreatpercentile
from shutil import which, move, rmtree
import hcl2
import requests
import yaml
import git
from bs4 import BeautifulSoup
from paramiko import SSHClient, AutoAddPolicy
from paramiko.auth_handler import AuthenticationException, SSHException
from semantic_version import Version
from tempfile import NamedTemporaryFile, mkdtemp
from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.exceptions import (
CephHealthException,
ClientDownloadError,
CommandFailed,
TagNotFoundException,
TimeoutException,
TimeoutExpiredError,
UnavailableBuildException,
UnexpectedImage,
UnsupportedOSType,
)
from ocs_ci.utility import version as version_module
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.retry import retry
log = logging.getLogger(__name__)
# variables
mounting_dir = "/mnt/cephfs/"
clients = []
md5sum_list1 = []
md5sum_list2 = []
fuse_clients = []
kernel_clients = []
mon_node = ""
mon_node_ip = ""
mds_nodes = []
md5sum_file_lock = []
active_mdss = []
RC = []
failure = {}
output = []
unique_test_names = []
# function for getting the clients
def get_client_info(ceph_nodes, clients):
log.info("Getting Clients")
for node in ceph_nodes:
if node.role == "client":
clients.append(node)
# Identifying MON node
for node in ceph_nodes:
if node.role == "mon":
mon_node = node
out, err = mon_node.exec_command(cmd="sudo hostname -I")
mon_node_ip = out.read().decode().rstrip("\n")
break
for node in ceph_nodes:
if node.role == "mds":
mds_nodes.append(node)
for node in clients:
node.exec_command(cmd="sudo yum install -y attr")
    fuse_clients = clients[0:2]  # separating clients for fuse and kernel
kernel_clients = clients[2:4]
return (
fuse_clients,
kernel_clients,
mon_node,
mounting_dir,
mds_nodes,
md5sum_file_lock,
mon_node_ip,
)
# function for providing authorization to the clients from the MON node
def auth_list(clients, mon_node):
for node in clients:
log.info("Giving required permissions for clients from MON node:")
mon_node.exec_command(
cmd="sudo ceph auth get-or-create client.%s mon 'allow *' mds 'allow *, allow rw path=/' "
"osd 'allow rw pool=cephfs_data' -o /etc/ceph/ceph.client.%s.keyring"
% (node.hostname, node.hostname)
)
out, err = mon_node.exec_command(
sudo=True, cmd="cat /etc/ceph/ceph.client.%s.keyring" % (node.hostname)
)
keyring = out.read().decode()
key_file = node.write_file(
sudo=True,
file_name="/etc/ceph/ceph.client.%s.keyring" % (node.hostname),
file_mode="w",
)
key_file.write(keyring)
key_file.flush()
node.exec_command(
cmd="sudo chmod 644 /etc/ceph/ceph.client.%s.keyring" % (node.hostname)
)
# creating mounting directory
node.exec_command(cmd="sudo mkdir %s" % (mounting_dir))
# Mounting a single FS with ceph-fuse
def fuse_mount(fuse_clients, mounting_dir):
try:
for client in fuse_clients:
log.info("Creating mounting dir:")
log.info("Mounting fs with ceph-fuse on client %s:" % (client.hostname))
client.exec_command(
cmd="sudo ceph-fuse -n client.%s %s" % (client.hostname, mounting_dir)
)
out, err = client.exec_command(cmd="mount")
mount_output = out.read().decode()
mount_output.split()
log.info("Checking if fuse mount is is passed of failed:")
if "fuse" in mount_output:
log.info("ceph-fuse mounting passed")
else:
log.error("ceph-fuse mounting failed")
return md5sum_list1
except Exception as e:
log.error(e)
def kernel_mount(mounting_dir, mon_node_ip, kernel_clients):
try:
for client in kernel_clients:
out, err = client.exec_command(
cmd="sudo ceph auth get-key client.%s" % (client.hostname)
)
secret_key = out.read().decode().rstrip("\n")
mon_node_ip = mon_node_ip.replace(" ", "")
client.exec_command(
cmd="sudo mount -t ceph %s:6789:/ %s -o name=%s,secret=%s"
% (mon_node_ip, mounting_dir, client.hostname, secret_key)
)
out, err = client.exec_command(cmd="mount")
mount_output = out.read().decode()
mount_output.split()
log.info("Checking if kernel mount is is passed of failed:")
if "%s:6789:/" % (mon_node_ip) in mount_output:
log.info("kernel mount passed")
else:
log.error("kernel mount failed")
return md5sum_list2
except Exception as e:
log.error(e)
def fuse_client_io(client, mounting_dir):
try:
rand_count = random.randint(1, 5)
rand_bs = random.randint(100, 300)
log.info("Performing IOs on fuse-clients")
client.exec_command(
cmd="sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d"
% (mounting_dir, client.hostname, rand_bs, rand_count),
long_running=True,
)
except Exception as e:
log.error(e)
def kernel_client_io(client, mounting_dir):
try:
rand_count = random.randint(1, 6)
rand_bs = random.randint(100, 500)
log.info("Performing IOs on kernel-clients")
client.exec_command(
cmd="sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d"
% (mounting_dir, client.hostname, rand_bs, rand_count),
long_running=True,
)
except Exception as e:
log.error(e)
def fuse_client_md5(fuse_clients, md5sum_list1):
try:
log.info("Calculating MD5 sums of files in fuse-clients:")
for client in fuse_clients:
md5sum_list1.append(
client.exec_command(
cmd="sudo md5sum %s* | awk '{print $1}' " % (mounting_dir),
long_running=True,
)
)
except Exception as e:
log.error(e)
def kernel_client_md5(kernel_clients, md5sum_list2):
try:
log.info("Calculating MD5 sums of files in kernel-clients:")
for client in kernel_clients:
md5sum_list2.append(
client.exec_command(
cmd="sudo md5sum %s* | awk '{print $1}' " % (mounting_dir),
long_running=True,
)
)
except Exception as e:
log.error(e)
# checking file locking mechanism
def file_locking(client):
try:
to_lock_file = """
import fcntl
import subprocess
import time
try:
f = open('/mnt/cephfs/to_test_file_lock', 'w+')
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
print "locking file:--------------------------------"
subprocess.check_output(["sudo","dd","if=/dev/zero","of=/mnt/cephfs/to_test_file_lock","bs=1M","count=2"])
except IOError as e:
print e
finally:
print "Unlocking file:------------------------------"
fcntl.lockf(f,fcntl.LOCK_UN)
"""
to_lock_code = client.write_file(
sudo=True, file_name="/home/cephuser/file_lock.py", file_mode="w"
)
to_lock_code.write(to_lock_file)
to_lock_code.flush()
out, err = client.exec_command(cmd="sudo python /home/cephuser/file_lock.py")
output = out.read().decode()
output.split()
if "Errno 11" in output:
log.info("File locking achieved, data is not corrupted")
elif "locking" in output:
log.info("File locking achieved, data is not corrupted")
else:
log.error("Data is corrupted")
out, err = client.exec_command(
cmd="sudo md5sum %sto_test_file_lock | awk '{print $1}'" % (mounting_dir)
)
md5sum_file_lock.append(out.read().decode())
except Exception as e:
log.error(e)
def activate_multiple_mdss(mds_nodes):
try:
log.info("Activating Multiple MDSs")
for node in mds_nodes:
out1, err = node.exec_command(
cmd="sudo ceph fs set cephfs allow_multimds true --yes-i-really-mean-it"
)
out2, err = node.exec_command(cmd="sudo ceph fs set cephfs max_mds 2")
break
except Exception as e:
log.error(e)
def mkdir_pinning(clients, range1, range2, dir_name, pin_val):
try:
log.info("Creating Directories and Pinning to MDS %s" % (pin_val))
for client in clients:
for num in range(range1, range2):
out, err = client.exec_command(
cmd="sudo mkdir %s%s_%d" % (mounting_dir, dir_name, num)
)
if pin_val != "":
client.exec_command(
cmd="sudo setfattr -n ceph.dir.pin -v %s %s%s_%d"
% (pin_val, mounting_dir, dir_name, num)
)
else:
print("Pin val not given")
print(out.read().decode())
print(time.time())
break
except Exception as e:
log.error(e)
def allow_dir_fragmentation(mds_nodes):
try:
log.info("Allowing directorty fragmenation for splitting")
for node in mds_nodes:
node.exec_command(cmd="sudo ceph fs set cephfs allow_dirfrags 1")
break
except Exception as e:
log.error(e)
def mds_fail_over(mds_nodes):
try:
rand = random.randint(0, 1)
for node in mds_nodes:
log.info("Failing MDS %d" % (rand))
node.exec_command(cmd="sudo ceph mds fail %d" % (rand))
break
except Exception as e:
log.error(e)
def pinned_dir_io(clients, mds_fail_over, num_of_files, range1, range2):
try:
log.info("Performing IOs and MDSfailovers on clients")
for client in clients:
client.exec_command(cmd="sudo pip install crefi")
for num in range(range1, range2):
if mds_fail_over != "":
mds_fail_over(mds_nodes)
out, err = client.exec_command(
cmd="sudo crefi -n %d %sdir_%d" % (num_of_files, mounting_dir, num)
)
rc = out.channel.recv_exit_status()
print(out.read().decode())
RC.append(rc)
print(time.time())
if rc == 0:
log.info("Client IO is going on,success")
else:
log.error("Client IO got interrupted")
failure.update({client: out})
break
break
except Exception as e:
log.error(e)
def custom_ceph_config(suite_config, custom_config, custom_config_file):
"""
Combines and returns custom configuration overrides for ceph.
Hierarchy is as follows::
custom_config > custom_config_file > suite_config
Args:
suite_config: ceph_conf_overrides that currently exist in the test suite
custom_config: custom config args provided by the cli (these all go to the global scope)
custom_config_file: path to custom config yaml file provided by the cli
    Returns:
New value to be used for ceph_conf_overrides in test config
"""
log.debug("Suite config: {}".format(suite_config))
log.debug("Custom config: {}".format(custom_config))
log.debug("Custom config file: {}".format(custom_config_file))
full_custom_config = suite_config or {}
cli_config_dict = {}
custom_config_dict = {}
# retrieve custom config from file
if custom_config_file:
with open(custom_config_file) as f:
custom_config_dict = yaml.safe_load(f)
log.info("File contents: {}".format(custom_config_dict))
# format cli configs into dict
if custom_config:
cli_config_dict = dict(item.split("=") for item in custom_config)
# combine file and cli configs
if cli_config_dict:
if not custom_config_dict.get("global"):
custom_config_dict["global"] = {}
for key, value in cli_config_dict.items():
custom_config_dict["global"][key] = value
# combine file and suite configs
for key, value in custom_config_dict.items():
subsection = {}
if full_custom_config.get(key):
subsection.update(full_custom_config[key])
subsection.update(value)
full_custom_config[key] = subsection
log.info("Full custom config: {}".format(full_custom_config))
return full_custom_config
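# Illustrative sketch (not part of the original module): the precedence described in
# the docstring above, using a hypothetical option name. CLI entries land in the
# "global" section and win over both the config file and the suite defaults.
#
#   suite = {"global": {"osd_pool_default_size": "3"}}
#   custom_ceph_config(suite, ["osd_pool_default_size=2"], None)
#   # -> {"global": {"osd_pool_default_size": "2"}}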
def mask_secrets(plaintext, secrets):
"""
Replace secrets in plaintext with asterisks
Args:
plaintext (str or list): The plaintext to remove the secrets from or
list of strings to remove secrets from
secrets (list): List of secret strings to replace in the plaintext
Returns:
str: The censored version of plaintext
"""
if secrets:
for secret in secrets:
if isinstance(plaintext, list):
plaintext = [string.replace(secret, "*" * 5) for string in plaintext]
else:
plaintext = plaintext.replace(secret, "*" * 5)
return plaintext
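# Illustrative sketch (not part of the original module): mask_secrets on both a plain
# string and a list of strings; the command and secret below are hypothetical.
#
#   cmd = "curl -u admin:s3cr3t https://example.com"
#   mask_secrets(cmd, ["s3cr3t"])
#   # -> "curl -u admin:***** https://example.com"
#   mask_secrets([cmd, "no secret here"], ["s3cr3t"])
#   # -> ["curl -u admin:***** https://example.com", "no secret here"]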
def run_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):
"""
*The deprecated form of exec_cmd.*
Run an arbitrary command locally
Args:
cmd (str): command to run
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): Timeout for the command, defaults to 600 seconds.
ignore_error (bool): True if ignore non zero return code and do not
raise the exception.
Raises:
CommandFailed: In case the command execution fails
Returns:
(str) Decoded stdout of command
"""
completed_process = exec_cmd(cmd, secrets, timeout, ignore_error, **kwargs)
return mask_secrets(completed_process.stdout.decode(), secrets)
def exec_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):
"""
Run an arbitrary command locally
Args:
cmd (str): command to run
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): Timeout for the command, defaults to 600 seconds.
ignore_error (bool): True if ignore non zero return code and do not
raise the exception.
Raises:
CommandFailed: In case the command execution fails
Returns:
(CompletedProcess) A CompletedProcess object of the command that was executed
CompletedProcess attributes:
args: The list or str args passed to run().
returncode (str): The exit code of the process, negative for signals.
stdout (str): The standard output (None if not captured).
stderr (str): The standard error (None if not captured).
"""
masked_cmd = mask_secrets(cmd, secrets)
log.info(f"Executing command: {masked_cmd}")
if isinstance(cmd, str):
cmd = shlex.split(cmd)
completed_process = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
timeout=timeout,
**kwargs,
)
masked_stdout = mask_secrets(completed_process.stdout.decode(), secrets)
if len(completed_process.stdout) > 0:
log.debug(f"Command stdout: {masked_stdout}")
else:
log.debug("Command stdout is empty")
masked_stderr = mask_secrets(completed_process.stderr.decode(), secrets)
if len(completed_process.stderr) > 0:
log.warning(f"Command stderr: {masked_stderr}")
else:
log.debug("Command stderr is empty")
log.debug(f"Command return code: {completed_process.returncode}")
if completed_process.returncode and not ignore_error:
raise CommandFailed(
f"Error during execution of command: {masked_cmd}."
f"\nError is {masked_stderr}"
)
return completed_process
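# Illustrative sketch (not part of the original module): running a harmless local
# command and inspecting the CompletedProcess, plus tolerating a failing command
# with ignore_error=True. The commands shown are hypothetical examples.
#
#   proc = exec_cmd("echo hello")
#   proc.returncode            # -> 0
#   proc.stdout.decode()       # -> "hello\n"
#   failed = exec_cmd("false", ignore_error=True)
#   failed.returncode          # -> non-zero, no CommandFailed raised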
def download_file(url, filename, **kwargs):
"""
Download a file from a specified url
Args:
url (str): URL of the file to download
filename (str): Name of the file to write the download to
kwargs (dict): additional keyword arguments passed to requests.get(...)
"""
log.debug(f"Download '{url}' to '{filename}'.")
with open(filename, "wb") as f:
r = requests.get(url, **kwargs)
assert r.ok, f"The URL {url} is not available! Status: {r.status_code}."
f.write(r.content)
def get_url_content(url, **kwargs):
"""
Return URL content
Args:
url (str): URL address to return
kwargs (dict): additional keyword arguments passed to requests.get(...)
Returns:
str: Content of URL
Raises:
AssertionError: When couldn't load URL
"""
log.debug(f"Download '{url}' content.")
r = requests.get(url, **kwargs)
assert r.ok, f"Couldn't load URL: {url} content! Status: {r.status_code}."
return r.content
def expose_ocp_version(version):
"""
This helper function exposes latest nightly version or GA version of OCP.
When the version string ends with .nightly (e.g. 4.2.0-0.nightly) it will
expose the version to latest accepted OCP build
(e.g. 4.2.0-0.nightly-2019-08-08-103722)
    If the version ends with -ga then it will find the latest GA OCP version
    and will expose, for example, 4.2-ga as 4.2.22.
    Args:
        version (str): Version of OCP
    Returns:
        str: Version of OCP exposed to the full version if a latest nightly was passed
"""
if version.endswith(".nightly"):
latest_nightly_url = (
f"https://amd64.ocp.releases.ci.openshift.org/api/v1/"
f"releasestream/{version}/latest"
)
version_url_content = get_url_content(latest_nightly_url)
version_json = json.loads(version_url_content)
return version_json["name"]
if version.endswith("-ga"):
channel = config.DEPLOYMENT.get("ocp_channel", "stable")
ocp_version = version.rstrip("-ga")
index = config.DEPLOYMENT.get("ocp_version_index", -1)
return get_latest_ocp_version(f"{channel}-{ocp_version}", index)
else:
return version
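# Illustrative sketch (not part of the original module): the three cases handled by
# expose_ocp_version. The concrete build numbers shown are hypothetical.
#
#   expose_ocp_version("4.2.0-0.nightly")  # resolved via the release stream API,
#                                          # e.g. -> "4.2.0-0.nightly-2019-08-08-103722"
#   expose_ocp_version("4.2-ga")           # resolved to the latest GA build of the
#                                          # configured channel, e.g. -> "4.2.22"
#   expose_ocp_version("4.2.22")           # already explicit, returned unchanged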
def get_openshift_installer(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the OpenShift installer binary, if not already present.
Update env. PATH and get path of the openshift installer binary.
Args:
version (str): Version of the installer to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force installer download even if already present
Returns:
str: Path to the installer binary
"""
version = version or config.DEPLOYMENT["installer_version"]
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
installer_filename = "openshift-install"
installer_binary_path = os.path.join(bin_dir, installer_filename)
if os.path.isfile(installer_binary_path) and force_download:
delete_file(installer_binary_path)
if os.path.isfile(installer_binary_path):
log.debug(f"Installer exists ({installer_binary_path}), skipping download.")
# TODO: check installer version
else:
version = expose_ocp_version(version)
log.info(f"Downloading openshift installer ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
tarball = f"{installer_filename}.tar.gz"
url = get_openshift_mirror_url(installer_filename, version)
download_file(url, tarball)
run_cmd(f"tar xzvf {tarball} {installer_filename}")
delete_file(tarball)
# return to the previous working directory
os.chdir(previous_dir)
installer_version = run_cmd(f"{installer_binary_path} version")
log.info(f"OpenShift Installer version: {installer_version}")
return installer_binary_path
def get_ocm_cli(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the OCM binary, if not already present.
Update env. PATH and get path of the OCM binary.
Args:
version (str): Version of the OCM to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force OCM download even if already present
Returns:
str: Path to the OCM binary
"""
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
ocm_filename = "ocm"
ocm_binary_path = os.path.join(bin_dir, ocm_filename)
if os.path.isfile(ocm_binary_path) and force_download:
delete_file(ocm_binary_path)
if os.path.isfile(ocm_binary_path):
log.debug(f"ocm exists ({ocm_binary_path}), skipping download.")
else:
log.info(f"Downloading ocm cli ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = f"https://github.com/openshift-online/ocm-cli/releases/download/v{version}/ocm-linux-amd64"
download_file(url, ocm_filename)
# return to the previous working directory
os.chdir(previous_dir)
current_file_permissions = os.stat(ocm_binary_path)
os.chmod(
ocm_binary_path,
current_file_permissions.st_mode | stat.S_IEXEC,
)
ocm_version = run_cmd(f"{ocm_binary_path} version")
log.info(f"OCM version: {ocm_version}")
return ocm_binary_path
def get_rosa_cli(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the ROSA binary, if not already present.
Update env. PATH and get path of the ROSA binary.
Args:
version (str): Version of the ROSA to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force ROSA download even if already present
Returns:
str: Path to the rosa binary
"""
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
rosa_filename = "rosa"
rosa_binary_path = os.path.join(bin_dir, rosa_filename)
if os.path.isfile(rosa_binary_path) and force_download:
delete_file(rosa_binary_path)
if os.path.isfile(rosa_binary_path):
log.debug(f"rosa exists ({rosa_binary_path}), skipping download.")
else:
log.info(f"Downloading rosa cli ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = f"https://github.com/openshift/rosa/releases/download/v{version}/rosa-linux-amd64"
download_file(url, rosa_filename)
# return to the previous working directory
os.chdir(previous_dir)
current_file_permissions = os.stat(rosa_binary_path)
os.chmod(
rosa_binary_path,
current_file_permissions.st_mode | stat.S_IEXEC,
)
rosa_version = run_cmd(f"{rosa_binary_path} version")
log.info(f"rosa version: {rosa_version}")
return rosa_binary_path
def get_openshift_client(
version=None, bin_dir=None, force_download=False, skip_comparison=False
):
"""
Download the OpenShift client binary, if not already present.
Update env. PATH and get path of the oc binary.
Args:
version (str): Version of the client to download
(default: config.RUN['client_version'])
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force client download even if already present
skip_comparison (bool): Skip the comparison between the existing OCP client
version and the configured one.
Returns:
str: Path to the client binary
"""
version = version or config.RUN["client_version"]
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
client_binary_path = os.path.join(bin_dir, "oc")
kubectl_binary_path = os.path.join(bin_dir, "kubectl")
download_client = True
client_version = None
try:
version = expose_ocp_version(version)
except Exception:
log.exception("Unable to expose OCP version, skipping client download.")
skip_comparison = True
download_client = False
force_download = False
if force_download:
log.info("Forcing client download.")
elif os.path.isfile(client_binary_path) and not skip_comparison:
current_client_version = get_client_version(client_binary_path)
if current_client_version != version:
log.info(
f"Existing client version ({current_client_version}) does not match "
f"configured version ({version})."
)
else:
log.debug(
f"Client exists ({client_binary_path}) and matches configured version, "
f"skipping download."
)
download_client = False
if download_client:
# Move existing client binaries to backup location
client_binary_backup = f"{client_binary_path}.bak"
kubectl_binary_backup = f"{kubectl_binary_path}.bak"
try:
os.rename(client_binary_path, client_binary_backup)
os.rename(kubectl_binary_path, kubectl_binary_backup)
except FileNotFoundError:
pass
# Download the client
log.info(f"Downloading openshift client ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = get_openshift_mirror_url("openshift-client", version)
tarball = "openshift-client.tar.gz"
download_file(url, tarball)
run_cmd(f"tar xzvf {tarball} oc kubectl")
delete_file(tarball)
try:
client_version = run_cmd(f"{client_binary_path} version --client")
except CommandFailed:
log.error("Unable to get version from downloaded client.")
if client_version:
try:
delete_file(client_binary_backup)
delete_file(kubectl_binary_backup)
log.info("Deleted backup binaries.")
except FileNotFoundError:
pass
else:
try:
os.rename(client_binary_backup, client_binary_path)
os.rename(kubectl_binary_backup, kubectl_binary_path)
log.info("Restored backup binaries to their original location.")
except FileNotFoundError:
raise ClientDownloadError(
"No backups exist and new binary was unable to be verified."
)
# return to the previous working directory
os.chdir(previous_dir)
log.info(f"OpenShift Client version: {client_version}")
return client_binary_path
def get_vault_cli(bind_dir=None, force_download=False):
"""
Download vault based on platform
basically for CLI purpose. Binary will be directly
put into ocs_ci/bin/ directory
Args:
bind_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force vault cli download even if already present
"""
res = requests.get(constants.VAULT_VERSION_INFO_URL)
version = res.url.split("/")[-1].lstrip("v")
bin_dir = os.path.expanduser(bind_dir or config.RUN["bin_dir"])
system = platform.system()
if "Darwin" not in system and "Linux" not in system:
raise UnsupportedOSType("Not a supported platform for vault")
system = system.lower()
zip_file = f"vault_{version}_{system}_amd64.zip"
vault_cli_filename = "vault"
vault_binary_path = os.path.join(bin_dir, vault_cli_filename)
if os.path.isfile(vault_binary_path) and force_download:
delete_file(vault_binary_path)
if os.path.isfile(vault_binary_path):
log.debug(
f"Vault CLI binary already exists {vault_binary_path}, skipping download."
)
else:
log.info(f"Downloading vault cli {version}")
prepare_bin_dir()
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = f"{constants.VAULT_DOWNLOAD_BASE_URL}/{version}/{zip_file}"
download_file(url, zip_file)
run_cmd(f"unzip {zip_file}")
delete_file(zip_file)
os.chdir(previous_dir)
vault_ver = run_cmd(f"{vault_binary_path} version")
log.info(f"Vault cli version:{vault_ver}")
def ensure_nightly_build_availability(build_url):
base_build_url = build_url.rsplit("/", 1)[0]
r = requests.get(base_build_url)
extracting_condition = b"Extracting" in r.content
if extracting_condition:
log.info("Build is extracting now, may take up to a minute.")
return r.ok and not extracting_condition
def get_openshift_mirror_url(file_name, version):
"""
Format url to OpenShift mirror (for client and installer download).
Args:
file_name (str): Name of file
version (str): Version of the installer or client to download
Returns:
str: Url of the desired file (installer or client)
Raises:
UnsupportedOSType: In case the OS type is not supported
UnavailableBuildException: In case the build url is not reachable
"""
if platform.system() == "Darwin":
os_type = "mac"
elif platform.system() == "Linux":
os_type = "linux"
else:
raise UnsupportedOSType
url_template = config.DEPLOYMENT.get(
"ocp_url_template",
"https://openshift-release-artifacts.apps.ci.l2s4.p1.openshiftapps.com/"
"{version}/{file_name}-{os_type}-{version}.tar.gz",
)
url = url_template.format(
version=version,
file_name=file_name,
os_type=os_type,
)
sample = TimeoutSampler(
timeout=540,
sleep=5,
func=ensure_nightly_build_availability,
build_url=url,
)
if not sample.wait_for_func_status(result=True):
raise UnavailableBuildException(f"The build url {url} is not reachable")
return url
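# Illustrative sketch (not part of the original module): with the default template
# above, a Linux host requesting the client tarball for a hypothetical version would
# poll the following URL until the build is fully extracted:
#
#   get_openshift_mirror_url("openshift-client", "4.2.0-0.nightly-2019-08-08-103722")
#   # -> "https://openshift-release-artifacts.apps.ci.l2s4.p1.openshiftapps.com/"
#   #    "4.2.0-0.nightly-2019-08-08-103722/"
#   #    "openshift-client-linux-4.2.0-0.nightly-2019-08-08-103722.tar.gz"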
def prepare_bin_dir(bin_dir=None):
"""
Prepare bin directory for OpenShift client and installer
Args:
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
"""
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
try:
os.mkdir(bin_dir)
log.info(f"Directory '{bin_dir}' successfully created.")
except FileExistsError:
log.debug(f"Directory '{bin_dir}' already exists.")
def add_path_to_env_path(path):
"""
Add path to the PATH environment variable (if not already there).
Args:
path (str): Path which should be added to the PATH env. variable
"""
env_path = os.environ["PATH"].split(os.pathsep)
if path not in env_path:
os.environ["PATH"] = os.pathsep.join([path] + env_path)
log.info(f"Path '{path}' added to the PATH environment variable.")
log.debug(f"PATH: {os.environ["PATH"]}")
def delete_file(file_name):
"""
Delete file_name
Args:
file_name (str): Path to the file you want to delete
"""
os.remove(file_name)
def delete_dir(dir_name):
"""
Deletes the directory
Args:
dir_name (str): Directory path to delete
"""
try:
rmtree(dir_name)
except OSError as e:
log.error(f"Failed to delete the directory {dir_name}. Error: {e.strerror}")
class TimeoutSampler(object):
"""
Samples the function output.
This is a generator object that at first yields the output of function
`func`. After the yield, it either raises instance of `timeout_exc_cls` or
sleeps `sleep` seconds.
Yielding the output allows you to handle every value as you wish.
Feel free to set the instance variables.
Args:
timeout (int): Timeout in seconds
sleep (int): Sleep interval in seconds
func (function): The function to sample
func_args: Arguments for the function
func_kwargs: Keyword arguments for the function
"""
def __init__(self, timeout, sleep, func, *func_args, **func_kwargs):
self.timeout = timeout
self.sleep = sleep
# check that given timeout and sleep values makes sense
if self.timeout < self.sleep:
raise ValueError("timeout should be larger than sleep time")
self.func = func
self.func_args = func_args
self.func_kwargs = func_kwargs
# Timestamps of the first and most recent samples
self.start_time = None
self.last_sample_time = None
# The exception to raise
self.timeout_exc_cls = TimeoutExpiredError
# Arguments that will be passed to the exception
self.timeout_exc_args = [self.timeout]
try:
self.timeout_exc_args.append(
f"Timed out after {timeout}s running {self._build_call_string()}"
)
except Exception:
log.exception(
"Failed to assemble call string. Not necessarily a test failure."
)
def _build_call_string(self):
def stringify(value):
if isinstance(value, str):
return f'"{value}"'
return str(value)
args = list(map(stringify, self.func_args))
kwargs = [f"{stringify(k)}={stringify(v)}" for k, v in self.func_kwargs.items()]
all_args_string = ", ".join(args + kwargs)
return f"{self.func.__name__}({all_args_string})"
def __iter__(self):
if self.start_time is None:
self.start_time = time.time()
while True:
self.last_sample_time = time.time()
if self.timeout <= (self.last_sample_time - self.start_time):
raise self.timeout_exc_cls(*self.timeout_exc_args)
try:
yield self.func(*self.func_args, **self.func_kwargs)
except Exception as ex:
msg = f"Exception raised during iteration: {ex}"
log.exception(msg)
if self.timeout <= (time.time() - self.start_time):
raise self.timeout_exc_cls(*self.timeout_exc_args)
log.info("Going to sleep for %d seconds before next iteration", self.sleep)
time.sleep(self.sleep)
def wait_for_func_value(self, value):
"""
        Implements a common use case of TimeoutSampler: waiting until func (the given
function) returns a given value.
Args:
value: Expected return value of func we are waiting for.
"""
try:
for i_value in self:
if i_value == value:
break
except self.timeout_exc_cls:
log.error(
"function %s failed to return expected value %s "
"after multiple retries during %d second timeout",
self.func.__name__,
value,
self.timeout,
)
raise
def wait_for_func_status(self, result):
"""
Get function and run it for given time until success or timeout.
(using __iter__ function)
Args:
result (bool): Expected result from func.
Examples::
sample = TimeoutSampler(
timeout=60, sleep=1, func=some_func, func_arg1="1",
func_arg2="2"
)
if not sample.wait_for_func_status(result=True):
raise Exception
"""
try:
self.wait_for_func_value(result)
return True
except self.timeout_exc_cls:
return False
class TimeoutIterator(TimeoutSampler):
"""
Wrapper of TimeoutSampler which separates parameters of the class itself
and func arguments in __init__ method. Such way of passing function with
parameters is used in python standard library.
This allows more explicit usage, which improves readability, eg.::
t1 = TimeoutIterator(timeout=60, sleep=5, func=foo, func_args=[bar])
t2 = TimeoutIterator(3600, sleep=10, func=foo, func_args=[bar])
"""
def __init__(self, timeout, sleep, func, func_args=None, func_kwargs=None):
if func_args is None:
func_args = []
if func_kwargs is None:
func_kwargs = {}
super().__init__(timeout, sleep, func, *func_args, **func_kwargs)
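# Illustrative sketch (not part of the original module): the two calling styles side
# by side. check_pod_ready stands in for any hypothetical boolean check.
#
#   # generator style: inspect every sample yourself
#   for ready in TimeoutSampler(timeout=60, sleep=5, func=check_pod_ready):
#       if ready:
#           break
#
#   # helper style: block until the function returns True, or get False on timeout
#   sampler = TimeoutIterator(timeout=60, sleep=5, func=check_pod_ready)
#   reached = sampler.wait_for_func_status(result=True)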
def get_random_str(size=13):
"""
    Generates a random string of the given size
Args:
size (int): number of random characters to generate
Returns:
str : string of random characters of given size
"""
chars = string.ascii_lowercase + string.digits
return "".join(random.choice(chars) for _ in range(size))
def run_async(command):
"""
Run command locally and return without waiting for completion
Args:
command (str): The command to run.
Returns:
An open descriptor to be used by the calling function.
Example:
command = 'oc delete pvc pvc1'
proc = run_async(command)
ret, out, err = proc.async_communicate()
"""
log.info(f"Executing command: {command}")
popen_obj = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
encoding="utf-8",
)
def async_communicate():
"""
Wait for command to complete and fetch the result
Returns:
retcode, stdout, stderr of the command
"""
stdout, stderr = popen_obj.communicate()
retcode = popen_obj.returncode
return retcode, stdout, stderr
popen_obj.async_communicate = async_communicate
return popen_obj
def is_cluster_running(cluster_path):
from ocs_ci.ocs.openshift_ops import OCP
return config.RUN["cli_params"].get("cluster_path") and OCP.set_kubeconfig(
os.path.join(cluster_path, config.RUN.get("kubeconfig_location"))
)
def decompose_html_attributes(soup, attributes):
"""
Decomposes the given html attributes
Args:
soup (obj): BeautifulSoup object
attributes (list): attributes to decompose
Returns: None
"""
for attribute in attributes:
tg = soup.find_all(attrs={"class": attribute})
for each in tg:
each.decompose()
def parse_html_for_email(soup):
"""
Parses the html and filters out the unnecessary data/tags/attributes
for email reporting
Args:
soup (obj): BeautifulSoup object
"""
attributes_to_decompose = ["extra"]
if not config.RUN.get("logs_url"):
attributes_to_decompose.append("col-links")
decompose_html_attributes(soup, attributes_to_decompose)
soup.find(id="not-found-message").decompose()
if not config.RUN.get("logs_url"):
for tr in soup.find_all("tr"):
for th in tr.find_all("th"):
if "Links" in th.text:
th.decompose()
for p in soup.find_all("p"):
if "(Un)check the boxes to filter the results." in p.text:
p.decompose()
if "pytest-html" in p.text:
data = p.text.split("by")[0]
p.string = data
for ip in soup.find_all("input"):
if not ip.has_attr("disabled"):
ip["disabled"] = "true"
for td in soup.find_all("td"):
if "pytest" in td.text or "html" in td.text:
data = td.text.replace("&apos", "")
td.string = data
main_header = soup.find("h1")
main_header.string.replace_with("OCS-CI RESULTS")
def add_squad_analysis_to_email(session, soup):
"""
Add squad analysis to the html test results used in email reporting
Args:
session (obj): Pytest session object
soup (obj): BeautifulSoup object of HTML Report data
"""
failed = {}
skipped = {}
# sort out failed and skipped test cases to failed and skipped dicts
for result in session.results.values():
if result.failed or result.skipped:
unassigned = True
for squad, res in constants.SQUADS.items():
for item in res:
if item in result.nodeid:
if result.failed:
if squad not in failed:
failed[squad] = []
failed[squad].append(result.nodeid)
unassigned = False
if result.skipped:
if squad not in skipped:
skipped[squad] = []
try:
skipped_message = result.longrepr[2][8:]
except TypeError:
skipped_message = "--unknown--"
skipped[squad].append((result.nodeid, skipped_message))
unassigned = False
if unassigned:
if result.failed:
if "UNASSIGNED" not in failed:
failed["UNASSIGNED"] = []
failed["UNASSIGNED"].append(result.nodeid)
if result.skipped:
if "UNASSIGNED" not in skipped:
skipped["UNASSIGNED"] = []
try:
skipped_message = result.longrepr[2][8:]
except TypeError:
skipped_message = "--unknown--"
skipped["UNASSIGNED"].append((result.nodeid, skipped_message))
# no failed or skipped tests - exit the function
if not failed and not skipped:
return
# add CSS for the Squad Analysis report
style = soup.find("style")
    # squad names in constants.SQUADS double as CSS color names
style.string += "\n".join(
[
f"h4.squad-{color.lower()} {{\n color: {color.lower()};\n}}"
for color in constants.SQUADS
]
)
# few additional styles
style.string += """
.squad-analysis {
color: black;
font-family: monospace;
background-color: #eee;
padding: 5px;
margin-top: 10px;
}
.squad-analysis h2 {
margin: 0px;
}
.squad-analysis h3 {
margin: 0px;
margin-top: 10px;
}
.squad-analysis h4 {
margin: 0px;
}
.squad-analysis ul {
margin: 0px;
}
.squad-analysis ul li em {
margin-left: 1em;
}
.squad-unassigned {
background-color: #FFBA88;
}
h4.squad-yellow {
color: black;
background-color: yellow;
display: inline;
}
"""
# prepare place for the Squad Analysis in the email
squad_analysis_div = soup.new_tag("div")
squad_analysis_div["class"] = "squad-analysis"
main_header = soup.find("h1")
main_header.insert_after(squad_analysis_div)
failed_h2_tag = soup.new_tag("h2")
failed_h2_tag.string = "Squad Analysis - please analyze:"
squad_analysis_div.append(failed_h2_tag)
if failed:
        # print failed testcases per squad
failed_div_tag = soup.new_tag("div")
squad_analysis_div.append(failed_div_tag)
failed_h3_tag = soup.new_tag("h3")
failed_h3_tag.string = "Failures:"
failed_div_tag.append(failed_h3_tag)
for squad in failed:
failed_h4_tag = soup.new_tag("h4")
failed_h4_tag.string = f"{squad} squad"
failed_h4_tag["class"] = f"squad-{squad.lower()}"
failed_div_tag.append(failed_h4_tag)
failed_ul_tag = soup.new_tag("ul")
failed_ul_tag["class"] = f"squad-{squad.lower()}"
failed_div_tag.append(failed_ul_tag)
for test in failed[squad]:
failed_li_tag = soup.new_tag("li")
failed_li_tag.string = test
failed_ul_tag.append(failed_li_tag)
if skipped:
        # print skipped testcases with reason per squad
skips_div_tag = soup.new_tag("div")
squad_analysis_div.append(skips_div_tag)
skips_h3_tag = soup.new_tag("h3")
skips_h3_tag.string = "Skips:"
skips_div_tag.append(skips_h3_tag)
for squad in skipped:
skips_h4_tag = soup.new_tag("h4")
skips_h4_tag.string = f"{squad} squad"
skips_h4_tag["class"] = f"squad-{squad.lower()}"
skips_div_tag.append(skips_h4_tag)
skips_ul_tag = soup.new_tag("ul")
skips_ul_tag["class"] = f"squad-{squad.lower()}"
skips_div_tag.append(skips_ul_tag)
for test in skipped[squad]:
skips_li_tag = soup.new_tag("li")
skips_test_span_tag = soup.new_tag("span")
skips_test_span_tag.string = test[0]
skips_li_tag.append(skips_test_span_tag)
skips_li_tag.append(soup.new_tag("br"))
skips_reason_em_tag = soup.new_tag("em")
skips_reason_em_tag.string = f"Reason: {test[1]}"
skips_li_tag.append(skips_reason_em_tag)
skips_ul_tag.append(skips_li_tag)
def move_summary_to_top(soup):
"""
    Move summary to the top of the email report
"""
summary = []
summary.append(soup.find("h2", text="Summary"))
for tag in summary[0].next_siblings:
if tag.name == "h2":
break
else:
summary.append(tag)
for tag in summary:
tag.extract()
main_header = soup.find("h1")
# because we are inserting the tags just after the header one by one, we
# have to insert them in reverse order
summary.reverse()
for tag in summary:
main_header.insert_after(tag)
def email_reports(session):
"""
Email results of test run
"""
# calculate percentage pass
# reporter = session.config.pluginmanager.get_plugin("terminalreporter")
# passed = len(reporter.stats.get("passed", []))
# failed = len(reporter.stats.get("failed", []))
# error = len(reporter.stats.get("error", []))
# total = passed + failed + error
# percentage_passed = (passed / total) * 100
try:
build_id = get_ocs_build_number()
except Exception:
build_id = ""
log.exception("Getting OCS operator build number failed!")
build_str = f"BUILD ID: {build_id} " if build_id else ""
mailids = config.RUN["cli_params"]["email"]
    recipients = mailids.split(",")
sender = "ocs-ci@redhat.com"
msg = MIMEMultipart("alternative")
msg["Subject"] = (
f"ocs-ci results for {get_testrun_name()} "
f"({build_str}"
f"RUN ID: {config.RUN["run_id"]}) "
# f"Passed: {percentage_passed:.0f}%"
)
msg["From"] = sender
msg["To"] = ", ".join(recipients)
html = config.RUN["cli_params"]["--html"]
with open(os.path.expanduser(html)) as fd:
html_data = fd.read()
soup = BeautifulSoup(html_data, "html.parser")
parse_html_for_email(soup)
if config.RUN["cli_params"].get("squad_analysis"):
add_squad_analysis_to_email(session, soup)
move_summary_to_top(soup)
part1 = MIMEText(soup, "html")
msg.attach(part1)
try:
s = smtplib.SMTP(config.REPORTING["email"]["smtp_server"])
s.sendmail(sender, recipients, msg.as_string())
s.quit()
log.info(f"Results have been emailed to {recipients}")
except Exception:
log.exception("Sending email with results failed!")
def get_cluster_version_info():
"""
Gets the complete cluster version information
Returns:
dict: cluster version information
"""
# importing here to avoid circular imports
from ocs_ci.ocs.ocp import OCP
ocp = OCP(kind="clusterversion")
cluster_version_info = ocp.get("version")
return cluster_version_info
def get_ocs_build_number():
"""
Gets the build number for ocs operator
Return:
str: build number for ocs operator version
"""
# Importing here to avoid circular dependency
from ocs_ci.ocs.resources.csv import get_csvs_start_with_prefix
from ocs_ci.ocs.resources.catalog_source import CatalogSource
from ocs_ci.ocs.resources.packagemanifest import get_selector_for_ocs_operator
build_num = ""
if (
version_module.get_semantic_ocs_version_from_config()
>= version_module.VERSION_4_9
):
operator_name = defaults.ODF_OPERATOR_NAME
else:
operator_name = defaults.OCS_OPERATOR_NAME
ocs_csvs = get_csvs_start_with_prefix(
operator_name,
defaults.ROOK_CLUSTER_NAMESPACE,
)
try:
ocs_csv = ocs_csvs[0]
csv_labels = ocs_csv["metadata"]["labels"]
if "full_version" in csv_labels:
return csv_labels["full_version"]
build_num = ocs_csv["spec"]["version"]
operator_selector = get_selector_for_ocs_operator()
        # This is a temporary solution for getting the build id from the registry
        # image, because the build ID is currently missing from the CSV. If a catalog
        # source with our internal label exists, we will get the build id from the tag
        # of the image in the catalog source. Boris is working on a better way to
        # populate the internal build version in the CSV.
if operator_selector:
catalog_source = CatalogSource(
resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
namespace=constants.MARKETPLACE_NAMESPACE,
selector=operator_selector,
)
cs_data = catalog_source.get()["items"][0]
cs_image = cs_data["spec"]["image"]
image_tag = cs_image.split(":")[1]
if "-" in image_tag:
build_id = image_tag.split("-")[1]
build_num += f"-{build_id}"
except (IndexError, AttributeError, CommandFailed, KeyError):
log.exception("No version info found for OCS operator")
return build_num
def get_cluster_version():
"""
Gets the cluster version
Returns:
str: cluster version
"""
return get_cluster_version_info()["status"]["desired"]["version"]
def get_cluster_image():
"""
Gets the cluster image
Returns:
str: cluster image
"""
return get_cluster_version_info()["status"]["desired"]["image"]
def get_ceph_version():
"""
Gets the ceph version
Returns:
str: ceph version
"""
# importing here to avoid circular imports
from ocs_ci.ocs.resources import pod
ct_pod = pod.get_ceph_tools_pod()
ceph_version = ct_pod.exec_ceph_cmd("ceph version")
return re.split(r"ceph version ", ceph_version["version"])[1]
def get_rook_version():
"""
Gets the rook version
Returns:
str: rook version
"""
# importing here to avoid circular imports
from ocs_ci.ocs.resources import pod
ct_pod = pod.get_ceph_tools_pod()
rook_versions = ct_pod.exec_ceph_cmd("rook version", format="")
return rook_versions["rook"]
def get_csi_versions():
"""
Gets the CSI related version information
Returns:
dict: CSI related version information
"""
csi_versions = {}
# importing here to avoid circular imports
from ocs_ci.ocs.ocp import OCP
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
)
csi_provisioners = ["csi-cephfsplugin-provisioner", "csi-rbdplugin-provisioner"]
for provisioner in csi_provisioners:
csi_provisioner_pod = run_cmd(
f"oc -n {config.ENV_DATA["cluster_namespace"]} get pod -l "
f"'app={provisioner}' -o jsonpath='{{.items[0].metadata.name}}'"
)
desc = ocp_pod_obj.get(csi_provisioner_pod)
for container in desc["spec"]["containers"]:
name = container["name"]
version = container["image"].split("/")[-1].split(":")[1]
csi_versions[name] = version
return csi_versions
def get_ocp_version(seperator=None):
"""
Get current ocp version
Args:
        seperator (str): String that would separate major and
            minor version numbers
Returns:
string : If seperator is 'None', version string will be returned as is
eg: '4.2', '4.3'.
If seperator is provided then '.' in the version string would be
replaced by seperator and resulting string will be returned.
eg: If seperator is '_' then string returned would be '4_2'
"""
char = seperator if seperator else "."
if config.ENV_DATA.get("skip_ocp_deployment"):
raw_version = json.loads(run_cmd("oc version -o json"))["openshiftVersion"]
else:
raw_version = config.DEPLOYMENT["installer_version"]
version = Version.coerce(raw_version)
return char.join([str(version.major), str(version.minor)])
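# Illustrative sketch (not part of the original module): formatting of the returned
# string for a hypothetical installer_version of "4.3.12" (with skip_ocp_deployment
# not set, so the configured installer version is used).
#
#   get_ocp_version()               # -> "4.3"
#   get_ocp_version(seperator="_")  # -> "4_3"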
def get_running_ocp_version(separator=None):
"""
Get current running ocp version
Args:
separator (str): String that would separate major and
minor version numbers
Returns:
string : If separator is 'None', version string will be returned as is
eg: '4.2', '4.3'.
If separator is provided then '.' in the version string would be
replaced by separator and resulting string will be returned.
eg: If separator is '_' then string returned would be '4_2'
"""
char = separator if separator else "."
namespace = config.ENV_DATA["cluster_namespace"]
try:
        # if the cluster exists, this part will be run
results = run_cmd(f"oc get clusterversion -n {namespace} -o yaml")
build = yaml.safe_load(results)["items"][0]["status"]["desired"]["version"]
return char.join(build.split(".")[0:2])
except Exception:
        # this part will return the version from the config file in case
        # the cluster does not exist.
return get_ocp_version(seperator=char)
def get_ocp_repo():
"""
Get ocp repo file, name will be generated dynamically based on
ocp version.
Returns:
string : Path to ocp repo file
"""
    repo_path = os.path.join(constants.REPO_DIR, f"ocp_{get_ocp_version('_')}.repo")
path = os.path.expanduser(repo_path)
assert os.path.exists(path), f"OCP repo file {path} doesn't exists!"
return path
def parse_pgsql_logs(data):
"""
Parse the pgsql benchmark data from ripsaw and return
the data in list format
Args:
data (str): log data from pgsql bench run
Returns:
        list_data (list): data digestible by scripts with below format
e.g.:
[
{1: {'num_clients': '2','num_threads': '7','latency_avg': '7',
'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
{2: {'num_clients': '2','num_threads': '7','latency_avg': '7',
'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
{3: {'num_clients': '2','num_threads': '7','latency_avg': '7',
'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
]
where keys{1,2,3} are run-IDs
"""
match = data.split("PGBench Results")
list_data = []
for i in range(2, len(match)):
log = "".join(match[i].split("\n"))
pgsql_data = dict()
pgsql_data[i - 1] = {}
clients = re.search(r"scaling_factor\':\s+(\d+),", log)
if clients and clients.group(1):
pgsql_data[i - 1]["scaling_factor"] = clients.group(1)
clients = re.search(r"number_of_clients\':\s+(\d+),", log)
if clients and clients.group(1):
pgsql_data[i - 1]["num_clients"] = clients.group(1)
threads = re.search(r"number_of_threads\':\s+(\d+)", log)
if threads and threads.group(1):
pgsql_data[i - 1]["num_threads"] = threads.group(1)
clients = re.search(r"number_of_transactions_per_client\':\s+(\d+),", log)
if clients and clients.group(1):
pgsql_data[i - 1]["number_of_transactions_per_client"] = clients.group(1)
clients = re.search(
r"number_of_transactions_actually_processed\':\s+(\d+),", log
)
if clients and clients.group(1):
pgsql_data[i - 1][
"number_of_transactions_actually_processed"
] = clients.group(1)
lat_avg = re.search(r"latency_average_ms\':\s+(\d+)", log)
if lat_avg and lat_avg.group(1):
pgsql_data[i - 1]["latency_avg"] = lat_avg.group(1)
lat_stddev = re.search(r"latency_stddev_ms\':\s+(\d+)", log)
if lat_stddev and lat_stddev.group(1):
pgsql_data[i - 1]["lat_stddev"] = lat_stddev.group(1)
tps_incl = re.search(r"tps_incl_con_est\':\s+(\w+)", log)
if tps_incl and tps_incl.group(1):
pgsql_data[i - 1]["tps_incl"] = tps_incl.group(1)
tps_excl = re.search(r"tps_excl_con_est\':\s+(\w+)", log)
if tps_excl and tps_excl.group(1):
pgsql_data[i - 1]["tps_excl"] = tps_excl.group(1)
list_data.append(pgsql_data)
return list_data
def create_directory_path(path):
"""
    Creates the directory if the path doesn't exist
"""
path = os.path.expanduser(path)
if not os.path.exists(path):
os.makedirs(path)
else:
log.debug(f"{path} already exists")
def ocsci_log_path():
"""
Construct the full path for the log directory.
Returns:
str: full path for ocs-ci log directory
"""
return os.path.expanduser(
os.path.join(config.RUN["log_dir"], f"ocs-ci-logs-{config.RUN["run_id"]}")
)
def get_testrun_name():
"""
Prepare testrun ID for Polarion (and other reports).
Returns:
str: String containing testrun name
"""
markers = config.RUN["cli_params"].get("-m", "").replace(" ", "-")
us_ds = config.REPORTING.get("us_ds")
if us_ds.upper() == "US":
us_ds = "Upstream"
elif us_ds.upper() == "DS":
us_ds = "Downstream"
ocp_version = ".".join(config.DEPLOYMENT.get("installer_version").split(".")[:-2])
ocp_version_string = f"OCP{ocp_version}" if ocp_version else ""
ocs_version = config.ENV_DATA.get("ocs_version")
ocs_version_string = f"OCS{ocs_version}" if ocs_version else ""
worker_os = "RHEL" if config.ENV_DATA.get("rhel_workers") else "RHCOS"
build_user = None
baremetal_config = None
if config.ENV_DATA.get("mon_type"):
baremetal_config = (
f"MON {config.ENV_DATA.get("mon_type").upper()} "
f"OSD {config.ENV_DATA.get("osd_type").upper()}"
)
lso_deployment = ""
if not baremetal_config and config.DEPLOYMENT.get("local_storage"):
lso_deployment = "LSO "
if config.REPORTING.get("display_name"):
testrun_name = config.REPORTING.get("display_name")
else:
build_user = config.REPORTING.get("build_user")
testrun_name = (
f"{config.ENV_DATA.get("platform", "").upper()} "
f"{config.ENV_DATA.get("deployment_type", "").upper()} "
)
if baremetal_config:
testrun_name = f"LSO {baremetal_config} {testrun_name}"
testrun_name = (
f"{testrun_name}"
f"{get_az_count()}AZ "
f"{worker_os} "
f"{lso_deployment}"
f"{config.ENV_DATA.get("master_replicas")}M "
f"{config.ENV_DATA.get("worker_replicas")}W "
f"{markers}"
)
testrun_name = (
f"{ocs_version_string} {us_ds} {ocp_version_string} " f"{testrun_name}"
)
if build_user:
testrun_name = f"{build_user} {testrun_name}"
# replace invalid character(s) by '-'
testrun_name = testrun_name.translate(
str.maketrans({key: "-" for key in """ \\/.:*"<>|~!@#$?%^&'*(){}+`,=\t"""})
)
log.info("testrun_name: %s", testrun_name)
return testrun_name
def get_az_count():
"""
Using a number of different configuration attributes, determine how many
availability zones the cluster is configured for.
Returns:
int: number of availability zones
"""
if config.ENV_DATA.get("availability_zone_count"):
return int(config.ENV_DATA.get("availability_zone_count"))
elif config.ENV_DATA.get("worker_availability_zones"):
return len(config.ENV_DATA.get("worker_availability_zones"))
elif config.ENV_DATA.get("platform") == "vsphere":
return 1
else:
return 1
def ceph_health_check(namespace=None, tries=20, delay=30):
"""
Args:
namespace (str): Namespace of OCS
(default: config.ENV_DATA['cluster_namespace'])
tries (int): Number of retries
delay (int): Delay in seconds between retries
Returns:
bool: ceph_health_check_base return value with default retries of 20,
delay of 30 seconds if default values are not changed via args.
"""
if config.ENV_DATA["platform"].lower() == constants.IBM_POWER_PLATFORM:
delay = 60
return retry(
(CephHealthException, CommandFailed, subprocess.TimeoutExpired),
tries=tries,
delay=delay,
backoff=1,
)(ceph_health_check_base)(namespace)
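# Illustrative sketch (not part of the original module): the retry() wrapper above
# simply re-runs ceph_health_check_base, so a hypothetical call like the one below
# keeps polling `ceph health` every `delay` seconds for up to `tries` attempts.
#
#   ceph_health_check(namespace="openshift-storage", tries=10, delay=15)
#   # -> True once the cluster reports HEALTH_OK; otherwise the last exception is
#   #    raised after the retries are exhausted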
def ceph_health_check_base(namespace=None):
"""
Exec `ceph health` cmd on tools pod to determine health of cluster.
Args:
namespace (str): Namespace of OCS
(default: config.ENV_DATA['cluster_namespace'])
Raises:
CephHealthException: If the ceph health returned is not HEALTH_OK
CommandFailed: If the command to retrieve the tools pod name or the
command to get ceph health returns a non-zero exit code
Returns:
boolean: True if HEALTH_OK
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
run_cmd(
f"oc wait --for condition=ready pod "
f"-l app=rook-ceph-tools "
f"-n {namespace} "
f"--timeout=120s"
)
tools_pod = run_cmd(
f"oc -n {namespace} get pod -l 'app=rook-ceph-tools' "
f"-o jsonpath='{{.items[0].metadata.name}}'",
timeout=60,
)
health = run_cmd(f"oc -n {namespace} exec {tools_pod} -- ceph health")
if health.strip() == "HEALTH_OK":
log.info("Ceph cluster health is HEALTH_OK.")
return True
else:
raise CephHealthException(f"Ceph cluster health is not OK. Health: {health}")
def get_rook_repo(branch="master", to_checkout=None):
"""
Clone and checkout the rook repository to specific branch/commit.
Args:
branch (str): Branch name to checkout
to_checkout (str): Commit id or tag to checkout
"""
cwd = constants.ROOK_REPO_DIR
if not os.path.isdir(cwd):
log.info(f"Cloning rook repository into {cwd}.")
run_cmd(f"git clone {constants.ROOK_REPOSITORY} {cwd}")
else:
log.info(
f"The rook directory {cwd} already exists, ocs-ci will skip the "
f"clone of rook repository."
)
log.info("Fetching latest changes from rook repository.")
run_cmd("git fetch --all", cwd=cwd)
log.info(f"Checkout rook repository to specific branch: {branch}")
run_cmd(f"git checkout {branch}", cwd=cwd)
log.info(f"Reset branch: {branch} with latest changes")
run_cmd(f"git reset --hard origin/{branch}", cwd=cwd)
if to_checkout:
run_cmd(f"git checkout {to_checkout}", cwd=cwd)
def clone_repo(url, location, branch="master", to_checkout=None):
"""
Clone a repository or checkout latest changes if it already exists at
specified location.
Args:
url (str): location of the repository to clone
location (str): path where the repository will be cloned to
branch (str): branch name to checkout
to_checkout (str): commit id or tag to checkout
"""
if not os.path.isdir(location):
log.info("Cloning repository into %s", location)
run_cmd(f"git clone {url} {location}")
else:
log.info("Repository already cloned at %s, skipping clone", location)
log.info("Fetching latest changes from repository")
run_cmd("git fetch --all", cwd=location)
log.info("Checking out repository to specific branch: %s", branch)
run_cmd(f"git checkout {branch}", cwd=location)
log.info("Reset branch: %s with latest changes", branch)
run_cmd(f"git reset --hard origin/{branch}", cwd=location)
if to_checkout:
run_cmd(f"git checkout {to_checkout}", cwd=location)
def get_latest_ds_olm_tag(upgrade=False, latest_tag=None):
"""
This function returns latest tag of OCS downstream registry or one before
latest if upgrade parameter is True
Args:
upgrade (str): If True then it returns one version of the build before
the latest.
latest_tag (str): Tag of the latest build. If not specified
config.DEPLOYMENT['default_latest_tag'] or 'latest' will be used.
Returns:
str: latest tag for downstream image from quay registry
Raises:
TagNotFoundException: In case no tag found
"""
latest_tag = latest_tag or config.DEPLOYMENT.get("default_latest_tag", "latest")
tags = get_ocs_olm_operator_tags()
latest_image = None
ocs_version = config.ENV_DATA["ocs_version"]
upgrade_ocs_version = config.UPGRADE.get("upgrade_ocs_version")
use_rc_build = config.UPGRADE.get("use_rc_build")
previous_rc_build = config.UPGRADE.get("previous_rc_build")
upgrade_version_change = upgrade_ocs_version and ocs_version != upgrade_ocs_version
if upgrade and use_rc_build and previous_rc_build and not upgrade_version_change:
latest_tag = previous_rc_build
if upgrade_version_change:
upgrade = False
for tag in tags:
if tag["name"] == latest_tag:
latest_image = tag["manifest_digest"]
break
if not latest_image:
raise TagNotFoundException("Couldn't find latest tag!")
latest_tag_found = False
for tag in tags:
if not upgrade:
if (
not any(t in tag["name"] for t in constants.LATEST_TAGS)
and tag["manifest_digest"] == latest_image
):
return tag["name"]
if upgrade:
if not latest_tag_found and tag["name"] == latest_tag:
latest_tag_found = True
continue
if not latest_tag_found:
continue
if (
not any(t in tag["name"] for t in constants.LATEST_TAGS)
and tag["manifest_digest"] != latest_image
and ocs_version in tag["name"]
):
if config.UPGRADE.get("use_rc_build") and "rc" not in tag["name"]:
continue
return tag["name"]
raise TagNotFoundException("Couldn't find any desired tag!")
def get_next_version_available_for_upgrade(current_tag):
"""
    This function returns the tag built after the given current_tag
Args:
current_tag (str): Current build tag from which to search the next one
build tag.
Returns:
str: tag for downstream image from quay registry built after
the current_tag.
Raises:
TagNotFoundException: In case no tag suitable for upgrade found
"""
tags = get_ocs_olm_operator_tags()
if any(t in current_tag for t in constants.LATEST_TAGS):
return current_tag
current_tag_index = None
for index, tag in enumerate(tags):
if tag["name"] == current_tag:
if index < 2:
raise TagNotFoundException("Couldn't find tag for upgrade!")
current_tag_index = index
break
sliced_reversed_tags = tags[:current_tag_index]
sliced_reversed_tags.reverse()
ocs_version = config.ENV_DATA["ocs_version"]
for tag in sliced_reversed_tags:
if (
not any(t in tag["name"] for t in constants.LATEST_TAGS)
and ocs_version in tag["name"]
):
if config.UPGRADE.get("use_rc_build") and "rc" not in tag["name"]:
continue
return tag["name"]
raise TagNotFoundException("Couldn't find any tag!")
def load_auth_config():
"""
Load the authentication config YAML from /data/auth.yaml
Raises:
FileNotFoundError: if the auth config is not found
Returns:
        dict: A dictionary representing the YAML file
"""
log.info("Retrieving the authentication config dictionary")
auth_file = os.path.join(constants.TOP_DIR, "data", constants.AUTHYAML)
try:
with open(auth_file) as f:
return yaml.safe_load(f)
except FileNotFoundError:
log.warning(
f"Unable to find the authentication configuration at {auth_file}, "
f"please refer to the getting started guide ({constants.AUTH_CONFIG_DOCS})"
)
return {}
def get_ocs_olm_operator_tags(limit=100):
"""
Query the OCS OLM Operator repo and retrieve a list of tags. Since we are limited
to 100 tags per page, we end up making several API calls and combining the results
into a single list of tags.
Args:
limit: the number of tags to limit the request to
Raises:
KeyError: if the auth config isn't setup properly
requests.RequestException: if the response return code is not ok
Returns:
list: OCS OLM Operator tags
"""
try:
quay_access_token = load_auth_config()["quay"]["access_token"]
except (KeyError, TypeError):
log.error(
"Unable to retrieve the access token for quay, please refer to "
f"the getting started guide ({constants.AUTH_CONFIG_DOCS}) "
"to properly setup your authentication configuration"
)
raise
headers = {"Authorization": f"Bearer {quay_access_token}"}
image = "ocs-registry"
try:
ocs_version = float(config.ENV_DATA.get("ocs_version"))
if ocs_version < 4.5:
image = "ocs-olm-operator"
except (ValueError, TypeError):
log.warning("Invalid ocs_version given, defaulting to ocs-registry image")
pass
all_tags = []
page = 1
while True:
log.info(f"Retrieving OCS OLM Operator tags (limit {limit}, page {page})")
resp = requests.get(
constants.OPERATOR_CS_QUAY_API_QUERY.format(
tag_limit=limit,
image=image,
page=page,
),
headers=headers,
)
if not resp.ok:
raise requests.RequestException(resp.json())
tags = resp.json()["tags"]
if len(tags) == 0:
log.info("No more tags to retrieve")
break
log.debug(tags)
all_tags.extend(tags)
page += 1
return all_tags
def check_if_executable_in_path(exec_name):
"""
Checks whether an executable can be found in the $PATH
Args:
exec_name: Name of executable to look for
Returns:
Boolean: Whether the executable was found
"""
return which(exec_name) is not None
def upload_file(server, localpath, remotepath, user=None, password=None, key_file=None):
"""
Upload a file to remote server
Args:
server (str): Name of the server to upload
localpath (str): Local file to upload
remotepath (str): Target path on the remote server. filename should be included
user (str): User to use for the remote connection
"""
if not user:
user = "root"
try:
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy())
if password:
ssh.connect(hostname=server, username=user, password=password)
else:
log.info(key_file)
ssh.connect(hostname=server, username=user, key_filename=key_file)
sftp = ssh.open_sftp()
log.info(f"uploading {localpath} to {user}@{server}:{remotepath}")
sftp.put(localpath, remotepath)
sftp.close()
ssh.close()
except AuthenticationException as authException:
log.error(f"Authentication failed: {authException}")
raise authException
except SSHException as sshException:
log.error(f"SSH connection failed: {sshException}")
raise sshException
def read_file_as_str(filepath):
"""
Reads the file content
Args:
filepath (str): File to read
Returns:
str : File contents in string
"""
with open(rf"{filepath}") as fd:
content = fd.read()
return content
def replace_content_in_file(file, old, new, match_and_replace_line=False):
"""
Replaces contents in file, if old value is not found, it adds
new value to the file
Args:
file (str): Name of the file in which contents will be replaced
old (str): Data to search for
new (str): Data to replace the old value
match_and_replace_line (bool): If True, it will match a line if
`old` pattern is found in the line. The whole line will be replaced
with `new` content.
Otherwise it will replace only `old` string with `new` string but
the rest of the line will be intact. This is the default option.
"""
# Read the file
with open(rf"{file}", "r") as fd:
file_data = [line.rstrip("\n") for line in fd.readlines()]
if match_and_replace_line:
# Replace the whole line with `new` string if the line contains `old`
# string pattern.
file_data = [new if old in line else line for line in file_data]
else:
# Replace the old string by new
file_data = [
line.replace(old, new) if old in line else line for line in file_data
]
updated_data = [line for line in file_data if new in line]
# In case the old pattern wasn't found it will be added as first line
if not updated_data:
file_data.insert(0, new)
file_data = [f"{line}\n" for line in file_data]
# Write the file out again
with open(rf"{file}", "w") as fd:
fd.writelines(file_data)
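# Illustrative sketch (not part of the original module): both replacement modes on a
# hypothetical config file that contains the line "loglevel = info".
#
#   replace_content_in_file("app.conf", "info", "debug")
#   # -> the line becomes "loglevel = debug" (only the matched substring changes)
#
#   replace_content_in_file(
#       "app.conf", "loglevel", "loglevel = debug", match_and_replace_line=True
#   )
#   # -> the whole matching line is replaced; if nothing matched, the new value
#   #    would be inserted as the first line of the file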
@retry((CommandFailed), tries=100, delay=10, backoff=1)
def wait_for_co(operator):
"""
    Waits for the ClusterOperator to be created
Args:
operator (str): Name of the ClusterOperator
"""
from ocs_ci.ocs.ocp import OCP
ocp = OCP(kind="ClusterOperator")
ocp.get(operator)
def censor_values(data_to_censor):
"""
    This function censors string and numeric values in a dictionary based on
keys that match pattern defined in config_keys_patterns_to_censor in
constants. It is performed recursively for nested dictionaries.
Args:
data_to_censor (dict): Data to censor.
Returns:
dict: filtered data
"""
for key in data_to_censor:
if isinstance(data_to_censor[key], dict):
censor_values(data_to_censor[key])
elif isinstance(data_to_censor[key], (str, int, float)):
for pattern in constants.config_keys_patterns_to_censor:
if pattern in key.lower():
data_to_censor[key] = "*" * 5
return data_to_censor
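# Illustrative sketch (not part of the original module), assuming "password" is one
# of the patterns in constants.config_keys_patterns_to_censor:
#
#   data = {"AUTH": {"registry_password": "s3cr3t"}, "RUN": {"log_dir": "/tmp"}}
#   censor_values(data)
#   # -> {"AUTH": {"registry_password": "*****"}, "RUN": {"log_dir": "/tmp"}}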
def dump_config_to_file(file_path):
"""
Dump the config to the yaml file with censored secret values.
Args:
file_path (str): Path to file where to write the configuration.
"""
config_copy = deepcopy(config.to_dict())
censor_values(config_copy)
with open(file_path, "w+") as fs:
yaml.safe_dump(config_copy, fs)
def create_rhelpod(namespace, pod_name, timeout=300):
"""
Creates the RHEL pod
Args:
namespace (str): Namespace to create RHEL pod
pod_name (str): Pod name
timeout (int): wait time for RHEL pod to be in Running state
Returns:
pod: Pod instance for RHEL
"""
# importing here to avoid dependencies
from ocs_ci.helpers import helpers
rhelpod_obj = helpers.create_pod(
namespace=namespace,
pod_name=pod_name,
pod_dict_path=constants.RHEL_7_7_POD_YAML,
)
helpers.wait_for_resource_state(rhelpod_obj, constants.STATUS_RUNNING, timeout)
return rhelpod_obj
def check_timeout_reached(start_time, timeout, err_msg=None):
"""
Check if timeout reached and if so raise the exception.
Args:
        start_time (time): Start time of the operation.
timeout (int): Timeout in seconds.
err_msg (str): Error message for the exception.
Raises:
TimeoutException: In case the timeout reached.
"""
msg = f"Timeout {timeout} reached!"
if err_msg:
msg += " Error: {err_msg}"
if timeout < (time.time() - start_time):
raise TimeoutException(msg)
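# Illustrative sketch (not part of the original module): guarding a polling loop with
# check_timeout_reached. wait_for_condition stands in for any hypothetical check.
#
#   start_time = time.time()
#   while not wait_for_condition():
#       check_timeout_reached(start_time, timeout=120, err_msg="condition not met")
#       time.sleep(5)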
def convert_yaml2tfvars(yaml):
"""
Converts yaml file to tfvars. It creates the tfvars with the
same filename in the required format which is used for deployment.
Args:
yaml (str): File path to yaml
Returns:
str: File path to tfvars
"""
# importing here to avoid dependencies
from ocs_ci.utility.templating import load_yaml
data = load_yaml(yaml)
tfvars_file = os.path.splitext(yaml)[0]
log.debug(f"Converting {yaml} to {tfvars_file}")
with open(tfvars_file, "w+") as fd:
for key, val in data.items():
if key == "control_plane_ignition":
fd.write("control_plane_ignition = <<END_OF_MASTER_IGNITION\n")
fd.write(f"{val}\n")
fd.write("END_OF_MASTER_IGNITION\n")
continue
if key == "compute_ignition":
fd.write("compute_ignition = <<END_OF_WORKER_IGNITION\n")
fd.write(f"{val}\n")
fd.write("END_OF_WORKER_IGNITION\n")
continue
if key == "vm_dns_addresses":
fd.write(f'vm_dns_addresses = ["{val}"]\n')
continue
fd.write(key)
fd.write(" = ")
fd.write('"')
fd.write(f"{val}")
fd.write('"\n')
return tfvars_file
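# Illustrative sketch (not part of the original module): how a few hypothetical yaml
# keys end up in the generated tfvars file.
#
#   yaml input                      generated tfvars
#   cluster_id: ocp-test            cluster_id = "ocp-test"
#   vm_dns_addresses: 1.2.3.4       vm_dns_addresses = ["1.2.3.4"]
#   compute_ignition: <json blob>   compute_ignition = <<END_OF_WORKER_IGNITION
#                                   <json blob>
#                                   END_OF_WORKER_IGNITION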
def remove_keys_from_tf_variable_file(tf_file, keys):
"""
    Removes the keys from the tf file and converts it to json format
Args:
tf_file (str): path to tf file
keys (list): list of keys to remove
"""
# importing here to avoid dependencies
from ocs_ci.utility.templating import dump_data_to_json
with open(tf_file, "r") as fd:
obj = hcl2.load(fd)
for key in keys:
obj["variable"].pop(key)
dump_data_to_json(obj, f"{tf_file}.json")
os.rename(tf_file, f"{tf_file}.backup")
def get_kubeadmin_password():
filename = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["password_location"]
)
with open(filename) as f:
return f.read()
def get_infra_id(cluster_path):
"""
Get infraID from metadata.json in given cluster_path
Args:
cluster_path: path to cluster install directory
Returns:
str: metadata.json['infraID']
"""
metadata_file = os.path.join(cluster_path, "metadata.json")
with open(metadata_file) as f:
metadata = json.load(f)
return metadata["infraID"]
def get_cluster_name(cluster_path):
"""
Get clusterName from metadata.json in given cluster_path
Args:
cluster_path: path to cluster install directory
Returns:
str: metadata.json['clusterName']
"""
metadata_file = os.path.join(cluster_path, "metadata.json")
with open(metadata_file) as f:
metadata = json.load(f)
return metadata["clusterName"]
def skipif_ocp_version(expressions):
"""
This function evaluates the condition for test skip
based on expression
Args:
expressions (str OR list): condition for which we need to check,
eg: A single expression string '>=4.2' OR
A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']
Return:
'True' if test needs to be skipped else 'False'
"""
skip_this = True
ocp_version = get_running_ocp_version()
expr_list = [expressions] if isinstance(expressions, str) else expressions
for expr in expr_list:
        comparison_str = ocp_version + expr
        skip_this = skip_this and eval(comparison_str)
# skip_this will be either True or False after eval
return skip_this
def skipif_ocs_version(expressions):
"""
This function evaluates the condition for test skip
based on expression
Args:
expressions (str OR list): condition for which we need to check,
eg: A single expression string '>=4.2' OR
A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']
Return:
'True' if test needs to be skipped else 'False'
"""
expr_list = [expressions] if isinstance(expressions, str) else expressions
return any(eval(config.ENV_DATA["ocs_version"] + expr) for expr in expr_list)
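# Usage sketch (illustrative, assuming config.ENV_DATA["ocs_version"] == "4.8"):
#   skipif_ocs_version(">=4.9")           # -> False, test is not skipped
#   skipif_ocs_version(["<4.9", ">4.6"])  # -> True, any matching expression skips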
def skipif_ui_not_support(ui_test):
"""
This function evaluates the condition for ui test skip
based on ui_test expression
Args:
ui_test (str): condition for which we need to check,
Return:
'True' if test needs to be skipped else 'False'
"""
from ocs_ci.ocs.ui.views import locators
ocp_version = get_running_ocp_version()
if (
config.ENV_DATA["platform"].lower() == constants.IBMCLOUD_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
):
return True
try:
locators[ocp_version][ui_test]
except KeyError:
return True
return False
def get_ocs_version_from_image(image):
"""
Parse major.minor version from OCS image tag.
Args:
image (str): image in format url:tag
Returns
str: Version in x.y format
Raises:
ValueError: In case of the tag which we cannot parse to version.
"""
try:
        version = re.sub(r"^(latest-|stable-)+", "", image.rsplit(":", 1)[1])
version = Version.coerce(version)
return "{major}.{minor}".format(major=version.major, minor=version.minor)
except ValueError:
log.error(f"The version: {version} couldn't be parsed!")
raise
def get_available_ocp_versions(channel):
"""
Find all available OCP versions for specific channel.
Args:
channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)
Returns
list: Sorted list with OCP versions for specified channel.
"""
headers = {"Accept": "application/json"}
req = requests.get(
constants.OPENSHIFT_UPGRADE_INFO_API.format(channel=channel), headers=headers
)
data = req.json()
versions = [Version(node["version"]) for node in data["nodes"]]
versions.sort()
return versions
def get_latest_ocp_version(channel, index=-1):
"""
Find latest OCP version for specific channel.
Args:
channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)
index (int): Index to get from all available versions list
e.g. default -1 is latest version (version[-1]). If you want to get
previous version pass index -2 and so on.
Returns
str: Latest OCP version for specified channel.
"""
versions = get_available_ocp_versions(channel)
return str(versions[index])
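# Usage sketch (illustrative, requires access to the OpenShift upgrade info API;
# returned versions will vary over time):
#   get_latest_ocp_version("stable-4.9")      # -> e.g. "4.9.12" (latest)
#   get_latest_ocp_version("stable-4.9", -2)  # -> the version before the latest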
def load_config_file(config_file):
"""
Loads config file to the ocs-ci config
Args:
config_file (str): Path to yaml config file.
Raises:
FileNotFoundError: In the case the config file not found.
"""
config_file = os.path.expanduser(config_file)
assert os.path.exists(config_file), f"Config file {config_file} doesn't exist!"
with open(os.path.abspath(os.path.expanduser(config_file)), "r") as file_stream:
custom_config_data = yaml.safe_load(file_stream)
config.update(custom_config_data)
def destroy_cluster(installer, cluster_path, log_level="DEBUG"):
"""
Destroy OCP cluster specific
Args:
installer (str): The path to the installer binary
cluster_path (str): The path of the cluster
log_level (str): log level openshift-installer (default: DEBUG)
"""
destroy_cmd = (
f"{installer} destroy cluster "
f"--dir {cluster_path} "
f"--log-level {log_level}"
)
try:
# Execute destroy cluster using OpenShift installer
log.info(f"Destroying cluster defined in {cluster_path}")
run_cmd(destroy_cmd, timeout=1200)
except CommandFailed:
log.error(traceback.format_exc())
raise
except Exception:
log.error(traceback.format_exc())
def config_to_string(config):
"""
Convert ConfigParser object to string in INI format.
Args:
config (obj): ConfigParser object
Returns:
str: Config in one string
"""
strio = io.StringIO()
config.write(strio, space_around_delimiters=False)
return strio.getvalue()
class AZInfo(object):
"""
A class for getting different az numbers across calls
"""
zone_number = 0
def get_zone_number(self):
"""
Increment current zone_number and perform modulus op
to roll-on to next available number
Returns:
int: zone number index
"""
prev = AZInfo.zone_number
AZInfo.zone_number += 1
AZInfo.zone_number %= get_az_count()
return prev
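# Usage sketch (illustrative, assuming a cluster spanning 3 availability zones):
#   az = AZInfo()
#   az.get_zone_number()  # -> 0
#   az.get_zone_number()  # -> 1
#   az.get_zone_number()  # -> 2
#   az.get_zone_number()  # -> 0 again (zone_number is shared class state)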
def convert_device_size(unformatted_size, units_to_covert_to):
"""
Convert a string representing a size to an int according to the given units
to convert to
Args:
unformatted_size (str): The size to convert (i.e, '1Gi'/'100Mi')
units_to_covert_to (str): The units to convert the size to (i.e, TB/GB/MB)
Returns:
int: The converted size
"""
units = unformatted_size[-2:]
abso = int(unformatted_size[:-2])
conversion = {
"TB": {"Ti": abso, "Gi": abso / 1000, "Mi": abso / 1e6, "Ki": abso / 1e9},
"GB": {"Ti": abso * 1000, "Gi": abso, "Mi": abso / 1000, "Ki": abso / 1e6},
"MB": {"Ti": abso * 1e6, "Gi": abso * 1000, "Mi": abso, "Ki": abso / 1000},
"KB": {"Ti": abso * 1e9, "Gi": abso * 1e6, "Mi": abso * 1000, "Ki": abso},
"B": {"Ti": abso * 1e12, "Gi": abso * 1e9, "Mi": abso * 1e6, "Ki": abso * 1000},
}
return conversion[units_to_covert_to][units]
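# Usage sketch (illustrative, conversions are 1000-based as in the table above):
#   convert_device_size("100Gi", "GB")  # -> 100
#   convert_device_size("512Mi", "GB")  # -> 0.512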
def prepare_customized_pull_secret(images=None):
"""
Prepare customized pull-secret containing auth section related to given
    image(s). If image(s) are not defined or no related section is found, the
    whole content of the pull-secret will be used.
Args:
images (str, list): image (or images) to match with auth section
Returns:
NamedTemporaryFile: prepared pull-secret
"""
log.debug(f"Prepare customized pull-secret for images: {images}")
    if isinstance(images, str):
images = [images]
# load pull-secret file to pull_secret dict
pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
with open(pull_secret_path) as pull_secret_fo:
pull_secret = json.load(pull_secret_fo)
authfile_content = {"auths": {}}
# if images defined, try to find auth section related to specified images
if images:
for image in images:
# find all auths which might be related to the specified image
tmp_auths = [auth for auth in pull_secret["auths"] if auth in image]
# get the most specific auth for particular image
tmp_auths = sorted(tmp_auths, key=len, reverse=True)
if tmp_auths:
# if there is match to particular auth, prepare authfile just with the
# matching auth
auth = tmp_auths[0]
# as key use only server name, without namespace
authfile_content["auths"][auth.split("/", 1)[0]] = pull_secret["auths"][
auth
]
if not authfile_content["auths"]:
authfile_content = pull_secret
# create temporary auth file
authfile_fo = NamedTemporaryFile(mode="w", prefix="authfile_")
json.dump(authfile_content, authfile_fo)
# ensure the content will be saved into the file
authfile_fo.flush()
return authfile_fo
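# Usage sketch (illustrative, the image name is hypothetical and the local
# data/pull-secret file must exist):
#   with prepare_customized_pull_secret("quay.io/example/image:tag") as authfile:
#       exec_cmd(f"podman pull quay.io/example/image:tag --authfile {authfile.name}")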
def inspect_image(image, authfile_fo):
"""
Inspect image
Args:
image (str): image to inspect
authfile_fo (NamedTemporaryFile): pull-secret required for pulling the given image
Returns:
dict: json object of the inspected image
"""
# pull original image (to be able to inspect it)
exec_cmd(f"podman image pull {image} --authfile {authfile_fo.name}")
# inspect the image
cmd_result = exec_cmd(f"podman image inspect {image}")
image_inspect = json.loads(cmd_result.stdout)
return image_inspect
def get_image_with_digest(image):
"""
Return image with sha256 digest for usage in disconnected environment
Args:
image (str): image
Raises:
UnexpectedImage: In case the image information is unexpected
Returns:
str: image with sha256 digest specification
"""
if "@sha256:" in image:
return image
with prepare_customized_pull_secret(image) as authfile_fo:
image_inspect = inspect_image(image, authfile_fo)
# we expect, that 'Digest' will match one of the images in 'RepoDigests',
# if not, raise UnexpectedImage
for image in image_inspect[0]["RepoDigests"]:
if image_inspect[0]["Digest"] in image:
return image
else:
raise UnexpectedImage(
f"Image digest ({image_inspect[0]["Digest"]}) doesn't match with "
f"any image from RepoDigests ({image_inspect[0]["RepoDigests"]})."
)
def login_to_mirror_registry(authfile):
"""
Login to mirror registry
Args:
authfile (str): authfile (pull-secret) path
"""
# load cluster info
load_cluster_info()
mirror_registry = config.DEPLOYMENT["mirror_registry"]
mirror_registry_user = config.DEPLOYMENT["mirror_registry_user"]
mirror_registry_password = config.DEPLOYMENT["mirror_registry_password"]
login_cmd = (
f"podman login --authfile {authfile} "
f"{mirror_registry} -u {mirror_registry_user} "
f"-p {mirror_registry_password} --tls-verify=false"
)
exec_cmd(login_cmd, (mirror_registry_user, mirror_registry_password))
def mirror_image(image):
"""
Mirror image to mirror image registry.
Args:
image (str): image to be mirrored, can be defined just with name or
with full url, with or without tag or digest
Returns:
str: the mirrored image link
"""
with prepare_customized_pull_secret(image) as authfile_fo:
# login to mirror registry
login_to_mirror_registry(authfile_fo.name)
# if there is any tag specified, use it in the full image url,
# otherwise use url with digest
image_inspect = inspect_image(image, authfile_fo)
if image_inspect[0].get("RepoTags"):
orig_image_full = image_inspect[0]["RepoTags"][0]
else:
orig_image_full = image_inspect[0]["RepoDigests"][0]
# prepare mirrored image url
mirror_registry = config.DEPLOYMENT["mirror_registry"]
mirrored_image = mirror_registry + re.sub(r"^[^/]*", "", orig_image_full)
# mirror the image
log.info(
f"Mirroring image '{image}' ('{orig_image_full}') to '{mirrored_image}'"
)
exec_cmd(
f"oc image mirror --insecure --registry-config"
f" {authfile_fo.name} {orig_image_full} {mirrored_image}"
)
return mirrored_image
def update_container_with_mirrored_image(job_pod_dict):
"""
Update Job or Pod configuration dict with mirrored image (required for
disconnected installation).
Args:
job_pod_dict (dict): dictionary with Job or Pod configuration
Returns:
dict: for disconnected installation, returns updated Job or Pod dict,
for normal installation return unchanged job_pod_dict
"""
if config.DEPLOYMENT.get("disconnected"):
if "containers" in job_pod_dict["spec"]:
container = job_pod_dict["spec"]["containers"][0]
else:
container = job_pod_dict["spec"]["template"]["spec"]["containers"][0]
container["image"] = mirror_image(container["image"])
return job_pod_dict
def get_trim_mean(values, percentage=20):
"""
Get the trimmed mean of a list of values.
Explanation: This function finds the arithmetic mean of given values,
ignoring values outside the given limits.
Args:
values (list): The list of values
percentage (int): The percentage to be trimmed
Returns:
float: Trimmed mean. In case trimmed mean calculation fails,
the regular mean average is returned
"""
lower_limit = scoreatpercentile(values, percentage)
upper_limit = scoreatpercentile(values, 100 - percentage)
try:
return tmean(values, limits=(lower_limit, upper_limit))
except ValueError:
log.warning(
f"Failed to calculate the trimmed mean of {values}. The "
f"Regular mean average will be calculated instead"
)
return sum(values) / len(values)
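# Usage sketch (illustrative): with the default 20% trim the extreme values are
# excluded before averaging:
#   get_trim_mean([1, 10, 11, 12, 100])  # -> 11.0 (1 and 100 fall outside the limits)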
def set_selinux_permissions(workers=None):
"""
Workaround for #1777384 - enable container_use_cephfs on RHEL workers
Ticket: RHSTOR-787, see more details in the issue: #1151
Args:
workers (list): List of worker nodes to set selinux permissions
"""
log.info("Running WA for ticket: RHSTOR-787")
from ocs_ci.ocs import ocp
ocp_obj = ocp.OCP()
cmd = ["/usr/sbin/setsebool -P container_use_cephfs on"]
cmd_list = cmd.copy()
if not workers:
from ocs_ci.ocs.node import get_typed_worker_nodes
worker_nodes = get_typed_worker_nodes(os_id="rhel")
else:
worker_nodes = workers
for worker in worker_nodes:
node = worker.get().get("metadata").get("name") if not workers else worker
log.info(f"{node} is a RHEL based worker - applying '{cmd_list}'")
if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
retry(CommandFailed, tries=10, delay=3, backoff=2)(
ocp_obj.exec_oc_debug_cmd
)(node=node, cmd_list=cmd_list)
else:
retry(CommandFailed)(ocp_obj.exec_oc_debug_cmd)(
node=node, cmd_list=cmd_list
)
def set_registry_to_managed_state():
"""
In order to be able to deploy from stage we need to change
image registry config to Managed state.
More described in BZs:
https://bugzilla.redhat.com/show_bug.cgi?id=1806593
https://bugzilla.redhat.com/show_bug.cgi?id=1807471#c3
We need to change to managed state as described here:
https://github.com/red-hat-storage/ocs-ci/issues/1436
    This is not supposed to be deleted as a workaround, because we really
    need to do this operation for OCS deployment, as was originally done here:
https://github.com/red-hat-storage/ocs-ci/pull/1437
Currently it has to be moved here to enable CA certificate to be
properly propagated for the stage deployment as mentioned in BZ.
"""
# In RHV platform config is already set to Managed and storage pre-configured
on_prem_platform_to_exclude = [constants.RHV_PLATFORM]
platform_list_to_exclude = constants.CLOUD_PLATFORMS + on_prem_platform_to_exclude
if config.ENV_DATA["platform"] not in platform_list_to_exclude:
cluster_config = yaml.safe_load(
exec_cmd(f"oc get {constants.IMAGE_REGISTRY_CONFIG} -o yaml").stdout
)
if "emptyDir" not in cluster_config["spec"].get("storage", {}).keys():
run_cmd(
f"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p "
                f'\'{{"spec":{{"storage": {{"emptyDir":{{}}}}}}}}\''
)
if cluster_config["spec"].get("managementState") != "Managed":
run_cmd(
f"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p "
                f'\'{{"spec":{{"managementState": "Managed"}}}}\''
)
def add_stage_cert():
"""
Deploy stage certificate to the cluster.
"""
log.info("Create configmap stage-registry-config with stage CA.")
run_cmd(
f"oc -n openshift-config create configmap stage-registry-config"
f" --from-file=registry.stage.redhat.io={constants.STAGE_CA_FILE}"
)
log.info("Add stage-registry-config to additionalTrustedCA.")
additional_trusted_ca_patch = (
'{"spec":{"additionalTrustedCA":{"name":"stage-registry-config"}}}'
)
run_cmd(
f"oc patch image.config.openshift.io cluster --type=merge"
f" -p '{additional_trusted_ca_patch}'"
)
def get_terraform(version=None, bin_dir=None):
"""
Downloads the terraform binary
Args:
version (str): Version of the terraform to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
Returns:
str: Path to the terraform binary
"""
if platform.system() == "Darwin":
os_type = "darwin"
elif platform.system() == "Linux":
os_type = "linux"
else:
raise UnsupportedOSType
version = version or config.DEPLOYMENT["terraform_version"]
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
terraform_zip_file = f"terraform_{version}_{os_type}_amd64.zip"
terraform_filename = "terraform"
terraform_binary_path = os.path.join(bin_dir, terraform_filename)
log.info(f"Downloading terraform version {version}")
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = f"https://releases.hashicorp.com/terraform/{version}/" f"{terraform_zip_file}"
download_file(url, terraform_zip_file)
run_cmd(f"unzip -o {terraform_zip_file}")
delete_file(terraform_zip_file)
# return to the previous working directory
os.chdir(previous_dir)
return terraform_binary_path
def get_terraform_ignition_provider(terraform_dir, version=None):
"""
Downloads the terraform ignition provider
Args:
terraform_dir (str): Path to terraform working directory
version (str): Version of the terraform ignition provider to download
"""
version = version or constants.TERRAFORM_IGNITION_PROVIDER_VERSION
terraform_ignition_provider_zip_file = (
f"terraform-provider-ignition-{version}-linux-amd64.tar.gz"
)
terraform_ignition_provider_dir = (
f"terraform-provider-ignition-{version}-linux-amd64"
)
terraform_plugins_path = ".terraform/plugins/linux_amd64/"
log.info(f"Downloading terraform ignition proivider version {version}")
previous_dir = os.getcwd()
os.chdir(terraform_dir)
url = (
"https://github.com/community-terraform-providers/"
f"terraform-provider-ignition/releases/download/{version}/"
f"{terraform_ignition_provider_zip_file}"
)
# Download and untar
download_file(url, terraform_ignition_provider_zip_file)
run_cmd(f"tar xzf {terraform_ignition_provider_zip_file}")
# move the ignition provider binary to plugins path
create_directory_path(terraform_plugins_path)
move(
f"{terraform_ignition_provider_dir}/terraform-provider-ignition",
terraform_plugins_path,
)
# delete the downloaded files
delete_file(terraform_ignition_provider_zip_file)
delete_dir(terraform_ignition_provider_dir)
# return to the previous working directory
os.chdir(previous_dir)
def get_module_ip(terraform_state_file, module):
"""
Gets the node IP from terraform.tfstate file
Args:
terraform_state_file (str): Path to terraform state file
module (str): Module name in terraform.tfstate file
e.g: constants.LOAD_BALANCER_MODULE
Returns:
list: IP of the node
"""
ips = []
with open(terraform_state_file) as fd:
obj = json.loads(fd.read())
if config.ENV_DATA.get("folder_structure"):
resources = obj["resources"]
log.debug(f"Extracting module information for {module}")
log.debug(f"Resource in {terraform_state_file}: {resources}")
for resource in resources:
if resource.get("module") == module and resource.get("mode") == "data":
for each_resource in resource["instances"]:
resource_body = each_resource["attributes"]["body"]
ips.append(resource_body.split('"')[3])
else:
modules = obj["modules"]
target_module = module.split("_")[1]
log.debug(f"Extracting module information for {module}")
log.debug(f"Modules in {terraform_state_file}: {modules}")
for each_module in modules:
if target_module in each_module["path"]:
return each_module["outputs"]["ip_addresses"]["value"]
return ips
def set_aws_region(region=None):
"""
Exports environment variable AWS_REGION
Args:
region (str): AWS region to export
"""
log.debug("Exporting environment variable AWS_REGION")
region = region or config.ENV_DATA["region"]
os.environ["AWS_REGION"] = region
def get_system_architecture():
"""
Get output from 'uname -m' command run on first worker node.
Returns:
str: Architecture of system
"""
from ocs_ci.ocs.node import get_nodes
log.info("Checking architecture of system")
node = get_nodes(node_type=constants.WORKER_MACHINE)[0]
return node.ocp.exec_oc_debug_cmd(node.data["metadata"]["name"], ["uname -m"])
def wait_for_machineconfigpool_status(node_type, timeout=900):
"""
Check for Machineconfigpool status
Args:
        node_type (str): The node type for which to check that the
            machineconfigpool status is updated.
e.g: worker, master and all if we want to check for all nodes
timeout (int): Time in seconds to wait
"""
# importing here to avoid dependencies
from ocs_ci.ocs import ocp
node_types = [node_type]
if node_type == "all":
node_types = [f"{constants.WORKER_MACHINE}", f"{constants.MASTER_MACHINE}"]
for role in node_types:
log.info(f"Checking machineconfigpool status for {role} nodes")
ocp_obj = ocp.OCP(kind=constants.MACHINECONFIGPOOL, resource_name=role)
machine_count = ocp_obj.get()["status"]["machineCount"]
assert ocp_obj.wait_for_resource(
condition=str(machine_count),
column="READYMACHINECOUNT",
timeout=timeout,
sleep=5,
)
def configure_chrony_and_wait_for_machineconfig_status(
node_type=constants.WORKER_MACHINE, timeout=900
):
"""
Configure chrony on the nodes
Args:
node_type (str): The node type to configure chrony
e.g: worker, master and all if we want to configure on all nodes
timeout (int): Time in seconds to wait
"""
# importing here to avoid dependencies
from ocs_ci.utility.templating import load_yaml
from ocs_ci.ocs.resources.ocs import OCS
chrony_data = load_yaml(constants.NTP_CHRONY_CONF)
node_types = [node_type]
if node_type == "all":
node_types = [f"{constants.WORKER_MACHINE}", f"{constants.MASTER_MACHINE}"]
for role in node_types:
log.info(f"Creating chrony for {role} nodes")
chrony_data["metadata"]["labels"][
"machineconfiguration.openshift.io/role"
] = role
chrony_data["metadata"]["name"] = f"{role}-chrony-configuration"
chrony_obj = OCS(**chrony_data)
chrony_obj.create()
# sleep here to start update machineconfigpool status
time.sleep(60)
wait_for_machineconfigpool_status(role, timeout=timeout)
def modify_csv(csv, replace_from, replace_to):
"""
Modify the CSV
Args:
csv (str): The CSV name
replace_from (str): The pattern to replace from in the CSV
replace_to (str): The pattern to replace to in the CSV
"""
data = (
f"oc -n openshift-storage get csv {csv} -o yaml | sed"
f" 's,{replace_from},{replace_to},g' | oc replace -f -"
)
log.info(
f"CSV {csv} will be modified: {replace_from} will be replaced "
f"with {replace_to}.\nThe command that will be used for that is:\n{data}"
)
temp_file = NamedTemporaryFile(mode="w+", prefix="csv_modification", suffix=".sh")
with open(temp_file.name, "w") as t_file:
t_file.writelines(data)
run_cmd(f"chmod 777 {temp_file.name}")
run_cmd(f"sh {temp_file.name}")
def check_for_rhcos_images(url):
"""
    Check if rhcos images are present in the given location
Args:
url (str): rhcos_images url
Returns:
        (bool): True if images are present, False otherwise
"""
r = requests.head(url)
return r.status_code == requests.codes.ok
def download_file_from_git_repo(git_repo_url, path_to_file_in_git, filename):
"""
Download a file from a specified git repository
Args:
git_repo_url (str): The git repository url
path_to_file_in_git (str): Path to the file to download
in git repository
filename (str): Name of the file to write the download to
"""
log.debug(
f"Download file '{path_to_file_in_git}' from "
f"git repository {git_repo_url} to local file '{filename}'."
)
temp_dir = mkdtemp()
git.Repo.clone_from(git_repo_url, temp_dir, branch="master", depth=1)
move(os.path.join(temp_dir, path_to_file_in_git), filename)
rmtree(temp_dir)
def skipif_upgraded_from(version_list):
"""
This function evaluates the condition to skip a test if the cluster
is upgraded from a particular OCS version
Args:
version_list (list): List of versions to check
Return:
(bool): True if test needs to be skipped else False
"""
try:
from ocs_ci.ocs.resources.ocs import get_ocs_csv
skip_this = False
version_list = [version_list] if isinstance(version_list, str) else version_list
ocs_csv = get_ocs_csv()
csv_info = ocs_csv.get()
prev_version = csv_info.get("spec").get("replaces", "")
for version in version_list:
if f".v{version}" in prev_version:
skip_this = True
break
return skip_this
except Exception as err:
log.error(str(err))
return False
def get_cluster_id(cluster_path):
"""
Get ClusterID from metadata.json in given cluster_path
Args:
cluster_path: path to cluster install directory
Returns:
str: metadata.json['clusterID']
"""
metadata_file = os.path.join(cluster_path, "metadata.json")
with open(metadata_file) as f:
metadata = json.load(f)
return metadata["clusterID"]
def get_running_cluster_id():
"""
Get cluster UUID
Not relying on metadata.json as user sometimes want to run
only with kubeconfig for some tests. For this function to work
cluster has to be in running state
Returns:
str: cluster UUID
"""
cluster_id = run_cmd(
"oc get clusterversion version -o jsonpath='{.spec.clusterID}'"
)
return cluster_id
def get_ocp_upgrade_history():
"""
Gets the OCP upgrade history for the cluster
Returns:
list: List of OCP upgrade paths. Latest version in the
beginning of the list
"""
# importing here to avoid circular imports
from ocs_ci.ocs.ocp import OCP
ocp = OCP(kind="clusterversion")
cluster_version_info = ocp.get("version")
upgrade_history_info = cluster_version_info["status"]["history"]
upgrade_history = [each_upgrade["version"] for each_upgrade in upgrade_history_info]
return upgrade_history
def get_attr_chain(obj, attr_chain):
"""
Attempt to retrieve object attributes when uncertain about the existence of the attribute
or a different attribute in a given attribute chain. If the retrieval fails, None is returned.
The function can be used to retrieve a direct attribute, or a chain of attributes.
    i.e. - obj.attr_a, obj.attr_a.sub_attr
Another example - trying to access "sub_attr_b" in object.attr.sub_attr_a.sub_attr_b -
get_attr_chain(object, "attr.sub_attr_a.sub_attr_b")
The function can be used to try and retrieve "sub_attribute_b" without an exception,
even in cases where "attr" or "sub_attr_a" might not exist.
In those cases, the function will return None.
Args:
obj: An object
attr_chain (str): A string containing one attribute or several sub-attributes
separated by dots (i.e. - "attr.sub_attr_a.sub_attr_b")
Returns:
The requested attribute if found, otherwise None
"""
return reduce(
lambda _obj, _attr: getattr(_obj, _attr, None), attr_chain.split("."), obj
)
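# Usage sketch (illustrative, `pod` is a hypothetical object):
#   name = get_attr_chain(pod, "data.metadata.name")
#   # returns None instead of raising if `data` or `metadata` is missing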
def get_default_if_keyval_empty(dictionary, key, default_val):
"""
    If the key has an empty value OR the key doesn't exist,
    then return the default value.
Args:
dictionary (dict): Dictionary where we have to lookup
key (str): key to lookup
default_val (str): If key doesn't have value then return
this default_val
Returns:
dictionary[key] if value is present else default_val
"""
if not dictionary.get(key):
return default_val
return dictionary.get(key)
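# Usage sketch (illustrative):
#   get_default_if_keyval_empty({"a": ""}, "a", "fallback")  # -> "fallback"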
def get_client_version(client_binary_path):
"""
Get version reported by `oc version`.
Args:
client_binary_path (str): path to `oc` binary
Returns:
str: version reported by `oc version`.
None if the client does not exist at the provided path.
"""
if os.path.isfile(client_binary_path):
cmd = f"{client_binary_path} version --client -o json"
resp = exec_cmd(cmd)
stdout = json.loads(resp.stdout.decode())
return stdout["releaseClientVersion"]
def clone_notify():
"""
    Clone the notify tool repository. It contains the source code of the
    notify tool, which is a python3 based tool wrapped by a container, used to
    configure Ceph Bucket Notifications.
Returns:
notify_path (str): Path location of the notify code
"""
notify_dir = mkdtemp(prefix="notify_")
log.info(f"cloning repo notify in {notify_dir}")
git_clone_cmd = f"git clone {constants.RGW_KAFKA_NOTIFY}"
subprocess.run(git_clone_cmd, shell=True, cwd=notify_dir, check=True)
notify_path = f"{notify_dir}/notify/notify.py"
return notify_path
def add_chrony_to_ocp_deployment():
"""
Create and Add necessary chrony resources
"""
for role in ["master", "worker"]:
log.info(f"Creating and Adding Chrony file for {role}")
with open(constants.CHRONY_TEMPLATE) as file_stream:
chrony_template_obj = yaml.safe_load(file_stream)
chrony_template_obj["metadata"]["labels"][
"machineconfiguration.openshift.io/role"
] = role
chrony_template_obj["metadata"]["name"] = f"99-{role}-chrony-configuration"
ignition_version = config.DEPLOYMENT["ignition_version"]
chrony_template_obj["spec"]["config"]["ignition"]["version"] = ignition_version
if Version.coerce(ignition_version) < Version.coerce("3.0"):
chrony_template_obj["spec"]["config"]["storage"]["files"][0][
"filesystem"
] = "root"
chrony_template_str = yaml.safe_dump(chrony_template_obj)
chrony_file = os.path.join(
config.ENV_DATA["cluster_path"],
"openshift",
f"99-{role}-chrony-configuration.yaml",
)
with open(chrony_file, "w") as f:
f.write(chrony_template_str)
def enable_huge_pages():
log.info("Enabling huge pages.")
exec_cmd(f"oc apply -f {constants.HUGE_PAGES_TEMPLATE}")
time.sleep(10)
log.info("Waiting for machine config will be applied with huge pages")
wait_for_machineconfigpool_status(node_type=constants.WORKER_MACHINE)
from functools import reduce
import io
import json
import logging
import os
import platform
import random
import re
import shlex
import smtplib
import string
import subprocess
import time
import traceback
import stat
from copy import deepcopy
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from scipy.stats import tmean, scoreatpercentile
from shutil import which, move, rmtree
import hcl2
import requests
import yaml
import git
from bs4 import BeautifulSoup
from paramiko import SSHClient, AutoAddPolicy
from paramiko.auth_handler import AuthenticationException, SSHException
from semantic_version import Version
from tempfile import NamedTemporaryFile, mkdtemp
from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.exceptions import (
CephHealthException,
ClientDownloadError,
CommandFailed,
TagNotFoundException,
TimeoutException,
TimeoutExpiredError,
UnavailableBuildException,
UnexpectedImage,
UnsupportedOSType,
)
from ocs_ci.utility import version as version_module
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.retry import retry
log = logging.getLogger(__name__)
# variables
mounting_dir = "/mnt/cephfs/"
clients = []
md5sum_list1 = []
md5sum_list2 = []
fuse_clients = []
kernel_clients = []
mon_node = ""
mon_node_ip = ""
mds_nodes = []
md5sum_file_lock = []
active_mdss = []
RC = []
failure = {}
output = []
unique_test_names = []
# function for getting the clients
def get_client_info(ceph_nodes, clients):
log.info("Getting Clients")
for node in ceph_nodes:
if node.role == "client":
clients.append(node)
# Identifying MON node
for node in ceph_nodes:
if node.role == "mon":
mon_node = node
out, err = mon_node.exec_command(cmd="sudo hostname -I")
mon_node_ip = out.read().decode().rstrip("\n")
break
for node in ceph_nodes:
if node.role == "mds":
mds_nodes.append(node)
for node in clients:
node.exec_command(cmd="sudo yum install -y attr")
    fuse_clients = clients[0:2]  # separating clients for fuse and kernel
kernel_clients = clients[2:4]
return (
fuse_clients,
kernel_clients,
mon_node,
mounting_dir,
mds_nodes,
md5sum_file_lock,
mon_node_ip,
)
# function for providing authorization to the clients from MON node
def auth_list(clients, mon_node):
for node in clients:
log.info("Giving required permissions for clients from MON node:")
mon_node.exec_command(
cmd="sudo ceph auth get-or-create client.%s mon 'allow *' mds 'allow *, allow rw path=/' "
"osd 'allow rw pool=cephfs_data' -o /etc/ceph/ceph.client.%s.keyring"
% (node.hostname, node.hostname)
)
out, err = mon_node.exec_command(
sudo=True, cmd="cat /etc/ceph/ceph.client.%s.keyring" % (node.hostname)
)
keyring = out.read().decode()
key_file = node.write_file(
sudo=True,
file_name="/etc/ceph/ceph.client.%s.keyring" % (node.hostname),
file_mode="w",
)
key_file.write(keyring)
key_file.flush()
node.exec_command(
cmd="sudo chmod 644 /etc/ceph/ceph.client.%s.keyring" % (node.hostname)
)
# creating mounting directory
node.exec_command(cmd="sudo mkdir %s" % (mounting_dir))
# Mounting single FS with ceph-fuse
def fuse_mount(fuse_clients, mounting_dir):
try:
for client in fuse_clients:
log.info("Creating mounting dir:")
log.info("Mounting fs with ceph-fuse on client %s:" % (client.hostname))
client.exec_command(
cmd="sudo ceph-fuse -n client.%s %s" % (client.hostname, mounting_dir)
)
out, err = client.exec_command(cmd="mount")
mount_output = out.read().decode()
mount_output.split()
log.info("Checking if fuse mount is is passed of failed:")
if "fuse" in mount_output:
log.info("ceph-fuse mounting passed")
else:
log.error("ceph-fuse mounting failed")
return md5sum_list1
except Exception as e:
log.error(e)
def kernel_mount(mounting_dir, mon_node_ip, kernel_clients):
try:
for client in kernel_clients:
out, err = client.exec_command(
cmd="sudo ceph auth get-key client.%s" % (client.hostname)
)
secret_key = out.read().decode().rstrip("\n")
mon_node_ip = mon_node_ip.replace(" ", "")
client.exec_command(
cmd="sudo mount -t ceph %s:6789:/ %s -o name=%s,secret=%s"
% (mon_node_ip, mounting_dir, client.hostname, secret_key)
)
out, err = client.exec_command(cmd="mount")
mount_output = out.read().decode()
mount_output.split()
log.info("Checking if kernel mount is is passed of failed:")
if "%s:6789:/" % (mon_node_ip) in mount_output:
log.info("kernel mount passed")
else:
log.error("kernel mount failed")
return md5sum_list2
except Exception as e:
log.error(e)
def fuse_client_io(client, mounting_dir):
try:
rand_count = random.randint(1, 5)
rand_bs = random.randint(100, 300)
log.info("Performing IOs on fuse-clients")
client.exec_command(
cmd="sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d"
% (mounting_dir, client.hostname, rand_bs, rand_count),
long_running=True,
)
except Exception as e:
log.error(e)
def kernel_client_io(client, mounting_dir):
try:
rand_count = random.randint(1, 6)
rand_bs = random.randint(100, 500)
log.info("Performing IOs on kernel-clients")
client.exec_command(
cmd="sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d"
% (mounting_dir, client.hostname, rand_bs, rand_count),
long_running=True,
)
except Exception as e:
log.error(e)
def fuse_client_md5(fuse_clients, md5sum_list1):
try:
log.info("Calculating MD5 sums of files in fuse-clients:")
for client in fuse_clients:
md5sum_list1.append(
client.exec_command(
cmd="sudo md5sum %s* | awk '{print $1}' " % (mounting_dir),
long_running=True,
)
)
except Exception as e:
log.error(e)
def kernel_client_md5(kernel_clients, md5sum_list2):
try:
log.info("Calculating MD5 sums of files in kernel-clients:")
for client in kernel_clients:
md5sum_list2.append(
client.exec_command(
cmd="sudo md5sum %s* | awk '{print $1}' " % (mounting_dir),
long_running=True,
)
)
except Exception as e:
log.error(e)
# checking file locking mechanism
def file_locking(client):
try:
to_lock_file = """
import fcntl
import subprocess
import time
try:
f = open('/mnt/cephfs/to_test_file_lock', 'w+')
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
print "locking file:--------------------------------"
subprocess.check_output(["sudo","dd","if=/dev/zero","of=/mnt/cephfs/to_test_file_lock","bs=1M","count=2"])
except IOError as e:
print e
finally:
print "Unlocking file:------------------------------"
fcntl.lockf(f,fcntl.LOCK_UN)
"""
to_lock_code = client.write_file(
sudo=True, file_name="/home/cephuser/file_lock.py", file_mode="w"
)
to_lock_code.write(to_lock_file)
to_lock_code.flush()
out, err = client.exec_command(cmd="sudo python /home/cephuser/file_lock.py")
output = out.read().decode()
output.split()
if "Errno 11" in output:
log.info("File locking achieved, data is not corrupted")
elif "locking" in output:
log.info("File locking achieved, data is not corrupted")
else:
log.error("Data is corrupted")
out, err = client.exec_command(
cmd="sudo md5sum %sto_test_file_lock | awk '{print $1}'" % (mounting_dir)
)
md5sum_file_lock.append(out.read().decode())
except Exception as e:
log.error(e)
def activate_multiple_mdss(mds_nodes):
try:
log.info("Activating Multiple MDSs")
for node in mds_nodes:
out1, err = node.exec_command(
cmd="sudo ceph fs set cephfs allow_multimds true --yes-i-really-mean-it"
)
out2, err = node.exec_command(cmd="sudo ceph fs set cephfs max_mds 2")
break
except Exception as e:
log.error(e)
def mkdir_pinning(clients, range1, range2, dir_name, pin_val):
try:
log.info("Creating Directories and Pinning to MDS %s" % (pin_val))
for client in clients:
for num in range(range1, range2):
out, err = client.exec_command(
cmd="sudo mkdir %s%s_%d" % (mounting_dir, dir_name, num)
)
if pin_val != "":
client.exec_command(
cmd="sudo setfattr -n ceph.dir.pin -v %s %s%s_%d"
% (pin_val, mounting_dir, dir_name, num)
)
else:
print("Pin val not given")
print(out.read().decode())
print(time.time())
break
except Exception as e:
log.error(e)
def allow_dir_fragmentation(mds_nodes):
try:
log.info("Allowing directorty fragmenation for splitting")
for node in mds_nodes:
node.exec_command(cmd="sudo ceph fs set cephfs allow_dirfrags 1")
break
except Exception as e:
log.error(e)
def mds_fail_over(mds_nodes):
try:
rand = random.randint(0, 1)
for node in mds_nodes:
log.info("Failing MDS %d" % (rand))
node.exec_command(cmd="sudo ceph mds fail %d" % (rand))
break
except Exception as e:
log.error(e)
def pinned_dir_io(clients, mds_fail_over, num_of_files, range1, range2):
try:
log.info("Performing IOs and MDSfailovers on clients")
for client in clients:
client.exec_command(cmd="sudo pip install crefi")
for num in range(range1, range2):
if mds_fail_over != "":
mds_fail_over(mds_nodes)
out, err = client.exec_command(
cmd="sudo crefi -n %d %sdir_%d" % (num_of_files, mounting_dir, num)
)
rc = out.channel.recv_exit_status()
print(out.read().decode())
RC.append(rc)
print(time.time())
if rc == 0:
log.info("Client IO is going on,success")
else:
log.error("Client IO got interrupted")
failure.update({client: out})
break
break
except Exception as e:
log.error(e)
def custom_ceph_config(suite_config, custom_config, custom_config_file):
"""
Combines and returns custom configuration overrides for ceph.
Hierarchy is as follows::
custom_config > custom_config_file > suite_config
Args:
suite_config: ceph_conf_overrides that currently exist in the test suite
custom_config: custom config args provided by the cli (these all go to the global scope)
custom_config_file: path to custom config yaml file provided by the cli
Returns
New value to be used for ceph_conf_overrides in test config
"""
log.debug("Suite config: {}".format(suite_config))
log.debug("Custom config: {}".format(custom_config))
log.debug("Custom config file: {}".format(custom_config_file))
full_custom_config = suite_config or {}
cli_config_dict = {}
custom_config_dict = {}
# retrieve custom config from file
if custom_config_file:
with open(custom_config_file) as f:
custom_config_dict = yaml.safe_load(f)
log.info("File contents: {}".format(custom_config_dict))
# format cli configs into dict
if custom_config:
cli_config_dict = dict(item.split("=") for item in custom_config)
# combine file and cli configs
if cli_config_dict:
if not custom_config_dict.get("global"):
custom_config_dict["global"] = {}
for key, value in cli_config_dict.items():
custom_config_dict["global"][key] = value
# combine file and suite configs
for key, value in custom_config_dict.items():
subsection = {}
if full_custom_config.get(key):
subsection.update(full_custom_config[key])
subsection.update(value)
full_custom_config[key] = subsection
log.info("Full custom config: {}".format(full_custom_config))
return full_custom_config
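# Usage sketch (illustrative): cli options land in the "global" section and win
# over the file config, which in turn wins over the suite config:
#   custom_ceph_config(
#       {"global": {"osd_pool_default_size": "3"}}, ["osd_pool_default_size=2"], None
#   )
#   # -> {"global": {"osd_pool_default_size": "2"}}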
def mask_secrets(plaintext, secrets):
"""
Replace secrets in plaintext with asterisks
Args:
plaintext (str or list): The plaintext to remove the secrets from or
list of strings to remove secrets from
secrets (list): List of secret strings to replace in the plaintext
Returns:
str: The censored version of plaintext
"""
if secrets:
for secret in secrets:
if isinstance(plaintext, list):
                plaintext = [text.replace(secret, "*" * 5) for text in plaintext]
else:
plaintext = plaintext.replace(secret, "*" * 5)
return plaintext
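# Usage sketch (illustrative):
#   mask_secrets("login -p hunter2", ["hunter2"])  # -> "login -p *****"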
def run_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):
"""
*The deprecated form of exec_cmd.*
Run an arbitrary command locally
Args:
cmd (str): command to run
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): Timeout for the command, defaults to 600 seconds.
ignore_error (bool): True if ignore non zero return code and do not
raise the exception.
Raises:
CommandFailed: In case the command execution fails
Returns:
(str) Decoded stdout of command
"""
completed_process = exec_cmd(cmd, secrets, timeout, ignore_error, **kwargs)
return mask_secrets(completed_process.stdout.decode(), secrets)
def exec_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):
"""
Run an arbitrary command locally
Args:
cmd (str): command to run
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): Timeout for the command, defaults to 600 seconds.
ignore_error (bool): True if ignore non zero return code and do not
raise the exception.
Raises:
CommandFailed: In case the command execution fails
Returns:
(CompletedProcess) A CompletedProcess object of the command that was executed
CompletedProcess attributes:
args: The list or str args passed to run().
returncode (str): The exit code of the process, negative for signals.
stdout (str): The standard output (None if not captured).
stderr (str): The standard error (None if not captured).
"""
masked_cmd = mask_secrets(cmd, secrets)
log.info(f"Executing command: {masked_cmd}")
if isinstance(cmd, str):
cmd = shlex.split(cmd)
completed_process = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
timeout=timeout,
**kwargs,
)
masked_stdout = mask_secrets(completed_process.stdout.decode(), secrets)
if len(completed_process.stdout) > 0:
log.debug(f"Command stdout: {masked_stdout}")
else:
log.debug("Command stdout is empty")
masked_stderr = mask_secrets(completed_process.stderr.decode(), secrets)
if len(completed_process.stderr) > 0:
log.warning(f"Command stderr: {masked_stderr}")
else:
log.debug("Command stderr is empty")
log.debug(f"Command return code: {completed_process.returncode}")
if completed_process.returncode and not ignore_error:
raise CommandFailed(
f"Error during execution of command: {masked_cmd}."
f"\nError is {masked_stderr}"
)
return completed_process
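# Usage sketch (illustrative):
#   result = exec_cmd("oc get nodes", timeout=120)
#   nodes = result.stdout.decode()
#   # pass secrets=["<token>"] to have sensitive values masked in the logs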
def download_file(url, filename, **kwargs):
"""
Download a file from a specified url
Args:
url (str): URL of the file to download
filename (str): Name of the file to write the download to
kwargs (dict): additional keyword arguments passed to requests.get(...)
"""
log.debug(f"Download '{url}' to '{filename}'.")
with open(filename, "wb") as f:
r = requests.get(url, **kwargs)
assert r.ok, f"The URL {url} is not available! Status: {r.status_code}."
f.write(r.content)
def get_url_content(url, **kwargs):
"""
Return URL content
Args:
url (str): URL address to return
kwargs (dict): additional keyword arguments passed to requests.get(...)
Returns:
str: Content of URL
Raises:
AssertionError: When couldn't load URL
"""
log.debug(f"Download '{url}' content.")
r = requests.get(url, **kwargs)
assert r.ok, f"Couldn't load URL: {url} content! Status: {r.status_code}."
return r.content
def expose_ocp_version(version):
"""
This helper function exposes latest nightly version or GA version of OCP.
When the version string ends with .nightly (e.g. 4.2.0-0.nightly) it will
expose the version to latest accepted OCP build
(e.g. 4.2.0-0.nightly-2019-08-08-103722)
    If the version ends with -ga then it will find the latest GA OCP version
and will expose 4.2-ga to for example 4.2.22.
Args:
        version (str): Version of OCP
Returns:
        str: Version of OCP exposed to the full version if a latest nightly was passed
"""
if version.endswith(".nightly"):
latest_nightly_url = (
f"https://amd64.ocp.releases.ci.openshift.org/api/v1/"
f"releasestream/{version}/latest"
)
version_url_content = get_url_content(latest_nightly_url)
version_json = json.loads(version_url_content)
return version_json["name"]
if version.endswith("-ga"):
channel = config.DEPLOYMENT.get("ocp_channel", "stable")
        ocp_version = version[: -len("-ga")]
index = config.DEPLOYMENT.get("ocp_version_index", -1)
return get_latest_ocp_version(f"{channel}-{ocp_version}", index)
else:
return version
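# Usage sketch (illustrative, requires network access; resolved versions vary):
#   expose_ocp_version("4.9.0-0.nightly")  # -> name of the latest accepted nightly build
#   expose_ocp_version("4.9-ga")           # -> latest GA version, e.g. "4.9.8"
#   expose_ocp_version("4.9.7")            # -> returned unchanged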
def get_openshift_installer(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the OpenShift installer binary, if not already present.
Update env. PATH and get path of the openshift installer binary.
Args:
version (str): Version of the installer to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force installer download even if already present
Returns:
str: Path to the installer binary
"""
version = version or config.DEPLOYMENT["installer_version"]
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
installer_filename = "openshift-install"
installer_binary_path = os.path.join(bin_dir, installer_filename)
if os.path.isfile(installer_binary_path) and force_download:
delete_file(installer_binary_path)
if os.path.isfile(installer_binary_path):
log.debug(f"Installer exists ({installer_binary_path}), skipping download.")
# TODO: check installer version
else:
version = expose_ocp_version(version)
log.info(f"Downloading openshift installer ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
tarball = f"{installer_filename}.tar.gz"
url = get_openshift_mirror_url(installer_filename, version)
download_file(url, tarball)
run_cmd(f"tar xzvf {tarball} {installer_filename}")
delete_file(tarball)
# return to the previous working directory
os.chdir(previous_dir)
installer_version = run_cmd(f"{installer_binary_path} version")
log.info(f"OpenShift Installer version: {installer_version}")
return installer_binary_path
def get_ocm_cli(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the OCM binary, if not already present.
Update env. PATH and get path of the OCM binary.
Args:
version (str): Version of the OCM to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force OCM download even if already present
Returns:
str: Path to the OCM binary
"""
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
ocm_filename = "ocm"
ocm_binary_path = os.path.join(bin_dir, ocm_filename)
if os.path.isfile(ocm_binary_path) and force_download:
delete_file(ocm_binary_path)
if os.path.isfile(ocm_binary_path):
log.debug(f"ocm exists ({ocm_binary_path}), skipping download.")
else:
log.info(f"Downloading ocm cli ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = f"https://github.com/openshift-online/ocm-cli/releases/download/v{version}/ocm-linux-amd64"
download_file(url, ocm_filename)
# return to the previous working directory
os.chdir(previous_dir)
current_file_permissions = os.stat(ocm_binary_path)
os.chmod(
ocm_binary_path,
current_file_permissions.st_mode | stat.S_IEXEC,
)
ocm_version = run_cmd(f"{ocm_binary_path} version")
log.info(f"OCM version: {ocm_version}")
return ocm_binary_path
def get_rosa_cli(
version=None,
bin_dir=None,
force_download=False,
):
"""
Download the ROSA binary, if not already present.
Update env. PATH and get path of the ROSA binary.
Args:
version (str): Version of the ROSA to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force ROSA download even if already present
Returns:
str: Path to the rosa binary
"""
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
rosa_filename = "rosa"
rosa_binary_path = os.path.join(bin_dir, rosa_filename)
if os.path.isfile(rosa_binary_path) and force_download:
delete_file(rosa_binary_path)
if os.path.isfile(rosa_binary_path):
log.debug(f"rosa exists ({rosa_binary_path}), skipping download.")
else:
log.info(f"Downloading rosa cli ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = f"https://github.com/openshift/rosa/releases/download/v{version}/rosa-linux-amd64"
download_file(url, rosa_filename)
# return to the previous working directory
os.chdir(previous_dir)
current_file_permissions = os.stat(rosa_binary_path)
os.chmod(
rosa_binary_path,
current_file_permissions.st_mode | stat.S_IEXEC,
)
rosa_version = run_cmd(f"{rosa_binary_path} version")
log.info(f"rosa version: {rosa_version}")
return rosa_binary_path
def get_openshift_client(
version=None, bin_dir=None, force_download=False, skip_comparison=False
):
"""
Download the OpenShift client binary, if not already present.
Update env. PATH and get path of the oc binary.
Args:
version (str): Version of the client to download
(default: config.RUN['client_version'])
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force client download even if already present
skip_comparison (bool): Skip the comparison between the existing OCP client
version and the configured one.
Returns:
str: Path to the client binary
"""
version = version or config.RUN["client_version"]
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
client_binary_path = os.path.join(bin_dir, "oc")
kubectl_binary_path = os.path.join(bin_dir, "kubectl")
download_client = True
client_version = None
try:
version = expose_ocp_version(version)
except Exception:
log.exception("Unable to expose OCP version, skipping client download.")
skip_comparison = True
download_client = False
force_download = False
if force_download:
log.info("Forcing client download.")
elif os.path.isfile(client_binary_path) and not skip_comparison:
current_client_version = get_client_version(client_binary_path)
if current_client_version != version:
log.info(
f"Existing client version ({current_client_version}) does not match "
f"configured version ({version})."
)
else:
log.debug(
f"Client exists ({client_binary_path}) and matches configured version, "
f"skipping download."
)
download_client = False
if download_client:
# Move existing client binaries to backup location
client_binary_backup = f"{client_binary_path}.bak"
kubectl_binary_backup = f"{kubectl_binary_path}.bak"
try:
os.rename(client_binary_path, client_binary_backup)
os.rename(kubectl_binary_path, kubectl_binary_backup)
except FileNotFoundError:
pass
# Download the client
log.info(f"Downloading openshift client ({version}).")
prepare_bin_dir()
# record current working directory and switch to BIN_DIR
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = get_openshift_mirror_url("openshift-client", version)
tarball = "openshift-client.tar.gz"
download_file(url, tarball)
run_cmd(f"tar xzvf {tarball} oc kubectl")
delete_file(tarball)
try:
client_version = run_cmd(f"{client_binary_path} version --client")
except CommandFailed:
log.error("Unable to get version from downloaded client.")
if client_version:
try:
delete_file(client_binary_backup)
delete_file(kubectl_binary_backup)
log.info("Deleted backup binaries.")
except FileNotFoundError:
pass
else:
try:
os.rename(client_binary_backup, client_binary_path)
os.rename(kubectl_binary_backup, kubectl_binary_path)
log.info("Restored backup binaries to their original location.")
except FileNotFoundError:
raise ClientDownloadError(
"No backups exist and new binary was unable to be verified."
)
# return to the previous working directory
os.chdir(previous_dir)
log.info(f"OpenShift Client version: {client_version}")
return client_binary_path
def get_vault_cli(bind_dir=None, force_download=False):
"""
Download vault based on platform
basically for CLI purpose. Binary will be directly
put into ocs_ci/bin/ directory
Args:
bind_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
force_download (bool): Force vault cli download even if already present
"""
res = requests.get(constants.VAULT_VERSION_INFO_URL)
version = res.url.split("/")[-1].lstrip("v")
bin_dir = os.path.expanduser(bind_dir or config.RUN["bin_dir"])
system = platform.system()
if "Darwin" not in system and "Linux" not in system:
raise UnsupportedOSType("Not a supported platform for vault")
system = system.lower()
zip_file = f"vault_{version}_{system}_amd64.zip"
vault_cli_filename = "vault"
vault_binary_path = os.path.join(bin_dir, vault_cli_filename)
if os.path.isfile(vault_binary_path) and force_download:
delete_file(vault_binary_path)
if os.path.isfile(vault_binary_path):
log.debug(
f"Vault CLI binary already exists {vault_binary_path}, skipping download."
)
else:
log.info(f"Downloading vault cli {version}")
prepare_bin_dir()
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = f"{constants.VAULT_DOWNLOAD_BASE_URL}/{version}/{zip_file}"
download_file(url, zip_file)
run_cmd(f"unzip {zip_file}")
delete_file(zip_file)
os.chdir(previous_dir)
vault_ver = run_cmd(f"{vault_binary_path} version")
log.info(f"Vault cli version:{vault_ver}")
def ensure_nightly_build_availability(build_url):
base_build_url = build_url.rsplit("/", 1)[0]
r = requests.get(base_build_url)
extracting_condition = b"Extracting" in r.content
if extracting_condition:
log.info("Build is extracting now, may take up to a minute.")
return r.ok and not extracting_condition
def get_openshift_mirror_url(file_name, version):
"""
Format url to OpenShift mirror (for client and installer download).
Args:
file_name (str): Name of file
version (str): Version of the installer or client to download
Returns:
str: Url of the desired file (installer or client)
Raises:
UnsupportedOSType: In case the OS type is not supported
UnavailableBuildException: In case the build url is not reachable
"""
if platform.system() == "Darwin":
os_type = "mac"
elif platform.system() == "Linux":
os_type = "linux"
else:
raise UnsupportedOSType
url_template = config.DEPLOYMENT.get(
"ocp_url_template",
"https://openshift-release-artifacts.apps.ci.l2s4.p1.openshiftapps.com/"
"{version}/{file_name}-{os_type}-{version}.tar.gz",
)
url = url_template.format(
version=version,
file_name=file_name,
os_type=os_type,
)
sample = TimeoutSampler(
timeout=540,
sleep=5,
func=ensure_nightly_build_availability,
build_url=url,
)
if not sample.wait_for_func_status(result=True):
raise UnavailableBuildException(f"The build url {url} is not reachable")
return url
def prepare_bin_dir(bin_dir=None):
"""
Prepare bin directory for OpenShift client and installer
Args:
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
"""
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
try:
os.mkdir(bin_dir)
log.info(f"Directory '{bin_dir}' successfully created.")
except FileExistsError:
log.debug(f"Directory '{bin_dir}' already exists.")
def add_path_to_env_path(path):
"""
Add path to the PATH environment variable (if not already there).
Args:
path (str): Path which should be added to the PATH env. variable
"""
env_path = os.environ["PATH"].split(os.pathsep)
if path not in env_path:
os.environ["PATH"] = os.pathsep.join([path] + env_path)
log.info(f"Path '{path}' added to the PATH environment variable.")
log.debug(f"PATH: {os.environ['PATH']}")
def delete_file(file_name):
"""
Delete file_name
Args:
file_name (str): Path to the file you want to delete
"""
os.remove(file_name)
def delete_dir(dir_name):
"""
Deletes the directory
Args:
dir_name (str): Directory path to delete
"""
try:
rmtree(dir_name)
except OSError as e:
log.error(f"Failed to delete the directory {dir_name}. Error: {e.strerror}")
class TimeoutSampler(object):
"""
Samples the function output.
This is a generator object that at first yields the output of function
`func`. After the yield, it either raises instance of `timeout_exc_cls` or
sleeps `sleep` seconds.
Yielding the output allows you to handle every value as you wish.
Feel free to set the instance variables.
Args:
timeout (int): Timeout in seconds
sleep (int): Sleep interval in seconds
func (function): The function to sample
func_args: Arguments for the function
func_kwargs: Keyword arguments for the function
"""
def __init__(self, timeout, sleep, func, *func_args, **func_kwargs):
self.timeout = timeout
self.sleep = sleep
# check that given timeout and sleep values makes sense
if self.timeout < self.sleep:
raise ValueError("timeout should be larger than sleep time")
self.func = func
self.func_args = func_args
self.func_kwargs = func_kwargs
# Timestamps of the first and most recent samples
self.start_time = None
self.last_sample_time = None
# The exception to raise
self.timeout_exc_cls = TimeoutExpiredError
# Arguments that will be passed to the exception
self.timeout_exc_args = [self.timeout]
try:
self.timeout_exc_args.append(
f"Timed out after {timeout}s running {self._build_call_string()}"
)
except Exception:
log.exception(
"Failed to assemble call string. Not necessarily a test failure."
)
def _build_call_string(self):
def stringify(value):
if isinstance(value, str):
return f'"{value}"'
return str(value)
args = list(map(stringify, self.func_args))
kwargs = [f"{stringify(k)}={stringify(v)}" for k, v in self.func_kwargs.items()]
all_args_string = ", ".join(args + kwargs)
return f"{self.func.__name__}({all_args_string})"
def __iter__(self):
if self.start_time is None:
self.start_time = time.time()
while True:
self.last_sample_time = time.time()
if self.timeout <= (self.last_sample_time - self.start_time):
raise self.timeout_exc_cls(*self.timeout_exc_args)
try:
yield self.func(*self.func_args, **self.func_kwargs)
except Exception as ex:
msg = f"Exception raised during iteration: {ex}"
log.exception(msg)
if self.timeout <= (time.time() - self.start_time):
raise self.timeout_exc_cls(*self.timeout_exc_args)
log.info("Going to sleep for %d seconds before next iteration", self.sleep)
time.sleep(self.sleep)
def wait_for_func_value(self, value):
"""
Implements a common use case of TimeoutSampler: waiting until func (the given
function) returns a given value.
Args:
value: Expected return value of func we are waiting for.
"""
try:
for i_value in self:
if i_value == value:
break
except self.timeout_exc_cls:
log.error(
"function %s failed to return expected value %s "
"after multiple retries during %d second timeout",
self.func.__name__,
value,
self.timeout,
)
raise
def wait_for_func_status(self, result):
"""
Run the given function repeatedly until it returns the expected result or the
timeout is reached (using the __iter__ function).
Args:
result (bool): Expected result from func.
Examples::
sample = TimeoutSampler(
timeout=60, sleep=1, func=some_func, func_arg1="1",
func_arg2="2"
)
if not sample.wait_for_func_status(result=True):
raise Exception
"""
try:
self.wait_for_func_value(result)
return True
except self.timeout_exc_cls:
return False
class TimeoutIterator(TimeoutSampler):
"""
Wrapper of TimeoutSampler which separates the parameters of the class itself
and the func arguments in the __init__ method. Such a way of passing a function
together with its parameters is used in the Python standard library.
This allows more explicit usage, which improves readability, eg.::
t1 = TimeoutIterator(timeout=60, sleep=5, func=foo, func_args=[bar])
t2 = TimeoutIterator(3600, sleep=10, func=foo, func_args=[bar])
"""
def __init__(self, timeout, sleep, func, func_args=None, func_kwargs=None):
if func_args is None:
func_args = []
if func_kwargs is None:
func_kwargs = {}
super().__init__(timeout, sleep, func, *func_args, **func_kwargs)
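# Illustrative usage of the samplers above (a sketch; `check_pod_phase` and its
# argument are hypothetical and not part of this module):
#   sampler = TimeoutIterator(timeout=60, sleep=5, func=check_pod_phase, func_args=["pod-1"])
#   for phase in sampler:
#       if phase == "Running":
#           break
# Each iteration calls check_pod_phase("pod-1"), sleeps 5 seconds between calls,
# and raises TimeoutExpiredError once the 60 second budget is exhausted.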
def get_random_str(size=13):
"""
Generates a random string of the given size
Args:
size (int): number of random characters to generate
Returns:
str : string of random characters of given size
"""
chars = string.ascii_lowercase + string.digits
return "".join(random.choice(chars) for _ in range(size))
def run_async(command):
"""
Run command locally and return without waiting for completion
Args:
command (str): The command to run.
Returns:
An open descriptor to be used by the calling function.
Example:
command = 'oc delete pvc pvc1'
proc = run_async(command)
ret, out, err = proc.async_communicate()
"""
log.info(f"Executing command: {command}")
popen_obj = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
encoding="utf-8",
)
def async_communicate():
"""
Wait for command to complete and fetch the result
Returns:
retcode, stdout, stderr of the command
"""
stdout, stderr = popen_obj.communicate()
retcode = popen_obj.returncode
return retcode, stdout, stderr
popen_obj.async_communicate = async_communicate
return popen_obj
def is_cluster_running(cluster_path):
"""
Check whether a cluster is already running for the given cluster path, i.e.
a cluster path was passed via CLI params and the kubeconfig stored there
can be set for the OCP client.
Args:
cluster_path (str): Path to the cluster install directory
"""
from ocs_ci.ocs.openshift_ops import OCP
return config.RUN["cli_params"].get("cluster_path") and OCP.set_kubeconfig(
os.path.join(cluster_path, config.RUN.get("kubeconfig_location"))
)
def decompose_html_attributes(soup, attributes):
"""
Decomposes the given html attributes
Args:
soup (obj): BeautifulSoup object
attributes (list): attributes to decompose
Returns: None
"""
for attribute in attributes:
tg = soup.find_all(attrs={"class": attribute})
for each in tg:
each.decompose()
def parse_html_for_email(soup):
"""
Parses the html and filters out the unnecessary data/tags/attributes
for email reporting
Args:
soup (obj): BeautifulSoup object
"""
attributes_to_decompose = ["extra"]
if not config.RUN.get("logs_url"):
attributes_to_decompose.append("col-links")
decompose_html_attributes(soup, attributes_to_decompose)
soup.find(id="not-found-message").decompose()
if not config.RUN.get("logs_url"):
for tr in soup.find_all("tr"):
for th in tr.find_all("th"):
if "Links" in th.text:
th.decompose()
for p in soup.find_all("p"):
if "(Un)check the boxes to filter the results." in p.text:
p.decompose()
if "pytest-html" in p.text:
data = p.text.split("by")[0]
p.string = data
for ip in soup.find_all("input"):
if not ip.has_attr("disabled"):
ip["disabled"] = "true"
for td in soup.find_all("td"):
if "pytest" in td.text or "html" in td.text:
data = td.text.replace("&apos", "")
td.string = data
main_header = soup.find("h1")
main_header.string.replace_with("OCS-CI RESULTS")
def add_squad_analysis_to_email(session, soup):
"""
Add squad analysis to the html test results used in email reporting
Args:
session (obj): Pytest session object
soup (obj): BeautifulSoup object of HTML Report data
"""
failed = {}
skipped = {}
# sort out failed and skipped test cases to failed and skipped dicts
for result in session.results.values():
if result.failed or result.skipped:
unassigned = True
for squad, res in constants.SQUADS.items():
for item in res:
if item in result.nodeid:
if result.failed:
if squad not in failed:
failed[squad] = []
failed[squad].append(result.nodeid)
unassigned = False
if result.skipped:
if squad not in skipped:
skipped[squad] = []
try:
skipped_message = result.longrepr[2][8:]
except TypeError:
skipped_message = "--unknown--"
skipped[squad].append((result.nodeid, skipped_message))
unassigned = False
if unassigned:
if result.failed:
if "UNASSIGNED" not in failed:
failed["UNASSIGNED"] = []
failed["UNASSIGNED"].append(result.nodeid)
if result.skipped:
if "UNASSIGNED" not in skipped:
skipped["UNASSIGNED"] = []
try:
skipped_message = result.longrepr[2][8:]
except TypeError:
skipped_message = "--unknown--"
skipped["UNASSIGNED"].append((result.nodeid, skipped_message))
# no failed or skipped tests - exit the function
if not failed and not skipped:
return
# add CSS for the Squad Analysis report
style = soup.find("style")
# derive a CSS color for each squad heading from the squad name itself
style.string += "\n".join(
[
f"h4.squad-{color.lower()} {{\n color: {color.lower()};\n}}"
for color in constants.SQUADS
]
)
# few additional styles
style.string += """
.squad-analysis {
color: black;
font-family: monospace;
background-color: #eee;
padding: 5px;
margin-top: 10px;
}
.squad-analysis h2 {
margin: 0px;
}
.squad-analysis h3 {
margin: 0px;
margin-top: 10px;
}
.squad-analysis h4 {
margin: 0px;
}
.squad-analysis ul {
margin: 0px;
}
.squad-analysis ul li em {
margin-left: 1em;
}
.squad-unassigned {
background-color: #FFBA88;
}
h4.squad-yellow {
color: black;
background-color: yellow;
display: inline;
}
"""
# prepare place for the Squad Analysis in the email
squad_analysis_div = soup.new_tag("div")
squad_analysis_div["class"] = "squad-analysis"
main_header = soup.find("h1")
main_header.insert_after(squad_analysis_div)
failed_h2_tag = soup.new_tag("h2")
failed_h2_tag.string = "Squad Analysis - please analyze:"
squad_analysis_div.append(failed_h2_tag)
if failed:
# print failed test cases per squad
failed_div_tag = soup.new_tag("div")
squad_analysis_div.append(failed_div_tag)
failed_h3_tag = soup.new_tag("h3")
failed_h3_tag.string = "Failures:"
failed_div_tag.append(failed_h3_tag)
for squad in failed:
failed_h4_tag = soup.new_tag("h4")
failed_h4_tag.string = f"{squad} squad"
failed_h4_tag["class"] = f"squad-{squad.lower()}"
failed_div_tag.append(failed_h4_tag)
failed_ul_tag = soup.new_tag("ul")
failed_ul_tag["class"] = f"squad-{squad.lower()}"
failed_div_tag.append(failed_ul_tag)
for test in failed[squad]:
failed_li_tag = soup.new_tag("li")
failed_li_tag.string = test
failed_ul_tag.append(failed_li_tag)
if skipped:
# print skipped test cases with reason per squad
skips_div_tag = soup.new_tag("div")
squad_analysis_div.append(skips_div_tag)
skips_h3_tag = soup.new_tag("h3")
skips_h3_tag.string = "Skips:"
skips_div_tag.append(skips_h3_tag)
for squad in skipped:
skips_h4_tag = soup.new_tag("h4")
skips_h4_tag.string = f"{squad} squad"
skips_h4_tag["class"] = f"squad-{squad.lower()}"
skips_div_tag.append(skips_h4_tag)
skips_ul_tag = soup.new_tag("ul")
skips_ul_tag["class"] = f"squad-{squad.lower()}"
skips_div_tag.append(skips_ul_tag)
for test in skipped[squad]:
skips_li_tag = soup.new_tag("li")
skips_test_span_tag = soup.new_tag("span")
skips_test_span_tag.string = test[0]
skips_li_tag.append(skips_test_span_tag)
skips_li_tag.append(soup.new_tag("br"))
skips_reason_em_tag = soup.new_tag("em")
skips_reason_em_tag.string = f"Reason: {test[1]}"
skips_li_tag.append(skips_reason_em_tag)
skips_ul_tag.append(skips_li_tag)
def move_summary_to_top(soup):
"""
Move the summary to the top of the email report.
Args:
soup (obj): BeautifulSoup object of HTML report data
"""
summary = []
summary.append(soup.find("h2", text="Summary"))
for tag in summary[0].next_siblings:
if tag.name == "h2":
break
else:
summary.append(tag)
for tag in summary:
tag.extract()
main_header = soup.find("h1")
# because we are inserting the tags just after the header one by one, we
# have to insert them in reverse order
summary.reverse()
for tag in summary:
main_header.insert_after(tag)
def email_reports(session):
"""
Email results of test run
"""
# calculate percentage pass
# reporter = session.config.pluginmanager.get_plugin("terminalreporter")
# passed = len(reporter.stats.get("passed", []))
# failed = len(reporter.stats.get("failed", []))
# error = len(reporter.stats.get("error", []))
# total = passed + failed + error
# percentage_passed = (passed / total) * 100
try:
build_id = get_ocs_build_number()
except Exception:
build_id = ""
log.exception("Getting OCS operator build number failed!")
build_str = f"BUILD ID: {build_id} " if build_id else ""
mailids = config.RUN["cli_params"]["email"]
recipients = mailids.split(",")
sender = "ocs-ci@redhat.com"
msg = MIMEMultipart("alternative")
msg["Subject"] = (
f"ocs-ci results for {get_testrun_name()} "
f"({build_str}"
f"RUN ID: {config.RUN['run_id']}) "
# f"Passed: {percentage_passed:.0f}%"
)
msg["From"] = sender
msg["To"] = ", ".join(recipients)
html = config.RUN["cli_params"]["--html"]
with open(os.path.expanduser(html)) as fd:
html_data = fd.read()
soup = BeautifulSoup(html_data, "html.parser")
parse_html_for_email(soup)
if config.RUN["cli_params"].get("squad_analysis"):
add_squad_analysis_to_email(session, soup)
move_summary_to_top(soup)
part1 = MIMEText(soup, "html")
msg.attach(part1)
try:
s = smtplib.SMTP(config.REPORTING["email"]["smtp_server"])
s.sendmail(sender, recipients, msg.as_string())
s.quit()
log.info(f"Results have been emailed to {recipients}")
except Exception:
log.exception("Sending email with results failed!")
def get_cluster_version_info():
"""
Gets the complete cluster version information
Returns:
dict: cluster version information
"""
# importing here to avoid circular imports
from ocs_ci.ocs.ocp import OCP
ocp = OCP(kind="clusterversion")
cluster_version_info = ocp.get("version")
return cluster_version_info
def get_ocs_build_number():
"""
Gets the build number for ocs operator
Return:
str: build number for ocs operator version
"""
# Importing here to avoid circular dependency
from ocs_ci.ocs.resources.csv import get_csvs_start_with_prefix
from ocs_ci.ocs.resources.catalog_source import CatalogSource
from ocs_ci.ocs.resources.packagemanifest import get_selector_for_ocs_operator
build_num = ""
if (
version_module.get_semantic_ocs_version_from_config()
>= version_module.VERSION_4_9
):
operator_name = defaults.ODF_OPERATOR_NAME
else:
operator_name = defaults.OCS_OPERATOR_NAME
ocs_csvs = get_csvs_start_with_prefix(
operator_name,
defaults.ROOK_CLUSTER_NAMESPACE,
)
try:
ocs_csv = ocs_csvs[0]
csv_labels = ocs_csv["metadata"]["labels"]
if "full_version" in csv_labels:
return csv_labels["full_version"]
build_num = ocs_csv["spec"]["version"]
operator_selector = get_selector_for_ocs_operator()
# This is a temporary solution for getting the build id from the registry
# image, because the build ID is currently missing in the CSV. If a catalog
# source with our internal label exists, the build id is taken from the tag
# of the image in the catalog source. Boris is working on a better way to
# populate the internal build version in the CSV.
if operator_selector:
catalog_source = CatalogSource(
resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
namespace=constants.MARKETPLACE_NAMESPACE,
selector=operator_selector,
)
cs_data = catalog_source.get()["items"][0]
cs_image = cs_data["spec"]["image"]
image_tag = cs_image.split(":")[1]
if "-" in image_tag:
build_id = image_tag.split("-")[1]
build_num += f"-{build_id}"
except (IndexError, AttributeError, CommandFailed, KeyError):
log.exception("No version info found for OCS operator")
return build_num
def get_cluster_version():
"""
Gets the cluster version
Returns:
str: cluster version
"""
return get_cluster_version_info()["status"]["desired"]["version"]
def get_cluster_image():
"""
Gets the cluster image
Returns:
str: cluster image
"""
return get_cluster_version_info()["status"]["desired"]["image"]
def get_ceph_version():
"""
Gets the ceph version
Returns:
str: ceph version
"""
# importing here to avoid circular imports
from ocs_ci.ocs.resources import pod
ct_pod = pod.get_ceph_tools_pod()
ceph_version = ct_pod.exec_ceph_cmd("ceph version")
return re.split(r"ceph version ", ceph_version["version"])[1]
def get_rook_version():
"""
Gets the rook version
Returns:
str: rook version
"""
# importing here to avoid circular imports
from ocs_ci.ocs.resources import pod
ct_pod = pod.get_ceph_tools_pod()
rook_versions = ct_pod.exec_ceph_cmd("rook version", format="")
return rook_versions["rook"]
def get_csi_versions():
"""
Gets the CSI related version information
Returns:
dict: CSI related version information
"""
csi_versions = {}
# importing here to avoid circular imports
from ocs_ci.ocs.ocp import OCP
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
)
csi_provisioners = ["csi-cephfsplugin-provisioner", "csi-rbdplugin-provisioner"]
for provisioner in csi_provisioners:
csi_provisioner_pod = run_cmd(
f"oc -n {config.ENV_DATA['cluster_namespace']} get pod -l "
f"'app={provisioner}' -o jsonpath='{{.items[0].metadata.name}}'"
)
desc = ocp_pod_obj.get(csi_provisioner_pod)
for container in desc["spec"]["containers"]:
name = container["name"]
version = container["image"].split("/")[-1].split(":")[1]
csi_versions[name] = version
return csi_versions
def get_ocp_version(seperator=None):
"""
Get current ocp version
Args:
seperator (str): String that would separate major and
minor version numbers
Returns:
string : If seperator is 'None', version string will be returned as is
eg: '4.2', '4.3'.
If seperator is provided then '.' in the version string would be
replaced by seperator and resulting string will be returned.
eg: If seperator is '_' then string returned would be '4_2'
"""
char = seperator if seperator else "."
if config.ENV_DATA.get("skip_ocp_deployment"):
raw_version = json.loads(run_cmd("oc version -o json"))["openshiftVersion"]
else:
raw_version = config.DEPLOYMENT["installer_version"]
version = Version.coerce(raw_version)
return char.join([str(version.major), str(version.minor)])
def get_running_ocp_version(separator=None):
"""
Get current running ocp version
Args:
separator (str): String that would separate major and
minor version numbers
Returns:
string : If separator is 'None', version string will be returned as is
eg: '4.2', '4.3'.
If separator is provided then '.' in the version string would be
replaced by separator and resulting string will be returned.
eg: If separator is '_' then string returned would be '4_2'
"""
char = separator if separator else "."
namespace = config.ENV_DATA["cluster_namespace"]
try:
# if the cluster exists, this part will be run
results = run_cmd(f"oc get clusterversion -n {namespace} -o yaml")
build = yaml.safe_load(results)["items"][0]["status"]["desired"]["version"]
return char.join(build.split(".")[0:2])
except Exception:
# this part will return the version from the config file in case
# the cluster does not exist.
return get_ocp_version(seperator=char)
def get_ocp_repo():
"""
Get the ocp repo file; the name is generated dynamically based on the
ocp version.
Returns:
string : Path to ocp repo file
"""
repo_path = os.path.join(constants.REPO_DIR, f"ocp_{get_ocp_version('_')}.repo")
path = os.path.expanduser(repo_path)
assert os.path.exists(path), f"OCP repo file {path} doesn't exist!"
return path
def parse_pgsql_logs(data):
"""
Parse the pgsql benchmark data from ripsaw and return
the data in list format
Args:
data (str): log data from pgsql bench run
Returns:
list_data (list): data digestible by scripts in the below format
e.g.:
[
{1: {'num_clients': '2', 'num_threads': '7', 'latency_avg': '7',
'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'}},
{2: {'num_clients': '2', 'num_threads': '7', 'latency_avg': '7',
'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'}},
{3: {'num_clients': '2', 'num_threads': '7', 'latency_avg': '7',
'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'}},
]
where the keys {1, 2, 3} are run IDs
"""
match = data.split("PGBench Results")
list_data = []
for i in range(2, len(match)):
log = "".join(match[i].split("\n"))
pgsql_data = dict()
pgsql_data[i - 1] = {}
clients = re.search(r"scaling_factor\':\s+(\d+),", log)
if clients and clients.group(1):
pgsql_data[i - 1]["scaling_factor"] = clients.group(1)
clients = re.search(r"number_of_clients\':\s+(\d+),", log)
if clients and clients.group(1):
pgsql_data[i - 1]["num_clients"] = clients.group(1)
threads = re.search(r"number_of_threads\':\s+(\d+)", log)
if threads and threads.group(1):
pgsql_data[i - 1]["num_threads"] = threads.group(1)
clients = re.search(r"number_of_transactions_per_client\':\s+(\d+),", log)
if clients and clients.group(1):
pgsql_data[i - 1]["number_of_transactions_per_client"] = clients.group(1)
clients = re.search(
r"number_of_transactions_actually_processed\':\s+(\d+),", log
)
if clients and clients.group(1):
pgsql_data[i - 1][
"number_of_transactions_actually_processed"
] = clients.group(1)
lat_avg = re.search(r"latency_average_ms\':\s+(\d+)", log)
if lat_avg and lat_avg.group(1):
pgsql_data[i - 1]["latency_avg"] = lat_avg.group(1)
lat_stddev = re.search(r"latency_stddev_ms\':\s+(\d+)", log)
if lat_stddev and lat_stddev.group(1):
pgsql_data[i - 1]["lat_stddev"] = lat_stddev.group(1)
tps_incl = re.search(r"tps_incl_con_est\':\s+(\w+)", log)
if tps_incl and tps_incl.group(1):
pgsql_data[i - 1]["tps_incl"] = tps_incl.group(1)
tps_excl = re.search(r"tps_excl_con_est\':\s+(\w+)", log)
if tps_excl and tps_excl.group(1):
pgsql_data[i - 1]["tps_excl"] = tps_excl.group(1)
list_data.append(pgsql_data)
return list_data
def create_directory_path(path):
"""
Creates the directory if the path doesn't exist
Args:
path (str): Path of the directory to create
"""
path = os.path.expanduser(path)
if not os.path.exists(path):
os.makedirs(path)
else:
log.debug(f"{path} already exists")
def ocsci_log_path():
"""
Construct the full path for the log directory.
Returns:
str: full path for ocs-ci log directory
"""
return os.path.expanduser(
os.path.join(config.RUN["log_dir"], f"ocs-ci-logs-{config.RUN['run_id']}")
)
def get_testrun_name():
"""
Prepare testrun ID for Polarion (and other reports).
Returns:
str: String containing testrun name
"""
markers = config.RUN["cli_params"].get("-m", "").replace(" ", "-")
us_ds = config.REPORTING.get("us_ds")
if us_ds.upper() == "US":
us_ds = "Upstream"
elif us_ds.upper() == "DS":
us_ds = "Downstream"
ocp_version = ".".join(config.DEPLOYMENT.get("installer_version").split(".")[:-2])
ocp_version_string = f"OCP{ocp_version}" if ocp_version else ""
ocs_version = config.ENV_DATA.get("ocs_version")
ocs_version_string = f"OCS{ocs_version}" if ocs_version else ""
worker_os = "RHEL" if config.ENV_DATA.get("rhel_workers") else "RHCOS"
build_user = None
baremetal_config = None
if config.ENV_DATA.get("mon_type"):
baremetal_config = (
f"MON {config.ENV_DATA.get('mon_type').upper()} "
f"OSD {config.ENV_DATA.get('osd_type').upper()}"
)
lso_deployment = ""
if not baremetal_config and config.DEPLOYMENT.get("local_storage"):
lso_deployment = "LSO "
if config.REPORTING.get("display_name"):
testrun_name = config.REPORTING.get("display_name")
else:
build_user = config.REPORTING.get("build_user")
testrun_name = (
f"{config.ENV_DATA.get('platform', '').upper()} "
f"{config.ENV_DATA.get('deployment_type', '').upper()} "
)
if baremetal_config:
testrun_name = f"LSO {baremetal_config} {testrun_name}"
testrun_name = (
f"{testrun_name}"
f"{get_az_count()}AZ "
f"{worker_os} "
f"{lso_deployment}"
f"{config.ENV_DATA.get('master_replicas')}M "
f"{config.ENV_DATA.get('worker_replicas')}W "
f"{markers}"
)
testrun_name = (
f"{ocs_version_string} {us_ds} {ocp_version_string} " f"{testrun_name}"
)
if build_user:
testrun_name = f"{build_user} {testrun_name}"
# replace invalid character(s) by '-'
testrun_name = testrun_name.translate(
str.maketrans({key: "-" for key in """ \\/.:*"<>|~!@#$?%^&'*(){}+`,=\t"""})
)
log.info("testrun_name: %s", testrun_name)
return testrun_name
def get_az_count():
"""
Using a number of different configuration attributes, determine how many
availability zones the cluster is configured for.
Returns:
int: number of availability zones
"""
if config.ENV_DATA.get("availability_zone_count"):
return int(config.ENV_DATA.get("availability_zone_count"))
elif config.ENV_DATA.get("worker_availability_zones"):
return len(config.ENV_DATA.get("worker_availability_zones"))
elif config.ENV_DATA.get("platform") == "vsphere":
return 1
else:
return 1
def ceph_health_check(namespace=None, tries=20, delay=30):
"""
Args:
namespace (str): Namespace of OCS
(default: config.ENV_DATA['cluster_namespace'])
tries (int): Number of retries
delay (int): Delay in seconds between retries
Returns:
bool: ceph_health_check_base return value with default retries of 20,
delay of 30 seconds if default values are not changed via args.
"""
if config.ENV_DATA["platform"].lower() == constants.IBM_POWER_PLATFORM:
delay = 60
return retry(
(CephHealthException, CommandFailed, subprocess.TimeoutExpired),
tries=tries,
delay=delay,
backoff=1,
)(ceph_health_check_base)(namespace)
def ceph_health_check_base(namespace=None):
"""
Exec `ceph health` cmd on tools pod to determine health of cluster.
Args:
namespace (str): Namespace of OCS
(default: config.ENV_DATA['cluster_namespace'])
Raises:
CephHealthException: If the ceph health returned is not HEALTH_OK
CommandFailed: If the command to retrieve the tools pod name or the
command to get ceph health returns a non-zero exit code
Returns:
boolean: True if HEALTH_OK
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
run_cmd(
f"oc wait --for condition=ready pod "
f"-l app=rook-ceph-tools "
f"-n {namespace} "
f"--timeout=120s"
)
tools_pod = run_cmd(
f"oc -n {namespace} get pod -l 'app=rook-ceph-tools' "
f"-o jsonpath='{{.items[0].metadata.name}}'",
timeout=60,
)
health = run_cmd(f"oc -n {namespace} exec {tools_pod} -- ceph health")
if health.strip() == "HEALTH_OK":
log.info("Ceph cluster health is HEALTH_OK.")
return True
else:
raise CephHealthException(f"Ceph cluster health is not OK. Health: {health}")
def get_rook_repo(branch="master", to_checkout=None):
"""
Clone and checkout the rook repository to specific branch/commit.
Args:
branch (str): Branch name to checkout
to_checkout (str): Commit id or tag to checkout
"""
cwd = constants.ROOK_REPO_DIR
if not os.path.isdir(cwd):
log.info(f"Cloning rook repository into {cwd}.")
run_cmd(f"git clone {constants.ROOK_REPOSITORY} {cwd}")
else:
log.info(
f"The rook directory {cwd} already exists, ocs-ci will skip the "
f"clone of rook repository."
)
log.info("Fetching latest changes from rook repository.")
run_cmd("git fetch --all", cwd=cwd)
log.info(f"Checkout rook repository to specific branch: {branch}")
run_cmd(f"git checkout {branch}", cwd=cwd)
log.info(f"Reset branch: {branch} with latest changes")
run_cmd(f"git reset --hard origin/{branch}", cwd=cwd)
if to_checkout:
run_cmd(f"git checkout {to_checkout}", cwd=cwd)
def clone_repo(url, location, branch="master", to_checkout=None):
"""
Clone a repository or checkout latest changes if it already exists at
specified location.
Args:
url (str): location of the repository to clone
location (str): path where the repository will be cloned to
branch (str): branch name to checkout
to_checkout (str): commit id or tag to checkout
"""
if not os.path.isdir(location):
log.info("Cloning repository into %s", location)
run_cmd(f"git clone {url} {location}")
else:
log.info("Repository already cloned at %s, skipping clone", location)
log.info("Fetching latest changes from repository")
run_cmd("git fetch --all", cwd=location)
log.info("Checking out repository to specific branch: %s", branch)
run_cmd(f"git checkout {branch}", cwd=location)
log.info("Reset branch: %s with latest changes", branch)
run_cmd(f"git reset --hard origin/{branch}", cwd=location)
if to_checkout:
run_cmd(f"git checkout {to_checkout}", cwd=location)
def get_latest_ds_olm_tag(upgrade=False, latest_tag=None):
"""
This function returns the latest tag of the OCS downstream registry, or the
one before the latest if the upgrade parameter is True
Args:
upgrade (bool): If True then it returns the tag of the build one version
before the latest.
latest_tag (str): Tag of the latest build. If not specified
config.DEPLOYMENT['default_latest_tag'] or 'latest' will be used.
Returns:
str: latest tag for downstream image from quay registry
Raises:
TagNotFoundException: In case no tag found
"""
latest_tag = latest_tag or config.DEPLOYMENT.get("default_latest_tag", "latest")
tags = get_ocs_olm_operator_tags()
latest_image = None
ocs_version = config.ENV_DATA["ocs_version"]
upgrade_ocs_version = config.UPGRADE.get("upgrade_ocs_version")
use_rc_build = config.UPGRADE.get("use_rc_build")
previous_rc_build = config.UPGRADE.get("previous_rc_build")
upgrade_version_change = upgrade_ocs_version and ocs_version != upgrade_ocs_version
if upgrade and use_rc_build and previous_rc_build and not upgrade_version_change:
latest_tag = previous_rc_build
if upgrade_version_change:
upgrade = False
for tag in tags:
if tag["name"] == latest_tag:
latest_image = tag["manifest_digest"]
break
if not latest_image:
raise TagNotFoundException("Couldn't find latest tag!")
latest_tag_found = False
for tag in tags:
if not upgrade:
if (
not any(t in tag["name"] for t in constants.LATEST_TAGS)
and tag["manifest_digest"] == latest_image
):
return tag["name"]
if upgrade:
if not latest_tag_found and tag["name"] == latest_tag:
latest_tag_found = True
continue
if not latest_tag_found:
continue
if (
not any(t in tag["name"] for t in constants.LATEST_TAGS)
and tag["manifest_digest"] != latest_image
and ocs_version in tag["name"]
):
if config.UPGRADE.get("use_rc_build") and "rc" not in tag["name"]:
continue
return tag["name"]
raise TagNotFoundException("Couldn't find any desired tag!")
def get_next_version_available_for_upgrade(current_tag):
"""
This function returns the tag built after the current_tag
Args:
current_tag (str): Current build tag from which to search for the next
build tag.
Returns:
str: tag for downstream image from quay registry built after
the current_tag.
Raises:
TagNotFoundException: In case no tag suitable for upgrade found
"""
tags = get_ocs_olm_operator_tags()
if any(t in current_tag for t in constants.LATEST_TAGS):
return current_tag
current_tag_index = None
for index, tag in enumerate(tags):
if tag["name"] == current_tag:
if index < 2:
raise TagNotFoundException("Couldn't find tag for upgrade!")
current_tag_index = index
break
sliced_reversed_tags = tags[:current_tag_index]
sliced_reversed_tags.reverse()
ocs_version = config.ENV_DATA["ocs_version"]
for tag in sliced_reversed_tags:
if (
not any(t in tag["name"] for t in constants.LATEST_TAGS)
and ocs_version in tag["name"]
):
if config.UPGRADE.get("use_rc_build") and "rc" not in tag["name"]:
continue
return tag["name"]
raise TagNotFoundException("Couldn't find any tag!")
def load_auth_config():
"""
Load the authentication config YAML from /data/auth.yaml
Raises:
FileNotFoundError: if the auth config is not found
Returns:
dict: A dictionary representing the YAML file
"""
log.info("Retrieving the authentication config dictionary")
auth_file = os.path.join(constants.TOP_DIR, "data", constants.AUTHYAML)
try:
with open(auth_file) as f:
return yaml.safe_load(f)
except FileNotFoundError:
log.warning(
f"Unable to find the authentication configuration at {auth_file}, "
f"please refer to the getting started guide ({constants.AUTH_CONFIG_DOCS})"
)
return {}
def get_ocs_olm_operator_tags(limit=100):
"""
Query the OCS OLM Operator repo and retrieve a list of tags. Since we are limited
to 100 tags per page, we end up making several API calls and combining the results
into a single list of tags.
Args:
limit: the number of tags to limit the request to
Raises:
KeyError: if the auth config isn't setup properly
requests.RequestException: if the response return code is not ok
Returns:
list: OCS OLM Operator tags
"""
try:
quay_access_token = load_auth_config()["quay"]["access_token"]
except (KeyError, TypeError):
log.error(
"Unable to retrieve the access token for quay, please refer to "
f"the getting started guide ({constants.AUTH_CONFIG_DOCS}) "
"to properly setup your authentication configuration"
)
raise
headers = {"Authorization": f"Bearer {quay_access_token}"}
image = "ocs-registry"
try:
ocs_version = float(config.ENV_DATA.get("ocs_version"))
if ocs_version < 4.5:
image = "ocs-olm-operator"
except (ValueError, TypeError):
log.warning("Invalid ocs_version given, defaulting to ocs-registry image")
pass
all_tags = []
page = 1
while True:
log.info(f"Retrieving OCS OLM Operator tags (limit {limit}, page {page})")
resp = requests.get(
constants.OPERATOR_CS_QUAY_API_QUERY.format(
tag_limit=limit,
image=image,
page=page,
),
headers=headers,
)
if not resp.ok:
raise requests.RequestException(resp.json())
tags = resp.json()["tags"]
if len(tags) == 0:
log.info("No more tags to retrieve")
break
log.debug(tags)
all_tags.extend(tags)
page += 1
return all_tags
def check_if_executable_in_path(exec_name):
"""
Checks whether an executable can be found in the $PATH
Args:
exec_name: Name of executable to look for
Returns:
Boolean: Whether the executable was found
"""
return which(exec_name) is not None
def upload_file(server, localpath, remotepath, user=None, password=None, key_file=None):
"""
Upload a file to remote server
Args:
server (str): Name of the server to upload to
localpath (str): Local file to upload
remotepath (str): Target path on the remote server. filename should be included
user (str): User to use for the remote connection (default: root)
password (str): Password of the remote user (used if provided)
key_file (str): Path to an SSH private key file (used when no password is given)
"""
if not user:
user = "root"
try:
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy())
if password:
ssh.connect(hostname=server, username=user, password=password)
else:
log.info(key_file)
ssh.connect(hostname=server, username=user, key_filename=key_file)
sftp = ssh.open_sftp()
log.info(f"uploading {localpath} to {user}@{server}:{remotepath}")
sftp.put(localpath, remotepath)
sftp.close()
ssh.close()
except AuthenticationException as authException:
log.error(f"Authentication failed: {authException}")
raise authException
except SSHException as sshException:
log.error(f"SSH connection failed: {sshException}")
raise sshException
def read_file_as_str(filepath):
"""
Reads the file content
Args:
filepath (str): File to read
Returns:
str : File contents in string
"""
with open(rf"{filepath}") as fd:
content = fd.read()
return content
def replace_content_in_file(file, old, new, match_and_replace_line=False):
"""
Replaces contents in the file; if the old value is not found, the new
value is added to the file.
Args:
file (str): Name of the file in which contents will be replaced
old (str): Data to search for
new (str): Data to replace the old value
match_and_replace_line (bool): If True, it will match a line if
`old` pattern is found in the line. The whole line will be replaced
with `new` content.
Otherwise it will replace only `old` string with `new` string but
the rest of the line will be intact. This is the default option.
"""
# Read the file
with open(rf"{file}", "r") as fd:
file_data = [line.rstrip("\n") for line in fd.readlines()]
if match_and_replace_line:
# Replace the whole line with `new` string if the line contains `old`
# string pattern.
file_data = [new if old in line else line for line in file_data]
else:
# Replace the old string by new
file_data = [
line.replace(old, new) if old in line else line for line in file_data
]
updated_data = [line for line in file_data if new in line]
# In case the old pattern wasn't found it will be added as first line
if not updated_data:
file_data.insert(0, new)
file_data = [f"{line}\n" for line in file_data]
# Write the file out again
with open(rf"{file}", "w") as fd:
fd.writelines(file_data)
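# Illustrative behavior (the file content is hypothetical): for a file containing
# the line "loglevel = INFO",
#   replace_content_in_file(path, "INFO", "DEBUG") changes just the substring,
#   replace_content_in_file(path, "loglevel", "loglevel = DEBUG",
#                           match_and_replace_line=True) rewrites the whole line,
# and if the old pattern is not found at all, the new value is inserted as the
# first line of the file.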
@retry((CommandFailed), tries=100, delay=10, backoff=1)
def wait_for_co(operator):
"""
Waits for the ClusterOperator to be created
Args:
operator (str): Name of the ClusterOperator
"""
from ocs_ci.ocs.ocp import OCP
ocp = OCP(kind="ClusterOperator")
ocp.get(operator)
def censor_values(data_to_censor):
"""
This function censors string and numeric values in a dictionary based on
keys that match the patterns defined in config_keys_patterns_to_censor in
constants. It is performed recursively for nested dictionaries.
Args:
data_to_censor (dict): Data to censor.
Returns:
dict: filtered data
"""
for key in data_to_censor:
if isinstance(data_to_censor[key], dict):
censor_values(data_to_censor[key])
elif isinstance(data_to_censor[key], (str, int, float)):
for pattern in constants.config_keys_patterns_to_censor:
if pattern in key.lower():
data_to_censor[key] = "*" * 5
return data_to_censor
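# Illustrative behavior (assuming "password" is one of the patterns listed in
# constants.config_keys_patterns_to_censor):
#   censor_values({"user": "admin", "vsphere_password": "secret"})
#   -> {"user": "admin", "vsphere_password": "*****"}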
def dump_config_to_file(file_path):
"""
Dump the config to the yaml file with censored secret values.
Args:
file_path (str): Path to file where to write the configuration.
"""
config_copy = deepcopy(config.to_dict())
censor_values(config_copy)
with open(file_path, "w+") as fs:
yaml.safe_dump(config_copy, fs)
def create_rhelpod(namespace, pod_name, timeout=300):
"""
Creates the RHEL pod
Args:
namespace (str): Namespace to create RHEL pod
pod_name (str): Pod name
timeout (int): wait time for RHEL pod to be in Running state
Returns:
pod: Pod instance for RHEL
"""
# importing here to avoid dependencies
from ocs_ci.helpers import helpers
rhelpod_obj = helpers.create_pod(
namespace=namespace,
pod_name=pod_name,
pod_dict_path=constants.RHEL_7_7_POD_YAML,
)
helpers.wait_for_resource_state(rhelpod_obj, constants.STATUS_RUNNING, timeout)
return rhelpod_obj
def check_timeout_reached(start_time, timeout, err_msg=None):
"""
Check if timeout reached and if so raise the exception.
Args:
start_time (time): Start time of the operation.
timeout (int): Timeout in seconds.
err_msg (str): Error message for the exception.
Raises:
TimeoutException: In case the timeout is reached.
"""
msg = f"Timeout {timeout} reached!"
if err_msg:
msg += " Error: {err_msg}"
if timeout < (time.time() - start_time):
raise TimeoutException(msg)
def convert_yaml2tfvars(yaml):
"""
Converts yaml file to tfvars. It creates the tfvars with the
same filename in the required format which is used for deployment.
Args:
yaml (str): File path to yaml
Returns:
str: File path to tfvars
"""
# importing here to avoid dependencies
from ocs_ci.utility.templating import load_yaml
data = load_yaml(yaml)
tfvars_file = os.path.splitext(yaml)[0]
log.debug(f"Converting {yaml} to {tfvars_file}")
with open(tfvars_file, "w+") as fd:
for key, val in data.items():
if key == "control_plane_ignition":
fd.write("control_plane_ignition = <<END_OF_MASTER_IGNITION\n")
fd.write(f"{val}\n")
fd.write("END_OF_MASTER_IGNITION\n")
continue
if key == "compute_ignition":
fd.write("compute_ignition = <<END_OF_WORKER_IGNITION\n")
fd.write(f"{val}\n")
fd.write("END_OF_WORKER_IGNITION\n")
continue
if key == "vm_dns_addresses":
fd.write(f'vm_dns_addresses = ["{val}"]\n')
continue
fd.write(key)
fd.write(" = ")
fd.write('"')
fd.write(f"{val}")
fd.write('"\n')
return tfvars_file
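# Illustrative conversion (keys and values are hypothetical): a YAML file with
#   cluster_id: mycluster
#   vm_dns_addresses: 10.0.0.1
# is written to the same path without its extension as
#   cluster_id = "mycluster"
#   vm_dns_addresses = ["10.0.0.1"]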
def remove_keys_from_tf_variable_file(tf_file, keys):
"""
Removes the keys from the tf file and converts it to json format
Args:
tf_file (str): path to tf file
keys (list): list of keys to remove
"""
# importing here to avoid dependencies
from ocs_ci.utility.templating import dump_data_to_json
with open(tf_file, "r") as fd:
obj = hcl2.load(fd)
for key in keys:
obj["variable"].pop(key)
dump_data_to_json(obj, f"{tf_file}.json")
os.rename(tf_file, f"{tf_file}.backup")
def get_kubeadmin_password():
"""
Read the kubeadmin password from the password file in the cluster path
(config.RUN["password_location"]).
Returns:
str: kubeadmin password
"""
filename = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["password_location"]
)
with open(filename) as f:
return f.read()
def get_infra_id(cluster_path):
"""
Get infraID from metadata.json in given cluster_path
Args:
cluster_path: path to cluster install directory
Returns:
str: metadata.json['infraID']
"""
metadata_file = os.path.join(cluster_path, "metadata.json")
with open(metadata_file) as f:
metadata = json.load(f)
return metadata["infraID"]
def get_cluster_name(cluster_path):
"""
Get clusterName from metadata.json in given cluster_path
Args:
cluster_path: path to cluster install directory
Returns:
str: metadata.json['clusterName']
"""
metadata_file = os.path.join(cluster_path, "metadata.json")
with open(metadata_file) as f:
metadata = json.load(f)
return metadata["clusterName"]
def skipif_ocp_version(expressions):
"""
This function evaluates the condition for test skip
based on expression
Args:
expressions (str OR list): condition for which we need to check,
eg: A single expression string '>=4.2' OR
A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']
Return:
'True' if test needs to be skipped else 'False'
"""
skip_this = True
ocp_version = get_running_ocp_version()
expr_list = [expressions] if isinstance(expressions, str) else expressions
for expr in expr_list:
comparison_str = ocp_version + expr
skip_this = skip_this and eval(comparison_str)
# skip_this will be either True or False after eval
return skip_this
def skipif_ocs_version(expressions):
"""
This function evaluates the condition for test skip
based on expression
Args:
expressions (str OR list): condition for which we need to check,
eg: A single expression string '>=4.2' OR
A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']
Return:
'True' if test needs to be skipped else 'False'
"""
expr_list = [expressions] if isinstance(expressions, str) else expressions
return any(eval(config.ENV_DATA["ocs_version"] + expr) for expr in expr_list)
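# Illustrative evaluation: with config.ENV_DATA["ocs_version"] == "4.6",
#   skipif_ocs_version(">=4.6")          -> eval("4.6>=4.6") -> True
#   skipif_ocs_version(["<4.5", ">4.7"]) -> False (neither expression holds)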
def skipif_ui_not_support(ui_test):
"""
This function evaluates the condition for ui test skip
based on ui_test expression
Args:
ui_test (str): condition for which we need to check,
Return:
'True' if test needs to be skipped else 'False'
"""
from ocs_ci.ocs.ui.views import locators
ocp_version = get_running_ocp_version()
if (
config.ENV_DATA["platform"].lower() == constants.IBMCLOUD_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
or config.ENV_DATA["platform"].lower() == constants.ROSA_PLATFORM
):
return True
try:
locators[ocp_version][ui_test]
except KeyError:
return True
return False
def get_ocs_version_from_image(image):
"""
Parse major.minor version from OCS image tag.
Args:
image (str): image in format url:tag
Returns
str: Version in x.y format
Raises:
ValueError: In case of the tag which we cannot parse to version.
"""
try:
version = image.rsplit(":", 1)[1]
# strip the optional "latest-" / "stable-" prefix from the tag
# (lstrip would strip characters, not the prefix)
version = re.sub(r"^(latest-|stable-)+", "", version)
version = Version.coerce(version)
return "{major}.{minor}".format(major=version.major, minor=version.minor)
except ValueError:
log.error(f"The version: {version} couldn't be parsed!")
raise
def get_available_ocp_versions(channel):
"""
Find all available OCP versions for specific channel.
Args:
channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)
Returns
list: Sorted list with OCP versions for specified channel.
"""
headers = {"Accept": "application/json"}
req = requests.get(
constants.OPENSHIFT_UPGRADE_INFO_API.format(channel=channel), headers=headers
)
data = req.json()
versions = [Version(node["version"]) for node in data["nodes"]]
versions.sort()
return versions
def get_latest_ocp_version(channel, index=-1):
"""
Find latest OCP version for specific channel.
Args:
channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)
index (int): Index to get from all available versions list
e.g. default -1 is latest version (version[-1]). If you want to get
previous version pass index -2 and so on.
Returns
str: Latest OCP version for specified channel.
"""
versions = get_available_ocp_versions(channel)
return str(versions[index])
def load_config_file(config_file):
"""
Loads config file to the ocs-ci config
Args:
config_file (str): Path to yaml config file.
Raises:
FileNotFoundError: In the case the config file not found.
"""
config_file = os.path.expanduser(config_file)
assert os.path.exists(config_file), f"Config file {config_file} doesn't exist!"
with open(os.path.abspath(os.path.expanduser(config_file)), "r") as file_stream:
custom_config_data = yaml.safe_load(file_stream)
config.update(custom_config_data)
def destroy_cluster(installer, cluster_path, log_level="DEBUG"):
"""
Destroy a specific OCP cluster
Args:
installer (str): The path to the installer binary
cluster_path (str): The path of the cluster
log_level (str): log level openshift-installer (default: DEBUG)
"""
destroy_cmd = (
f"{installer} destroy cluster "
f"--dir {cluster_path} "
f"--log-level {log_level}"
)
try:
# Execute destroy cluster using OpenShift installer
log.info(f"Destroying cluster defined in {cluster_path}")
run_cmd(destroy_cmd, timeout=1200)
except CommandFailed:
log.error(traceback.format_exc())
raise
except Exception:
log.error(traceback.format_exc())
def config_to_string(config):
"""
Convert ConfigParser object to string in INI format.
Args:
config (obj): ConfigParser object
Returns:
str: Config in one string
"""
strio = io.StringIO()
config.write(strio, space_around_delimiters=False)
return strio.getvalue()
class AZInfo(object):
"""
A class for getting different az numbers across calls
"""
zone_number = 0
def get_zone_number(self):
"""
Increment the current zone_number and perform a modulus op
to roll on to the next available number
Returns:
int: zone number index
"""
prev = AZInfo.zone_number
AZInfo.zone_number += 1
AZInfo.zone_number %= get_az_count()
return prev
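# Illustrative behavior (assuming get_az_count() returns 3): consecutive calls to
# get_zone_number() on any AZInfo instance return 0, 1, 2, 0, 1, ... because the
# counter lives on the class itself and wraps around the AZ count.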
def convert_device_size(unformatted_size, units_to_covert_to):
"""
Convert a string representing a size to an int according to the given units
to convert to
Args:
unformatted_size (str): The size to convert (i.e, '1Gi'/'100Mi')
units_to_covert_to (str): The units to convert the size to (i.e, TB/GB/MB)
Returns:
int: The converted size
"""
units = unformatted_size[-2:]
abso = int(unformatted_size[:-2])
conversion = {
"TB": {"Ti": abso, "Gi": abso / 1000, "Mi": abso / 1e6, "Ki": abso / 1e9},
"GB": {"Ti": abso * 1000, "Gi": abso, "Mi": abso / 1000, "Ki": abso / 1e6},
"MB": {"Ti": abso * 1e6, "Gi": abso * 1000, "Mi": abso, "Ki": abso / 1000},
"KB": {"Ti": abso * 1e9, "Gi": abso * 1e6, "Mi": abso * 1000, "Ki": abso},
"B": {"Ti": abso * 1e12, "Gi": abso * 1e9, "Mi": abso * 1e6, "Ki": abso * 1000},
}
return conversion[units_to_covert_to][units]
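# Illustrative conversions (derived from the table above):
#   convert_device_size("1Gi", "MB") -> 1000
#   convert_device_size("2Ti", "GB") -> 2000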
def prepare_customized_pull_secret(images=None):
"""
Prepare customized pull-secret containing auth section related to given
image(s). If image(s) are not defined or no related section is found, it will
use the whole content of the pull-secret.
Args:
images (str, list): image (or images) to match with auth section
Returns:
NamedTemporaryFile: prepared pull-secret
"""
log.debug(f"Prepare customized pull-secret for images: {images}")
if isinstance(images, str):
images = [images]
# load pull-secret file to pull_secret dict
pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
with open(pull_secret_path) as pull_secret_fo:
pull_secret = json.load(pull_secret_fo)
authfile_content = {"auths": {}}
# if images defined, try to find auth section related to specified images
if images:
for image in images:
# find all auths which might be related to the specified image
tmp_auths = [auth for auth in pull_secret["auths"] if auth in image]
# get the most specific auth for particular image
tmp_auths = sorted(tmp_auths, key=len, reverse=True)
if tmp_auths:
# if there is match to particular auth, prepare authfile just with the
# matching auth
auth = tmp_auths[0]
# as key use only server name, without namespace
authfile_content["auths"][auth.split("/", 1)[0]] = pull_secret["auths"][
auth
]
if not authfile_content["auths"]:
authfile_content = pull_secret
# create temporary auth file
authfile_fo = NamedTemporaryFile(mode="w", prefix="authfile_")
json.dump(authfile_content, authfile_fo)
# ensure the content will be saved into the file
authfile_fo.flush()
return authfile_fo
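# Illustrative behavior (image and registry names are hypothetical): for the image
# "registry.example.com/ns/app:v1", every auth entry of data/pull-secret whose key
# is a substring of the image (e.g. "registry.example.com" or
# "registry.example.com/ns") is a candidate; the longest match wins and is written
# to the temporary authfile keyed by the bare server name "registry.example.com".
# If nothing matches, the whole pull-secret content is used instead.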
def inspect_image(image, authfile_fo):
"""
Inspect image
Args:
image (str): image to inspect
authfile_fo (NamedTemporaryFile): pull-secret required for pulling the given image
Returns:
dict: json object of the inspected image
"""
# pull original image (to be able to inspect it)
exec_cmd(f"podman image pull {image} --authfile {authfile_fo.name}")
# inspect the image
cmd_result = exec_cmd(f"podman image inspect {image}")
image_inspect = json.loads(cmd_result.stdout)
return image_inspect
def get_image_with_digest(image):
"""
Return image with sha256 digest for usage in disconnected environment
Args:
image (str): image
Raises:
UnexpectedImage: In case the image information is unexpected
Returns:
str: image with sha256 digest specification
"""
if "@sha256:" in image:
return image
with prepare_customized_pull_secret(image) as authfile_fo:
image_inspect = inspect_image(image, authfile_fo)
# we expect, that 'Digest' will match one of the images in 'RepoDigests',
# if not, raise UnexpectedImage
for image in image_inspect[0]["RepoDigests"]:
if image_inspect[0]["Digest"] in image:
return image
else:
raise UnexpectedImage(
f"Image digest ({image_inspect[0]['Digest']}) doesn't match with "
f"any image from RepoDigests ({image_inspect[0]['RepoDigests']})."
)
def login_to_mirror_registry(authfile):
"""
Login to mirror registry
Args:
authfile (str): authfile (pull-secret) path
"""
# load cluster info
load_cluster_info()
mirror_registry = config.DEPLOYMENT["mirror_registry"]
mirror_registry_user = config.DEPLOYMENT["mirror_registry_user"]
mirror_registry_password = config.DEPLOYMENT["mirror_registry_password"]
login_cmd = (
f"podman login --authfile {authfile} "
f"{mirror_registry} -u {mirror_registry_user} "
f"-p {mirror_registry_password} --tls-verify=false"
)
exec_cmd(login_cmd, (mirror_registry_user, mirror_registry_password))
def mirror_image(image):
"""
Mirror image to mirror image registry.
Args:
image (str): image to be mirrored, can be defined just with name or
with full url, with or without tag or digest
Returns:
str: the mirrored image link
"""
with prepare_customized_pull_secret(image) as authfile_fo:
# login to mirror registry
login_to_mirror_registry(authfile_fo.name)
# if there is any tag specified, use it in the full image url,
# otherwise use url with digest
image_inspect = inspect_image(image, authfile_fo)
if image_inspect[0].get("RepoTags"):
orig_image_full = image_inspect[0]["RepoTags"][0]
else:
orig_image_full = image_inspect[0]["RepoDigests"][0]
# prepare mirrored image url
mirror_registry = config.DEPLOYMENT["mirror_registry"]
mirrored_image = mirror_registry + re.sub(r"^[^/]*", "", orig_image_full)
# mirror the image
log.info(
f"Mirroring image '{image}' ('{orig_image_full}') to '{mirrored_image}'"
)
exec_cmd(
f"oc image mirror --insecure --registry-config"
f" {authfile_fo.name} {orig_image_full} {mirrored_image}"
)
return mirrored_image
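# Illustrative result (registry names are hypothetical): with
# config.DEPLOYMENT["mirror_registry"] == "mirror.example.com:5000", an original
# image "quay.io/ns/app:v1" would be mirrored and returned as
# "mirror.example.com:5000/ns/app:v1"; only the leading registry host is swapped
# by the re.sub() call above.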
def update_container_with_mirrored_image(job_pod_dict):
"""
Update Job or Pod configuration dict with mirrored image (required for
disconnected installation).
Args:
job_pod_dict (dict): dictionary with Job or Pod configuration
Returns:
dict: for disconnected installation, returns updated Job or Pod dict,
for normal installation return unchanged job_pod_dict
"""
if config.DEPLOYMENT.get("disconnected"):
if "containers" in job_pod_dict["spec"]:
container = job_pod_dict["spec"]["containers"][0]
else:
container = job_pod_dict["spec"]["template"]["spec"]["containers"][0]
container["image"] = mirror_image(container["image"])
return job_pod_dict
def get_trim_mean(values, percentage=20):
"""
Get the trimmed mean of a list of values.
Explanation: This function finds the arithmetic mean of given values,
ignoring values outside the given limits.
Args:
values (list): The list of values
percentage (int): The percentage to be trimmed
Returns:
float: Trimmed mean. In case trimmed mean calculation fails,
the regular mean average is returned
"""
lower_limit = scoreatpercentile(values, percentage)
upper_limit = scoreatpercentile(values, 100 - percentage)
try:
return tmean(values, limits=(lower_limit, upper_limit))
except ValueError:
log.warning(
f"Failed to calculate the trimmed mean of {values}. The "
f"Regular mean average will be calculated instead"
)
return sum(values) / len(values)
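# Illustrative calculation: get_trim_mean(list(range(1, 11)), percentage=20)
# keeps only the values between the 20th and 80th percentiles (3..8 here) and
# returns their mean, 5.5.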
def set_selinux_permissions(workers=None):
"""
Workaround for #1777384 - enable container_use_cephfs on RHEL workers
Ticket: RHSTOR-787, see more details in the issue: #1151
Args:
workers (list): List of worker nodes to set selinux permissions
"""
log.info("Running WA for ticket: RHSTOR-787")
from ocs_ci.ocs import ocp
ocp_obj = ocp.OCP()
cmd = ["/usr/sbin/setsebool -P container_use_cephfs on"]
cmd_list = cmd.copy()
if not workers:
from ocs_ci.ocs.node import get_typed_worker_nodes
worker_nodes = get_typed_worker_nodes(os_id="rhel")
else:
worker_nodes = workers
for worker in worker_nodes:
node = worker.get().get("metadata").get("name") if not workers else worker
log.info(f"{node} is a RHEL based worker - applying '{cmd_list}'")
if config.ENV_DATA["platform"] == constants.IBMCLOUD_PLATFORM:
retry(CommandFailed, tries=10, delay=3, backoff=2)(
ocp_obj.exec_oc_debug_cmd
)(node=node, cmd_list=cmd_list)
else:
retry(CommandFailed)(ocp_obj.exec_oc_debug_cmd)(
node=node, cmd_list=cmd_list
)
def set_registry_to_managed_state():
"""
In order to be able to deploy from stage we need to change
image registry config to Managed state.
More described in BZs:
https://bugzilla.redhat.com/show_bug.cgi?id=1806593
https://bugzilla.redhat.com/show_bug.cgi?id=1807471#c3
We need to change to managed state as described here:
https://github.com/red-hat-storage/ocs-ci/issues/1436
So this is not supposed to be deleted as a workaround, because we really
need to do this operation for OCS deployment, as was originally done here:
https://github.com/red-hat-storage/ocs-ci/pull/1437
Currently it has to be kept here so that the CA certificate is properly
propagated for the stage deployment, as mentioned in the BZ.
"""
# In RHV platform config is already set to Managed and storage pre-configured
on_prem_platform_to_exclude = [constants.RHV_PLATFORM]
platform_list_to_exclude = constants.CLOUD_PLATFORMS + on_prem_platform_to_exclude
if config.ENV_DATA["platform"] not in platform_list_to_exclude:
cluster_config = yaml.safe_load(
exec_cmd(f"oc get {constants.IMAGE_REGISTRY_CONFIG} -o yaml").stdout
)
if "emptyDir" not in cluster_config["spec"].get("storage", {}).keys():
run_cmd(
f"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p "
f'\'{{"spec":{{"storage": {{"emptyDir":{{}}}}}}}}\''
)
if cluster_config["spec"].get("managementState") != "Managed":
run_cmd(
f"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p "
f'\'{{"spec":{{"managementState": "Managed"}}}}\''
)
def add_stage_cert():
"""
Deploy stage certificate to the cluster.
"""
log.info("Create configmap stage-registry-config with stage CA.")
run_cmd(
f"oc -n openshift-config create configmap stage-registry-config"
f" --from-file=registry.stage.redhat.io={constants.STAGE_CA_FILE}"
)
log.info("Add stage-registry-config to additionalTrustedCA.")
additional_trusted_ca_patch = (
'{"spec":{"additionalTrustedCA":{"name":"stage-registry-config"}}}'
)
run_cmd(
f"oc patch image.config.openshift.io cluster --type=merge"
f" -p '{additional_trusted_ca_patch}'"
)
def get_terraform(version=None, bin_dir=None):
"""
Downloads the terraform binary
Args:
version (str): Version of the terraform to download
bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])
Returns:
str: Path to the terraform binary
"""
if platform.system() == "Darwin":
os_type = "darwin"
elif platform.system() == "Linux":
os_type = "linux"
else:
raise UnsupportedOSType
version = version or config.DEPLOYMENT["terraform_version"]
bin_dir = os.path.expanduser(bin_dir or config.RUN["bin_dir"])
terraform_zip_file = f"terraform_{version}_{os_type}_amd64.zip"
terraform_filename = "terraform"
terraform_binary_path = os.path.join(bin_dir, terraform_filename)
log.info(f"Downloading terraform version {version}")
previous_dir = os.getcwd()
os.chdir(bin_dir)
url = f"https://releases.hashicorp.com/terraform/{version}/" f"{terraform_zip_file}"
download_file(url, terraform_zip_file)
run_cmd(f"unzip -o {terraform_zip_file}")
delete_file(terraform_zip_file)
# return to the previous working directory
os.chdir(previous_dir)
return terraform_binary_path
def get_terraform_ignition_provider(terraform_dir, version=None):
"""
Downloads the terraform ignition provider
Args:
terraform_dir (str): Path to terraform working directory
version (str): Version of the terraform ignition provider to download
"""
version = version or constants.TERRAFORM_IGNITION_PROVIDER_VERSION
terraform_ignition_provider_zip_file = (
f"terraform-provider-ignition-{version}-linux-amd64.tar.gz"
)
terraform_ignition_provider_dir = (
f"terraform-provider-ignition-{version}-linux-amd64"
)
terraform_plugins_path = ".terraform/plugins/linux_amd64/"
log.info(f"Downloading terraform ignition proivider version {version}")
previous_dir = os.getcwd()
os.chdir(terraform_dir)
url = (
"https://github.com/community-terraform-providers/"
f"terraform-provider-ignition/releases/download/{version}/"
f"{terraform_ignition_provider_zip_file}"
)
# Download and untar
download_file(url, terraform_ignition_provider_zip_file)
run_cmd(f"tar xzf {terraform_ignition_provider_zip_file}")
# move the ignition provider binary to plugins path
create_directory_path(terraform_plugins_path)
move(
f"{terraform_ignition_provider_dir}/terraform-provider-ignition",
terraform_plugins_path,
)
# delete the downloaded files
delete_file(terraform_ignition_provider_zip_file)
delete_dir(terraform_ignition_provider_dir)
# return to the previous working directory
os.chdir(previous_dir)
def get_module_ip(terraform_state_file, module):
"""
Gets the node IP from terraform.tfstate file
Args:
terraform_state_file (str): Path to terraform state file
module (str): Module name in terraform.tfstate file
e.g: constants.LOAD_BALANCER_MODULE
Returns:
list: IP of the node
"""
ips = []
with open(terraform_state_file) as fd:
obj = json.loads(fd.read())
if config.ENV_DATA.get("folder_structure"):
resources = obj["resources"]
log.debug(f"Extracting module information for {module}")
log.debug(f"Resource in {terraform_state_file}: {resources}")
for resource in resources:
if resource.get("module") == module and resource.get("mode") == "data":
for each_resource in resource["instances"]:
resource_body = each_resource["attributes"]["body"]
ips.append(resource_body.split('"')[3])
else:
modules = obj["modules"]
target_module = module.split("_")[1]
log.debug(f"Extracting module information for {module}")
log.debug(f"Modules in {terraform_state_file}: {modules}")
for each_module in modules:
if target_module in each_module["path"]:
return each_module["outputs"]["ip_addresses"]["value"]
return ips
def set_aws_region(region=None):
"""
Exports environment variable AWS_REGION
Args:
region (str): AWS region to export
"""
log.debug("Exporting environment variable AWS_REGION")
region = region or config.ENV_DATA["region"]
os.environ["AWS_REGION"] = region
def get_system_architecture():
"""
Get output from 'uname -m' command run on first worker node.
Returns:
str: Architecture of system
"""
from ocs_ci.ocs.node import get_nodes
log.info("Checking architecture of system")
node = get_nodes(node_type=constants.WORKER_MACHINE)[0]
return node.ocp.exec_oc_debug_cmd(node.data["metadata"]["name"], ["uname -m"])
def wait_for_machineconfigpool_status(node_type, timeout=900):
"""
Check that the machineconfigpool status is updated for the given node type
Args:
node_type (str): The node type for which to check that the
machineconfigpool status is updated.
e.g: worker, master and all if we want to check all nodes
timeout (int): Time in seconds to wait
"""
# importing here to avoid dependencies
from ocs_ci.ocs import ocp
node_types = [node_type]
if node_type == "all":
node_types = [f"{constants.WORKER_MACHINE}", f"{constants.MASTER_MACHINE}"]
for role in node_types:
log.info(f"Checking machineconfigpool status for {role} nodes")
ocp_obj = ocp.OCP(kind=constants.MACHINECONFIGPOOL, resource_name=role)
machine_count = ocp_obj.get()["status"]["machineCount"]
assert ocp_obj.wait_for_resource(
condition=str(machine_count),
column="READYMACHINECOUNT",
timeout=timeout,
sleep=5,
)
def configure_chrony_and_wait_for_machineconfig_status(
node_type=constants.WORKER_MACHINE, timeout=900
):
"""
Configure chrony on the nodes
Args:
        node_type (str): The node type on which to configure chrony
            (e.g. worker, master, or all to configure all node types)
timeout (int): Time in seconds to wait
"""
# importing here to avoid dependencies
from ocs_ci.utility.templating import load_yaml
from ocs_ci.ocs.resources.ocs import OCS
chrony_data = load_yaml(constants.NTP_CHRONY_CONF)
node_types = [node_type]
if node_type == "all":
node_types = [f"{constants.WORKER_MACHINE}", f"{constants.MASTER_MACHINE}"]
for role in node_types:
log.info(f"Creating chrony for {role} nodes")
chrony_data["metadata"]["labels"][
"machineconfiguration.openshift.io/role"
] = role
chrony_data["metadata"]["name"] = f"{role}-chrony-configuration"
chrony_obj = OCS(**chrony_data)
chrony_obj.create()
        # sleep to give the machineconfigpool status update time to start
time.sleep(60)
wait_for_machineconfigpool_status(role, timeout=timeout)
def modify_csv(csv, replace_from, replace_to):
"""
Modify the CSV
Args:
csv (str): The CSV name
replace_from (str): The pattern to replace from in the CSV
replace_to (str): The pattern to replace to in the CSV
"""
data = (
f"oc -n openshift-storage get csv {csv} -o yaml | sed"
f" 's,{replace_from},{replace_to},g' | oc replace -f -"
)
log.info(
f"CSV {csv} will be modified: {replace_from} will be replaced "
f"with {replace_to}.\nThe command that will be used for that is:\n{data}"
)
temp_file = NamedTemporaryFile(mode="w+", prefix="csv_modification", suffix=".sh")
with open(temp_file.name, "w") as t_file:
t_file.writelines(data)
run_cmd(f"chmod 777 {temp_file.name}")
run_cmd(f"sh {temp_file.name}")
def check_for_rhcos_images(url):
"""
    Check if rhcos images are present in the given location
Args:
url (str): rhcos_images url
Returns:
        (bool): True if images are present, False otherwise
"""
r = requests.head(url)
return r.status_code == requests.codes.ok
def download_file_from_git_repo(git_repo_url, path_to_file_in_git, filename):
"""
Download a file from a specified git repository
Args:
git_repo_url (str): The git repository url
path_to_file_in_git (str): Path to the file to download
in git repository
filename (str): Name of the file to write the download to
"""
log.debug(
f"Download file '{path_to_file_in_git}' from "
f"git repository {git_repo_url} to local file '{filename}'."
)
temp_dir = mkdtemp()
git.Repo.clone_from(git_repo_url, temp_dir, branch="master", depth=1)
move(os.path.join(temp_dir, path_to_file_in_git), filename)
rmtree(temp_dir)
def skipif_upgraded_from(version_list):
"""
This function evaluates the condition to skip a test if the cluster
is upgraded from a particular OCS version
Args:
version_list (list): List of versions to check
Return:
(bool): True if test needs to be skipped else False
"""
try:
from ocs_ci.ocs.resources.ocs import get_ocs_csv
skip_this = False
version_list = [version_list] if isinstance(version_list, str) else version_list
ocs_csv = get_ocs_csv()
csv_info = ocs_csv.get()
prev_version = csv_info.get("spec").get("replaces", "")
for version in version_list:
if f".v{version}" in prev_version:
skip_this = True
break
return skip_this
except Exception as err:
log.error(str(err))
return False
def get_cluster_id(cluster_path):
"""
Get ClusterID from metadata.json in given cluster_path
Args:
cluster_path: path to cluster install directory
Returns:
str: metadata.json['clusterID']
"""
metadata_file = os.path.join(cluster_path, "metadata.json")
with open(metadata_file) as f:
metadata = json.load(f)
return metadata["clusterID"]
def get_running_cluster_id():
"""
Get cluster UUID
    Not relying on metadata.json, as users sometimes want to run
    only with a kubeconfig for some tests. For this function to work,
    the cluster has to be in a running state.
Returns:
str: cluster UUID
"""
cluster_id = run_cmd(
"oc get clusterversion version -o jsonpath='{.spec.clusterID}'"
)
return cluster_id
def get_ocp_upgrade_history():
"""
Gets the OCP upgrade history for the cluster
Returns:
list: List of OCP upgrade paths. Latest version in the
beginning of the list
"""
# importing here to avoid circular imports
from ocs_ci.ocs.ocp import OCP
ocp = OCP(kind="clusterversion")
cluster_version_info = ocp.get("version")
upgrade_history_info = cluster_version_info["status"]["history"]
upgrade_history = [each_upgrade["version"] for each_upgrade in upgrade_history_info]
return upgrade_history
def get_attr_chain(obj, attr_chain):
"""
    Attempt to retrieve an object attribute when uncertain whether the
    attribute, or another attribute in a given attribute chain, exists.
    If the retrieval fails, None is returned.
    The function can be used to retrieve a direct attribute or a chain of
    attributes, i.e. - obj.attr_a, obj.attr_a.sub_attr
    Another example - trying to access "sub_attr_b" in object.attr.sub_attr_a.sub_attr_b -
    get_attr_chain(object, "attr.sub_attr_a.sub_attr_b")
    The function will try to retrieve "sub_attr_b" without raising an exception,
    even in cases where "attr" or "sub_attr_a" might not exist.
    In those cases, the function will return None.
Args:
obj: An object
attr_chain (str): A string containing one attribute or several sub-attributes
separated by dots (i.e. - "attr.sub_attr_a.sub_attr_b")
Returns:
The requested attribute if found, otherwise None
"""
return reduce(
lambda _obj, _attr: getattr(_obj, _attr, None), attr_chain.split("."), obj
)
def get_default_if_keyval_empty(dictionary, key, default_val):
"""
    If the key has an empty value or the key doesn't exist,
    then return the default value
Args:
dictionary (dict): Dictionary where we have to lookup
key (str): key to lookup
        default_val (str): Value to return if the key doesn't
            have a value
Returns:
dictionary[key] if value is present else default_val
"""
if not dictionary.get(key):
return default_val
return dictionary.get(key)
def get_client_version(client_binary_path):
"""
Get version reported by `oc version`.
Args:
client_binary_path (str): path to `oc` binary
Returns:
str: version reported by `oc version`.
None if the client does not exist at the provided path.
"""
if os.path.isfile(client_binary_path):
cmd = f"{client_binary_path} version --client -o json"
resp = exec_cmd(cmd)
stdout = json.loads(resp.stdout.decode())
return stdout["releaseClientVersion"]
def clone_notify():
"""
    Clones the repository containing the source code of the notify tool,
    a python3 based tool wrapped by a container, which is used to
    configure Ceph Bucket Notifications
Returns:
notify_path (str): Path location of the notify code
"""
notify_dir = mkdtemp(prefix="notify_")
log.info(f"cloning repo notify in {notify_dir}")
git_clone_cmd = f"git clone {constants.RGW_KAFKA_NOTIFY}"
subprocess.run(git_clone_cmd, shell=True, cwd=notify_dir, check=True)
notify_path = f"{notify_dir}/notify/notify.py"
return notify_path
def add_chrony_to_ocp_deployment():
"""
Create and Add necessary chrony resources
"""
for role in ["master", "worker"]:
log.info(f"Creating and Adding Chrony file for {role}")
with open(constants.CHRONY_TEMPLATE) as file_stream:
chrony_template_obj = yaml.safe_load(file_stream)
chrony_template_obj["metadata"]["labels"][
"machineconfiguration.openshift.io/role"
] = role
chrony_template_obj["metadata"]["name"] = f"99-{role}-chrony-configuration"
ignition_version = config.DEPLOYMENT["ignition_version"]
chrony_template_obj["spec"]["config"]["ignition"]["version"] = ignition_version
if Version.coerce(ignition_version) < Version.coerce("3.0"):
chrony_template_obj["spec"]["config"]["storage"]["files"][0][
"filesystem"
] = "root"
chrony_template_str = yaml.safe_dump(chrony_template_obj)
chrony_file = os.path.join(
config.ENV_DATA["cluster_path"],
"openshift",
f"99-{role}-chrony-configuration.yaml",
)
with open(chrony_file, "w") as f:
f.write(chrony_template_str)
def enable_huge_pages():
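    """
    Enable huge pages by applying the huge pages MachineConfig template
    and waiting for the worker machineconfigpool to be updated.
    """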
log.info("Enabling huge pages.")
exec_cmd(f"oc apply -f {constants.HUGE_PAGES_TEMPLATE}")
time.sleep(10)
log.info("Waiting for machine config will be applied with huge pages")
wait_for_machineconfigpool_status(node_type=constants.WORKER_MACHINE)
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import re
from ....trello import TrelloClient
from ...console import abort, echo_info
class RCBuildCardsUpdater:
version_regex = r'(\d*)\.(\d*)\.(\d*)([-~])rc([\.-])(\d*)'
def __init__(self, trello: TrelloClient, release_version: str):
self.__trello = trello
match = re.fullmatch(self.version_regex, release_version)
if not match:
abort(
                f'Cannot update cards in RC builds column. '
f'`{release_version}` is an invalid release candidate version. '
f'A valid version is for example `7.21.0-rc.3`. '
f'You can disable the update of cards in RC builds column by removing --update-rc-builds-cards'
)
else:
groups = match.groups()
if len(groups) != 6:
raise Exception('Regex in RCBuildCardsUpdater is not correct')
(_, self.__minor, self.__patch, _, _, self.__rc) = groups
def update_cards(self):
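        """Update the release version in the description of each RC build card."""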
rc_build_cards = [
'Rrn1Y0yU', # [A7] Windows + Docker + Chocolatey
'DyjjKkZD', # [A6] Windows
'BOvSs9Le', # [IOT] Linux
'hu1JXJ18', # [A7] Linux + Docker
'E7bHwa14', # [A6] Linux + Docker
'dYrSpOLW', # MacOS
]
for card_id in rc_build_cards:
card = self.__trello.get_card(card_id)
description = card['desc']
new_version = f'\\g<1>.{self.__minor}.{self.__patch}\\g<4>rc\\g<5>{self.__rc}'
new_description = re.sub(self.version_regex, new_version, description)
updated_card = {'desc': new_description}
echo_info(f'updating release version for the card {card['name']}')
self.__trello.update_card(card_id, json.dumps(updated_card))
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import re
from ....trello import TrelloClient
from ...console import abort, echo_info
class RCBuildCardsUpdater:
version_regex = r'(\d*)\.(\d*)\.(\d*)([-~])rc([\.-])(\d*)'
def __init__(self, trello: TrelloClient, release_version: str):
self.__trello = trello
match = re.fullmatch(self.version_regex, release_version)
if not match:
abort(
                f'Cannot update cards in RC builds column. '
f'`{release_version}` is an invalid release candidate version. '
f'A valid version is for example `7.21.0-rc.3`. '
f'You can disable the update of cards in RC builds column by removing --update-rc-builds-cards'
)
else:
groups = match.groups()
if len(groups) != 6:
raise Exception('Regex in RCBuildCardsUpdater is not correct')
(_, self.__minor, self.__patch, _, _, self.__rc) = groups
def update_cards(self):
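        """Update the release version in the description of each RC build card."""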
rc_build_cards = [
'Rrn1Y0yU', # [A7] Windows + Docker + Chocolatey
'DyjjKkZD', # [A6] Windows
'BOvSs9Le', # [IOT] Linux
'hu1JXJ18', # [A7] Linux + Docker
'E7bHwa14', # [A6] Linux + Docker
'dYrSpOLW', # MacOS
]
for card_id in rc_build_cards:
card = self.__trello.get_card(card_id)
description = card['desc']
new_version = f'\\g<1>.{self.__minor}.{self.__patch}\\g<4>rc\\g<5>{self.__rc}'
new_description = re.sub(self.version_regex, new_version, description)
updated_card = {'desc': new_description}
echo_info(f'updating release version for the card {card["name"]}')
self.__trello.update_card(card_id, json.dumps(updated_card))
|
from requests import get
from re import findall
import os
import glob
from rubika.client import Bot
import requests
from rubika.tools import Tools
from rubika.encryption import encryption
from gtts import gTTS
from mutagen.mp3 import MP3
import time
import random
import urllib
import io
bot = Bot("ssnakxcydoxdtheauejqujhwujbctupo")
target = "g0B4OLc066ebaf44f890d98bf8f5d156"
# created By HiBye & ShayanHeidari(Snipe4Kill)(TG GAMES)(libs for Bahman Ahmadi)
def hasAds(msg):
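    """Return True if the message text contains a link or a common domain suffix."""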
links = ["http://","https://",".ir",".com",".org",".net",".me"]
for i in links:
if i in msg:
return True
def hasInsult(msg):
swData = [False,None]
for i in open("dontReadMe.txt").read().split("\n"):
if i in msg:
swData = [True, i]
break
else: continue
return swData
# static variable
answered, sleeped, retries, plus = [], False, {}, True
alerts, blacklist = [] , []
def alert(guid,user,link=False):
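    """Warn the user; on the third warning the user is blacklisted and banned from the group."""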
alerts.append(guid)
coun = int(alerts.count(guid))
haslink = ""
    if link : haslink = "گذاشتن لینک در گروه ممنوع میباشد .\n\n"
if coun == 1:
bot.sendMessage(target, "💢 اخطار [ @"+user+" ] \n"+haslink+" شما (1/3) اخطار دریافت کرده اید .\n\nپس از دریافت 3 اخطار از گروه حذف خواهید شد !\nجهت اطلاع از قوانین کلمه (قوانین) را ارسال کنید .")
elif coun == 2:
bot.sendMessage(target, "💢 اخطار [ @"+user+" ] \n"+haslink+" شما (2/3) اخطار دریافت کرده اید .\n\nپس از دریافت 3 اخطار از گروه حذف خواهید شد !\nجهت اطلاع از قوانین کلمه (قوانین) را ارسال کنید .")
elif coun == 3:
blacklist.append(guid)
bot.sendMessage(target, "🚫 کاربر [ @"+user+" ] \n (3/3) اخطار دریافت کرد ، بنابراین اکنون اخراج میشود .")
bot.banGroupMember(target, guid)
while True:
# time.sleep(15)
try:
admins = [i["member_guid"] for i in bot.getGroupAdmins(target)["data"]["in_chat_members"]]
min_id = bot.getGroupInfo(target)["data"]["chat"]["last_message_id"]
while True:
try:
messages = bot.getMessages(target,min_id)
break
except:
continue
for msg in messages:
try:
if msg["type"]=="Text" and not msg.get("message_id") in answered:
if not sleeped:
if hasAds(msg.get("text")) and not msg.get("author_object_guid") in admins :
guid = msg.get("author_object_guid")
user = bot.getUserInfo(guid)["data"]["user"]["username"]
bot.deleteMessages(target, [msg.get("message_id")])
alert(guid,user,True)
elif msg.get("text") == "!stop" or msg.get("text") == "/stop" and msg.get("author_object_guid") in admins :
try:
sleeped = True
bot.sendMessage(target, "✅ ربات اکنون خاموش است", message_id=msg.get("message_id"))
except:
print("err off bot")
elif msg.get("text") == "!restart" or msg.get("text") == "/restart" and msg.get("author_object_guid") in admins :
try:
sleeped = True
bot.sendMessage(target, "در حال راه اندازی مجدد...", message_id=msg.get("message_id"))
sleeped = False
bot.sendMessage(target, "ربات با موفقیت مجددا راه اندازی شد!", message_id=msg.get("message_id"))
except:
print("err Restart bot")
elif msg.get("text").startswith("حذف") and msg.get("author_object_guid") in admins :
try:
number = int(msg.get("text").split(" ")[1])
answered.reverse()
bot.deleteMessages(target, answered[0:number])
bot.sendMessage(target, "✅ "+ str(number) +" پیام اخیر با موفقیت حذف شد", message_id=msg.get("message_id"))
answered.reverse()
except IndexError:
bot.deleteMessages(target, [msg.get("reply_to_message_id")])
bot.sendMessage(target, "✅ پیام با موفقیت حذف شد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
elif msg.get("text").startswith("اخراج") and msg.get("author_object_guid") in admins :
try:
guid = bot.getInfoByUsername(msg.get("text").split(" ")[1][1:])["data"]["chat"]["abs_object"]["object_guid"]
if not guid in admins :
bot.banGroupMember(target, guid)
# bot.sendMessage(target, "✅ کاربر با موفقیت از گروه اخراج شد", message_id=msg.get("message_id"))
else :
bot.sendMessage(target, "❌ کاربر ادمین میباشد", message_id=msg.get("message_id"))
except IndexError:
bot.banGroupMember(target, bot.getMessagesInfo(target, [msg.get("reply_to_message_id")])[0]["author_object_guid"])
# bot.sendMessage(target, "✅ کاربر با موفقیت از گروه اخراج شد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ دستور اشتباه", message_id=msg.get("message_id"))
elif msg.get("text").startswith("افزودن") or msg.get("text").startswith("!add") :
try:
guid = bot.getInfoByUsername(msg.get("text").split(" ")[1][1:])["data"]["chat"]["object_guid"]
if guid in blacklist:
if msg.get("author_object_guid") in admins:
alerts.remove(guid)
alerts.remove(guid)
alerts.remove(guid)
blacklist.remove(guid)
bot.invite(target, [guid])
else:
bot.sendMessage(target, "❌ کاربر محدود میباشد", message_id=msg.get("message_id"))
else:
bot.invite(target, [guid])
# bot.sendMessage(target, "✅ کاربر اکنون عضو گروه است", message_id=msg.get("message_id"))
except IndexError:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ دستور اشتباه", message_id=msg.get("message_id"))
elif msg.get("text") == "دستورات":
try:
rules = open("help.txt","r",encoding='utf-8').read()
bot.sendMessage(target, str(rules), message_id=msg.get("message_id"))
except:
print("err dastorat")
elif msg.get("text").startswith("آپدیت دستورات") and msg.get("author_object_guid") in admins:
try:
rules = open("help.txt","w",encoding='utf-8').write(str(msg.get("text").strip("آپدیت قوانین")))
bot.sendMessage(target, "دستورات ربات بهروزرسانی شد!", message_id=msg.get("message_id"))
# rules.close()
except:
bot.sendMessage(target, "مشکلی پیش اومد مجددا تلاش کنید!", message_id=msg.get("message_id"))
elif msg["text"].startswith("!number") or msg["text"].startswith("بشمار"):
try:
response = get(f"http://api.codebazan.ir/adad/?text={msg["text"].split()[1]}").json()
bot.sendMessage(msg["author_object_guid"], "\n".join(list(response["result"].values())[:20])).text
bot.sendMessage(target, "نتیجه بزودی برای شما ارسال خواهد شد...", message_id=msg["message_id"])
except:
bot.sendMessage(target, "متاسفانه نتیجهای موجود نبود!", message_id=msg["message_id"])
elif msg.get("text").startswith("زمان"):
try:
response = get("https://api.codebazan.ir/time-date/?td=all").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
print("err answer time")
elif msg.get("text") == "ساعت":
try:
bot.sendMessage(target, f"Time : {time.localtime().tm_hour} : {time.localtime().tm_min} : {time.localtime().tm_sec}", message_id=msg.get("message_id"))
except:
print("err time answer")
elif msg.get("text") == "!date":
try:
bot.sendMessage(target, f"Date: {time.localtime().tm_year} / {time.localtime().tm_mon} / {time.localtime().tm_mday}", message_id=msg.get("message_id"))
except:
print("err date")
elif msg.get("text") == "پاک" and msg.get("author_object_guid") in admins :
try:
bot.deleteMessages(target, [msg.get("reply_to_message_id")])
bot.sendMessage(target, "پیام مورد نظر پاک شد...", message_id=msg.get("message_id"))
except:
print("err pak")
elif msg.get("text").startswith("!cal") or msg.get("text").startswith("حساب"):
msd = msg.get("text")
if plus == True:
try:
call = [msd.split(" ")[1], msd.split(" ")[2], msd.split(" ")[3]]
if call[1] == "+":
try:
am = float(call[0]) + float(call[2])
bot.sendMessage(target, "حاصل :\n"+"".join(str(am)), message_id=msg.get("message_id"))
plus = False
except:
print("err answer +")
elif call[1] == "-":
try:
am = float(call[0]) - float(call[2])
bot.sendMessage(target, "حاصل :\n"+"".join(str(am)), message_id=msg.get("message_id"))
except:
print("err answer -")
elif call[1] == "*":
try:
am = float(call[0]) * float(call[2])
bot.sendMessage(target, "حاصل :\n"+"".join(str(am)), message_id=msg.get("message_id"))
except:
print("err answer *")
elif call[1] == "/":
try:
am = float(call[0]) / float(call[2])
bot.sendMessage(target, "حاصل :\n"+"".join(str(am)), message_id=msg.get("message_id"))
except:
print("err answer /")
except IndexError:
bot.sendMessage(target, "متاسفانه دستور شما اشتباه میباشد!" ,message_id=msg.get("message_id"))
plus= True
elif hasInsult(msg.get("text"))[0] and not msg.get("author_object_guid") in admins :
try:
print("yek ahmagh fohsh dad")
bot.deleteMessages(target, [str(msg.get("message_id"))])
print("fohsh pak shod")
except:
print("err del fohsh Bug")
elif msg.get("text").startswith("سلام") or msg.get("text").startswith("سلم") or msg.get("text").startswith("صلام") or msg.get("text").startswith("صلم") or msg.get("text").startswith("سیلام") or msg.get("text").startswith("صیلام"):
try:
bot.sendMessage(target,'سلام' ,message_id=msg.get("message_id"))
except:
print("err hello")
elif msg.get("text").startswith("خوبی") or msg.get("text").startswith("خبی"):
try:
bot.sendMessage(target, "تو چطوری؟🤪", message_id=msg.get("message_id"))
except:
print("err answer hay")
elif msg.get("text").startswith("چه خبر") or msg.get("text").startswith("چخبر"):
try:
bot.sendMessage(target, "ســلامـتیت😍♥", message_id=msg.get("message_id"))
except:
print("err CheKhabar")
elif msg.get("text").startswith("ربات") or msg.get("text").startswith("بات"):
try:
bot.sendMessage(target, "جــونـم😁💋", message_id=msg.get("message_id"))
except:
print("err bot answer")
elif msg.get("text").startswith("😂") or msg.get("text").startswith("🤣"):
try:
bot.sendMessage(target, "جــون تـو فــقط بخـند😍", message_id=msg.get("message_id"))
except:
print("err luagh")
elif msg.get("text") == "😐":
try:
bot.sendMessage(target, "😑😐", message_id=msg.get("message_id"))
except:
print("err poker answer")
elif msg.get("text") == "سنجاق" and msg.get("author_object_guid") in admins :
try:
bot.pin(target, msg["reply_to_message_id"])
bot.sendMessage(target, "پیام مورد نظر با موفقیت سنجاق شد!", message_id=msg.get("message_id"))
except:
print("err pin")
elif msg.get("text") == "برداشتن سنجاق" and msg.get("author_object_guid") in admins :
try:
bot.unpin(target, msg["reply_to_message_id"])
bot.sendMessage(target, "پیام مورد نظر از سنجاق برداشته شد!", message_id=msg.get("message_id"))
except:
print("err unpin")
elif msg.get("text").startswith("!trans"):
try:
responser = get(f"https://api.codebazan.ir/translate/?type=json&from=en&to=fa&text={msg.get("text").split()[1:]}").json()
al = [responser["result"]]
bot.sendMessage(msg.get("author_object_guid"), "پاسخ به ترجمه:\n"+"".join(al)).text
bot.sendMessage(target, "نتیجه رو برات ارسال کردم😘", message_id=msg["message_id"])
except:
bot.sendMessage(target, "دستور رو درست وارد کن دیگه😁", message_id=msg["message_id"])
elif msg.get("text").startswith("!font"):
try:
response = get(f"https://api.codebazan.ir/font/?text={msg.get("text").split()[1]}").json()
bot.sendMessage(msg.get("author_object_guid"), "\n".join(list(response["result"].values())[:110])).text
bot.sendMessage(target, "نتیجه رو برات ارسال کردم😘", message_id=msg["message_id"])
except:
bot.sendMessage(target, "دستور رو درست وارد کن دیگه😁", message_id=msg["message_id"])
elif msg.get("text").startswith("جوک") or msg.get("text").startswith("jok") or msg.get("text").startswith("!jok"):
try:
response = get("https://api.codebazan.ir/jok/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "دستورت رو اشتباه وارد کردی", message_id=msg["message_id"])
elif msg.get("text").startswith("ذکر") or msg.get("text").startswith("zekr") or msg.get("text").startswith("!zekr"):
try:
response = get("http://api.codebazan.ir/zekr/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "ببخشید، خطایی پیش اومد!", message_id=msg["message_id"])
elif msg.get("text").startswith("حدیث") or msg.get("text").startswith("hadis") or msg.get("text").startswith("!hadis"):
try:
response = get("http://api.codebazan.ir/hadis/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "ببخشید، خطایی تو ارسال پیش اومد!", message_id=msg["message_id"])
elif msg.get("text").startswith("بیو") or msg.get("text").startswith("bio") or msg.get("text").startswith("!bio"):
try:
response = get("https://api.codebazan.ir/bio/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "ببخشید، خطایی تو ارسال پیش اومد!", message_id=msg["message_id"])
elif msg["text"].startswith("!weather"):
try:
response = get(f"https://api.codebazan.ir/weather/?city={msg["text"].split()[1]}").json()
bot.sendMessage(msg["author_object_guid"], "\n".join(list(response["result"].values())[:20])).text
bot.sendMessage(target, "نتیجه بزودی برای شما ارسال خواهد شد...", message_id=msg["message_id"])
except:
bot.sendMessage(target, "متاسفانه نتیجهای موجود نبود!", message_id=msg["message_id"])
elif msg.get("text").startswith("دیالوگ"):
try:
response = get("http://api.codebazan.ir/dialog/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "متاسفانه تو ارسال مشکلی پیش اومد!", message_id=msg["message_id"])
elif msg.get("text").startswith("دانستنی"):
try:
response = get("http://api.codebazan.ir/danestani/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "دستورت رو اشتباه وارد کردی", message_id=msg["message_id"])
elif msg.get("text").startswith("پ ن پ") or msg.get("text").startswith("!pa-na-pa") or msg.get("text").startswith("په نه په"):
try:
response = get("http://api.codebazan.ir/jok/pa-na-pa/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "شرمنده نتونستم بفرستم!", message_id=msg["message_id"])
elif msg.get("text").startswith("الکی مثلا") or msg.get("text").startswith("!alaki-masalan"):
try:
response = get("http://api.codebazan.ir/jok/alaki-masalan/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "نشد بفرستم:(", message_id=msg["message_id"])
elif msg.get("text").startswith("داستان") or msg.get("text").startswith("!dastan"):
try:
response = get("http://api.codebazan.ir/dastan/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "مشکلی پیش اومد!", message_id=msg["message_id"])
elif msg.get("text").startswith("!ping"):
try:
responser = get(f"https://api.codebazan.ir/ping/?url={msg.get("text").split()[1]}").text
bot.sendMessage(target, responser,message_id=msg["message_id"])
except:
bot.sendMessage(target, "دستور رو درست وارد کن دیگه😁", message_id=msg["message_id"])
elif "forwarded_from" in msg.keys() and bot.getMessagesInfo(target, [msg.get("message_id")])[0]["forwarded_from"]["type_from"] == "Channel" and not msg.get("author_object_guid") in admins :
try:
print("Yek ahmagh forwared Zad")
bot.deleteMessages(target, [str(msg.get("message_id"))])
print("tabligh forearedi pak shod")
except:
print("err delete forwared")
elif msg.get("text") == "قوانین":
try:
rules = open("rules.txt","r",encoding='utf-8').read()
bot.sendMessage(target, str(rules), message_id=msg.get("message_id"))
except:
print("err ghanon")
elif msg.get("text").startswith("آپدیت قوانین") and msg.get("author_object_guid") in admins:
try:
rules = open("rules.txt","w",encoding='utf-8').write(str(msg.get("text").strip("آپدیت قوانین")))
bot.sendMessage(target, "✅ قوانین بروزرسانی شد", message_id=msg.get("message_id"))
# rules.close()
except:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
elif msg.get("text") == "حالت آرام" and msg.get("author_object_guid") in admins:
try:
number = 10
bot.setGroupTimer(target,number)
bot.sendMessage(target, "✅ حالت آرام برای "+str(number)+"ثانیه فعال شد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
elif msg.get("text") == "!speak" or msg.get("text") == "speak" or msg.get("text") == "Speak" or msg.get("text") == "بگو":
try:
if msg.get('reply_to_message_id') != None:
msg_reply_info = bot.getMessagesInfo(target, [msg.get('reply_to_message_id')])[0]
if msg_reply_info['text'] != None:
text = msg_reply_info['text']
speech = gTTS(text)
changed_voice = io.BytesIO()
speech.write_to_fp(changed_voice)
b2 = changed_voice.getvalue()
changed_voice.seek(0)
audio = MP3(changed_voice)
dur = audio.info.length
dur = dur * 1000
f = open('sound.ogg','wb')
f.write(b2)
f.close()
bot.sendVoice(target , 'sound.ogg', dur,message_id=msg["message_id"])
os.remove('sound.ogg')
print('sended voice')
else:
bot.sendMessage(target, 'پیام شما متن یا کپشن ندارد',message_id=msg["message_id"])
except:
print('server gtts bug')
elif msg.get("text") == "برداشتن حالت آرام" and msg.get("author_object_guid") in admins:
try:
number = 0
bot.setGroupTimer(target,number)
bot.sendMessage(target, "✅ حالت آرام غیرفعال شد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "لطفا دستور رو صحیح وارد کنید!", message_id=msg.get("message_id"))
elif msg.get("text").startswith("اخطار") and msg.get("author_object_guid") in admins:
try:
user = msg.get("text").split(" ")[1][1:]
guid = bot.getInfoByUsername(user)["data"]["chat"]["abs_object"]["object_guid"]
if not guid in admins :
alert(guid,user)
else :
bot.sendMessage(target, "❌ کاربر ادمین میباشد", message_id=msg.get("message_id"))
except IndexError:
guid = bot.getMessagesInfo(target, [msg.get("reply_to_message_id")])[0]["author_object_guid"]
user = bot.getUserInfo(guid)["data"]["user"]["username"]
if not guid in admins:
alert(guid,user)
else:
bot.sendMessage(target, "❌ کاربر ادمین میباشد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
elif msg.get("text") == "قفل گروه" and msg.get("author_object_guid") in admins :
try:
bot.setMembersAccess(target, ["AddMember"])
bot.sendMessage(target, "🔒 گروه قفل شد", message_id=msg.get("message_id"))
except:
print("err lock GP")
elif msg.get("text") == "بازکردن گروه" or msg.get("text") == "باز کردن گروه" and msg.get("author_object_guid") in admins :
try:
bot.setMembersAccess(target, ["SendMessages","AddMember"])
bot.sendMessage(target, "🔓 گروه اکنون باز است", message_id=msg.get("message_id"))
except:
print("err unlock GP")
else:
if msg.get("text") == "!start" or msg.get("text") == "/start" and msg.get("author_object_guid") in admins :
try:
sleeped = False
bot.sendMessage(target, "ربات با موفقیت روشن شد!", message_id=msg.get("message_id"))
except:
print("err on bot")
elif msg["type"]=="Event" and not msg.get("message_id") in answered and not sleeped:
name = bot.getGroupInfo(target)["data"]["group"]["group_title"]
data = msg['event_data']
if data["type"]=="RemoveGroupMembers":
try:
user = bot.getUserInfo(data['peer_objects'][0]['object_guid'])["data"]["user"]["first_name"]
bot.sendMessage(target, f"‼️ کاربر {user} با موفقیت از گروه حذف شد .", message_id=msg["message_id"])
# bot.deleteMessages(target, [msg["message_id"]])
except:
print("err rm member answer")
elif data["type"]=="AddedGroupMembers":
try:
user = bot.getUserInfo(data['peer_objects'][0]['object_guid'])["data"]["user"]["first_name"]
bot.sendMessage(target, f"هــای {user} عزیز 😘🌹 \n • به گـروه {name} خیـلی خوش اومدی 😍❤️ \nلطفا قوانین رو رعایت کن .\n 💎 برای مشاهده قوانین کافیه کلمه (قوانین) رو ارسال کنی!\n", message_id=msg["message_id"])
# bot.deleteMessages(target, [msg["message_id"]])
except:
print("err add member answer")
elif data["type"]=="LeaveGroup":
try:
user = bot.getUserInfo(data['performer_object']['object_guid'])["data"]["user"]["first_name"]
bot.sendMessage(target, f"خدانگهدار {user} 👋 ", message_id=msg["message_id"])
# bot.deleteMessages(target, [msg["message_id"]])
except:
print("err Leave member Answer")
elif data["type"]=="JoinedGroupByLink":
try:
user = bot.getUserInfo(data['performer_object']['object_guid'])["data"]["user"]["first_name"]
bot.sendMessage(target, f"هــای {user} عزیز 😘🌹 \n • به گـروه {name} خیـلی خوش اومدی 😍❤️ \nلطفا قوانین رو رعایت کن .\n 💎 برای مشاهده قوانین کافیه کلمه (قوانین) رو ارسال کنی!\nدوست داری ربات بسازی؟ بیا اینجا😍👇\nt.me/RubikaBotCreate", message_id=msg["message_id"])
# bot.deleteMessages(target, [msg["message_id"]])
except:
print("err Joined member Answer")
else:
if "forwarded_from" in msg.keys() and bot.getMessagesInfo(target, [msg.get("message_id")])[0]["forwarded_from"]["type_from"] == "Channel" and not msg.get("author_object_guid") in admins :
bot.deleteMessages(target, [msg.get("message_id")])
guid = msg.get("author_object_guid")
user = bot.getUserInfo(guid)["data"]["user"]["username"]
bot.deleteMessages(target, [msg.get("message_id")])
alert(guid,user,True)
continue
except:
continue
answered.append(msg.get("message_id"))
print("[" + msg.get("message_id")+ "] >>> " + msg.get("text") + "\n")
except KeyboardInterrupt:
exit()
except Exception as e:
if type(e) in list(retries.keys()):
if retries[type(e)] < 3:
retries[type(e)] += 1
continue
else:
retries.pop(type(e))
else:
retries[type(e)] = 1
continue
|
from requests import get
from re import findall
import os
import glob
from rubika.client import Bot
import requests
from rubika.tools import Tools
from rubika.encryption import encryption
from gtts import gTTS
from mutagen.mp3 import MP3
import time
import random
import urllib
import io
bot = Bot("ssnakxcydoxdtheauejqujhwujbctupo")
target = "g0B4OLc066ebaf44f890d98bf8f5d156"
# created By HiBye & ShayanHeidari(Snipe4Kill)(TG GAMES)(libs for Bahman Ahmadi)
def hasAds(msg):
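    """Return True if the message text contains a link or a common domain suffix."""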
links = ["http://","https://",".ir",".com",".org",".net",".me"]
for i in links:
if i in msg:
return True
def hasInsult(msg):
swData = [False,None]
for i in open("dontReadMe.txt").read().split("\n"):
if i in msg:
swData = [True, i]
break
else: continue
return swData
# static variable
answered, sleeped, retries, plus = [], False, {}, True
alerts, blacklist = [] , []
def alert(guid,user,link=False):
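    """Warn the user; on the third warning the user is blacklisted and banned from the group."""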
alerts.append(guid)
coun = int(alerts.count(guid))
haslink = ""
    if link : haslink = "گذاشتن لینک در گروه ممنوع میباشد .\n\n"
if coun == 1:
bot.sendMessage(target, "💢 اخطار [ @"+user+" ] \n"+haslink+" شما (1/3) اخطار دریافت کرده اید .\n\nپس از دریافت 3 اخطار از گروه حذف خواهید شد !\nجهت اطلاع از قوانین کلمه (قوانین) را ارسال کنید .")
elif coun == 2:
bot.sendMessage(target, "💢 اخطار [ @"+user+" ] \n"+haslink+" شما (2/3) اخطار دریافت کرده اید .\n\nپس از دریافت 3 اخطار از گروه حذف خواهید شد !\nجهت اطلاع از قوانین کلمه (قوانین) را ارسال کنید .")
elif coun == 3:
blacklist.append(guid)
bot.sendMessage(target, "🚫 کاربر [ @"+user+" ] \n (3/3) اخطار دریافت کرد ، بنابراین اکنون اخراج میشود .")
bot.banGroupMember(target, guid)
while True:
# time.sleep(15)
try:
admins = [i["member_guid"] for i in bot.getGroupAdmins(target)["data"]["in_chat_members"]]
min_id = bot.getGroupInfo(target)["data"]["chat"]["last_message_id"]
while True:
try:
messages = bot.getMessages(target,min_id)
break
except:
continue
for msg in messages:
try:
if msg["type"]=="Text" and not msg.get("message_id") in answered:
if not sleeped:
if hasAds(msg.get("text")) and not msg.get("author_object_guid") in admins :
guid = msg.get("author_object_guid")
user = bot.getUserInfo(guid)["data"]["user"]["username"]
bot.deleteMessages(target, [msg.get("message_id")])
alert(guid,user,True)
elif msg.get("text") == "!stop" or msg.get("text") == "/stop" and msg.get("author_object_guid") in admins :
try:
sleeped = True
bot.sendMessage(target, "✅ ربات اکنون خاموش است", message_id=msg.get("message_id"))
except:
print("err off bot")
elif msg.get("text") == "!restart" or msg.get("text") == "/restart" and msg.get("author_object_guid") in admins :
try:
sleeped = True
bot.sendMessage(target, "در حال راه اندازی مجدد...", message_id=msg.get("message_id"))
sleeped = False
bot.sendMessage(target, "ربات با موفقیت مجددا راه اندازی شد!", message_id=msg.get("message_id"))
except:
print("err Restart bot")
elif msg.get("text").startswith("حذف") and msg.get("author_object_guid") in admins :
try:
number = int(msg.get("text").split(" ")[1])
answered.reverse()
bot.deleteMessages(target, answered[0:number])
bot.sendMessage(target, "✅ "+ str(number) +" پیام اخیر با موفقیت حذف شد", message_id=msg.get("message_id"))
answered.reverse()
except IndexError:
bot.deleteMessages(target, [msg.get("reply_to_message_id")])
bot.sendMessage(target, "✅ پیام با موفقیت حذف شد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
elif msg.get("text").startswith("اخراج") and msg.get("author_object_guid") in admins :
try:
guid = bot.getInfoByUsername(msg.get("text").split(" ")[1][1:])["data"]["chat"]["abs_object"]["object_guid"]
if not guid in admins :
bot.banGroupMember(target, guid)
# bot.sendMessage(target, "✅ کاربر با موفقیت از گروه اخراج شد", message_id=msg.get("message_id"))
else :
bot.sendMessage(target, "❌ کاربر ادمین میباشد", message_id=msg.get("message_id"))
except IndexError:
bot.banGroupMember(target, bot.getMessagesInfo(target, [msg.get("reply_to_message_id")])[0]["author_object_guid"])
# bot.sendMessage(target, "✅ کاربر با موفقیت از گروه اخراج شد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ دستور اشتباه", message_id=msg.get("message_id"))
elif msg.get("text").startswith("افزودن") or msg.get("text").startswith("!add") :
try:
guid = bot.getInfoByUsername(msg.get("text").split(" ")[1][1:])["data"]["chat"]["object_guid"]
if guid in blacklist:
if msg.get("author_object_guid") in admins:
alerts.remove(guid)
alerts.remove(guid)
alerts.remove(guid)
blacklist.remove(guid)
bot.invite(target, [guid])
else:
bot.sendMessage(target, "❌ کاربر محدود میباشد", message_id=msg.get("message_id"))
else:
bot.invite(target, [guid])
# bot.sendMessage(target, "✅ کاربر اکنون عضو گروه است", message_id=msg.get("message_id"))
except IndexError:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ دستور اشتباه", message_id=msg.get("message_id"))
elif msg.get("text") == "دستورات":
try:
rules = open("help.txt","r",encoding='utf-8').read()
bot.sendMessage(target, str(rules), message_id=msg.get("message_id"))
except:
print("err dastorat")
elif msg.get("text").startswith("آپدیت دستورات") and msg.get("author_object_guid") in admins:
try:
rules = open("help.txt","w",encoding='utf-8').write(str(msg.get("text").strip("آپدیت قوانین")))
bot.sendMessage(target, "دستورات ربات بهروزرسانی شد!", message_id=msg.get("message_id"))
# rules.close()
except:
bot.sendMessage(target, "مشکلی پیش اومد مجددا تلاش کنید!", message_id=msg.get("message_id"))
elif msg["text"].startswith("!number") or msg["text"].startswith("بشمار"):
try:
response = get(f"http://api.codebazan.ir/adad/?text={msg['text'].split()[1]}").json()
bot.sendMessage(msg["author_object_guid"], "\n".join(list(response["result"].values())[:20])).text
bot.sendMessage(target, "نتیجه بزودی برای شما ارسال خواهد شد...", message_id=msg["message_id"])
except:
bot.sendMessage(target, "متاسفانه نتیجهای موجود نبود!", message_id=msg["message_id"])
elif msg.get("text").startswith("زمان"):
try:
response = get("https://api.codebazan.ir/time-date/?td=all").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
print("err answer time")
elif msg.get("text") == "ساعت":
try:
bot.sendMessage(target, f"Time : {time.localtime().tm_hour} : {time.localtime().tm_min} : {time.localtime().tm_sec}", message_id=msg.get("message_id"))
except:
print("err time answer")
elif msg.get("text") == "!date":
try:
bot.sendMessage(target, f"Date: {time.localtime().tm_year} / {time.localtime().tm_mon} / {time.localtime().tm_mday}", message_id=msg.get("message_id"))
except:
print("err date")
elif msg.get("text") == "پاک" and msg.get("author_object_guid") in admins :
try:
bot.deleteMessages(target, [msg.get("reply_to_message_id")])
bot.sendMessage(target, "پیام مورد نظر پاک شد...", message_id=msg.get("message_id"))
except:
print("err pak")
elif msg.get("text").startswith("!cal") or msg.get("text").startswith("حساب"):
msd = msg.get("text")
if plus == True:
try:
call = [msd.split(" ")[1], msd.split(" ")[2], msd.split(" ")[3]]
if call[1] == "+":
try:
am = float(call[0]) + float(call[2])
bot.sendMessage(target, "حاصل :\n"+"".join(str(am)), message_id=msg.get("message_id"))
plus = False
except:
print("err answer +")
elif call[1] == "-":
try:
am = float(call[0]) - float(call[2])
bot.sendMessage(target, "حاصل :\n"+"".join(str(am)), message_id=msg.get("message_id"))
except:
print("err answer -")
elif call[1] == "*":
try:
am = float(call[0]) * float(call[2])
bot.sendMessage(target, "حاصل :\n"+"".join(str(am)), message_id=msg.get("message_id"))
except:
print("err answer *")
elif call[1] == "/":
try:
am = float(call[0]) / float(call[2])
bot.sendMessage(target, "حاصل :\n"+"".join(str(am)), message_id=msg.get("message_id"))
except:
print("err answer /")
except IndexError:
bot.sendMessage(target, "متاسفانه دستور شما اشتباه میباشد!" ,message_id=msg.get("message_id"))
plus= True
elif hasInsult(msg.get("text"))[0] and not msg.get("author_object_guid") in admins :
try:
print("yek ahmagh fohsh dad")
bot.deleteMessages(target, [str(msg.get("message_id"))])
print("fohsh pak shod")
except:
print("err del fohsh Bug")
elif msg.get("text").startswith("سلام") or msg.get("text").startswith("سلم") or msg.get("text").startswith("صلام") or msg.get("text").startswith("صلم") or msg.get("text").startswith("سیلام") or msg.get("text").startswith("صیلام"):
try:
bot.sendMessage(target,'سلام' ,message_id=msg.get("message_id"))
except:
print("err hello")
elif msg.get("text").startswith("خوبی") or msg.get("text").startswith("خبی"):
try:
bot.sendMessage(target, "تو چطوری؟🤪", message_id=msg.get("message_id"))
except:
print("err answer hay")
elif msg.get("text").startswith("چه خبر") or msg.get("text").startswith("چخبر"):
try:
bot.sendMessage(target, "ســلامـتیت😍♥", message_id=msg.get("message_id"))
except:
print("err CheKhabar")
elif msg.get("text").startswith("ربات") or msg.get("text").startswith("بات"):
try:
bot.sendMessage(target, "جــونـم😁💋", message_id=msg.get("message_id"))
except:
print("err bot answer")
elif msg.get("text").startswith("😂") or msg.get("text").startswith("🤣"):
try:
bot.sendMessage(target, "جــون تـو فــقط بخـند😍", message_id=msg.get("message_id"))
except:
print("err luagh")
elif msg.get("text") == "😐":
try:
bot.sendMessage(target, "😑😐", message_id=msg.get("message_id"))
except:
print("err poker answer")
elif msg.get("text") == "سنجاق" and msg.get("author_object_guid") in admins :
try:
bot.pin(target, msg["reply_to_message_id"])
bot.sendMessage(target, "پیام مورد نظر با موفقیت سنجاق شد!", message_id=msg.get("message_id"))
except:
print("err pin")
elif msg.get("text") == "برداشتن سنجاق" and msg.get("author_object_guid") in admins :
try:
bot.unpin(target, msg["reply_to_message_id"])
bot.sendMessage(target, "پیام مورد نظر از سنجاق برداشته شد!", message_id=msg.get("message_id"))
except:
print("err unpin")
elif msg.get("text").startswith("!trans"):
try:
responser = get(f"https://api.codebazan.ir/translate/?type=json&from=en&to=fa&text={msg.get('text').split()[1:]}").json()
al = [responser["result"]]
bot.sendMessage(msg.get("author_object_guid"), "پاسخ به ترجمه:\n"+"".join(al)).text
bot.sendMessage(target, "نتیجه رو برات ارسال کردم😘", message_id=msg["message_id"])
except:
bot.sendMessage(target, "دستور رو درست وارد کن دیگه😁", message_id=msg["message_id"])
elif msg.get("text").startswith("!font"):
try:
response = get(f"https://api.codebazan.ir/font/?text={msg.get('text').split()[1]}").json()
bot.sendMessage(msg.get("author_object_guid"), "\n".join(list(response["result"].values())[:110])).text
bot.sendMessage(target, "نتیجه رو برات ارسال کردم😘", message_id=msg["message_id"])
except:
bot.sendMessage(target, "دستور رو درست وارد کن دیگه😁", message_id=msg["message_id"])
elif msg.get("text").startswith("جوک") or msg.get("text").startswith("jok") or msg.get("text").startswith("!jok"):
try:
response = get("https://api.codebazan.ir/jok/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "دستورت رو اشتباه وارد کردی", message_id=msg["message_id"])
elif msg.get("text").startswith("ذکر") or msg.get("text").startswith("zekr") or msg.get("text").startswith("!zekr"):
try:
response = get("http://api.codebazan.ir/zekr/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "ببخشید، خطایی پیش اومد!", message_id=msg["message_id"])
elif msg.get("text").startswith("حدیث") or msg.get("text").startswith("hadis") or msg.get("text").startswith("!hadis"):
try:
response = get("http://api.codebazan.ir/hadis/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "ببخشید، خطایی تو ارسال پیش اومد!", message_id=msg["message_id"])
elif msg.get("text").startswith("بیو") or msg.get("text").startswith("bio") or msg.get("text").startswith("!bio"):
try:
response = get("https://api.codebazan.ir/bio/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "ببخشید، خطایی تو ارسال پیش اومد!", message_id=msg["message_id"])
elif msg["text"].startswith("!weather"):
try:
response = get(f"https://api.codebazan.ir/weather/?city={msg['text'].split()[1]}").json()
bot.sendMessage(msg["author_object_guid"], "\n".join(list(response["result"].values())[:20])).text
bot.sendMessage(target, "نتیجه بزودی برای شما ارسال خواهد شد...", message_id=msg["message_id"])
except:
bot.sendMessage(target, "متاسفانه نتیجهای موجود نبود!", message_id=msg["message_id"])
elif msg.get("text").startswith("دیالوگ"):
try:
response = get("http://api.codebazan.ir/dialog/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "متاسفانه تو ارسال مشکلی پیش اومد!", message_id=msg["message_id"])
elif msg.get("text").startswith("دانستنی"):
try:
response = get("http://api.codebazan.ir/danestani/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "دستورت رو اشتباه وارد کردی", message_id=msg["message_id"])
elif msg.get("text").startswith("پ ن پ") or msg.get("text").startswith("!pa-na-pa") or msg.get("text").startswith("په نه په"):
try:
response = get("http://api.codebazan.ir/jok/pa-na-pa/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "شرمنده نتونستم بفرستم!", message_id=msg["message_id"])
elif msg.get("text").startswith("الکی مثلا") or msg.get("text").startswith("!alaki-masalan"):
try:
response = get("http://api.codebazan.ir/jok/alaki-masalan/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "نشد بفرستم:(", message_id=msg["message_id"])
elif msg.get("text").startswith("داستان") or msg.get("text").startswith("!dastan"):
try:
response = get("http://api.codebazan.ir/dastan/").text
bot.sendMessage(target, response,message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "مشکلی پیش اومد!", message_id=msg["message_id"])
elif msg.get("text").startswith("!ping"):
try:
responser = get(f"https://api.codebazan.ir/ping/?url={msg.get('text').split()[1]}").text
bot.sendMessage(target, responser,message_id=msg["message_id"])
except:
bot.sendMessage(target, "دستور رو درست وارد کن دیگه😁", message_id=msg["message_id"])
elif "forwarded_from" in msg.keys() and bot.getMessagesInfo(target, [msg.get("message_id")])[0]["forwarded_from"]["type_from"] == "Channel" and not msg.get("author_object_guid") in admins :
try:
print("Yek ahmagh forwared Zad")
bot.deleteMessages(target, [str(msg.get("message_id"))])
print("tabligh forearedi pak shod")
except:
print("err delete forwared")
elif msg.get("text") == "قوانین":
try:
rules = open("rules.txt","r",encoding='utf-8').read()
bot.sendMessage(target, str(rules), message_id=msg.get("message_id"))
except:
print("err ghanon")
elif msg.get("text").startswith("آپدیت قوانین") and msg.get("author_object_guid") in admins:
try:
rules = open("rules.txt","w",encoding='utf-8').write(str(msg.get("text").strip("آپدیت قوانین")))
bot.sendMessage(target, "✅ قوانین بروزرسانی شد", message_id=msg.get("message_id"))
# rules.close()
except:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
elif msg.get("text") == "حالت آرام" and msg.get("author_object_guid") in admins:
try:
number = 10
bot.setGroupTimer(target,number)
bot.sendMessage(target, "✅ حالت آرام برای "+str(number)+"ثانیه فعال شد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
elif msg.get("text") == "!speak" or msg.get("text") == "speak" or msg.get("text") == "Speak" or msg.get("text") == "بگو":
try:
if msg.get('reply_to_message_id') != None:
msg_reply_info = bot.getMessagesInfo(target, [msg.get('reply_to_message_id')])[0]
if msg_reply_info['text'] != None:
text = msg_reply_info['text']
speech = gTTS(text)
changed_voice = io.BytesIO()
speech.write_to_fp(changed_voice)
b2 = changed_voice.getvalue()
changed_voice.seek(0)
audio = MP3(changed_voice)
dur = audio.info.length
dur = dur * 1000
f = open('sound.ogg','wb')
f.write(b2)
f.close()
bot.sendVoice(target , 'sound.ogg', dur,message_id=msg["message_id"])
os.remove('sound.ogg')
print('sended voice')
else:
bot.sendMessage(target, 'پیام شما متن یا کپشن ندارد',message_id=msg["message_id"])
except:
print('server gtts bug')
elif msg.get("text") == "برداشتن حالت آرام" and msg.get("author_object_guid") in admins:
try:
number = 0
bot.setGroupTimer(target,number)
bot.sendMessage(target, "✅ حالت آرام غیرفعال شد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "لطفا دستور رو صحیح وارد کنید!", message_id=msg.get("message_id"))
elif msg.get("text").startswith("اخطار") and msg.get("author_object_guid") in admins:
try:
user = msg.get("text").split(" ")[1][1:]
guid = bot.getInfoByUsername(user)["data"]["chat"]["abs_object"]["object_guid"]
if not guid in admins :
alert(guid,user)
else :
bot.sendMessage(target, "❌ کاربر ادمین میباشد", message_id=msg.get("message_id"))
except IndexError:
guid = bot.getMessagesInfo(target, [msg.get("reply_to_message_id")])[0]["author_object_guid"]
user = bot.getUserInfo(guid)["data"]["user"]["username"]
if not guid in admins:
alert(guid,user)
else:
bot.sendMessage(target, "❌ کاربر ادمین میباشد", message_id=msg.get("message_id"))
except:
bot.sendMessage(target, "❌ لطفا دستور را به درستی وارد کنید", message_id=msg.get("message_id"))
elif msg.get("text") == "قفل گروه" and msg.get("author_object_guid") in admins :
try:
bot.setMembersAccess(target, ["AddMember"])
bot.sendMessage(target, "🔒 گروه قفل شد", message_id=msg.get("message_id"))
except:
print("err lock GP")
elif msg.get("text") == "بازکردن گروه" or msg.get("text") == "باز کردن گروه" and msg.get("author_object_guid") in admins :
try:
bot.setMembersAccess(target, ["SendMessages","AddMember"])
bot.sendMessage(target, "🔓 گروه اکنون باز است", message_id=msg.get("message_id"))
except:
print("err unlock GP")
else:
if msg.get("text") == "!start" or msg.get("text") == "/start" and msg.get("author_object_guid") in admins :
try:
sleeped = False
bot.sendMessage(target, "ربات با موفقیت روشن شد!", message_id=msg.get("message_id"))
except:
print("err on bot")
elif msg["type"]=="Event" and not msg.get("message_id") in answered and not sleeped:
name = bot.getGroupInfo(target)["data"]["group"]["group_title"]
data = msg['event_data']
if data["type"]=="RemoveGroupMembers":
try:
user = bot.getUserInfo(data['peer_objects'][0]['object_guid'])["data"]["user"]["first_name"]
bot.sendMessage(target, f"‼️ کاربر {user} با موفقیت از گروه حذف شد .", message_id=msg["message_id"])
# bot.deleteMessages(target, [msg["message_id"]])
except:
print("err rm member answer")
elif data["type"]=="AddedGroupMembers":
try:
user = bot.getUserInfo(data['peer_objects'][0]['object_guid'])["data"]["user"]["first_name"]
bot.sendMessage(target, f"هــای {user} عزیز 😘🌹 \n • به گـروه {name} خیـلی خوش اومدی 😍❤️ \nلطفا قوانین رو رعایت کن .\n 💎 برای مشاهده قوانین کافیه کلمه (قوانین) رو ارسال کنی!\n", message_id=msg["message_id"])
# bot.deleteMessages(target, [msg["message_id"]])
except:
print("err add member answer")
elif data["type"]=="LeaveGroup":
try:
user = bot.getUserInfo(data['performer_object']['object_guid'])["data"]["user"]["first_name"]
bot.sendMessage(target, f"خدانگهدار {user} 👋 ", message_id=msg["message_id"])
# bot.deleteMessages(target, [msg["message_id"]])
except:
print("err Leave member Answer")
elif data["type"]=="JoinedGroupByLink":
try:
user = bot.getUserInfo(data['performer_object']['object_guid'])["data"]["user"]["first_name"]
bot.sendMessage(target, f"هــای {user} عزیز 😘🌹 \n • به گـروه {name} خیـلی خوش اومدی 😍❤️ \nلطفا قوانین رو رعایت کن .\n 💎 برای مشاهده قوانین کافیه کلمه (قوانین) رو ارسال کنی!\nدوست داری ربات بسازی؟ بیا اینجا😍👇\nt.me/RubikaBotCreate", message_id=msg["message_id"])
# bot.deleteMessages(target, [msg["message_id"]])
except:
print("err Joined member Answer")
else:
if "forwarded_from" in msg.keys() and bot.getMessagesInfo(target, [msg.get("message_id")])[0]["forwarded_from"]["type_from"] == "Channel" and not msg.get("author_object_guid") in admins :
bot.deleteMessages(target, [msg.get("message_id")])
guid = msg.get("author_object_guid")
user = bot.getUserInfo(guid)["data"]["user"]["username"]
bot.deleteMessages(target, [msg.get("message_id")])
alert(guid,user,True)
continue
except:
continue
answered.append(msg.get("message_id"))
print("[" + msg.get("message_id")+ "] >>> " + msg.get("text") + "\n")
except KeyboardInterrupt:
exit()
except Exception as e:
if type(e) in list(retries.keys()):
if retries[type(e)] < 3:
retries[type(e)] += 1
continue
else:
retries.pop(type(e))
else:
retries[type(e)] = 1
continue
|
import dash
from dash.dependencies import Input, Output
from dash import dash_table
from dash import dcc
from dash import html
import pandas as pd
# Import data into pandas
df = pd.read_csv("data.csv")
df["Condition"] = df["Condition Category"]
df = df.drop(["Condition Category", "Missed Prices", "Index", "SKU"], axis=1)
df = df[
[
"Brand",
"Model",
"Reference",
"Year",
"Condition",
"Papers",
"Box",
"Movement",
"Dimensions",
"Gender",
"Case",
"Bracelet",
"Crystal",
"Dial Color",
"Price",
"Features",
"Link",
]
]
app = dash.Dash(__name__)
money = dash_table.FormatTemplate.money(0)
# App Layout
app.layout = html.Div(
[
# Title
html.H1("Watch Data", style={"text-align": "center"}),
# Dropdowns
html.Div(
className="row",
children=[
# First dropdown
html.Div(
children=[
html.Label(["Brand"], style={"text-align": "center"},),
dcc.Dropdown(
id="brand_dropdown",
options=[
{"label": i, "value": i}
for i in df["Brand"].sort_values().unique()
],
value=None,
clearable=True,
searchable=True,
),
],
style=dict(width="50%"),
),
# Second dropdown
html.Div(
children=[
html.Label(["Model"], style={"text-align": "center"},),
dcc.Dropdown(
id="model_dropdown",
                            value=None,
clearable=True,
searchable=True,
),
],
style=dict(width="50%"),
),
html.Div(
children=[
html.Label(["Price"], style={"text-align": "center"},),
dcc.RangeSlider(
id="range_slider",
tooltip={"placement": "bottom", "always_visible": True},
),
],
style=dict(width="50%"),
),
],
style=dict(display="flex"),
),
html.Br(),
html.Div(
[
dash_table.DataTable(
id="table",
filter_action="native",
sort_action="native",
style_cell={"textAlign": "left", "minWidth": 110, "width": 110},
style_table={"minWidth": "100%"},
style_cell_conditional=[
{"if": {"column_id": "Features"}, "textAlign": "right",},
{"if": {"column_id": "Link"}, "textAlign": "right"},
],
style_data_conditional=[
{
"if": {"row_index": "odd"},
"backgroundColor": "rgb(220, 220, 220)",
}
],
style_header={
"backgroundColor": "rgb(210, 210, 210)",
"color": "black",
"fontWeight": "bold",
},
)
]
),
]
)
# Connecting Dash Components
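# Populate the Model dropdown options based on the selected Brand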
@app.callback(
[Output(component_id="model_dropdown", component_property="options")],
[Input(component_id="brand_dropdown", component_property="value")],
)
def update_model(brand_selected):
dff = df[df["Brand"] == brand_selected]
return [[{"label": i, "value": i} for i in dff["Model"].sort_values().unique()]]
@app.callback(
[
Output(component_id="range_slider", component_property="min"),
Output(component_id="range_slider", component_property="max"),
Output(component_id="range_slider", component_property="value"),
],
[
Input(component_id="brand_dropdown", component_property="value"),
Input(component_id="model_dropdown", component_property="value"),
],
)
def update_slider(brand_selected, model_selected):
dff = df[(df["Brand"] == brand_selected) & (df["Model"] == model_selected)]
return (
dff["Price"].min(),
dff["Price"].max(),
[dff["Price"].min(), dff["Price"].max()],
)
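# Rebuild the table columns and data from the current Brand, Model and price-range selections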
@app.callback(
[
Output(component_id="table", component_property="columns"),
Output(component_id="table", component_property="data"),
],
[
Input(component_id="brand_dropdown", component_property="value"),
Input(component_id="model_dropdown", component_property="value"),
Input(component_id="range_slider", component_property="value"),
],
)
def update_table(brand_selected, model_selected, range):
if brand_selected is None and model_selected is None:
dff = df
elif model_selected is None:
dff = df[df["Brand"] == brand_selected]
else:
dff = df[
(df["Brand"] == brand_selected)
& (df["Model"] == model_selected)
& (df["Price"] >= range[0])
& (df["Price"] <= range[1])
]
return (
[
{"name": i, "id": i, "hideable": True, "type": "numeric", "format": money}
if i == "Price"
else {"name": i, "id": i, "hideable": True}
for i in dff.columns
],
dff.to_dict("records"),
)
if __name__ == "__main__":
app.run_server(debug=True)
|
import dash
from dash.dependencies import Input, Output
from dash import dash_table
from dash import dcc
from dash import html
import pandas as pd
# Import data into pandas
df = pd.read_csv("data.csv")
df["Condition"] = df["Condition Category"]
df = df.drop(["Condition Category", "Missed Prices", "Index", "SKU"], axis=1)
df = df[
[
"Brand",
"Model",
"Reference",
"Year",
"Condition",
"Papers",
"Box",
"Movement",
"Dimensions",
"Gender",
"Case",
"Bracelet",
"Crystal",
"Dial Color",
"Price",
"Features",
"Link",
]
]
app = dash.Dash(__name__)
money = dash_table.FormatTemplate.money(0)
# App Layout
app.layout = html.Div(
[
# Title
html.H1("Watch Data", style={"text-align": "center"}),
# Dropdowns
html.Div(
className="row",
children=[
# First dropdown
html.Div(
children=[
html.Label(["Brand"], style={"text-align": "center"},),
dcc.Dropdown(
id="brand_dropdown",
options=[
{"label": i, "value": i}
for i in df["Brand"].sort_values().unique()
],
value=None,
clearable=True,
searchable=True,
),
],
style=dict(width="50%"),
),
# Second dropdown
html.Div(
children=[
html.Label(["Model"], style={"text-align": "center"},),
dcc.Dropdown(
id="model_dropdown",
                            value=None,
clearable=True,
searchable=True,
),
],
style=dict(width="50%"),
),
html.Div(
children=[
html.Label(["Price"], style={"text-align": "center"},),
dcc.RangeSlider(
id="range_slider",
tooltip={"placement": "bottom", "always_visible": True},
),
],
style=dict(width="50%"),
),
],
style=dict(display="flex"),
),
html.Br(),
html.Div(
[
dash_table.DataTable(
id="table",
filter_action="native",
sort_action="native",
style_cell={"textAlign": "left", "minWidth": 110, "width": 110},
style_table={"minWidth": "100%"},
style_cell_conditional=[
{"if": {"column_id": "Features"}, "textAlign": "right",},
{"if": {"column_id": "Link"}, "textAlign": "right"},
],
style_data_conditional=[
{
"if": {"row_index": "odd"},
"backgroundColor": "rgb(220, 220, 220)",
}
],
style_header={
"backgroundColor": "rgb(210, 210, 210)",
"color": "black",
"fontWeight": "bold",
},
)
]
),
]
)
# Connecting Dash Components
@app.callback(
[Output(component_id="model_dropdown", component_property="options")],
[Input(component_id="brand_dropdown", component_property="value")],
)
def update_model(brand_selected):
dff = df[df["Brand"] == brand_selected]
return [[{"label": i, "value": i} for i in dff["Model"].sort_values().unique()]]
@app.callback(
[
Output(component_id="range_slider", component_property="min"),
Output(component_id="range_slider", component_property="max"),
Output(component_id="range_slider", component_property="value"),
],
[
Input(component_id="brand_dropdown", component_property="value"),
Input(component_id="model_dropdown", component_property="value"),
],
)
def update_slider(brand_selected, model_selected):
    dff = df[(df["Brand"] == brand_selected) & (df["Model"] == model_selected)]
    if dff.empty:
        # Fall back to the full data set so the slider gets valid bounds
        # before both dropdowns have a selection.
        dff = df
    return (
        dff["Price"].min(),
        dff["Price"].max(),
        [dff["Price"].min(), dff["Price"].max()],
    )
@app.callback(
[
Output(component_id="table", component_property="columns"),
Output(component_id="table", component_property="data"),
],
[
Input(component_id="brand_dropdown", component_property="value"),
Input(component_id="model_dropdown", component_property="value"),
Input(component_id="range_slider", component_property="value"),
],
)
def update_table(brand_selected, model_selected, price_range):
    if brand_selected is None and model_selected is None:
        dff = df
    elif model_selected is None:
        dff = df[df["Brand"] == brand_selected]
    else:
        dff = df[
            (df["Brand"] == brand_selected)
            & (df["Model"] == model_selected)
            & (df["Price"] >= price_range[0])
            & (df["Price"] <= price_range[1])
        ]
return (
[
{"name": i, "id": i, "hideable": True, "type": "numeric", "format": money}
if i == "Price"
else {"name": i, "id": i, "hideable": True}
for i in dff.columns
],
dff.to_dict("records"),
)
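# Illustrative sketch, not part of the original app: the column spec returned
# by update_table() applies the money format only to the "Price" column. The
# column names below are made up; the helper is defined but never called.
def _example_column_spec():
    example_columns = ["Brand", "Price"]
    return [
        {"name": i, "id": i, "hideable": True, "type": "numeric", "format": money}
        if i == "Price"
        else {"name": i, "id": i, "hideable": True}
        for i in example_columns
    ]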
if __name__ == "__main__":
app.run_server(debug=True)
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: LicenseRef-BSD-5-Clause-Nordic
import argparse
import yaml
from os import path
import sys
from pprint import pformat
PERMITTED_STR_KEYS = ['size', 'region']
END_TO_START = 'end_to_start'
START_TO_END = 'start_to_end'
COMPLEX = 'complex'
def remove_item_not_in_list(list_to_remove_from, list_to_check):
to_remove = [x for x in list_to_remove_from.copy() if x not in list_to_check and x != 'app']
    for item in to_remove:
        list_to_remove_from.remove(item)
def item_is_placed(d, item, after_or_before):
assert after_or_before in ['after', 'before']
return after_or_before in d['placement'] and d['placement'][after_or_before][0] == item
def resolve_one_of(reqs, partitions):
def empty_one_of(one_of_list):
return RuntimeError("'one_of' dict did not evaluate to any partition. "
"Available partitions {}, one_of {}".format(partitions, one_of_list))
for k, v in reqs.items():
if isinstance(v, dict):
if 'one_of' in v.keys():
if len(v.keys()) != 1:
raise RuntimeError("'one_of' must be the only key in its dict")
# Now fetch the first existing partition. Note that the value must be a list even if there is only
# one entry.
reqs[k] = [partition for partition in v['one_of'] if partition in partitions][:1]
if len(reqs[k]) == 0:
raise empty_one_of(v['one_of'])
else:
resolve_one_of(v, partitions)
# 'one_of' dicts can occur inside lists of partitions.
        # Dicts with a 'one_of' key are the only dicts supported inside lists.
elif isinstance(v, list):
            # Resolve all 'one_of' dicts inside the list
to_remove = list()
to_add = list()
for i in v:
if isinstance(i, dict):
if 'one_of' not in i.keys():
raise RuntimeError("Found illegal dict inside list. Only 'one_of' dicts are allowed")
try:
to_add.append([partition for partition in i['one_of'] if partition in partitions][0])
except IndexError:
raise empty_one_of(i['one_of'])
to_remove.append(i)
if to_add:
reqs[k] = [i if i not in to_remove else to_add.pop(0) for i in v]
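# Illustrative sketch, not part of the original script: resolve_one_of()
# collapses a 'one_of' dict to the first entry that names an existing
# partition. The partition names are made up; the helper is never called.
def _example_resolve_one_of():
    reqs = {'b': {'placement': {'after': {'one_of': ['x0', 'a']}}}}
    resolve_one_of(reqs, ['a', 'b', 'start', 'end'])
    # 'x0' does not exist, so the 'one_of' dict collapses to ['a'].
    assert reqs == {'b': {'placement': {'after': ['a']}}}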
def remove_all_zero_sized_partitions(reqs, to_delete=None):
first = False
if to_delete is None:
to_delete = list()
first = True
for k, v in reqs.items():
if 'size' in v and v['size'] == 0:
to_delete.append(k)
remove_all_zero_sized_partitions({k: v for k, v in reqs.items() if k not in to_delete}, to_delete)
if 'share_size' in v.keys():
non_zero_partitions = [p for p in reqs if 'size' not in reqs[p] or reqs[p]['size'] != 0]
actual_partitions = v['share_size'] if not isinstance(v['share_size'], dict) else v['share_size']['one_of']
remove_item_not_in_list(actual_partitions, non_zero_partitions)
if not v['share_size'] or ('one_of' in v['share_size'] and len(v['share_size']['one_of']) == 0):
del v['share_size']
if 'size' not in v.keys():
# The partition has no size, delete it, and rerun this function with the new reqs.
to_delete.append(k)
remove_all_zero_sized_partitions({k: v for k, v in reqs.items() if k not in to_delete}, to_delete)
if first and to_delete:
for k in list(set(to_delete)):
print (f"Dropping partition '{k}' since its size is 0.")
del reqs[k]
def remove_irrelevant_requirements(reqs):
remove_all_zero_sized_partitions(reqs)
# Verify that no partitions define an empty 'placement'
for k, v in reqs.items():
if 'placement' in v.keys() and len(v['placement']) == 0:
raise RuntimeError("Found empty 'placement' property for partition '{}'".format(k))
# Exchange all occurrences of 'one_of' list, with the first existing partition in the 'one_of' list.
# Extend the keys given as input with 'end' and 'start' as these are also valid references.
resolve_one_of(reqs, list(reqs.keys()) + ['end', 'start'])
# Remove dependencies to partitions which are not present
for k, v in reqs.items():
for before_after in ['before', 'after']:
if 'placement' in v.keys() and before_after in v['placement'].keys():
remove_item_not_in_list(v['placement'][before_after], [*reqs.keys(), 'start', 'end'])
if not v['placement'][before_after]:
del v['placement'][before_after]
if 'span' in v.keys():
remove_item_not_in_list(v['span'], reqs.keys())
if 'inside' in v.keys():
remove_item_not_in_list(v['inside'], reqs.keys())
if not v['inside']:
del v['inside']
def get_images_which_need_resolving(reqs, sub_partitions):
# Get candidates which have placement specs.
unsorted = {x for x in reqs.keys() if 'placement' in reqs[x].keys() and ('before' in reqs[x]['placement'].keys()
or 'after' in reqs[x]['placement'].keys())}
# Sort sub_partitions by whether they are inside other sub_partitions. Innermost first.
sorted_subs = sorted(sub_partitions.values(), key=lambda x: len(x['span']))
# Sort candidates by whether they are part of a sub_partitions.
# sub_partition parts come last in the result list so they are more likely
# to end up being placed next to each other, since they are inserted last.
result = []
for sub in sorted_subs:
result = [part for part in sub['span'] if part in unsorted and part not in result] + result
# Lastly, place non-partitioned parts at the front.
result = [part for part in unsorted if part not in result] + result
return result
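# Illustrative sketch, not part of the original script: partitions spanned by
# a container ('p' below, spanned by 's') are placed at the end of the result
# so they end up next to each other. Names are made up; never called.
def _example_get_images_which_need_resolving():
    reqs = {'a': {'placement': {'after': ['start']}},
            'b': {'placement': {'before': ['end']}},
            'p': {'placement': {'after': ['a']}}}
    result = get_images_which_need_resolving(reqs, {'s': {'span': ['p']}})
    assert set(result) == {'a', 'b', 'p'} and result[-1] == 'p'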
def solve_direction(reqs, sub_partitions, unsolved, solution, ab):
assert ab in ['after', 'before']
current_index = 0
pool = solution + list(sub_partitions.keys())
current = pool[current_index]
while current:
depends = [x for x in unsolved if item_is_placed(reqs[x], current, ab)]
if depends:
# Place based on current, or based on the first/last element in the span of current.
if ab == 'before':
anchor = current if current in solution else next(solved for solved in solution
if solved in sub_partitions[current]['span'])
solution.insert(solution.index(anchor), depends[0])
else:
anchor = current if current in solution else next(solved for solved in reversed(solution)
if solved in sub_partitions[current]['span'])
solution.insert(solution.index(anchor) + 1, depends[0])
unsolved.remove(depends[0])
current = depends[0]
else:
current_index += 1
if current_index >= len(pool):
break
current = pool[current_index]
def solve_first_last(reqs, unsolved, solution):
for fl in [('after', 'start', lambda x: solution.insert(0, x)), ('before', 'end', solution.append)]:
first_or_last = [x for x in reqs.keys() if 'placement' in reqs[x]
and fl[0] in reqs[x]['placement'].keys()
and fl[1] in reqs[x]['placement'][fl[0]]]
if first_or_last:
fl[2](first_or_last[0])
if first_or_last[0] in unsolved:
unsolved.remove(first_or_last[0])
def solve_inside(reqs, sub_partitions):
for key, value in reqs.items():
if 'inside' in value.keys():
sub_partitions[value['inside'][0]]['span'].append(key)
def clean_sub_partitions(reqs, sub_partitions):
keys_to_delete = list()
new_deletion = True
# Remove empty partitions and partitions containing only empty partitions.
while new_deletion:
new_deletion = False
for key, value in sub_partitions.items():
if (len(value['span']) == 0) or all(x in keys_to_delete for x in value['span']):
if key not in keys_to_delete:
keys_to_delete.append(key)
new_deletion = True
for key in keys_to_delete:
print (f"Dropping partition '{key}' since it is empty.")
del sub_partitions[key]
# "Flatten" by changing all span lists to contain the innermost partitions.
done = False
while not done:
done = True
for key, value in sub_partitions.items():
assert len(value['span']) > 0, "partition {} is empty".format(key)
value['orig_span'] = value['span'].copy() # Take a "backup" of the span.
for part in (part for part in value['span'] if part in sub_partitions):
value['span'].extend(sub_partitions[part]['span'])
value['span'].remove(part)
value['span'] = list(set(value['span'])) # remove duplicates
done = False
for part in (part for part in value['span'] if part not in sub_partitions and part not in reqs):
value['span'].remove(part)
def convert_str_to_list(with_str):
for k, v in with_str.items():
if isinstance(v, dict):
convert_str_to_list(v)
elif isinstance(v, str) and k not in PERMITTED_STR_KEYS:
with_str[k] = list()
with_str[k].append(v)
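# Illustrative sketch, not part of the original script: plain strings become
# single-element lists, except for keys in PERMITTED_STR_KEYS ('size' and
# 'region'). The configuration is made up; the helper is never called.
def _example_convert_str_to_list():
    config = {'placement': {'before': 'app'}, 'region': 'flash_primary'}
    convert_str_to_list(config)
    assert config == {'placement': {'before': ['app']}, 'region': 'flash_primary'}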
def resolve(reqs):
convert_str_to_list(reqs)
solution = list(['app'])
remove_irrelevant_requirements(reqs)
sub_partitions = {k: v for k, v in reqs.items() if 'span' in v}
reqs = {k: v for k, v in reqs.items() if 'span' not in v}
solve_inside(reqs, sub_partitions)
clean_sub_partitions(reqs, sub_partitions)
unsolved = get_images_which_need_resolving(reqs, sub_partitions)
solve_first_last(reqs, unsolved, solution)
while unsolved:
solve_direction(reqs, sub_partitions, unsolved, solution, 'before')
solve_direction(reqs, sub_partitions, unsolved, solution, 'after')
# Validate partition spanning.
for sub in sub_partitions:
indices = [solution.index(part) for part in sub_partitions[sub]['span']]
assert ((not indices) or (max(indices) - min(indices) + 1 == len(indices))), \
"partition {} ({}) does not span over consecutive parts." \
" Solution: {}".format(sub, str(sub_partitions[sub]['span']), str(solution))
        for part in sub_partitions[sub]['span']:
            assert (part in solution), "Part {} of partition {} has not been placed.".format(part, sub)
return solution, sub_partitions
def shared_size(reqs, share_with, total_size):
sharer_count = reqs[share_with]['sharers']
size = sizeof(reqs, share_with, total_size)
if share_with == 'app' or ('span' in reqs[share_with].keys() and 'app' in reqs[share_with]['span']):
size /= (sharer_count + 1)
return int(size)
def get_size_source(reqs, sharer):
size_source = sharer
while 'share_size' in reqs[size_source].keys():
# Find "original" source.
size_source = reqs[size_source]['share_size'][0]
return size_source
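# Illustrative sketch, not part of the original script: get_size_source()
# follows a chain of 'share_size' references back to the partition that
# actually defines a size. Names are made up; the helper is never called.
def _example_get_size_source():
    reqs = {'a': {'size': 100},
            'b': {'share_size': ['a']},
            'c': {'share_size': ['b']}}
    assert get_size_source(reqs, 'c') == 'a'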
def set_shared_size(all_reqs, total_size):
for req in all_reqs.keys():
if 'share_size' in all_reqs[req].keys():
size_source = get_size_source(all_reqs, req)
if 'sharers' not in all_reqs[size_source].keys():
all_reqs[size_source]['sharers'] = 0
all_reqs[size_source]['sharers'] += 1
all_reqs[req]['share_size'] = [size_source]
new_sizes = dict()
# Find all partitions which share size with 'app' or a container partition which spans 'app'.
dynamic_size_sharers = get_dependent_partitions(all_reqs, 'app')
static_size_sharers = [k for k, v in all_reqs.items() if 'share_size' in v.keys() and k not in dynamic_size_sharers]
for req in static_size_sharers:
all_reqs[req]['size'] = shared_size(all_reqs, all_reqs[req]['share_size'][0], total_size)
for req in dynamic_size_sharers:
new_sizes[req] = shared_size(all_reqs, all_reqs[req]['share_size'][0], total_size)
# Update all sizes after-the-fact or else the calculation will be messed up.
for key, value in new_sizes.items():
all_reqs[key]['size'] = value
def get_dependent_partitions(all_reqs, target):
return [k for k, v in all_reqs.items() if 'share_size' in v.keys()
and (v['share_size'][0] == target
or ('span' in all_reqs[v['share_size'][0]].keys()
and target in all_reqs[v['share_size'][0]]['span']))]
def app_size(reqs, total_size):
size = total_size - sum([req['size'] for name, req in reqs.items() if 'size' in req.keys() and name != 'app'])
return size
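# Illustrative sketch, not part of the original script: 'app' is sized with
# whatever is left once all fixed-size partitions are accounted for. The
# numbers are made up; the helper is never called.
def _example_app_size():
    reqs = {'a': {'size': 100}, 'b': {'size': 200}, 'app': {}}
    assert app_size(reqs, total_size=1000) == 700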
def verify_layout(reqs, solution, total_size, flash_start):
# Verify no overlap, that all flash is assigned, and that the total amount of flash
# assigned corresponds to the total size available.
expected_address = flash_start + reqs[solution[0]]['size']
for p in solution[1:]:
actual_address = reqs[p]['address']
if actual_address != expected_address:
raise RuntimeError("Error when inspecting {}, invalid address {}".format(p, actual_address))
expected_address += reqs[p]['size']
last = reqs[solution[-1]]
assert last['address'] + last['size'] == flash_start + total_size
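# Illustrative sketch, not part of the original script: a layout passes
# verify_layout() when the partitions are contiguous and fill the whole
# region. The numbers are made up; the helper is never called.
def _example_verify_layout():
    reqs = {'a': {'address': 0, 'size': 100}, 'app': {'address': 100, 'size': 900}}
    verify_layout(reqs, ['a', 'app'], total_size=1000, flash_start=0)  # Raises nothing.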
def set_addresses_and_align(reqs, sub_partitions, solution, size, start=0):
all_reqs = dict(reqs, **sub_partitions)
set_shared_size(all_reqs, size)
dynamic_partitions = ['app']
dynamic_partitions += get_dependent_partitions(all_reqs, 'app')
reqs['app']['size'] = app_size(reqs, size)
reqs[solution[0]]['address'] = start
if len(reqs) > 1:
_set_addresses_and_align(reqs, sub_partitions, solution, size, start, dynamic_partitions)
verify_layout(reqs, solution, size, start)
def first_partition_has_been_aligned(first, solution):
return 'placement' in first and 'align' in first['placement'] and 'end' in first['placement']['align'] \
and solution[1] == 'EMPTY_0'
def _set_addresses_and_align(reqs, sub_partitions, solution, size, start, dynamic_partitions):
# Perform address assignment and alignment in two steps, first from start to app, then from end to app.
for i in range(0, solution.index('app') + 1):
current = solution[i]
if i != 0:
previous = solution[i - 1]
reqs[current]['address'] = reqs[previous]['address'] + reqs[previous]['size']
        # To avoid messing with the vector table, don't store an empty partition first.
insert_empty_partition_before = i != 0
# Special handling is needed when aligning the first partition
if i == 0 and first_partition_has_been_aligned(reqs[current], solution):
continue
if align_if_required(i, dynamic_partitions, insert_empty_partition_before, reqs, solution):
_set_addresses_and_align(reqs, sub_partitions, solution, size, start, dynamic_partitions)
for i in range(len(solution) - 1, solution.index('app'), -1):
current = solution[i]
if i == len(solution) - 1:
reqs[current]['address'] = (start + size) - reqs[current]['size']
else:
higher_partition = solution[i + 1]
reqs[current]['address'] = reqs[higher_partition]['address'] - reqs[current]['size']
if align_if_required(i, dynamic_partitions, False, reqs, solution):
_set_addresses_and_align(reqs, sub_partitions, solution, size, start, dynamic_partitions)
def align_if_required(i, dynamic_partitions, move_up, reqs, solution):
current = solution[i]
if 'placement' in reqs[current] and 'align' in reqs[current]['placement']:
required_offset = align_partition(current, reqs, move_up, dynamic_partitions)
if required_offset:
solution_index = i if move_up else i + 1
solution.insert(solution_index, required_offset)
return True
return False
def align_partition(current, reqs, move_up, dynamic_partitions):
required_offset = get_required_offset(align=reqs[current]['placement']['align'], start=reqs[current]['address'],
size=reqs[current]['size'], move_up=move_up)
if not required_offset:
return None
empty_partition_size = required_offset
if current not in dynamic_partitions:
if move_up:
empty_partition_address = reqs[current]['address']
reqs[current]['address'] += required_offset
else:
empty_partition_address = reqs[current]['address'] + reqs[current]['size']
if reqs[current]['address'] != 0: # Special handling for the first partition as it cannot be moved down
reqs[current]['address'] -= required_offset
elif not move_up:
empty_partition_address, empty_partition_size = \
align_dynamic_partition(dynamic_partitions, current, reqs, required_offset)
else:
raise RuntimeError("Invalid combination, can not have dynamic partition in front of app with alignment")
e = 'EMPTY_{}'.format(len([x for x in reqs.keys() if 'EMPTY' in x]))
reqs[e] = {'address': empty_partition_address,
'size': empty_partition_size,
'placement': {'before' if move_up else 'after': [current]}}
if current not in dynamic_partitions:
# We have stolen space from the 'app' partition. Hence, all partitions which share size with 'app' partition
# must have their sizes reduced. Note that the total amount of 'stealing' is divided between the partitions
# sharing size with app (including 'app' itself).
for p in dynamic_partitions:
reqs[p]['size'] = reqs[p]['size'] - (reqs[e]['size'] // len(dynamic_partitions))
return e
def align_dynamic_partition(app_dep_parts, current, reqs, required_offset):
# Since this is a dynamic partition, the introduced empty partition will take space from the 'app' partition
# and the partition being aligned. Take special care to ensure the offset becomes correct.
required_offset *= 2
for p in app_dep_parts:
reqs[p]['size'] -= required_offset // 2
reqs[current]['address'] -= required_offset
empty_partition_address = reqs[current]['address'] + reqs[current]['size']
empty_partition_size = required_offset
return empty_partition_address, empty_partition_size
def get_required_offset(align, start, size, move_up):
if len(align) != 1 or ('start' not in align and 'end' not in align):
raise RuntimeError("Invalid alignment requirement {}".format(align))
end = start + size
align_start = 'start' in align
try:
if (align_start and start % align['start'] == 0) or (not align_start and end % align['end'] == 0):
return 0
if move_up:
return align['start'] - (start % align['start']) if align_start else align['end'] - (end % align['end'])
else:
if align_start:
return start % align['start']
else:
# Special handling is needed if start is 0 since this partition can not be moved down
return end % align['end'] if start != 0 else align['end'] - (end % align['end'])
except TypeError as err:
keyword = 'start' if align_start else 'end'
raise TypeError(f"elements in align: {{{keyword}:{align[keyword]}}} is not type of \'int\'") from err
def set_size_addr(entry, size, address):
entry['size'] = size
entry['address'] = address
def set_sub_partition_address_and_size(reqs, sub_partitions):
for sp_name, sp_value in sub_partitions.items():
size = sum([reqs[part]['size'] for part in sp_value['span']])
if size == 0:
raise RuntimeError("No compatible parent partition found for {}".format(sp_name))
address = min([reqs[part]['address'] for part in sp_value['span']])
reqs[sp_name] = sp_value
reqs[sp_name]['span'] = reqs[sp_name]['orig_span'] # Restore "backup".
set_size_addr(reqs[sp_name], size, address)
def sizeof(reqs, req, total_size):
if req == 'app':
size = app_size(reqs, total_size)
elif 'span' not in reqs[req].keys():
size = reqs[req]['size'] if 'size' in reqs[req].keys() else 0
else:
size = sum([sizeof(reqs, part, total_size) for part in reqs[req]['span']])
return size
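# Illustrative sketch, not part of the original script: the size of a
# container partition is the sum of the partitions it spans, while 'app'
# gets the remaining space. Numbers are made up; the helper is never called.
def _example_sizeof():
    reqs = {'a': {'size': 100}, 'app': {}, 's': {'span': ['a', 'app']}}
    assert sizeof(reqs, 'a', total_size=1000) == 100
    assert sizeof(reqs, 'app', total_size=1000) == 900
    assert sizeof(reqs, 's', total_size=1000) == 1000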
def load_reqs(input_config):
reqs = dict()
for ymlpath in input_config:
if path.exists(ymlpath):
with open(ymlpath, 'r') as f:
loaded_reqs = yaml.safe_load(f)
if loaded_reqs is None:
continue
for key in loaded_reqs.keys():
if key in reqs.keys() and loaded_reqs[key] != reqs[key]:
raise RuntimeError("Conflicting configuration found for '{}' value for key '{}' differs."
"val1: {} val2: {} ".format(f.name, key, loaded_reqs[key], reqs[key]))
reqs.update(loaded_reqs)
return reqs
def get_dynamic_area_start_and_size(static_config, flash_size):
# Remove app from this dict to simplify the case where partitions before and after are removed.
proper_partitions = [config for name, config in static_config.items()
if 'span' not in config.keys() and name != 'app']
starts = {flash_size} | {config['address'] for config in proper_partitions}
ends = {0} | {config['address'] + config['size'] for config in proper_partitions}
gaps = list(zip(sorted(ends - starts), sorted(starts - ends)))
    assert len(gaps) == 1, "Incorrect number of gaps found"
start, end = gaps[0]
return start, end - start
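# Illustrative sketch, not part of the original script: with one statically
# placed partition at the start of a 0x800 byte region, the dynamic area is
# everything after it. The static configuration is made up; never called.
def _example_get_dynamic_area_start_and_size():
    static_config = {'first': {'address': 0, 'size': 0x100},
                     'app': {'address': 0x100, 'size': 0x700}}
    start, size = get_dynamic_area_start_and_size(static_config, 0x800)
    assert (start, size) == (0x100, 0x700)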
def get_region_config(pm_config, region_config, static_conf=None):
start = region_config['base_address']
size = region_config['size']
placement_strategy = region_config['placement_strategy']
region_name = region_config['name']
device = region_config['device']
if placement_strategy in [END_TO_START, START_TO_END]:
solve_simple_region(pm_config, start, size, placement_strategy, region_name, device, static_conf)
else:
solve_complex_region(pm_config, start, size, placement_strategy, region_name, device, static_conf)
def solve_simple_region(pm_config, start, size, placement_strategy, region_name, device, static_conf):
reserved = 0
if static_conf:
verify_static_conf(size, start, placement_strategy, static_conf)
reserved = sum([config['size'] for name, config in static_conf.items()
if 'region' in config.keys() and config['region'] == region_name and name != 'app'])
if placement_strategy == END_TO_START:
address = start + size - reserved
else:
address = start + reserved
for partition_name in pm_config:
if placement_strategy == END_TO_START:
address -= pm_config[partition_name]['size']
pm_config[partition_name]['address'] = address
if placement_strategy == START_TO_END:
address += pm_config[partition_name]['size']
if device:
pm_config[partition_name]['device'] = device
# Generate the region partition containing the non-reserved memory.
# But first, verify that the user hasn't created a partition with the name of the region.
if region_name in pm_config:
raise RuntimeError(f"Found partition named {region_name}, this is the name of a region, and is a reserved name")
pm_config[region_name] = dict()
pm_config[region_name]['region'] = region_name
if placement_strategy == END_TO_START:
pm_config[region_name]['address'] = start
pm_config[region_name]['size'] = address - start
else:
pm_config[region_name]['address'] = address
pm_config[region_name]['size'] = (start + size) - address
def verify_static_conf(size, start, placement_strategy, static_conf):
    # Verify that all statically defined partitions have a given address,
    # and that they are packed at the end/start of the region.
starts = {start + size} | {c['address'] for c in static_conf.values() if 'size' in c}
ends = {start} | {c['address'] + c['size'] for c in static_conf.values() if 'size' in c}
gaps = list(zip(sorted(ends - starts), sorted(starts - ends)))
if placement_strategy == START_TO_END:
start_end_correct = gaps[0][0] == start + size
else:
start_end_correct = gaps[0][0] == start
if len(gaps) != 1 or not start_end_correct:
raise RuntimeError("Statically defined partitions are not packed at the start/end of region")
def solve_complex_region(pm_config, start, size, placement_strategy, region_name, device, static_conf):
free_size = size
if static_conf:
start, free_size = get_dynamic_area_start_and_size(static_conf, free_size)
        # If nothing is unresolved (only 'app' remains), simply return the predefined config with 'app'.
if len(pm_config) == 1:
pm_config.update(static_conf)
pm_config['app']['address'] = start
pm_config['app']['size'] = free_size
return
solution, sub_partitions = resolve(pm_config)
set_addresses_and_align(pm_config, sub_partitions, solution, free_size, start)
set_sub_partition_address_and_size(pm_config, sub_partitions)
if static_conf:
# Merge the results, take the new 'app' as that has the correct size.
pm_config.update({name: config for name, config in static_conf.items() if name != 'app'})
def write_yaml_out_file(pm_config, out_path):
def hexint_presenter(dumper, data):
return dumper.represent_int(hex(data))
yaml.add_representer(int, hexint_presenter)
yamldump = yaml.dump(pm_config)
with open(out_path, 'w') as out_file:
out_file.write(yamldump)
def parse_args():
parser = argparse.ArgumentParser(
        description='''Parse given 'pm.yml' partition manager configuration files to deduce the placement of partitions.
The partitions and their relative placement are defined in the 'pm.yml' files. The paths to the 'pm.yml' files are used
to locate 'autoconf.h' files. These are used to find the partition sizes, as well as the total flash size.
This script generates a "pm_config.h" file which contains the addresses and sizes of all partitions.
"pm_config.h" is placed in the same folder as the given 'pm.yml' file.''',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--input-files", required=True, type=str, nargs="+",
help="List of paths to input yaml files. ")
parser.add_argument("--output-partitions", required=True, type=str,
help="Path to output partition configuration file.")
parser.add_argument("--output-regions", required=True, type=str,
help="Path to output regions configuration file.")
parser.add_argument("--static-config", required=False, type=argparse.FileType(mode='r'),
help="Path static configuration.")
    parser.add_argument("--regions", required=False, type=str, nargs='*',
                        help="Space separated list of regions. For each region specified here, one must specify "
                             "--{region_name}-base-address and --{region_name}-size. If the region is associated "
                             "with a driver, the device name must be given in --{region_name}-device (e.g. an "
                             "external flash driver). For regions with placement strategy 'complex' it is possible "
                             "to specify --{region_name}-dynamic-partition to set the name of the dynamic partition "
                             "which occupies all unused area.")
main_args, region_args = parser.parse_known_args()
# Create new instance to parse regions
parser = argparse.ArgumentParser()
for x in main_args.regions:
# Generate arguments for each region dynamically
parser.add_argument(f'--{x}-size', required=True, type=lambda z: int(z, 0))
parser.add_argument(f'--{x}-base-address', required=False, type=lambda z: int(z, 0), default=0)
parser.add_argument(f'--{x}-placement-strategy', required=False, type=str,
choices=[START_TO_END, END_TO_START, COMPLEX], default=START_TO_END)
parser.add_argument(f'--{x}-device', required=False, type=str, default='')
parser.add_argument(f'--{x}-dynamic-partition', required=False, type=str, help="Name of dynamic partition")
ranges_configuration = parser.parse_args(region_args)
return main_args, ranges_configuration
def replace_app_with_dynamic_partition(d, dynamic_partition_name):
for k, v in d.items():
if isinstance(v, dict):
replace_app_with_dynamic_partition(v, dynamic_partition_name)
elif isinstance(v, list) and "app" in v:
d[k] = [o if o != "app" else dynamic_partition_name for o in v]
        elif isinstance(v, str) and v == "app":
            # Assign through the dict; rebinding the loop variable would have no effect.
            d[k] = dynamic_partition_name
def set_flash_primary_region(pm_config):
for v in pm_config.values():
if 'region' not in v:
v['region'] = 'flash_primary'
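# Illustrative sketch, not part of the original script: partitions without an
# explicit 'region' default to 'flash_primary'. The configuration is made up;
# the helper is never called.
def _example_set_flash_primary_region():
    pm_config = {'a': {}, 'b': {'region': 'sram'}}
    set_flash_primary_region(pm_config)
    assert pm_config['a']['region'] == 'flash_primary'
    assert pm_config['b']['region'] == 'sram'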
def fix_syntactic_sugar(pm_config):
set_flash_primary_region(pm_config)
def get_region_config_from_args(args, ranges_configuration):
regions = {x: {k.replace(f'{x}_', ''): v
for k, v in vars(ranges_configuration).items() if k.startswith(x)}
for x in [y.replace('-', '_') for y in args.regions]} # Replace - with _ to match argparse namespace
return regions
def solve_region(pm_config, region, region_config, static_config):
solution = dict()
region_config['name'] = region
partitions = {k: v for k, v in pm_config.items() if region in v['region']}
static_partitions = {k: v for k, v in static_config.items() if region in v['region']}
get_region_config(partitions, region_config, static_partitions)
solution.update(partitions)
if region_config['dynamic_partition']:
solution[region_config['dynamic_partition'].strip()] = solution['app']
del solution['app']
replace_app_with_dynamic_partition(solution, region_config['dynamic_partition'].strip())
return solution
def load_static_configuration(args, pm_config):
static_config = yaml.safe_load(args.static_config)
fix_syntactic_sugar(static_config)
# Delete all statically defined partitions from the pm_config dict.
# This is done since all partitions in pm_config will be resolved.
for statically_defined_image in static_config:
if statically_defined_image in pm_config and statically_defined_image:
print (f"Dropping partition '{statically_defined_image}' since it is statically defined.")
del pm_config[statically_defined_image]
return static_config
def main():
args, ranges_configuration = parse_args()
pm_config = load_reqs(args.input_files)
static_config = load_static_configuration(args, pm_config) if args.static_config else dict()
pm_config['app'] = dict()
fix_syntactic_sugar(pm_config)
regions = get_region_config_from_args(args, ranges_configuration)
solution = dict()
for region, region_config in regions.items():
solution.update(solve_region(pm_config, region, region_config, static_config))
write_yaml_out_file(solution, args.output_partitions)
write_yaml_out_file(regions, args.output_regions)
def expect_addr_size(td, name, expected_address, expected_size):
if expected_size:
assert td[name]['size'] == expected_size, \
"Size of {} was {}, expected {}.\ntd:{}".format(name, td[name]['size'], expected_size, pformat(td))
if expected_address:
assert td[name]['address'] == expected_address, \
"Address of {} was {}, expected {}.\ntd:{}".format(name, td[name]['address'], expected_address, pformat(td))
def expect_list(expected, actual):
expected_list = list(sorted(expected))
actual_list = list(sorted(actual))
assert sorted(expected_list) == sorted(actual_list), "Expected list %s, was %s" % (str(expected_list), str(actual_list))
def test():
list_one = [1, 2, 3, 4]
items_to_check = [4]
remove_item_not_in_list(list_one, items_to_check)
assert list_one[0] == 4
assert len(list_one) == 1
test_config = {
'first': {'address': 0, 'size': 10},
# Gap from deleted partition.
'app': {'address': 20, 'size': 10},
# Gap from deleted partition.
'fourth': {'address': 40, 'size': 60}}
start, size = get_dynamic_area_start_and_size(test_config, 100)
assert start == 10
assert size == 40-10
test_config = {
'first': {'address': 0, 'size': 10},
'second': {'address': 10, 'size': 10},
'app': {'address': 20, 'size': 80}
# Gap from deleted partition.
}
start, size = get_dynamic_area_start_and_size(test_config, 100)
assert start == 20
assert size == 80
test_config = {
'app': {'address': 0, 'size': 10},
# Gap from deleted partition.
'second': {'address': 40, 'size': 60}}
start, size = get_dynamic_area_start_and_size(test_config, 100)
assert start == 0
assert size == 40
test_config = {
'first': {'address': 0, 'size': 10},
# Gap from deleted partition.
'app': {'address': 20, 'size': 10}}
start, size = get_dynamic_area_start_and_size(test_config, 100)
assert start == 10
assert size == 100 - 10
# Verify that all 'end' and 'start' are valid references in 'one_of' dicts
td = {
'a': {'placement': {'after': {'one_of': ['x0', 'x1', 'start']}}, 'size': 100},
'b': {'placement': {'before': {'one_of': ['x0', 'x1', 'end']}}, 'size': 200},
'app': {},
}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'a', 0, 100)
expect_addr_size(td, 'app', 100, 700)
expect_addr_size(td, 'b', 800, 200)
# Verify that START_TO_END region configuration is correct
td = {'b': {'size': 100, 'region': 'extflash'}}
test_region = {'name': 'extflash',
'size': 1000,
'base_address': 2000,
'placement_strategy': START_TO_END,
'device': 'some-driver-device'}
get_region_config(td, test_region)
assert td['b']['size'] == 100
assert td['extflash']['address'] == 2100
assert td['extflash']['size'] == 900
# Verify that RAM configuration is correct
td = {'b': {'size': 100, 'region': 'ram'}}
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
get_region_config(td, test_region)
assert td['b']['size'] == 100
assert td['ram']['address'] == 2000
assert td['ram']['size'] == 900
# Verify that RAM configuration is correct
td = {
'b': {'size': 100, 'region': 'ram'},
'c': {'size': 200, 'region': 'ram'},
'd': {'size': 300, 'region': 'ram'}
}
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
get_region_config(td, test_region)
assert td['ram']['address'] == 2000
assert td['ram']['size'] == 400
# Can not verify the placement, as this is random
assert td['b']['size'] == 100
assert td['c']['size'] == 200
assert td['d']['size'] == 300
# Verify that RAM configuration with given static configuration is correct
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
td = {
'b': {'size': 100, 'region': 'ram'},
'c': {'size': 200, 'region': 'ram'},
'd': {'size': 300, 'region': 'ram'},
}
get_region_config(td,
test_region,
static_conf={'s1': {'size': 100,
'address': (1000+2000)-100,
'region': 'ram'},
's2': {'size': 200,
'address': (1000+2000)-100-200,
'region': 'ram'}})
assert td['ram']['address'] == 2000
assert td['ram']['size'] == 100
# Can not verify the placement, as this is random
assert td['b']['size'] == 100
assert td['c']['size'] == 200
assert td['d']['size'] == 300
    # Verify that RAM configuration with given static configuration fails if static RAM partitions are not
    # packed at the end of the region; here there is a gap between the two static partitions.
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
failed = False
td = {
'a': {'placement': {'after': 'start'}, 'size': 100},
'b': {'size': 100, 'region': 'ram'},
'c': {'size': 200, 'region': 'ram'},
'd': {'size': 300, 'region': 'ram'},
'app': {}
}
try:
get_region_config(td,
test_region,
static_conf={'s1': {'size': 100,
'address': (1000+2000)-100,
'region': 'ram'},
's2': {'size': 200,
'address': (1000+2000)-100-300,
'region': 'ram'}}) # Note 300 not 200
except RuntimeError:
failed = True
assert failed
    # Verify that RAM configuration with given static configuration fails if static RAM partitions are not
    # packed at the end of the region; here the partitions are packed, but do not reach the end of RAM.
failed = False
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
td = {
'a': {'placement': {'after': 'start'}, 'size': 100},
'b': {'size': 100, 'region': 'ram'},
'c': {'size': 200, 'region': 'ram'},
'd': {'size': 300, 'region': 'ram'},
'app': {}
}
try:
get_region_config(td,
test_region,
static_conf={'s1': {'size': 100,
'address': (1000+2000-50)-100,
'region': 'ram'}, # Note - 50
's2': {'size': 200,
'address': (1000+2000-50)-100-200,
'region': 'ram'}}) # Note - 50
except RuntimeError:
failed = True
assert failed
# Verify that all 'one_of' dicts are replaced with the first entry which corresponds to an existing partition
td = {
'a': {'placement': {'after': 'start'}, 'size': 100},
'b': {'placement': {'after': {'one_of': ['x0', 'x1', 'a', 'x2']}}, 'size': 200},
'c': {'placement': {'after': 'b'}, 'share_size': {'one_of': ['x0', 'x1', 'b', 'a']}},
'd': {'placement': {'after': 'c'}, 'share_size': {'one_of': ['a', 'b']}}, # Should take first existing
# We can use several 'one_of' - dicts inside lists
's': {'span': ['a', {'one_of': ['x0', 'b', 'd']}, {'one_of': ['x2', 'c', 'a']}]},
'app': {},
'e': {'placement': {'after': 'app'}, 'share_size': {'one_of': ['x0', 'app']}}, # app always exists
}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
    expect_addr_size(td, 'a', 0, 100) # a is placed first
    expect_addr_size(td, 'b', 100, 200) # b is after a
    expect_addr_size(td, 'c', 300, 200) # c shares size with b
    expect_addr_size(td, 'd', 500, 100) # d shares size with a
    expect_addr_size(td, 's', 0, 500) # s spans a, b and c
    expect_addr_size(td, 'app', 600, 200) # app gets the remaining space
    expect_addr_size(td, 'e', 800, 200) # e shares size with app
    # Verify that 'share_size' referring to a partition with size 0 is compatible with 'one_of' dicts
td = {
'a': {'placement': {'after': 'start'}, 'size': 0},
'b': {'placement': {'after': {'one_of': ['a', 'start']}},
'share_size': ['a']},
'c': {'placement': {'after': {'one_of': ['a', 'b', 'start']}},
'share_size': {'one_of': ['a', 'b']}},
'd': {'placement': {'after': {'one_of': ['a', 'b', 'c', 'start']}},
'share_size': {'one_of': ['a', 'b', 'c']}},
# You get the point
'e': {'placement': {'after': {'one_of': ['a', 'b', 'c', 'd', 'start']}}, 'size': 100}
}
remove_all_zero_sized_partitions(td)
assert 'a' not in td
assert 'b' not in td
assert 'c' not in td
assert 'd' not in td
    # Verify that 'share_size' referring to a partition with size 0 is compatible with 'one_of' dicts.
td = {
'a': {'placement': {'after': 'start'}, 'size': 0},
'b': {'placement': {'after': {'one_of': ['a', 'start']}},
'share_size': ['a']},
'c': {'placement': {'after': {'one_of': ['a', 'b', 'start']}},
'share_size': {'one_of': ['a', 'b']}},
'd': {'placement': {'after': {'one_of': ['a', 'b', 'c', 'start']}},
'share_size': {'one_of': ['a', 'b', 'c']}},
# You get the point
'e': {'placement': {'after': {'one_of': ['a', 'b', 'c', 'd', 'start']}}, 'size': 100},
'app': {}
}
# Perform the same test as above, but run it through the 'resolve' function this time.
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
assert 'a' not in td
assert 'b' not in td
assert 'c' not in td
assert 'd' not in td
expect_addr_size(td, 'e', 0, 100)
# Verify that an error is raised when no partition inside 'one_of' dicts exist as dict value
failed = False
td = {
'a': {'placement': {'after': {'one_of': ['x0', 'x1']}}, 'size': 100},
'app': {}
}
try:
resolve(td)
except RuntimeError:
failed = True
assert failed
# Verify that an error is raised when no partition inside 'one_of' dicts exist as list item
failed = False
td = {
'app': {},
'a': {'placement': {'after': 'app'}, 'size': 100},
's': {'span': ['a', {'one_of': ['x0', 'x1']}]},
}
try:
resolve(td)
except RuntimeError:
failed = True
assert failed
# Verify that empty placement property throws error
td = {'spm': {'placement': {'before': ['app']}, 'size': 100, 'inside': ['mcuboot_slot0']},
'mcuboot': {'placement': {'before': ['spm', 'app']}, 'size': 200},
'mcuboot_slot0': {'span': ['app']},
'invalid': {'placement': {}},
'app': {}}
failed = False
try:
s, sub_partitions = resolve(td)
except RuntimeError:
failed = True
assert failed
# Verify that offset is correct when aligning partition not at address 0
offset = get_required_offset(align={'end': 800}, start=1400, size=100, move_up=False)
assert offset == 700
# Verify that offset is correct when aligning partition at address 0
offset = get_required_offset(align={'end': 800}, start=0, size=100, move_up=False)
assert offset == 700
# Verify that offset is correct when aligning partition at address 0
# and end of first partition is larger than the required alignment.
offset = get_required_offset(align={'end': 800}, start=0, size=1000, move_up=False)
assert offset == 600
for l in [
lambda : get_required_offset(align={'end': ["CONFIG_VAR"]}, start=0, size=1000, move_up=False),
lambda : get_required_offset(align={'start': ["CONFIG_VAR"]}, start=0, size=1000, move_up=False),
lambda : get_required_offset(align={'start': [[2]]},start=0, size=1000, move_up=False)
]:
failed = False
try:
l()
except TypeError:
failed = True
assert failed, "Should have received a TypeError."
# Verify that the first partition can be aligned, and that the inserted empty partition is placed behind it.
td = {'first': {'placement': {'before': 'app', 'align': {'end': 800}}, 'size': 100}, 'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 100, 700)
expect_addr_size(td, 'app', 800, 200)
# Verify that providing a static configuration with nothing unresolved gives a valid configuration with 'app'.
static_config = {'spm': {'address': 0, 'placement': None, 'before': ['app'], 'size': 400}}
test_config = {'app': dict()}
flash_region = {
'name': 'flash_primary',
'placement_strategy': COMPLEX,
'size': 1000,
'base_address': 0,
'device': 'nordic_flash_stuff'
}
get_region_config(test_config, flash_region, static_config)
assert 'app' in test_config
assert test_config['app']['address'] == 400
assert test_config['app']['size'] == 600
assert 'spm' in test_config
assert test_config['spm']['address'] == 0
# Test a single partition with alignment where the address is smaller than the alignment value.
td = {'without_alignment': {'placement': {'before': 'with_alignment'}, 'size': 100},
'with_alignment': {'placement': {'before': 'app', 'align': {'start': 200}}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 100, 100)
expect_addr_size(td, 'with_alignment', 200, 100)
# Test alignment after 'app'
td = {'without_alignment': {'placement': {'after': 'app'}, 'size': 100},
'with_alignment': {'placement': {'after': 'without_alignment', 'align': {'start': 400}}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'app', 0, 700)
expect_addr_size(td, 'with_alignment', 800, 100)
expect_addr_size(td, 'EMPTY_0', 900, 100)
# Test two partitions with alignment where the address is smaller than the alignment value.
td = {'without_alignment': {'placement': {'before': 'with_alignment'}, 'size': 100},
'with_alignment': {'placement': {'before': 'with_alignment_2', 'align': {'end': 400}}, 'size': 100},
'with_alignment_2': {'placement': {'before': 'app', 'align': {'start': 1000}}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 10000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 100, 200)
expect_addr_size(td, 'with_alignment', 300, 100)
expect_addr_size(td, 'EMPTY_1', 400, 600)
expect_addr_size(td, 'with_alignment_2', 1000, 100)
# Test three partitions with alignment where the address is BIGGER than the alignment value.
td = {'without_alignment': {'placement': {'before': 'with_alignment'}, 'size': 10000},
'with_alignment': {'placement': {'before': 'with_alignment_2', 'align': {'end': 400}}, 'size': 100},
'with_alignment_2': {'placement': {'before': 'app', 'align': {'start': 1000}}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 10000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 10000, 300)
expect_addr_size(td, 'with_alignment', 10300, 100)
expect_addr_size(td, 'EMPTY_1', 10400, 600)
expect_addr_size(td, 'with_alignment_2', 11000, 100)
# FLASH (0x100000):
# +------------------------------------------+
# | 0x0: b0 (0x8000) |
# +---0x8000: s0 (0xc200)--------------------+
# | 0x8000: s0_pad (0x200) |
# +---0x8200: s0_image (0xc000)--------------+
# | 0x8200: mcuboot (0xc000) |
# | 0x14200: EMPTY_0 (0xe00) |
# +---0x15000: s1 (0xc200)-------------------+
# | 0x15000: s1_pad (0x200) |
# | 0x15200: s1_image (0xc000) |
# | 0x21200: EMPTY_1 (0xe00) |
# +---0x22000: mcuboot_primary (0x5d000)-----+
# | 0x22000: mcuboot_pad (0x200) |
# +---0x22200: mcuboot_primary_app (0x5ce00)-+
# | 0x22200: app (0x5ce00) |
# | 0x7f000: mcuboot_secondary (0x5d000) |
# | 0xdc000: EMPTY_2 (0x1000) |
# | 0xdd000: mcuboot_scratch (0x1e000) |
# | 0xfb000: mcuboot_storage (0x4000) |
# | 0xff000: provision (0x1000) |
# +------------------------------------------+
# Verify that alignment works with partition which shares size with app.
td = {'b0': {'placement': {'after': 'start'}, 'size': 0x8000},
's0': {'span': ['s0_pad', 's0_image']},
's0_pad': {'placement': {'after': 'b0', 'align': {'start': 0x1000}}, 'share_size': 'mcuboot_pad'},
's0_image': {'span': {'one_of': ['mcuboot', 'spm', 'app']}},
'mcuboot': {'placement': {'before': 'mcuboot_primary'}, 'size': 0xc000},
's1': {'span': ['s1_pad', 's1_image']},
's1_pad': {'placement': {'after': 's0', 'align': {'start': 0x1000}}, 'share_size': 'mcuboot_pad'},
's1_image': {'placement': {'after': 's1_pad'}, 'share_size': 'mcuboot'},
'mcuboot_primary': {'span': ['mcuboot_pad', 'mcuboot_primary_app']},
'mcuboot_pad': {'placement': {'before': 'mcuboot_primary_app', 'align': {'start': 0x1000}}, 'size': 0x200},
'mcuboot_primary_app': {'span': ['app']},
'app': {},
'mcuboot_secondary': {'placement': {'after': 'mcuboot_primary', 'align': {'start': 0x1000}}, 'share_size': 'mcuboot_primary'},
'mcuboot_scratch': {'placement': {'after': 'app', 'align': {'start': 0x1000}}, 'size': 0x1e000},
'mcuboot_storage': {'placement': {'after': 'mcuboot_scratch', 'align': {'start': 0x1000}}, 'size': 0x4000},
'provision': {'placement': {'before': 'end', 'align': {'start': 0x1000}}, 'size': 0x1000}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 0x100000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 0x14200, 0xe00)
expect_addr_size(td, 'EMPTY_1', 0x21200, 0xe00)
expect_addr_size(td, 'EMPTY_2', 0xdc000, 0x1000)
assert td['mcuboot_secondary']['size'] == td['mcuboot_primary']['size']
# Verify that if a partition X uses 'share_size' with a non-existing partition, then partition X is given size 0,
# and is hence not created.
td = {'should_not_exist': {'placement': {'before': 'exists'}, 'share_size': 'does_not_exist'},
'exists': {'placement': {'before': 'app'}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
assert 'should_not_exist' not in td.keys()
# Verify that if a partition X uses 'share_size' with a non-existing partition, but has set a default size,
# then partition X is created with the default size.
td = {'should_exist': {'placement': {'before': 'exists'}, 'share_size': 'does_not_exist', 'size': 200},
'exists': {'placement': {'before': 'app'}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'should_exist', 0, 200)
td = {'spm': {'placement': {'before': ['app']}, 'size': 100},
'mcuboot': {'placement': {'before': ['spm', 'app']}, 'size': 200},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spm', 200, None)
expect_addr_size(td, 'app', 300, 700)
td = {'spm': {'placement': {'before': ['app']}, 'size': 100, 'inside': ['mcuboot_slot0']},
'mcuboot': {'placement': {'before': ['spm', 'app']}, 'size': 200},
'mcuboot_slot0': {'span': ['app']},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spm', 200, 100)
expect_addr_size(td, 'app', 300, 700)
expect_addr_size(td, 'mcuboot_slot0', 200, 800)
td = {'spm': {'placement': {'before': 'app'}, 'size': 100, 'inside': 'mcuboot_slot0'},
'mcuboot': {'placement': {'before': 'app'}, 'size': 200},
'mcuboot_pad': {'placement': {'after': 'mcuboot'}, 'inside': 'mcuboot_slot0', 'size': 10},
'app_partition': {'span': ['spm', 'app'], 'inside': 'mcuboot_slot0'},
'mcuboot_slot0': {'span': ['app', 'foo']},
'mcuboot_data': {'placement': {'after': ['mcuboot_slot0']}, 'size': 200},
'mcuboot_slot1': {'share_size': 'mcuboot_slot0', 'placement': {'after': 'mcuboot_data'}},
'mcuboot_slot2': {'share_size': 'mcuboot_slot1', 'placement': {'after': 'mcuboot_slot1'}},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spm', 210, None)
expect_addr_size(td, 'mcuboot_slot0', 200, 200)
expect_addr_size(td, 'mcuboot_slot1', 600, 200)
expect_addr_size(td, 'mcuboot_slot2', 800, 200)
expect_addr_size(td, 'app', 310, 90)
expect_addr_size(td, 'mcuboot_pad', 200, 10)
expect_addr_size(td, 'mcuboot_data', 400, 200)
td = {'spm': {'placement': {'before': ['app']}, 'size': 100, 'inside': ['mcuboot_slot0']},
'mcuboot': {'placement': {'before': ['app']}, 'size': 200},
'mcuboot_pad': {'placement': {'after': ['mcuboot']}, 'inside': ['mcuboot_slot0'], 'size': 10},
'app_partition': {'span': ['spm', 'app'], 'inside': ['mcuboot_slot0']},
'mcuboot_slot0': {'span': 'app'},
'mcuboot_data': {'placement': {'after': ['mcuboot_slot0']}, 'size': 200},
'mcuboot_slot1': {'share_size': ['mcuboot_slot0'], 'placement': {'after': ['mcuboot_data']}},
'mcuboot_slot2': {'share_size': ['mcuboot_slot1'], 'placement': {'after': ['mcuboot_slot1']}},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spm', 210, None)
expect_addr_size(td, 'mcuboot_slot0', 200, 200)
expect_addr_size(td, 'mcuboot_slot1', 600, 200)
expect_addr_size(td, 'mcuboot_slot2', 800, 200)
expect_addr_size(td, 'app', 310, 90)
expect_addr_size(td, 'mcuboot_pad', 200, 10)
expect_addr_size(td, 'mcuboot_data', 400, 200)
td = {
'e': {'placement': {'before': ['app']}, 'size': 100},
'a': {'placement': {'before': ['b']}, 'size': 100},
'd': {'placement': {'before': ['e']}, 'size': 100},
'c': {'placement': {'before': ['d']}, 'share_size': ['z', 'a', 'g']},
'j': {'placement': {'before': ['end']}, 'inside': ['k'], 'size': 20},
'i': {'placement': {'before': ['j']}, 'inside': ['k'], 'size': 20},
'h': {'placement': {'before': ['i']}, 'size': 20},
'f': {'placement': {'after': ['app']}, 'size': 20},
'g': {'placement': {'after': ['f']}, 'size': 20},
'b': {'placement': {'before': ['c']}, 'size': 20},
'k': {'span': []},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'a', 0, None)
expect_addr_size(td, 'b', 100, None)
expect_addr_size(td, 'c', 120, None)
expect_addr_size(td, 'd', 220, None)
expect_addr_size(td, 'e', 320, None)
expect_addr_size(td, 'app', 420, 480)
expect_addr_size(td, 'f', 900, None)
expect_addr_size(td, 'g', 920, None)
expect_addr_size(td, 'h', 940, None)
expect_addr_size(td, 'i', 960, None)
expect_addr_size(td, 'j', 980, None)
expect_addr_size(td, 'k', 960, 40)
td = {'mcuboot': {'placement': {'before': ['app', 'spu']}, 'size': 200},
'b0': {'placement': {'before': ['mcuboot', 'app']}, 'size': 100},
'app': {}}
s, _ = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
expect_addr_size(td, 'b0', 0, None)
expect_addr_size(td, 'mcuboot', 100, None)
expect_addr_size(td, 'app', 300, 700)
td = {'b0': {'placement': {'before': ['mcuboot', 'app']}, 'size': 100}, 'app': {}}
s, _ = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
expect_addr_size(td, 'b0', 0, None)
expect_addr_size(td, 'app', 100, 900)
td = {'spu': {'placement': {'before': ['app']}, 'size': 100},
'mcuboot': {'placement': {'before': ['spu', 'app']}, 'size': 200},
'app': {}}
s, _ = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spu', 200, None)
expect_addr_size(td, 'app', 300, 700)
td = {'provision': {'placement': {'before': ['end']}, 'size': 100},
'mcuboot': {'placement': {'before': ['spu', 'app']}, 'size': 100},
'b0': {'placement': {'before': ['mcuboot', 'app']}, 'size': 50},
'spu': {'placement': {'before': ['app']}, 'size': 100},
'app': {}}
s, _ = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
expect_addr_size(td, 'b0', 0, None)
expect_addr_size(td, 'mcuboot', 50, None)
expect_addr_size(td, 'spu', 150, None)
expect_addr_size(td, 'app', 250, 650)
expect_addr_size(td, 'provision', 900, None)
# Test #1 for removal of empty container partitions.
td = {'a': {'share_size': 'does_not_exist'}, # a should be removed
'b': {'span': 'a'}, # b through d should be removed because a is removed
'c': {'span': 'b'},
'd': {'span': 'c'},
'e': {'placement': {'before': ['end']}}}
s, sub = resolve(td)
expect_list(['e', 'app'], s)
expect_list([], sub)
# Test #2 for removal of empty container partitions.
td = {'a': {'share_size': 'does_not_exist'}, # a should be removed
'b': {'span': 'a'}, # b should not be removed, since d is placed inside it.
'c': {'placement': {'after': ['start']}},
'd': {'inside': ['does_not_exist', 'b'], 'placement': {'after': ['c']}}}
s, sub = resolve(td)
expect_list(['c', 'd', 'app'], s)
expect_list(['b'], sub)
expect_list(['d'], sub['b']['orig_span']) # Backup must contain edits.
print("All tests passed!")
if __name__ == "__main__":
if len(sys.argv) > 1:
main()
else:
print("No input, running tests.")
test()
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: LicenseRef-BSD-5-Clause-Nordic
import argparse
import yaml
from os import path
import sys
from pprint import pformat
PERMITTED_STR_KEYS = ['size', 'region']
END_TO_START = 'end_to_start'
START_TO_END = 'start_to_end'
COMPLEX = 'complex'
def remove_item_not_in_list(list_to_remove_from, list_to_check):
to_remove = [x for x in list_to_remove_from.copy() if x not in list_to_check and x != 'app']
    for item in to_remove:
        list_to_remove_from.remove(item)
def item_is_placed(d, item, after_or_before):
assert after_or_before in ['after', 'before']
return after_or_before in d['placement'] and d['placement'][after_or_before][0] == item
def resolve_one_of(reqs, partitions):
def empty_one_of(one_of_list):
return RuntimeError("'one_of' dict did not evaluate to any partition. "
"Available partitions {}, one_of {}".format(partitions, one_of_list))
for k, v in reqs.items():
if isinstance(v, dict):
if 'one_of' in v.keys():
if len(v.keys()) != 1:
raise RuntimeError("'one_of' must be the only key in its dict")
# Now fetch the first existing partition. Note that the value must be a list even if there is only
# one entry.
reqs[k] = [partition for partition in v['one_of'] if partition in partitions][:1]
if len(reqs[k]) == 0:
raise empty_one_of(v['one_of'])
else:
resolve_one_of(v, partitions)
# 'one_of' dicts can occur inside lists of partitions.
        # Dicts with a 'one_of' key are the only dicts supported inside lists.
elif isinstance(v, list):
# Resolve all 'one-of' dicts inside the list
to_remove = list()
to_add = list()
for i in v:
if isinstance(i, dict):
if 'one_of' not in i.keys():
raise RuntimeError("Found illegal dict inside list. Only 'one_of' dicts are allowed")
try:
to_add.append([partition for partition in i['one_of'] if partition in partitions][0])
except IndexError:
raise empty_one_of(i['one_of'])
to_remove.append(i)
if to_add:
reqs[k] = [i if i not in to_remove else to_add.pop(0) for i in v]
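# Illustrative sketch (not part of the original script; the partition names 'x0' and 'b' are
# made up): resolve_one_of() collapses a 'one_of' dict into the first entry that names an
# existing partition.
#
#   >>> reqs = {'a': {'placement': {'after': {'one_of': ['x0', 'b']}}}}
#   >>> resolve_one_of(reqs, ['b', 'app', 'start', 'end'])
#   >>> reqs['a']['placement']['after']
#   ['b']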
def remove_all_zero_sized_partitions(reqs, to_delete=None):
first = False
if to_delete is None:
to_delete = list()
first = True
for k, v in reqs.items():
if 'size' in v and v['size'] == 0:
to_delete.append(k)
remove_all_zero_sized_partitions({k: v for k, v in reqs.items() if k not in to_delete}, to_delete)
if 'share_size' in v.keys():
non_zero_partitions = [p for p in reqs if 'size' not in reqs[p] or reqs[p]['size'] != 0]
actual_partitions = v['share_size'] if not isinstance(v['share_size'], dict) else v['share_size']['one_of']
remove_item_not_in_list(actual_partitions, non_zero_partitions)
if not v['share_size'] or ('one_of' in v['share_size'] and len(v['share_size']['one_of']) == 0):
del v['share_size']
if 'size' not in v.keys():
# The partition has no size, delete it, and rerun this function with the new reqs.
to_delete.append(k)
remove_all_zero_sized_partitions({k: v for k, v in reqs.items() if k not in to_delete}, to_delete)
if first and to_delete:
for k in list(set(to_delete)):
print (f"Dropping partition '{k}' since its size is 0.")
del reqs[k]
def remove_irrelevant_requirements(reqs):
remove_all_zero_sized_partitions(reqs)
# Verify that no partitions define an empty 'placement'
for k, v in reqs.items():
if 'placement' in v.keys() and len(v['placement']) == 0:
raise RuntimeError("Found empty 'placement' property for partition '{}'".format(k))
# Exchange all occurrences of 'one_of' list, with the first existing partition in the 'one_of' list.
# Extend the keys given as input with 'end' and 'start' as these are also valid references.
resolve_one_of(reqs, list(reqs.keys()) + ['end', 'start'])
# Remove dependencies to partitions which are not present
for k, v in reqs.items():
for before_after in ['before', 'after']:
if 'placement' in v.keys() and before_after in v['placement'].keys():
remove_item_not_in_list(v['placement'][before_after], [*reqs.keys(), 'start', 'end'])
if not v['placement'][before_after]:
del v['placement'][before_after]
if 'span' in v.keys():
remove_item_not_in_list(v['span'], reqs.keys())
if 'inside' in v.keys():
remove_item_not_in_list(v['inside'], reqs.keys())
if not v['inside']:
del v['inside']
def get_images_which_need_resolving(reqs, sub_partitions):
# Get candidates which have placement specs.
unsorted = {x for x in reqs.keys() if 'placement' in reqs[x].keys() and ('before' in reqs[x]['placement'].keys()
or 'after' in reqs[x]['placement'].keys())}
# Sort sub_partitions by whether they are inside other sub_partitions. Innermost first.
sorted_subs = sorted(sub_partitions.values(), key=lambda x: len(x['span']))
# Sort candidates by whether they are part of a sub_partitions.
# sub_partition parts come last in the result list so they are more likely
# to end up being placed next to each other, since they are inserted last.
result = []
for sub in sorted_subs:
result = [part for part in sub['span'] if part in unsorted and part not in result] + result
# Lastly, place non-partitioned parts at the front.
result = [part for part in unsorted if part not in result] + result
return result
def solve_direction(reqs, sub_partitions, unsolved, solution, ab):
assert ab in ['after', 'before']
current_index = 0
pool = solution + list(sub_partitions.keys())
current = pool[current_index]
while current:
depends = [x for x in unsolved if item_is_placed(reqs[x], current, ab)]
if depends:
# Place based on current, or based on the first/last element in the span of current.
if ab == 'before':
anchor = current if current in solution else next(solved for solved in solution
if solved in sub_partitions[current]['span'])
solution.insert(solution.index(anchor), depends[0])
else:
anchor = current if current in solution else next(solved for solved in reversed(solution)
if solved in sub_partitions[current]['span'])
solution.insert(solution.index(anchor) + 1, depends[0])
unsolved.remove(depends[0])
current = depends[0]
else:
current_index += 1
if current_index >= len(pool):
break
current = pool[current_index]
def solve_first_last(reqs, unsolved, solution):
for fl in [('after', 'start', lambda x: solution.insert(0, x)), ('before', 'end', solution.append)]:
first_or_last = [x for x in reqs.keys() if 'placement' in reqs[x]
and fl[0] in reqs[x]['placement'].keys()
and fl[1] in reqs[x]['placement'][fl[0]]]
if first_or_last:
fl[2](first_or_last[0])
if first_or_last[0] in unsolved:
unsolved.remove(first_or_last[0])
def solve_inside(reqs, sub_partitions):
for key, value in reqs.items():
if 'inside' in value.keys():
sub_partitions[value['inside'][0]]['span'].append(key)
def clean_sub_partitions(reqs, sub_partitions):
keys_to_delete = list()
new_deletion = True
# Remove empty partitions and partitions containing only empty partitions.
while new_deletion:
new_deletion = False
for key, value in sub_partitions.items():
if (len(value['span']) == 0) or all(x in keys_to_delete for x in value['span']):
if key not in keys_to_delete:
keys_to_delete.append(key)
new_deletion = True
for key in keys_to_delete:
print (f"Dropping partition '{key}' since it is empty.")
del sub_partitions[key]
# "Flatten" by changing all span lists to contain the innermost partitions.
done = False
while not done:
done = True
for key, value in sub_partitions.items():
assert len(value['span']) > 0, "partition {} is empty".format(key)
value['orig_span'] = value['span'].copy() # Take a "backup" of the span.
for part in (part for part in value['span'] if part in sub_partitions):
value['span'].extend(sub_partitions[part]['span'])
value['span'].remove(part)
value['span'] = list(set(value['span'])) # remove duplicates
done = False
for part in (part for part in value['span'] if part not in sub_partitions and part not in reqs):
value['span'].remove(part)
def convert_str_to_list(with_str):
for k, v in with_str.items():
if isinstance(v, dict):
convert_str_to_list(v)
elif isinstance(v, str) and k not in PERMITTED_STR_KEYS:
with_str[k] = list()
with_str[k].append(v)
def resolve(reqs):
convert_str_to_list(reqs)
solution = list(['app'])
remove_irrelevant_requirements(reqs)
sub_partitions = {k: v for k, v in reqs.items() if 'span' in v}
reqs = {k: v for k, v in reqs.items() if 'span' not in v}
solve_inside(reqs, sub_partitions)
clean_sub_partitions(reqs, sub_partitions)
unsolved = get_images_which_need_resolving(reqs, sub_partitions)
solve_first_last(reqs, unsolved, solution)
while unsolved:
solve_direction(reqs, sub_partitions, unsolved, solution, 'before')
solve_direction(reqs, sub_partitions, unsolved, solution, 'after')
# Validate partition spanning.
for sub in sub_partitions:
indices = [solution.index(part) for part in sub_partitions[sub]['span']]
assert ((not indices) or (max(indices) - min(indices) + 1 == len(indices))), \
"partition {} ({}) does not span over consecutive parts." \
" Solution: {}".format(sub, str(sub_partitions[sub]['span']), str(solution))
for part in sub_partitions[sub]['span']:
assert (part in solution), "Some or all parts of partition {} have not been placed.".format(part)
return solution, sub_partitions
def shared_size(reqs, share_with, total_size):
sharer_count = reqs[share_with]['sharers']
size = sizeof(reqs, share_with, total_size)
if share_with == 'app' or ('span' in reqs[share_with].keys() and 'app' in reqs[share_with]['span']):
size /= (sharer_count + 1)
return int(size)
def get_size_source(reqs, sharer):
size_source = sharer
while 'share_size' in reqs[size_source].keys():
# Find "original" source.
size_source = reqs[size_source]['share_size'][0]
return size_source
def set_shared_size(all_reqs, total_size):
for req in all_reqs.keys():
if 'share_size' in all_reqs[req].keys():
size_source = get_size_source(all_reqs, req)
if 'sharers' not in all_reqs[size_source].keys():
all_reqs[size_source]['sharers'] = 0
all_reqs[size_source]['sharers'] += 1
all_reqs[req]['share_size'] = [size_source]
new_sizes = dict()
# Find all partitions which share size with 'app' or a container partition which spans 'app'.
dynamic_size_sharers = get_dependent_partitions(all_reqs, 'app')
static_size_sharers = [k for k, v in all_reqs.items() if 'share_size' in v.keys() and k not in dynamic_size_sharers]
for req in static_size_sharers:
all_reqs[req]['size'] = shared_size(all_reqs, all_reqs[req]['share_size'][0], total_size)
for req in dynamic_size_sharers:
new_sizes[req] = shared_size(all_reqs, all_reqs[req]['share_size'][0], total_size)
# Update all sizes after-the-fact or else the calculation will be messed up.
for key, value in new_sizes.items():
all_reqs[key]['size'] = value
def get_dependent_partitions(all_reqs, target):
return [k for k, v in all_reqs.items() if 'share_size' in v.keys()
and (v['share_size'][0] == target
or ('span' in all_reqs[v['share_size'][0]].keys()
and target in all_reqs[v['share_size'][0]]['span']))]
def app_size(reqs, total_size):
size = total_size - sum([req['size'] for name, req in reqs.items() if 'size' in req.keys() and name != 'app'])
return size
def verify_layout(reqs, solution, total_size, flash_start):
# Verify no overlap, that all flash is assigned, and that the total amount of flash
# assigned corresponds to the total size available.
expected_address = flash_start + reqs[solution[0]]['size']
for p in solution[1:]:
actual_address = reqs[p]['address']
if actual_address != expected_address:
raise RuntimeError("Error when inspecting {}, invalid address {}".format(p, actual_address))
expected_address += reqs[p]['size']
last = reqs[solution[-1]]
assert last['address'] + last['size'] == flash_start + total_size
def set_addresses_and_align(reqs, sub_partitions, solution, size, start=0):
all_reqs = dict(reqs, **sub_partitions)
set_shared_size(all_reqs, size)
dynamic_partitions = ['app']
dynamic_partitions += get_dependent_partitions(all_reqs, 'app')
reqs['app']['size'] = app_size(reqs, size)
reqs[solution[0]]['address'] = start
if len(reqs) > 1:
_set_addresses_and_align(reqs, sub_partitions, solution, size, start, dynamic_partitions)
verify_layout(reqs, solution, size, start)
def first_partition_has_been_aligned(first, solution):
return 'placement' in first and 'align' in first['placement'] and 'end' in first['placement']['align'] \
and solution[1] == 'EMPTY_0'
def _set_addresses_and_align(reqs, sub_partitions, solution, size, start, dynamic_partitions):
# Perform address assignment and alignment in two steps, first from start to app, then from end to app.
for i in range(0, solution.index('app') + 1):
current = solution[i]
if i != 0:
previous = solution[i - 1]
reqs[current]['address'] = reqs[previous]['address'] + reqs[previous]['size']
# To avoid messing with vector table, don't store empty partition as the first.
insert_empty_partition_before = i != 0
# Special handling is needed when aligning the first partition
if i == 0 and first_partition_has_been_aligned(reqs[current], solution):
continue
if align_if_required(i, dynamic_partitions, insert_empty_partition_before, reqs, solution):
_set_addresses_and_align(reqs, sub_partitions, solution, size, start, dynamic_partitions)
for i in range(len(solution) - 1, solution.index('app'), -1):
current = solution[i]
if i == len(solution) - 1:
reqs[current]['address'] = (start + size) - reqs[current]['size']
else:
higher_partition = solution[i + 1]
reqs[current]['address'] = reqs[higher_partition]['address'] - reqs[current]['size']
if align_if_required(i, dynamic_partitions, False, reqs, solution):
_set_addresses_and_align(reqs, sub_partitions, solution, size, start, dynamic_partitions)
def align_if_required(i, dynamic_partitions, move_up, reqs, solution):
current = solution[i]
if 'placement' in reqs[current] and 'align' in reqs[current]['placement']:
required_offset = align_partition(current, reqs, move_up, dynamic_partitions)
if required_offset:
solution_index = i if move_up else i + 1
solution.insert(solution_index, required_offset)
return True
return False
def align_partition(current, reqs, move_up, dynamic_partitions):
required_offset = get_required_offset(align=reqs[current]['placement']['align'], start=reqs[current]['address'],
size=reqs[current]['size'], move_up=move_up)
if not required_offset:
return None
empty_partition_size = required_offset
if current not in dynamic_partitions:
if move_up:
empty_partition_address = reqs[current]['address']
reqs[current]['address'] += required_offset
else:
empty_partition_address = reqs[current]['address'] + reqs[current]['size']
if reqs[current]['address'] != 0: # Special handling for the first partition as it cannot be moved down
reqs[current]['address'] -= required_offset
elif not move_up:
empty_partition_address, empty_partition_size = \
align_dynamic_partition(dynamic_partitions, current, reqs, required_offset)
else:
raise RuntimeError("Invalid combination, can not have dynamic partition in front of app with alignment")
e = 'EMPTY_{}'.format(len([x for x in reqs.keys() if 'EMPTY' in x]))
reqs[e] = {'address': empty_partition_address,
'size': empty_partition_size,
'placement': {'before' if move_up else 'after': [current]}}
if current not in dynamic_partitions:
# We have stolen space from the 'app' partition. Hence, all partitions which share size with 'app' partition
# must have their sizes reduced. Note that the total amount of 'stealing' is divided between the partitions
# sharing size with app (including 'app' itself).
for p in dynamic_partitions:
reqs[p]['size'] = reqs[p]['size'] - (reqs[e]['size'] // len(dynamic_partitions))
return e
def align_dynamic_partition(app_dep_parts, current, reqs, required_offset):
# Since this is a dynamic partition, the introduced empty partition will take space from the 'app' partition
# and the partition being aligned. Take special care to ensure the offset becomes correct.
required_offset *= 2
for p in app_dep_parts:
reqs[p]['size'] -= required_offset // 2
reqs[current]['address'] -= required_offset
empty_partition_address = reqs[current]['address'] + reqs[current]['size']
empty_partition_size = required_offset
return empty_partition_address, empty_partition_size
def get_required_offset(align, start, size, move_up):
if len(align) != 1 or ('start' not in align and 'end' not in align):
raise RuntimeError("Invalid alignment requirement {}".format(align))
end = start + size
align_start = 'start' in align
try:
if (align_start and start % align['start'] == 0) or (not align_start and end % align['end'] == 0):
return 0
if move_up:
return align['start'] - (start % align['start']) if align_start else align['end'] - (end % align['end'])
else:
if align_start:
return start % align['start']
else:
# Special handling is needed if start is 0 since this partition can not be moved down
return end % align['end'] if start != 0 else align['end'] - (end % align['end'])
except TypeError as err:
keyword = 'start' if align_start else 'end'
raise TypeError(f"elements in align: {{{keyword}:{align[keyword]}}} is not type of \'int\'") from err
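# Worked example (an added sketch, not in the original file): a partition starting at 0x8200
# needs a 0xe00-byte offset to reach the next 0x1000 boundary, while an already aligned start
# needs none.
#
#   >>> hex(get_required_offset(align={'start': 0x1000}, start=0x8200, size=0xc000, move_up=True))
#   '0xe00'
#   >>> get_required_offset(align={'start': 0x1000}, start=0x8000, size=0xc000, move_up=True)
#   0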
def set_size_addr(entry, size, address):
entry['size'] = size
entry['address'] = address
def set_sub_partition_address_and_size(reqs, sub_partitions):
for sp_name, sp_value in sub_partitions.items():
size = sum([reqs[part]['size'] for part in sp_value['span']])
if size == 0:
raise RuntimeError("No compatible parent partition found for {}".format(sp_name))
address = min([reqs[part]['address'] for part in sp_value['span']])
reqs[sp_name] = sp_value
reqs[sp_name]['span'] = reqs[sp_name]['orig_span'] # Restore "backup".
set_size_addr(reqs[sp_name], size, address)
def sizeof(reqs, req, total_size):
if req == 'app':
size = app_size(reqs, total_size)
elif 'span' not in reqs[req].keys():
size = reqs[req]['size'] if 'size' in reqs[req].keys() else 0
else:
size = sum([sizeof(reqs, part, total_size) for part in reqs[req]['span']])
return size
def load_reqs(input_config):
reqs = dict()
for ymlpath in input_config:
if path.exists(ymlpath):
with open(ymlpath, 'r') as f:
loaded_reqs = yaml.safe_load(f)
if loaded_reqs is None:
continue
for key in loaded_reqs.keys():
if key in reqs.keys() and loaded_reqs[key] != reqs[key]:
raise RuntimeError("Conflicting configuration found for '{}' value for key '{}' differs."
"val1: {} val2: {} ".format(f.name, key, loaded_reqs[key], reqs[key]))
reqs.update(loaded_reqs)
return reqs
def get_dynamic_area_start_and_size(static_config, flash_size):
# Remove app from this dict to simplify the case where partitions before and after are removed.
proper_partitions = [config for name, config in static_config.items()
if 'span' not in config.keys() and name != 'app']
starts = {flash_size} | {config['address'] for config in proper_partitions}
ends = {0} | {config['address'] + config['size'] for config in proper_partitions}
gaps = list(zip(sorted(ends - starts), sorted(starts - ends)))
assert len(gaps) == 1, "Incorrect amount of gaps found"
start, end = gaps[0]
return start, end - start
def get_region_config(pm_config, region_config, static_conf=None):
start = region_config['base_address']
size = region_config['size']
placement_strategy = region_config['placement_strategy']
region_name = region_config['name']
device = region_config['device']
if placement_strategy in [END_TO_START, START_TO_END]:
solve_simple_region(pm_config, start, size, placement_strategy, region_name, device, static_conf)
else:
solve_complex_region(pm_config, start, size, placement_strategy, region_name, device, static_conf)
def solve_simple_region(pm_config, start, size, placement_strategy, region_name, device, static_conf):
reserved = 0
if static_conf:
verify_static_conf(size, start, placement_strategy, static_conf)
reserved = sum([config['size'] for name, config in static_conf.items()
if 'region' in config.keys() and config['region'] == region_name and name != 'app'])
if placement_strategy == END_TO_START:
address = start + size - reserved
else:
address = start + reserved
for partition_name in pm_config:
if placement_strategy == END_TO_START:
address -= pm_config[partition_name]['size']
pm_config[partition_name]['address'] = address
if placement_strategy == START_TO_END:
address += pm_config[partition_name]['size']
if device:
pm_config[partition_name]['device'] = device
# Generate the region partition containing the non-reserved memory.
# But first, verify that the user hasn't created a partition with the name of the region.
if region_name in pm_config:
raise RuntimeError(f"Found partition named {region_name}, this is the name of a region, and is a reserved name")
pm_config[region_name] = dict()
pm_config[region_name]['region'] = region_name
if placement_strategy == END_TO_START:
pm_config[region_name]['address'] = start
pm_config[region_name]['size'] = address - start
else:
pm_config[region_name]['address'] = address
pm_config[region_name]['size'] = (start + size) - address
def verify_static_conf(size, start, placement_strategy, static_conf):
    # Verify that all statically defined partitions have been given an address,
    # and that they are packed at the end/start of the region.
starts = {start + size} | {c['address'] for c in static_conf.values() if 'size' in c}
ends = {start} | {c['address'] + c['size'] for c in static_conf.values() if 'size' in c}
gaps = list(zip(sorted(ends - starts), sorted(starts - ends)))
if placement_strategy == START_TO_END:
start_end_correct = gaps[0][0] == start + size
else:
start_end_correct = gaps[0][0] == start
if len(gaps) != 1 or not start_end_correct:
raise RuntimeError("Statically defined partitions are not packed at the start/end of region")
def solve_complex_region(pm_config, start, size, placement_strategy, region_name, device, static_conf):
free_size = size
if static_conf:
start, free_size = get_dynamic_area_start_and_size(static_conf, free_size)
        # If nothing is unresolved (only 'app' remains), simply return the pre-defined config together with 'app'.
if len(pm_config) == 1:
pm_config.update(static_conf)
pm_config['app']['address'] = start
pm_config['app']['size'] = free_size
return
solution, sub_partitions = resolve(pm_config)
set_addresses_and_align(pm_config, sub_partitions, solution, free_size, start)
set_sub_partition_address_and_size(pm_config, sub_partitions)
if static_conf:
# Merge the results, take the new 'app' as that has the correct size.
pm_config.update({name: config for name, config in static_conf.items() if name != 'app'})
def write_yaml_out_file(pm_config, out_path):
def hexint_presenter(dumper, data):
return dumper.represent_int(hex(data))
yaml.add_representer(int, hexint_presenter)
yamldump = yaml.dump(pm_config)
with open(out_path, 'w') as out_file:
out_file.write(yamldump)
def parse_args():
parser = argparse.ArgumentParser(
description='''Parse given 'pm.yml' partition manager configuration files to deduce the placement of partitions.
The partitions and their relative placement are defined in the 'pm.yml' files. The paths to the 'pm.yml' files are used
to locate 'autoconf.h' files. These are used to find the partition sizes, as well as the total flash size.
This script generates a file for each partition - "pm_config.h".
This file contains all addresses and sizes of all partitions.
"pm_config.h" is in the same folder as the given 'pm.yml' file.''',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--input-files", required=True, type=str, nargs="+",
help="List of paths to input yaml files. ")
parser.add_argument("--output-partitions", required=True, type=str,
help="Path to output partition configuration file.")
parser.add_argument("--output-regions", required=True, type=str,
help="Path to output regions configuration file.")
parser.add_argument("--static-config", required=False, type=argparse.FileType(mode='r'),
help="Path static configuration.")
parser.add_argument("--regions", required=False, type=str, nargs='*',
help="Space separated list of regions. For each region specified here, one must specify"
"--{region_name}-base-addr and --{region_name}-size. If the region is associated"
"with a driver, the device name must be given in --{region_name}-device (e.g. an "
"external flash driver. For regions with placement strategy 'complex' it is possible"
"to specify the --{region_name}-dynamic-partition to set the name of the dynamic partition"
"which occupies all non-used area.")
main_args, region_args = parser.parse_known_args()
# Create new instance to parse regions
parser = argparse.ArgumentParser()
for x in main_args.regions:
# Generate arguments for each region dynamically
parser.add_argument(f'--{x}-size', required=True, type=lambda z: int(z, 0))
parser.add_argument(f'--{x}-base-address', required=False, type=lambda z: int(z, 0), default=0)
parser.add_argument(f'--{x}-placement-strategy', required=False, type=str,
choices=[START_TO_END, END_TO_START, COMPLEX], default=START_TO_END)
parser.add_argument(f'--{x}-device', required=False, type=str, default='')
parser.add_argument(f'--{x}-dynamic-partition', required=False, type=str, help="Name of dynamic partition")
ranges_configuration = parser.parse_args(region_args)
return main_args, ranges_configuration
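# Hypothetical invocation (illustrative only; the script name, file names and sizes below are
# assumptions, not taken from the sources). Every region listed in --regions gets its own
# dynamically generated --<region>-size / --<region>-base-address / ... arguments:
#
#   python3 partition_manager.py \
#       --input-files app/pm.yml mcuboot/pm.yml \
#       --output-partitions partitions.yml --output-regions regions.yml \
#       --regions flash_primary \
#       --flash_primary-size 0x100000 --flash_primary-base-address 0x0 \
#       --flash_primary-placement-strategy complex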
def replace_app_with_dynamic_partition(d, dynamic_partition_name):
for k, v in d.items():
if isinstance(v, dict):
replace_app_with_dynamic_partition(v, dynamic_partition_name)
elif isinstance(v, list) and "app" in v:
d[k] = [o if o != "app" else dynamic_partition_name for o in v]
elif isinstance(v, str) and v == "app":
            d[k] = dynamic_partition_name  # Assign back into the dict; rebinding the loop variable 'v' had no effect.
def set_flash_primary_region(pm_config):
for v in pm_config.values():
if 'region' not in v:
v['region'] = 'flash_primary'
def fix_syntactic_sugar(pm_config):
set_flash_primary_region(pm_config)
def get_region_config_from_args(args, ranges_configuration):
regions = {x: {k.replace(f'{x}_', ''): v
for k, v in vars(ranges_configuration).items() if k.startswith(x)}
for x in [y.replace('-', '_') for y in args.regions]} # Replace - with _ to match argparse namespace
return regions
def solve_region(pm_config, region, region_config, static_config):
solution = dict()
region_config['name'] = region
partitions = {k: v for k, v in pm_config.items() if region in v['region']}
static_partitions = {k: v for k, v in static_config.items() if region in v['region']}
get_region_config(partitions, region_config, static_partitions)
solution.update(partitions)
if region_config['dynamic_partition']:
solution[region_config['dynamic_partition'].strip()] = solution['app']
del solution['app']
replace_app_with_dynamic_partition(solution, region_config['dynamic_partition'].strip())
return solution
def load_static_configuration(args, pm_config):
static_config = yaml.safe_load(args.static_config)
fix_syntactic_sugar(static_config)
# Delete all statically defined partitions from the pm_config dict.
# This is done since all partitions in pm_config will be resolved.
for statically_defined_image in static_config:
if statically_defined_image in pm_config and statically_defined_image:
print (f"Dropping partition '{statically_defined_image}' since it is statically defined.")
del pm_config[statically_defined_image]
return static_config
def main():
args, ranges_configuration = parse_args()
pm_config = load_reqs(args.input_files)
static_config = load_static_configuration(args, pm_config) if args.static_config else dict()
pm_config['app'] = dict()
fix_syntactic_sugar(pm_config)
regions = get_region_config_from_args(args, ranges_configuration)
solution = dict()
for region, region_config in regions.items():
solution.update(solve_region(pm_config, region, region_config, static_config))
write_yaml_out_file(solution, args.output_partitions)
write_yaml_out_file(regions, args.output_regions)
def expect_addr_size(td, name, expected_address, expected_size):
if expected_size:
assert td[name]['size'] == expected_size, \
"Size of {} was {}, expected {}.\ntd:{}".format(name, td[name]['size'], expected_size, pformat(td))
if expected_address:
assert td[name]['address'] == expected_address, \
"Address of {} was {}, expected {}.\ntd:{}".format(name, td[name]['address'], expected_address, pformat(td))
def expect_list(expected, actual):
expected_list = list(sorted(expected))
actual_list = list(sorted(actual))
assert sorted(expected_list) == sorted(actual_list), "Expected list %s, was %s" % (str(expected_list), str(actual_list))
def test():
list_one = [1, 2, 3, 4]
items_to_check = [4]
remove_item_not_in_list(list_one, items_to_check)
assert list_one[0] == 4
assert len(list_one) == 1
test_config = {
'first': {'address': 0, 'size': 10},
# Gap from deleted partition.
'app': {'address': 20, 'size': 10},
# Gap from deleted partition.
'fourth': {'address': 40, 'size': 60}}
start, size = get_dynamic_area_start_and_size(test_config, 100)
assert start == 10
assert size == 40-10
test_config = {
'first': {'address': 0, 'size': 10},
'second': {'address': 10, 'size': 10},
'app': {'address': 20, 'size': 80}
# Gap from deleted partition.
}
start, size = get_dynamic_area_start_and_size(test_config, 100)
assert start == 20
assert size == 80
test_config = {
'app': {'address': 0, 'size': 10},
# Gap from deleted partition.
'second': {'address': 40, 'size': 60}}
start, size = get_dynamic_area_start_and_size(test_config, 100)
assert start == 0
assert size == 40
test_config = {
'first': {'address': 0, 'size': 10},
# Gap from deleted partition.
'app': {'address': 20, 'size': 10}}
start, size = get_dynamic_area_start_and_size(test_config, 100)
assert start == 10
assert size == 100 - 10
# Verify that all 'end' and 'start' are valid references in 'one_of' dicts
td = {
'a': {'placement': {'after': {'one_of': ['x0', 'x1', 'start']}}, 'size': 100},
'b': {'placement': {'before': {'one_of': ['x0', 'x1', 'end']}}, 'size': 200},
'app': {},
}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'a', 0, 100)
expect_addr_size(td, 'app', 100, 700)
expect_addr_size(td, 'b', 800, 200)
# Verify that START_TO_END region configuration is correct
td = {'b': {'size': 100, 'region': 'extflash'}}
test_region = {'name': 'extflash',
'size': 1000,
'base_address': 2000,
'placement_strategy': START_TO_END,
'device': 'some-driver-device'}
get_region_config(td, test_region)
assert td['b']['size'] == 100
assert td['extflash']['address'] == 2100
assert td['extflash']['size'] == 900
# Verify that RAM configuration is correct
td = {'b': {'size': 100, 'region': 'ram'}}
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
get_region_config(td, test_region)
assert td['b']['size'] == 100
assert td['ram']['address'] == 2000
assert td['ram']['size'] == 900
# Verify that RAM configuration is correct
td = {
'b': {'size': 100, 'region': 'ram'},
'c': {'size': 200, 'region': 'ram'},
'd': {'size': 300, 'region': 'ram'}
}
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
get_region_config(td, test_region)
assert td['ram']['address'] == 2000
assert td['ram']['size'] == 400
# Can not verify the placement, as this is random
assert td['b']['size'] == 100
assert td['c']['size'] == 200
assert td['d']['size'] == 300
# Verify that RAM configuration with given static configuration is correct
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
td = {
'b': {'size': 100, 'region': 'ram'},
'c': {'size': 200, 'region': 'ram'},
'd': {'size': 300, 'region': 'ram'},
}
get_region_config(td,
test_region,
static_conf={'s1': {'size': 100,
'address': (1000+2000)-100,
'region': 'ram'},
's2': {'size': 200,
'address': (1000+2000)-100-200,
'region': 'ram'}})
assert td['ram']['address'] == 2000
assert td['ram']['size'] == 100
# Can not verify the placement, as this is random
assert td['b']['size'] == 100
assert td['c']['size'] == 200
assert td['d']['size'] == 300
# Verify that RAM configuration with given static configuration fails if static RAM partitions are not
# packed at the end of flash, here there is a space between the two regions
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
failed = False
td = {
'a': {'placement': {'after': 'start'}, 'size': 100},
'b': {'size': 100, 'region': 'ram'},
'c': {'size': 200, 'region': 'ram'},
'd': {'size': 300, 'region': 'ram'},
'app': {}
}
try:
get_region_config(td,
test_region,
static_conf={'s1': {'size': 100,
'address': (1000+2000)-100,
'region': 'ram'},
's2': {'size': 200,
'address': (1000+2000)-100-300,
'region': 'ram'}}) # Note 300 not 200
except RuntimeError:
failed = True
assert failed
# Verify that RAM configuration with given static configuration fails if static RAM partitions are not
# packed at the end of flash, here the partitions are packed, but does not go to the end of RAM
failed = False
test_region = {'name': 'ram',
'size': 1000,
'base_address': 2000,
'placement_strategy': END_TO_START,
'device': None}
td = {
'a': {'placement': {'after': 'start'}, 'size': 100},
'b': {'size': 100, 'region': 'ram'},
'c': {'size': 200, 'region': 'ram'},
'd': {'size': 300, 'region': 'ram'},
'app': {}
}
try:
get_region_config(td,
test_region,
static_conf={'s1': {'size': 100,
'address': (1000+2000-50)-100,
'region': 'ram'}, # Note - 50
's2': {'size': 200,
'address': (1000+2000-50)-100-200,
'region': 'ram'}}) # Note - 50
except RuntimeError:
failed = True
assert failed
# Verify that all 'one_of' dicts are replaced with the first entry which corresponds to an existing partition
td = {
'a': {'placement': {'after': 'start'}, 'size': 100},
'b': {'placement': {'after': {'one_of': ['x0', 'x1', 'a', 'x2']}}, 'size': 200},
'c': {'placement': {'after': 'b'}, 'share_size': {'one_of': ['x0', 'x1', 'b', 'a']}},
'd': {'placement': {'after': 'c'}, 'share_size': {'one_of': ['a', 'b']}}, # Should take first existing
# We can use several 'one_of' - dicts inside lists
's': {'span': ['a', {'one_of': ['x0', 'b', 'd']}, {'one_of': ['x2', 'c', 'a']}]},
'app': {},
'e': {'placement': {'after': 'app'}, 'share_size': {'one_of': ['x0', 'app']}}, # app always exists
}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'a', 0, 100) # b is after a
expect_addr_size(td, 'b', 100, 200) # b is after a
expect_addr_size(td, 'c', 300, 200) # c shares size with b
expect_addr_size(td, 'd', 500, 100) # d shares size with a
expect_addr_size(td, 's', 0, 500) # s spans a, b and c
    expect_addr_size(td, 'app', 600, 200)  # app takes the remaining dynamic space
    expect_addr_size(td, 'e', 800, 200)  # e shares size with app
    # Verify that 'share_size' referring to a partition with size 0 is compatible with 'one_of' dicts
td = {
'a': {'placement': {'after': 'start'}, 'size': 0},
'b': {'placement': {'after': {'one_of': ['a', 'start']}},
'share_size': ['a']},
'c': {'placement': {'after': {'one_of': ['a', 'b', 'start']}},
'share_size': {'one_of': ['a', 'b']}},
'd': {'placement': {'after': {'one_of': ['a', 'b', 'c', 'start']}},
'share_size': {'one_of': ['a', 'b', 'c']}},
# You get the point
'e': {'placement': {'after': {'one_of': ['a', 'b', 'c', 'd', 'start']}}, 'size': 100}
}
remove_all_zero_sized_partitions(td)
assert 'a' not in td
assert 'b' not in td
assert 'c' not in td
assert 'd' not in td
    # Verify again that 'share_size' referring to a partition with size 0 is compatible with 'one_of' dicts.
td = {
'a': {'placement': {'after': 'start'}, 'size': 0},
'b': {'placement': {'after': {'one_of': ['a', 'start']}},
'share_size': ['a']},
'c': {'placement': {'after': {'one_of': ['a', 'b', 'start']}},
'share_size': {'one_of': ['a', 'b']}},
'd': {'placement': {'after': {'one_of': ['a', 'b', 'c', 'start']}},
'share_size': {'one_of': ['a', 'b', 'c']}},
# You get the point
'e': {'placement': {'after': {'one_of': ['a', 'b', 'c', 'd', 'start']}}, 'size': 100},
'app': {}
}
# Perform the same test as above, but run it through the 'resolve' function this time.
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
assert 'a' not in td
assert 'b' not in td
assert 'c' not in td
assert 'd' not in td
expect_addr_size(td, 'e', 0, 100)
# Verify that an error is raised when no partition inside 'one_of' dicts exist as dict value
failed = False
td = {
'a': {'placement': {'after': {'one_of': ['x0', 'x1']}}, 'size': 100},
'app': {}
}
try:
resolve(td)
except RuntimeError:
failed = True
assert failed
# Verify that an error is raised when no partition inside 'one_of' dicts exist as list item
failed = False
td = {
'app': {},
'a': {'placement': {'after': 'app'}, 'size': 100},
's': {'span': ['a', {'one_of': ['x0', 'x1']}]},
}
try:
resolve(td)
except RuntimeError:
failed = True
assert failed
# Verify that empty placement property throws error
td = {'spm': {'placement': {'before': ['app']}, 'size': 100, 'inside': ['mcuboot_slot0']},
'mcuboot': {'placement': {'before': ['spm', 'app']}, 'size': 200},
'mcuboot_slot0': {'span': ['app']},
'invalid': {'placement': {}},
'app': {}}
failed = False
try:
s, sub_partitions = resolve(td)
except RuntimeError:
failed = True
assert failed
# Verify that offset is correct when aligning partition not at address 0
offset = get_required_offset(align={'end': 800}, start=1400, size=100, move_up=False)
assert offset == 700
# Verify that offset is correct when aligning partition at address 0
offset = get_required_offset(align={'end': 800}, start=0, size=100, move_up=False)
assert offset == 700
# Verify that offset is correct when aligning partition at address 0
# and end of first partition is larger than the required alignment.
offset = get_required_offset(align={'end': 800}, start=0, size=1000, move_up=False)
assert offset == 600
for l in [
lambda : get_required_offset(align={'end': ["CONFIG_VAR"]}, start=0, size=1000, move_up=False),
lambda : get_required_offset(align={'start': ["CONFIG_VAR"]}, start=0, size=1000, move_up=False),
lambda : get_required_offset(align={'start': [[2]]},start=0, size=1000, move_up=False)
]:
failed = False
try:
l()
except TypeError:
failed = True
assert failed, "Should have received a TypeError."
# Verify that the first partition can be aligned, and that the inserted empty partition is placed behind it.
td = {'first': {'placement': {'before': 'app', 'align': {'end': 800}}, 'size': 100}, 'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 100, 700)
expect_addr_size(td, 'app', 800, 200)
# Verify that providing a static configuration with nothing unresolved gives a valid configuration with 'app'.
static_config = {'spm': {'address': 0, 'placement': None, 'before': ['app'], 'size': 400}}
test_config = {'app': dict()}
flash_region = {
'name': 'flash_primary',
'placement_strategy': COMPLEX,
'size': 1000,
'base_address': 0,
'device': 'nordic_flash_stuff'
}
get_region_config(test_config, flash_region, static_config)
assert 'app' in test_config
assert test_config['app']['address'] == 400
assert test_config['app']['size'] == 600
assert 'spm' in test_config
assert test_config['spm']['address'] == 0
# Test a single partition with alignment where the address is smaller than the alignment value.
td = {'without_alignment': {'placement': {'before': 'with_alignment'}, 'size': 100},
'with_alignment': {'placement': {'before': 'app', 'align': {'start': 200}}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 100, 100)
expect_addr_size(td, 'with_alignment', 200, 100)
# Test alignment after 'app'
td = {'without_alignment': {'placement': {'after': 'app'}, 'size': 100},
'with_alignment': {'placement': {'after': 'without_alignment', 'align': {'start': 400}}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'app', 0, 700)
expect_addr_size(td, 'with_alignment', 800, 100)
expect_addr_size(td, 'EMPTY_0', 900, 100)
# Test two partitions with alignment where the address is smaller than the alignment value.
td = {'without_alignment': {'placement': {'before': 'with_alignment'}, 'size': 100},
'with_alignment': {'placement': {'before': 'with_alignment_2', 'align': {'end': 400}}, 'size': 100},
'with_alignment_2': {'placement': {'before': 'app', 'align': {'start': 1000}}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 10000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 100, 200)
expect_addr_size(td, 'with_alignment', 300, 100)
expect_addr_size(td, 'EMPTY_1', 400, 600)
expect_addr_size(td, 'with_alignment_2', 1000, 100)
# Test three partitions with alignment where the address is BIGGER than the alignment value.
td = {'without_alignment': {'placement': {'before': 'with_alignment'}, 'size': 10000},
'with_alignment': {'placement': {'before': 'with_alignment_2', 'align': {'end': 400}}, 'size': 100},
'with_alignment_2': {'placement': {'before': 'app', 'align': {'start': 1000}}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 10000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 10000, 300)
expect_addr_size(td, 'with_alignment', 10300, 100)
expect_addr_size(td, 'EMPTY_1', 10400, 600)
expect_addr_size(td, 'with_alignment_2', 11000, 100)
# FLASH (0x100000):
# +------------------------------------------+
# | 0x0: b0 (0x8000) |
# +---0x8000: s0 (0xc200)--------------------+
# | 0x8000: s0_pad (0x200) |
# +---0x8200: s0_image (0xc000)--------------+
# | 0x8200: mcuboot (0xc000) |
# | 0x14200: EMPTY_0 (0xe00) |
# +---0x15000: s1 (0xc200)-------------------+
# | 0x15000: s1_pad (0x200) |
# | 0x15200: s1_image (0xc000) |
# | 0x21200: EMPTY_1 (0xe00) |
# +---0x22000: mcuboot_primary (0x5d000)-----+
# | 0x22000: mcuboot_pad (0x200) |
# +---0x22200: mcuboot_primary_app (0x5ce00)-+
# | 0x22200: app (0x5ce00) |
# | 0x7f000: mcuboot_secondary (0x5d000) |
# | 0xdc000: EMPTY_2 (0x1000) |
# | 0xdd000: mcuboot_scratch (0x1e000) |
# | 0xfb000: mcuboot_storage (0x4000) |
# | 0xff000: provision (0x1000) |
# +------------------------------------------+
# Verify that alignment works with partition which shares size with app.
td = {'b0': {'placement': {'after': 'start'}, 'size': 0x8000},
's0': {'span': ['s0_pad', 's0_image']},
's0_pad': {'placement': {'after': 'b0', 'align': {'start': 0x1000}}, 'share_size': 'mcuboot_pad'},
's0_image': {'span': {'one_of': ['mcuboot', 'spm', 'app']}},
'mcuboot': {'placement': {'before': 'mcuboot_primary'}, 'size': 0xc000},
's1': {'span': ['s1_pad', 's1_image']},
's1_pad': {'placement': {'after': 's0', 'align': {'start': 0x1000}}, 'share_size': 'mcuboot_pad'},
's1_image': {'placement': {'after': 's1_pad'}, 'share_size': 'mcuboot'},
'mcuboot_primary': {'span': ['mcuboot_pad', 'mcuboot_primary_app']},
'mcuboot_pad': {'placement': {'before': 'mcuboot_primary_app', 'align': {'start': 0x1000}}, 'size': 0x200},
'mcuboot_primary_app': {'span': ['app']},
'app': {},
'mcuboot_secondary': {'placement': {'after': 'mcuboot_primary', 'align': {'start': 0x1000}}, 'share_size': 'mcuboot_primary'},
'mcuboot_scratch': {'placement': {'after': 'app', 'align': {'start': 0x1000}}, 'size': 0x1e000},
'mcuboot_storage': {'placement': {'after': 'mcuboot_scratch', 'align': {'start': 0x1000}}, 'size': 0x4000},
'provision': {'placement': {'before': 'end', 'align': {'start': 0x1000}}, 'size': 0x1000}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 0x100000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'EMPTY_0', 0x14200, 0xe00)
expect_addr_size(td, 'EMPTY_1', 0x21200, 0xe00)
expect_addr_size(td, 'EMPTY_2', 0xdc000, 0x1000)
assert td['mcuboot_secondary']['size'] == td['mcuboot_primary']['size']
# Verify that if a partition X uses 'share_size' with a non-existing partition, then partition X is given size 0,
# and is hence not created.
td = {'should_not_exist': {'placement': {'before': 'exists'}, 'share_size': 'does_not_exist'},
'exists': {'placement': {'before': 'app'}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
assert 'should_not_exist' not in td.keys()
# Verify that if a partition X uses 'share_size' with a non-existing partition, but has set a default size,
# then partition X is created with the default size.
td = {'should_exist': {'placement': {'before': 'exists'}, 'share_size': 'does_not_exist', 'size': 200},
'exists': {'placement': {'before': 'app'}, 'size': 100},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'should_exist', 0, 200)
td = {'spm': {'placement': {'before': ['app']}, 'size': 100},
'mcuboot': {'placement': {'before': ['spm', 'app']}, 'size': 200},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spm', 200, None)
expect_addr_size(td, 'app', 300, 700)
td = {'spm': {'placement': {'before': ['app']}, 'size': 100, 'inside': ['mcuboot_slot0']},
'mcuboot': {'placement': {'before': ['spm', 'app']}, 'size': 200},
'mcuboot_slot0': {'span': ['app']},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spm', 200, 100)
expect_addr_size(td, 'app', 300, 700)
expect_addr_size(td, 'mcuboot_slot0', 200, 800)
td = {'spm': {'placement': {'before': 'app'}, 'size': 100, 'inside': 'mcuboot_slot0'},
'mcuboot': {'placement': {'before': 'app'}, 'size': 200},
'mcuboot_pad': {'placement': {'after': 'mcuboot'}, 'inside': 'mcuboot_slot0', 'size': 10},
'app_partition': {'span': ['spm', 'app'], 'inside': 'mcuboot_slot0'},
'mcuboot_slot0': {'span': ['app', 'foo']},
'mcuboot_data': {'placement': {'after': ['mcuboot_slot0']}, 'size': 200},
'mcuboot_slot1': {'share_size': 'mcuboot_slot0', 'placement': {'after': 'mcuboot_data'}},
'mcuboot_slot2': {'share_size': 'mcuboot_slot1', 'placement': {'after': 'mcuboot_slot1'}},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spm', 210, None)
expect_addr_size(td, 'mcuboot_slot0', 200, 200)
expect_addr_size(td, 'mcuboot_slot1', 600, 200)
expect_addr_size(td, 'mcuboot_slot2', 800, 200)
expect_addr_size(td, 'app', 310, 90)
expect_addr_size(td, 'mcuboot_pad', 200, 10)
expect_addr_size(td, 'mcuboot_data', 400, 200)
td = {'spm': {'placement': {'before': ['app']}, 'size': 100, 'inside': ['mcuboot_slot0']},
'mcuboot': {'placement': {'before': ['app']}, 'size': 200},
'mcuboot_pad': {'placement': {'after': ['mcuboot']}, 'inside': ['mcuboot_slot0'], 'size': 10},
'app_partition': {'span': ['spm', 'app'], 'inside': ['mcuboot_slot0']},
'mcuboot_slot0': {'span': 'app'},
'mcuboot_data': {'placement': {'after': ['mcuboot_slot0']}, 'size': 200},
'mcuboot_slot1': {'share_size': ['mcuboot_slot0'], 'placement': {'after': ['mcuboot_data']}},
'mcuboot_slot2': {'share_size': ['mcuboot_slot1'], 'placement': {'after': ['mcuboot_slot1']}},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, sub_partitions, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spm', 210, None)
expect_addr_size(td, 'mcuboot_slot0', 200, 200)
expect_addr_size(td, 'mcuboot_slot1', 600, 200)
expect_addr_size(td, 'mcuboot_slot2', 800, 200)
expect_addr_size(td, 'app', 310, 90)
expect_addr_size(td, 'mcuboot_pad', 200, 10)
expect_addr_size(td, 'mcuboot_data', 400, 200)
td = {
'e': {'placement': {'before': ['app']}, 'size': 100},
'a': {'placement': {'before': ['b']}, 'size': 100},
'd': {'placement': {'before': ['e']}, 'size': 100},
'c': {'placement': {'before': ['d']}, 'share_size': ['z', 'a', 'g']},
'j': {'placement': {'before': ['end']}, 'inside': ['k'], 'size': 20},
'i': {'placement': {'before': ['j']}, 'inside': ['k'], 'size': 20},
'h': {'placement': {'before': ['i']}, 'size': 20},
'f': {'placement': {'after': ['app']}, 'size': 20},
'g': {'placement': {'after': ['f']}, 'size': 20},
'b': {'placement': {'before': ['c']}, 'size': 20},
'k': {'span': []},
'app': {}}
s, sub_partitions = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
set_sub_partition_address_and_size(td, sub_partitions)
expect_addr_size(td, 'a', 0, None)
expect_addr_size(td, 'b', 100, None)
expect_addr_size(td, 'c', 120, None)
expect_addr_size(td, 'd', 220, None)
expect_addr_size(td, 'e', 320, None)
expect_addr_size(td, 'app', 420, 480)
expect_addr_size(td, 'f', 900, None)
expect_addr_size(td, 'g', 920, None)
expect_addr_size(td, 'h', 940, None)
expect_addr_size(td, 'i', 960, None)
expect_addr_size(td, 'j', 980, None)
expect_addr_size(td, 'k', 960, 40)
td = {'mcuboot': {'placement': {'before': ['app', 'spu']}, 'size': 200},
'b0': {'placement': {'before': ['mcuboot', 'app']}, 'size': 100},
'app': {}}
s, _ = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
expect_addr_size(td, 'b0', 0, None)
expect_addr_size(td, 'mcuboot', 100, None)
expect_addr_size(td, 'app', 300, 700)
td = {'b0': {'placement': {'before': ['mcuboot', 'app']}, 'size': 100}, 'app': {}}
s, _ = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
expect_addr_size(td, 'b0', 0, None)
expect_addr_size(td, 'app', 100, 900)
td = {'spu': {'placement': {'before': ['app']}, 'size': 100},
'mcuboot': {'placement': {'before': ['spu', 'app']}, 'size': 200},
'app': {}}
s, _ = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
expect_addr_size(td, 'mcuboot', 0, None)
expect_addr_size(td, 'spu', 200, None)
expect_addr_size(td, 'app', 300, 700)
td = {'provision': {'placement': {'before': ['end']}, 'size': 100},
'mcuboot': {'placement': {'before': ['spu', 'app']}, 'size': 100},
'b0': {'placement': {'before': ['mcuboot', 'app']}, 'size': 50},
'spu': {'placement': {'before': ['app']}, 'size': 100},
'app': {}}
s, _ = resolve(td)
set_addresses_and_align(td, {}, s, 1000)
expect_addr_size(td, 'b0', 0, None)
expect_addr_size(td, 'mcuboot', 50, None)
expect_addr_size(td, 'spu', 150, None)
expect_addr_size(td, 'app', 250, 650)
expect_addr_size(td, 'provision', 900, None)
# Test #1 for removal of empty container partitions.
td = {'a': {'share_size': 'does_not_exist'}, # a should be removed
'b': {'span': 'a'}, # b through d should be removed because a is removed
'c': {'span': 'b'},
'd': {'span': 'c'},
'e': {'placement': {'before': ['end']}}}
s, sub = resolve(td)
expect_list(['e', 'app'], s)
expect_list([], sub)
# Test #2 for removal of empty container partitions.
td = {'a': {'share_size': 'does_not_exist'}, # a should be removed
'b': {'span': 'a'}, # b should not be removed, since d is placed inside it.
'c': {'placement': {'after': ['start']}},
'd': {'inside': ['does_not_exist', 'b'], 'placement': {'after': ['c']}}}
s, sub = resolve(td)
expect_list(['c', 'd', 'app'], s)
expect_list(['b'], sub)
expect_list(['d'], sub['b']['orig_span']) # Backup must contain edits.
print("All tests passed!")
if __name__ == "__main__":
if len(sys.argv) > 1:
main()
else:
print("No input, running tests.")
test()
|
import pandas as pd
from pathlib import Path
from collections import Counter
import datetime
from collections import defaultdict
from faker import Factory
import faker
from preprocessing.nfeProvider import Invoice
import csv
def convert_to_numeric(num):
"""
Converte strings que representam valores monetários em Reais (R$) para
o padrão americano.
"""
num = num.strip()
if num != "":
num = num.replace(',', '.')
count_dot = num.count('.')
if count_dot >= 2:
while count_dot >= 2:
                # store the index of the first occurrence of the dot character
slice_index = num.index('.')
                # slice around that index to drop the extra (thousands-separator) dot
new_str = num[0:slice_index] + num[slice_index + 1:]
num = new_str
count_dot = num.count('.')
return float(num)
else:
return float(num)
else:
return 0.0
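# Usage sketch (added for illustration; the sample amounts are made up):
#
#   >>> convert_to_numeric("1.234,56")   # Brazilian format: '.' for thousands, ',' for decimals
#   1234.56
#   >>> convert_to_numeric("12,50")
#   12.5
#   >>> convert_to_numeric("")
#   0.0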
def identify_encoding(filename: str) -> str:
"""
Identifica o encoding do arquivo filename retornando uma string com o nome do encoding.
Atributos:
filename: é o path (full ou relativo) do arquivo a ser analisado.
"""
try:
encoding = 'utf8'
with open(filename, "r", encoding=encoding) as file:
_ = file.readlines()
except UnicodeDecodeError:
encoding = 'latin1'
with open(filename, "r", encoding=encoding) as file:
_ = file.readlines()
finally:
return encoding
def report_pkl_into_csv(filename, foldername, logger):
"""
Produz um relatório do status do arquivo tabular gerado a partir dos arquivos .pkl
Atributos:
filename: é o nome do arquivo .csv que será analisado.
foldername: é o nome da sub pasta dos arquivos pkl dentro de ./data-storage/validacao/
"""
    # CHECK WHETHER ANY .pkl FILE WAS LEFT UNPROCESSED.
df = pd.read_csv(f"./tabular-data/{filename}.csv", sep=';', encoding='latin1')
lista_chaves_processadas = set(df['nf_chave'].unique())
pkl_folder = Path(f"./data-storage/validacao/{foldername}")
pkl_folder = set(pkl_folder.rglob("*.pkl"))
pkl_folder = set([f.name[:-4][-44:] for f in pkl_folder])
num_arquivos_diff = lista_chaves_processadas.difference(pkl_folder)
if len(num_arquivos_diff) == 0:
logger.debug(f"Todos os arquivos .pkl foram processados. Ao todo foram processados {df["nf_chave"].nunique()} notas fiscais.\n")
else:
logger.critical(f"Não foram processados {len(num_arquivos_diff)} arquivos.\n")
for f in num_arquivos_diff:
logger.critical(f"Arquivo {f} não foi processado.\n")
    # CHECK FOR DUPLICATED FILES
files_check = Path(f"./data-storage/validacao/{foldername}")
files_check = list(files_check.rglob("*.pkl"))
files_check = [f.name[:-4][-44:] for f in files_check]
a = Counter()
for f in files_check:
a[f] += 1
for chave, count in a.items():
if count > 1:
logger.critical(f"CHAVE REPETIDA: {chave} # {count}")
    # CHECK FOR INCONSISTENCIES BETWEEN PRODUCT TOTALS AND THE INVOICE VALUE
df['prod_valor_liquido'] = df.apply(lambda x: x['prod_valor'] - x['prod_valor_desconto'], axis='columns')
check_valor_nota_valores = df.groupby("nf_chave")['prod_valor_liquido'].sum().sort_values(ascending=False)
inconsistencia_count = 0
container = {}
for chave, valor in zip(check_valor_nota_valores.index, check_valor_nota_valores.values):
validacao = df.loc[df['nf_chave'] == chave, 'nf_valor'].values[0]
valor = round(valor, 2)
chave = chave.replace("-", "").replace(".", "").replace("/", "")
if validacao != valor:
inconsistencia_count += 1
diff_produtos = round(valor - validacao, 2)
container[chave] = diff_produtos
logger.critical(f"{chave} => Valor Nota: R${validacao} @ Valor Produtos: R${valor} @ Diferença: R${diff_produtos}\n")
def normalize_ncm(ncm: str) -> str:
"""
Normaliza a string que representa o código NCM
Atributos:
ncm : string que representa o código NCM
"""
if len(ncm) != 8:
ncm = "0" + ncm
return ncm
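# Example (illustrative only; the codes are placeholders): a 7-digit code gets its leading zero
# back, an 8-digit code is returned unchanged.
#
#   >>> normalize_ncm("4011000")
#   '04011000'
#   >>> normalize_ncm("22021000")
#   '22021000'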
def get_ncm_values():
"""
Função que retorna um dicionário contendo uma lista de macro categorias e os codigos
NCM associados a cada um deles.
"""
sheet_names = [
'CARNES E OVOS',
'HORTIFRUTI',
'LIMPEZA',
'HIGIENE',
'LATICINIOS E DERIVADOS',
'BEBIDAS',
'PET',
'PADARIA',
'CEREAIS_GRAOS_SEMENTES',
'DOCES',
'VESTUARIO',
'FARINACEOS',
'MASSAS',
'TEMPEROS_MOLHOS',
'OUTROS'
]
categorias_ncm = {}
for sheet in sheet_names:
df = pd.read_excel("./data/others/compilado_ncm_mercado_mod.xlsx", sheet_name=sheet, dtype={'cod_ncm': str})
df['cod_ncm'] = df['cod_ncm'].astype(str)
df['cod_ncm'] = df['cod_ncm'].apply(normalize_ncm)
categorias_ncm[sheet] = df['cod_ncm'].unique().tolist()
return categorias_ncm
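# Shape of the returned mapping (illustrative; the codes shown are placeholders, the real ones
# come from ./data/others/compilado_ncm_mercado_mod.xlsx):
#
#   {'CARNES E OVOS': ['02013000', ...], 'HORTIFRUTI': ['07020000', ...], ...}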
def get_weekday(value: int):
"""
Recebe um INT representando um datetime e retorna uma string com o dia da semana.
Atributos:
value: Inteiro representando um timestamp
"""
convert_int_to_day = {
0: 'Segunda-Feira',
1: 'Terça-Feira',
2: 'Quarta-Feira',
3: 'Quinta-Feira',
4: 'Sexta-Feira',
5: 'Sábado',
6: 'Domingo'
}
weekday = datetime.datetime.utcfromtimestamp(value / 1e9).weekday()
return convert_int_to_day[weekday]
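# Usage sketch (the timestamp below is an assumption): pandas datetime64[ns] values are integers
# in nanoseconds, hence the division by 1e9 before conversion.
#
#   >>> get_weekday(1609718400 * 10**9)   # 2021-01-04 00:00 UTC, a Monday
#   'Segunda-Feira'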
def logging_report(report, list_required_fields, logger):
f = Path(report['tables'][0]['source'])
map_columns_to_number = report['tables'][0]['headers']
    # Build a 1-based {column number: column name} map; '+ 1' keeps zip from dropping the last header.
    map_columns_to_number = {i: col for col, i in zip(map_columns_to_number, range(1, len(map_columns_to_number) + 1))}
fields_required_error = {f: False for f in list_required_fields}
num_errors = report['error-count']
if report['valid']:
logger.debug(f"Arquivo: {f.name} válido pelo schema.\n")
return True, num_errors
else:
lista_errors = report['tables'][0]['errors']
if 0 < num_errors < 1000:
logger.debug(f"Arquivo {f.name} não validado com {num_errors} erros.")
for erro in lista_errors:
for feature, valor in erro.items():
if feature == 'code':
if valor == 'required-constraint':
# identify which column is null
col = map_columns_to_number[erro['column-number']]
# change validation status of this feature
if not fields_required_error[col]:
fields_required_error[col] = True
line = erro['row-number']
logger.critical(f"{f.name} @ Linha {line} possui {col} sem valor atribuído.")
elif valor == 'enumerable-constraint':
col = map_columns_to_number[erro['column-number']]
line = erro['row-number']
logger.critical(f"{f.name} @ Linha {line} e Coluna {col} erro: {erro["message"]} ")
else:
try:
col = map_columns_to_number[erro['column-number']]
                            except:  # the associated error does not refer to a column
try:
line = erro['row-number']
logger.critical(f"{f.name} @ Linha {line} : {erro["message"]}")
except KeyError:
logger.critical(f"{f.name} @ {erro["message"]}")
return False, num_errors
def anonymize_rows(rows):
"""
Rows is an iterable of dictionaries that contain name and
email fields that need to be anonymized.
"""
# Load the faker and its providers
faker = Factory.create("pt_BR")
faker.add_provider(Invoice)
# Create mappings of names & emails to faked names & emails.
# https://stackoverflow.com/questions/18066837/passing-a-parameter-to-objects-created-by-defaultdict
nfecod = defaultdict(lambda: faker.nfce(**{'uf_code': 'DF'}))
cpf = defaultdict(faker.cpf)
nome = defaultdict(faker.name)
endereco = defaultdict(faker.address)
bairro = defaultdict(faker.bairro)
municipio = defaultdict(faker.city)
telefone = defaultdict(faker.phone_number)
uf = defaultdict(faker.state_abbr)
pais = defaultdict(faker.country)
email = defaultdict(faker.email)
# Iterate over the rows and yield anonymized rows.
for row in rows:
# Replace the name and email fields with faked fields.
row['nf_chave'] = nfecod[row['nf_chave']]
row['dest_cpf'] = cpf[row['dest_cpf']]
row['dest_rz'] = nome[row['dest_rz']]
row['dest_endereco'] = endereco[row['dest_endereco']]
row['dest_bairro'] = bairro[row['dest_bairro']]
row['dest_municipio'] = municipio[row['dest_municipio']]
row['dest_telefone'] = telefone[row['dest_telefone']]
row['dest_uf'] = uf[row['dest_uf']]
row['dest_pais'] = pais[row['dest_pais']]
row['dest_email'] = email[row['dest_email']]
# Yield the row back to the caller
yield row
def anonymize(source, target):
"""
The source argument is a path to a CSV file containing data to anonymize,
while target is a path to write the anonymized CSV data to.
"""
# https://pymotw.com/2/csv/
PARTIAL_SOURCE_DATA = Path("./tabular-data/") / f"{source}"
PARTIAL_DEST_DATA = Path("./tabular-data/") / f"{target}"
csv.register_dialect('semicolon', delimiter=';')
with open(PARTIAL_SOURCE_DATA, 'r') as f:
with open(PARTIAL_DEST_DATA, 'w') as o:
# Use the DictReader to easily extract fields
reader = csv.DictReader(f, dialect='semicolon')
writer = csv.DictWriter(o, reader.fieldnames, dialect='semicolon')
# write col names
writer.writeheader()
# Read and anonymize data, writing to target file.
for row in anonymize_rows(reader):
writer.writerow(row)
def subseting_data(dataframe: pd.core.frame.DataFrame, rootname: str):
"""
Salva um arquivo .csv com um subset das features originais
"""
dataframe = dataframe[['nf_dia_semana', 'nf_chave', 'nf_valor', 'em_rz',
'em_nomeFantasia', 'em_cnpj', 'em_endereco', 'em_bairro', 'em_cep', 'em_municipio',
'em_telefone', 'em_uf', 'em_pais', 'em_inscricao_estadual', 'em_inscricao_municipal',
'em_cnae_fiscal', 'dest_rz', 'dest_cpf', 'dest_endereco', 'dest_bairro', 'dest_municipio',
'dest_telefone', 'dest_uf', 'dest_pais', 'dest_inscricao_estadual', 'dest_email', 'prod_nome',
'prod_quantidade', 'prod_unidade', 'prod_valor', 'prod_codigo_produto', 'prod_codigo_ncm',
'prod_categoria_ncm', 'prod_cfop', 'prod_valor_desconto', 'prod_valor_tributos',
'prod_codigo_ean_cmc', 'prod_valor_unitario_cmc', 'prod_valor_unitario_trib', 'prod_unidade_trib']]
dataframe.to_csv(f"./tabular-data/PRE_ANONY_{rootname}.csv", sep=';', encoding='latin1', index=True)
|
import pandas as pd
from pathlib import Path
from collections import Counter
import datetime
from collections import defaultdict
from faker import Factory
import faker
from preprocessing.nfeProvider import Invoice
import csv
def convert_to_numeric(num):
"""
Converte strings que representam valores monetários em Reais (R$) para
o padrão americano.
"""
num = num.strip()
if num != "":
num = num.replace(',', '.')
count_dot = num.count('.')
if count_dot >= 2:
while count_dot >= 2:
                # store the index of the first occurrence of a dot
                slice_index = num.index('.')
                # slice around that index to drop the extra thousands separator
                new_str = num[0:slice_index] + num[slice_index + 1:]
num = new_str
count_dot = num.count('.')
return float(num)
else:
return float(num)
else:
return 0.0
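# Illustrative example (added for clarity; the sample strings are made up):
# convert_to_numeric turns Brazilian-formatted amounts into plain floats.
def _demo_convert_to_numeric():
    assert convert_to_numeric("1.234,56") == 1234.56  # thousands separator dropped
    assert convert_to_numeric("0,99") == 0.99         # comma becomes the decimal point
    assert convert_to_numeric("   ") == 0.0           # blank strings map to 0.0 by design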
def identify_encoding(filename: str) -> str:
"""
Identifica o encoding do arquivo filename retornando uma string com o nome do encoding.
Atributos:
filename: é o path (full ou relativo) do arquivo a ser analisado.
"""
try:
encoding = 'utf8'
with open(filename, "r", encoding=encoding) as file:
_ = file.readlines()
except UnicodeDecodeError:
encoding = 'latin1'
with open(filename, "r", encoding=encoding) as file:
_ = file.readlines()
finally:
return encoding
def report_pkl_into_csv(filename, foldername, logger):
"""
Produz um relatório do status do arquivo tabular gerado a partir dos arquivos .pkl
Atributos:
filename: é o nome do arquivo .csv que será analisado.
foldername: é o nome da sub pasta dos arquivos pkl dentro de ./data-storage/validacao/
"""
# VERIFICA SE ALGUM ARQUIVO .pkl NÃO FORAM PROCESSADOS.
df = pd.read_csv(f"./tabular-data/{filename}.csv", sep=';', encoding='latin1')
lista_chaves_processadas = set(df['nf_chave'].unique())
pkl_folder = Path(f"./data-storage/validacao/{foldername}")
pkl_folder = set(pkl_folder.rglob("*.pkl"))
pkl_folder = set([f.name[:-4][-44:] for f in pkl_folder])
num_arquivos_diff = lista_chaves_processadas.difference(pkl_folder)
if len(num_arquivos_diff) == 0:
logger.debug(f"Todos os arquivos .pkl foram processados. Ao todo foram processados {df['nf_chave'].nunique()} notas fiscais.\n")
else:
logger.critical(f"Não foram processados {len(num_arquivos_diff)} arquivos.\n")
for f in num_arquivos_diff:
logger.critical(f"Arquivo {f} não foi processado.\n")
    # CHECK WHETHER THERE ARE DUPLICATED .pkl FILES
files_check = Path(f"./data-storage/validacao/{foldername}")
files_check = list(files_check.rglob("*.pkl"))
files_check = [f.name[:-4][-44:] for f in files_check]
a = Counter()
for f in files_check:
a[f] += 1
for chave, count in a.items():
if count > 1:
logger.critical(f"CHAVE REPETIDA: {chave} # {count}")
    # CHECK FOR INCONSISTENCIES BETWEEN THE PRODUCT VALUES AND THE INVOICE TOTAL
df['prod_valor_liquido'] = df.apply(lambda x: x['prod_valor'] - x['prod_valor_desconto'], axis='columns')
check_valor_nota_valores = df.groupby("nf_chave")['prod_valor_liquido'].sum().sort_values(ascending=False)
inconsistencia_count = 0
container = {}
for chave, valor in zip(check_valor_nota_valores.index, check_valor_nota_valores.values):
validacao = df.loc[df['nf_chave'] == chave, 'nf_valor'].values[0]
valor = round(valor, 2)
chave = chave.replace("-", "").replace(".", "").replace("/", "")
if validacao != valor:
inconsistencia_count += 1
diff_produtos = round(valor - validacao, 2)
container[chave] = diff_produtos
logger.critical(f"{chave} => Valor Nota: R${validacao} @ Valor Produtos: R${valor} @ Diferença: R${diff_produtos}\n")
def normalize_ncm(ncm: str) -> str:
"""
Normaliza a string que representa o código NCM
Atributos:
ncm : string que representa o código NCM
"""
if len(ncm) != 8:
ncm = "0" + ncm
return ncm
def get_ncm_values():
"""
Função que retorna um dicionário contendo uma lista de macro categorias e os codigos
NCM associados a cada um deles.
"""
sheet_names = [
'CARNES E OVOS',
'HORTIFRUTI',
'LIMPEZA',
'HIGIENE',
'LATICINIOS E DERIVADOS',
'BEBIDAS',
'PET',
'PADARIA',
'CEREAIS_GRAOS_SEMENTES',
'DOCES',
'VESTUARIO',
'FARINACEOS',
'MASSAS',
'TEMPEROS_MOLHOS',
'OUTROS'
]
categorias_ncm = {}
for sheet in sheet_names:
df = pd.read_excel("./data/others/compilado_ncm_mercado_mod.xlsx", sheet_name=sheet, dtype={'cod_ncm': str})
df['cod_ncm'] = df['cod_ncm'].astype(str)
df['cod_ncm'] = df['cod_ncm'].apply(normalize_ncm)
categorias_ncm[sheet] = df['cod_ncm'].unique().tolist()
return categorias_ncm
def get_weekday(value: int):
"""
Recebe um INT representando um datetime e retorna uma string com o dia da semana.
Atributos:
value: Inteiro representando um timestamp
"""
convert_int_to_day = {
0: 'Segunda-Feira',
1: 'Terça-Feira',
2: 'Quarta-Feira',
3: 'Quinta-Feira',
4: 'Sexta-Feira',
5: 'Sábado',
6: 'Domingo'
}
weekday = datetime.datetime.utcfromtimestamp(value / 1e9).weekday()
return convert_int_to_day[weekday]
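# Illustrative example (added for clarity): get_weekday expects the integer value of a
# pandas datetime64[ns] timestamp, i.e. nanoseconds since the Unix epoch, which is why
# the function divides by 1e9 before calling utcfromtimestamp.
def _demo_get_weekday():
    ts = pd.Timestamp("2021-06-07 12:00:00")  # a Monday
    assert get_weekday(ts.value) == 'Segunda-Feira'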
def logging_report(report, list_required_fields, logger):
f = Path(report['tables'][0]['source'])
map_columns_to_number = report['tables'][0]['headers']
map_columns_to_number = {i: col for col, i in zip(map_columns_to_number, range(1, len(map_columns_to_number)))}
fields_required_error = {f: False for f in list_required_fields}
num_errors = report['error-count']
if report['valid']:
logger.debug(f"Arquivo: {f.name} válido pelo schema.\n")
return True, num_errors
else:
lista_errors = report['tables'][0]['errors']
if 0 < num_errors < 1000:
logger.debug(f"Arquivo {f.name} não validado com {num_errors} erros.")
for erro in lista_errors:
for feature, valor in erro.items():
if feature == 'code':
if valor == 'required-constraint':
# identify which column is null
col = map_columns_to_number[erro['column-number']]
# change validation status of this feature
if not fields_required_error[col]:
fields_required_error[col] = True
line = erro['row-number']
logger.critical(f"{f.name} @ Linha {line} possui {col} sem valor atribuído.")
elif valor == 'enumerable-constraint':
col = map_columns_to_number[erro['column-number']]
line = erro['row-number']
logger.critical(f"{f.name} @ Linha {line} e Coluna {col} erro: {erro['message']} ")
else:
try:
col = map_columns_to_number[erro['column-number']]
                            except:  # the associated error does not refer to a specific column
try:
line = erro['row-number']
logger.critical(f"{f.name} @ Linha {line} : {erro['message']}")
except KeyError:
logger.critical(f"{f.name} @ {erro['message']}")
return False, num_errors
def anonymize_rows(rows):
"""
Rows is an iterable of dictionaries that contain name and
email fields that need to be anonymized.
"""
# Load the faker and its providers
faker = Factory.create("pt_BR")
faker.add_provider(Invoice)
# Create mappings of names & emails to faked names & emails.
# https://stackoverflow.com/questions/18066837/passing-a-parameter-to-objects-created-by-defaultdict
nfecod = defaultdict(lambda: faker.nfce(**{'uf_code': 'DF'}))
cpf = defaultdict(faker.cpf)
nome = defaultdict(faker.name)
endereco = defaultdict(faker.address)
bairro = defaultdict(faker.bairro)
municipio = defaultdict(faker.city)
telefone = defaultdict(faker.phone_number)
uf = defaultdict(faker.state_abbr)
pais = defaultdict(faker.country)
email = defaultdict(faker.email)
# Iterate over the rows and yield anonymized rows.
for row in rows:
# Replace the name and email fields with faked fields.
row['nf_chave'] = nfecod[row['nf_chave']]
row['dest_cpf'] = cpf[row['dest_cpf']]
row['dest_rz'] = nome[row['dest_rz']]
row['dest_endereco'] = endereco[row['dest_endereco']]
row['dest_bairro'] = bairro[row['dest_bairro']]
row['dest_municipio'] = municipio[row['dest_municipio']]
row['dest_telefone'] = telefone[row['dest_telefone']]
row['dest_uf'] = uf[row['dest_uf']]
row['dest_pais'] = pais[row['dest_pais']]
row['dest_email'] = email[row['dest_email']]
# Yield the row back to the caller
yield row
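# Illustrative sketch (added for clarity): the defaultdict(factory) pattern above yields a
# stable pseudonym per distinct input value -- the same CPF always maps to the same fake
# CPF within a run, which keeps rows belonging to the same customer linked after anonymization.
def _demo_stable_pseudonyms():
    fake = Factory.create("pt_BR")
    nomes = defaultdict(fake.name)
    assert nomes["Alice"] == nomes["Alice"]  # repeated keys reuse the same fake value
    assert isinstance(nomes["Bob"], str)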
def anonymize(source, target):
"""
The source argument is a path to a CSV file containing data to anonymize,
while target is a path to write the anonymized CSV data to.
"""
# https://pymotw.com/2/csv/
PARTIAL_SOURCE_DATA = Path("./tabular-data/") / f"{source}"
PARTIAL_DEST_DATA = Path("./tabular-data/") / f"{target}"
csv.register_dialect('semicolon', delimiter=';')
with open(PARTIAL_SOURCE_DATA, 'r') as f:
with open(PARTIAL_DEST_DATA, 'w') as o:
# Use the DictReader to easily extract fields
reader = csv.DictReader(f, dialect='semicolon')
writer = csv.DictWriter(o, reader.fieldnames, dialect='semicolon')
# write col names
writer.writeheader()
# Read and anonymize data, writing to target file.
for row in anonymize_rows(reader):
writer.writerow(row)
def subseting_data(dataframe: pd.core.frame.DataFrame, rootname: str):
"""
Salva um arquivo .csv com um subset das features originais
"""
dataframe = dataframe[['nf_dia_semana', 'nf_chave', 'nf_valor', 'em_rz',
'em_nomeFantasia', 'em_cnpj', 'em_endereco', 'em_bairro', 'em_cep', 'em_municipio',
'em_telefone', 'em_uf', 'em_pais', 'em_inscricao_estadual', 'em_inscricao_municipal',
'em_cnae_fiscal', 'dest_rz', 'dest_cpf', 'dest_endereco', 'dest_bairro', 'dest_municipio',
'dest_telefone', 'dest_uf', 'dest_pais', 'dest_inscricao_estadual', 'dest_email', 'prod_nome',
'prod_quantidade', 'prod_unidade', 'prod_valor', 'prod_codigo_produto', 'prod_codigo_ncm',
'prod_categoria_ncm', 'prod_cfop', 'prod_valor_desconto', 'prod_valor_tributos',
'prod_codigo_ean_cmc', 'prod_valor_unitario_cmc', 'prod_valor_unitario_trib', 'prod_unidade_trib']]
dataframe.to_csv(f"./tabular-data/PRE_ANONY_{rootname}.csv", sep=';', encoding='latin1', index=True)
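# Illustrative end-to-end sketch (added for clarity; "exemplo" and the DataFrame are hypothetical):
# subseting_data writes ./tabular-data/PRE_ANONY_<rootname>.csv with the reduced column set,
# and anonymize then produces a pseudonymized copy of that CSV in the same folder.
def _demo_subset_then_anonymize(df: pd.DataFrame) -> None:
    subseting_data(df, rootname="exemplo")
    anonymize("PRE_ANONY_exemplo.csv", "ANONY_exemplo.csv")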
|
from typing import Any, Dict, Iterable, cast
from openslides_backend.action.actions.meeting.shared_meeting import (
meeting_projector_default_replacements,
)
from openslides_backend.permissions.management_levels import (
CommitteeManagementLevel,
OrganizationManagementLevel,
)
from tests.system.action.base import BaseActionTestCase
class MeetingCreateActionTest(BaseActionTestCase):
def basic_test(self, datapart: Dict[str, Any]) -> Dict[str, Any]:
self.set_models(
{
"organization/1": {"limit_of_meetings": 0, "active_meeting_ids": []},
"committee/1": {
"name": "test_committee",
"user_ids": [2],
"organization_id": 1,
},
"group/1": {},
"user/2": {},
"organization_tag/3": {},
}
)
response = self.request(
"meeting.create",
{
"name": "test_name",
"committee_id": 1,
"organization_tag_ids": [3],
**datapart,
},
)
self.assert_status_code(response, 200)
return self.get_model("meeting/1")
def test_create_simple_and_complex_workflow(self) -> None:
meeting = self.basic_test(dict())
self.assertCountEqual(
cast(Iterable[Any], meeting.get("default_projector_$_id")),
meeting_projector_default_replacements,
)
self.assert_model_exists(
"meeting/1",
{
"name": "test_name",
"committee_id": 1,
"group_ids": [2, 3, 4, 5, 6],
"default_group_id": 2,
"admin_group_id": 3,
"motion_workflow_ids": [1, 2],
"motions_default_workflow_id": 1,
"motions_default_amendment_workflow_id": 1,
"motions_default_statute_amendment_workflow_id": 1,
"motion_state_ids": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
"list_of_speakers_countdown_id": 1,
"poll_countdown_id": 2,
"projector_countdown_warning_time": 0,
"organization_tag_ids": [3],
"is_active_in_organization_id": 1,
"assignment_poll_default_group_ids": [4],
"motion_poll_default_group_ids": [4],
**{
f"default_projector_${name}_id": 1
for name in meeting_projector_default_replacements
},
},
)
self.assert_model_exists("organization/1", {"active_meeting_ids": [1]})
self.assert_model_exists("group/2", {"name": "Default"})
self.assert_model_exists("group/3", {"name": "Admin"})
self.assert_model_exists("group/4", {"name": "Delegates"})
self.assert_model_exists("group/5", {"name": "Staff"})
self.assert_model_exists("group/6", {"name": "Committees"})
self.assert_model_exists(
"motion_workflow/1",
{
"name": "Simple Workflow",
"meeting_id": 1,
"default_workflow_meeting_id": 1,
"default_amendment_workflow_meeting_id": 1,
"default_statute_amendment_workflow_meeting_id": 1,
"state_ids": [1, 2, 3, 4],
"first_state_id": 1,
},
)
self.assert_model_exists(
"motion_state/1", {"name": "submitted", "next_state_ids": [2, 3, 4]}
)
self.assert_model_exists(
"motion_state/2",
{
"name": "accepted",
"previous_state_ids": [1],
"meeting_id": 1,
"workflow_id": 1,
},
)
self.assert_model_exists(
"motion_state/3", {"name": "rejected", "previous_state_ids": [1]}
)
self.assert_model_exists(
"motion_state/4", {"name": "not_decided", "previous_state_ids": [1]}
)
self.assert_model_exists(
"motion_workflow/2",
{
"name": "Complex Workflow",
"meeting_id": 1,
"state_ids": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
"first_state_id": 5,
},
)
self.assert_model_exists(
"motion_state/5", {"name": "in progress", "next_state_ids": [6, 10]}
)
self.assert_model_exists(
"motion_state/6", {"name": "submitted", "previous_state_ids": [5]}
)
self.assert_model_exists(
"motion_state/7", {"name": "permitted", "previous_state_ids": [6]}
)
self.assert_model_exists(
"motion_state/8", {"name": "accepted", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/9", {"name": "rejected", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/10", {"name": "withdrawn", "previous_state_ids": [5, 6, 7]}
)
self.assert_model_exists(
"motion_state/11", {"name": "adjourned", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/12", {"name": "not concerned", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/13",
{"name": "referred to committee", "previous_state_ids": [7]},
)
self.assert_model_exists(
"motion_state/14", {"name": "needs review", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/15",
{"name": "rejected (not authorized)", "previous_state_ids": [6]},
)
projector1 = self.get_model("projector/1")
self.assertCountEqual(
cast(Iterable[Any], projector1.get("used_as_default_$_in_meeting_id")),
meeting_projector_default_replacements,
)
self.assert_model_exists(
"projector/1",
{
"name": "Default projector",
"meeting_id": 1,
"used_as_reference_projector_meeting_id": 1,
**{
f"used_as_default_${name}_in_meeting_id": 1
for name in meeting_projector_default_replacements
},
},
)
self.assert_model_exists(
"projector_countdown/1",
{
"title": "List of speakers countdown",
"meeting_id": 1,
"used_as_list_of_speaker_countdown_meeting_id": 1,
"default_time": 60,
"countdown_time": 60,
},
)
self.assert_model_exists(
"projector_countdown/2",
{
"title": "Voting countdown",
"meeting_id": 1,
"used_as_poll_countdown_meeting_id": 1,
"default_time": 60,
"countdown_time": 60,
},
)
def test_check_action_data_fields(self) -> None:
meeting = self.basic_test(
{
"description": "RRfnzxHA",
"location": "LSFHPTgE",
"start_time": 1608120653,
"end_time": 1608121653,
"url_name": "JWdYZqDX",
}
)
assert meeting.get("description") == "RRfnzxHA"
assert meeting.get("location") == "LSFHPTgE"
assert meeting.get("start_time") == 1608120653
assert meeting.get("end_time") == 1608121653
assert meeting.get("url_name") == "JWdYZqDX"
# check two defaults:
assert meeting.get("assignment_poll_default_type") == "analog"
assert meeting.get("assignment_poll_default_method") == "Y"
def test_create_check_users(self) -> None:
meeting = self.basic_test({"user_ids": [2]})
assert meeting.get("user_ids") == [2]
default_group_id = meeting.get("default_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting["id"]}_ids": [default_group_id]}
)
def test_create_check_admins(self) -> None:
meeting = self.basic_test({"admin_ids": [2]})
assert meeting.get("user_ids") == [2]
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting["id"]}_ids": [admin_group_id]}
)
def test_create_with_same_user_in_users_and_admins(self) -> None:
meeting = self.basic_test({"user_ids": [2], "admin_ids": [2]})
assert meeting.get("user_ids") == [2]
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting["id"]}_ids": [admin_group_id]}
)
def test_create_multiple_users(self) -> None:
self.set_models(
{
"organization/1": {"limit_of_meetings": 0, "active_meeting_ids": []},
"committee/1": {"organization_id": 1},
"user/2": {},
"user/3": {},
}
)
response = self.request(
"meeting.create",
{
"name": "test_name",
"committee_id": 1,
"user_ids": [2, 3],
"admin_ids": [1],
},
)
self.assert_status_code(response, 200)
meeting = self.get_model("meeting/1")
default_group_id = meeting.get("default_group_id")
self.assert_model_exists(
"user/2", {"group_$1_ids": [default_group_id], "committee_ids": [1]}
)
self.assert_model_exists(
"user/3", {"group_$1_ids": [default_group_id], "committee_ids": [1]}
)
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/1", {"group_$1_ids": [admin_group_id], "committee_ids": [1]}
)
self.assertCountEqual(meeting.get("user_ids"), [1, 2, 3])
committee = self.get_model("committee/1")
self.assertCountEqual(committee.get("user_ids"), [1, 2, 3])
def test_create_with_admins_empty_array(self) -> None:
meeting = self.basic_test({"admin_ids": []})
assert "admin_ids" not in meeting
def test_create_no_permissions(self) -> None:
self.set_models(
{
"user/1": {
"organization_management_level": OrganizationManagementLevel.CAN_MANAGE_USERS
},
"committee/1": {"name": "test_committee", "user_ids": [1, 2]},
"group/1": {},
"user/2": {},
}
)
response = self.request(
"meeting.create",
{
"name": "test_name",
"committee_id": 1,
},
)
self.assert_status_code(response, 403)
assert (
"Missing CommitteeManagementLevel: can_manage" in response.json["message"]
)
def test_create_permissions(self) -> None:
self.set_models(
{
"user/1": {
"organization_management_level": OrganizationManagementLevel.CAN_MANAGE_USERS,
"committee_$1_management_level": CommitteeManagementLevel.CAN_MANAGE,
}
}
)
self.basic_test({})
def test_create_with_admin_ids_and_permissions_cml(self) -> None:
self.set_models(
{
"user/1": {
"organization_management_level": None,
"committee_$1_management_level": CommitteeManagementLevel.CAN_MANAGE,
}
}
)
meeting = self.basic_test({"admin_ids": [2]})
assert meeting.get("user_ids") == [2]
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting["id"]}_ids": [admin_group_id]}
)
def test_create_with_admin_ids_and_permissions_oml(self) -> None:
self.set_models(
{
"user/1": {
"organization_management_level": OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION,
"committee_$1_management_level": None,
}
}
)
meeting = self.basic_test({"admin_ids": [2]})
assert meeting.get("user_ids") == [2]
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting["id"]}_ids": [admin_group_id]}
)
def test_create_limit_of_meetings_reached(self) -> None:
self.set_models(
{
"organization/1": {"limit_of_meetings": 1, "active_meeting_ids": [1]},
"committee/1": {"organization_id": 1},
}
)
response = self.request(
"meeting.create",
{
"name": "test_name",
"committee_id": 1,
},
)
self.assert_status_code(response, 400)
self.assertIn(
"You cannot create a new meeting, because you reached your limit of 1 active meetings.",
response.json["message"],
)
|
from typing import Any, Dict, Iterable, cast
from openslides_backend.action.actions.meeting.shared_meeting import (
meeting_projector_default_replacements,
)
from openslides_backend.permissions.management_levels import (
CommitteeManagementLevel,
OrganizationManagementLevel,
)
from tests.system.action.base import BaseActionTestCase
class MeetingCreateActionTest(BaseActionTestCase):
def basic_test(self, datapart: Dict[str, Any]) -> Dict[str, Any]:
self.set_models(
{
"organization/1": {"limit_of_meetings": 0, "active_meeting_ids": []},
"committee/1": {
"name": "test_committee",
"user_ids": [2],
"organization_id": 1,
},
"group/1": {},
"user/2": {},
"organization_tag/3": {},
}
)
response = self.request(
"meeting.create",
{
"name": "test_name",
"committee_id": 1,
"organization_tag_ids": [3],
**datapart,
},
)
self.assert_status_code(response, 200)
return self.get_model("meeting/1")
def test_create_simple_and_complex_workflow(self) -> None:
meeting = self.basic_test(dict())
self.assertCountEqual(
cast(Iterable[Any], meeting.get("default_projector_$_id")),
meeting_projector_default_replacements,
)
self.assert_model_exists(
"meeting/1",
{
"name": "test_name",
"committee_id": 1,
"group_ids": [2, 3, 4, 5, 6],
"default_group_id": 2,
"admin_group_id": 3,
"motion_workflow_ids": [1, 2],
"motions_default_workflow_id": 1,
"motions_default_amendment_workflow_id": 1,
"motions_default_statute_amendment_workflow_id": 1,
"motion_state_ids": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
"list_of_speakers_countdown_id": 1,
"poll_countdown_id": 2,
"projector_countdown_warning_time": 0,
"organization_tag_ids": [3],
"is_active_in_organization_id": 1,
"assignment_poll_default_group_ids": [4],
"motion_poll_default_group_ids": [4],
**{
f"default_projector_${name}_id": 1
for name in meeting_projector_default_replacements
},
},
)
self.assert_model_exists("organization/1", {"active_meeting_ids": [1]})
self.assert_model_exists("group/2", {"name": "Default"})
self.assert_model_exists("group/3", {"name": "Admin"})
self.assert_model_exists("group/4", {"name": "Delegates"})
self.assert_model_exists("group/5", {"name": "Staff"})
self.assert_model_exists("group/6", {"name": "Committees"})
self.assert_model_exists(
"motion_workflow/1",
{
"name": "Simple Workflow",
"meeting_id": 1,
"default_workflow_meeting_id": 1,
"default_amendment_workflow_meeting_id": 1,
"default_statute_amendment_workflow_meeting_id": 1,
"state_ids": [1, 2, 3, 4],
"first_state_id": 1,
},
)
self.assert_model_exists(
"motion_state/1", {"name": "submitted", "next_state_ids": [2, 3, 4]}
)
self.assert_model_exists(
"motion_state/2",
{
"name": "accepted",
"previous_state_ids": [1],
"meeting_id": 1,
"workflow_id": 1,
},
)
self.assert_model_exists(
"motion_state/3", {"name": "rejected", "previous_state_ids": [1]}
)
self.assert_model_exists(
"motion_state/4", {"name": "not_decided", "previous_state_ids": [1]}
)
self.assert_model_exists(
"motion_workflow/2",
{
"name": "Complex Workflow",
"meeting_id": 1,
"state_ids": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
"first_state_id": 5,
},
)
self.assert_model_exists(
"motion_state/5", {"name": "in progress", "next_state_ids": [6, 10]}
)
self.assert_model_exists(
"motion_state/6", {"name": "submitted", "previous_state_ids": [5]}
)
self.assert_model_exists(
"motion_state/7", {"name": "permitted", "previous_state_ids": [6]}
)
self.assert_model_exists(
"motion_state/8", {"name": "accepted", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/9", {"name": "rejected", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/10", {"name": "withdrawn", "previous_state_ids": [5, 6, 7]}
)
self.assert_model_exists(
"motion_state/11", {"name": "adjourned", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/12", {"name": "not concerned", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/13",
{"name": "referred to committee", "previous_state_ids": [7]},
)
self.assert_model_exists(
"motion_state/14", {"name": "needs review", "previous_state_ids": [7]}
)
self.assert_model_exists(
"motion_state/15",
{"name": "rejected (not authorized)", "previous_state_ids": [6]},
)
projector1 = self.get_model("projector/1")
self.assertCountEqual(
cast(Iterable[Any], projector1.get("used_as_default_$_in_meeting_id")),
meeting_projector_default_replacements,
)
self.assert_model_exists(
"projector/1",
{
"name": "Default projector",
"meeting_id": 1,
"used_as_reference_projector_meeting_id": 1,
**{
f"used_as_default_${name}_in_meeting_id": 1
for name in meeting_projector_default_replacements
},
},
)
self.assert_model_exists(
"projector_countdown/1",
{
"title": "List of speakers countdown",
"meeting_id": 1,
"used_as_list_of_speaker_countdown_meeting_id": 1,
"default_time": 60,
"countdown_time": 60,
},
)
self.assert_model_exists(
"projector_countdown/2",
{
"title": "Voting countdown",
"meeting_id": 1,
"used_as_poll_countdown_meeting_id": 1,
"default_time": 60,
"countdown_time": 60,
},
)
def test_check_action_data_fields(self) -> None:
meeting = self.basic_test(
{
"description": "RRfnzxHA",
"location": "LSFHPTgE",
"start_time": 1608120653,
"end_time": 1608121653,
"url_name": "JWdYZqDX",
}
)
assert meeting.get("description") == "RRfnzxHA"
assert meeting.get("location") == "LSFHPTgE"
assert meeting.get("start_time") == 1608120653
assert meeting.get("end_time") == 1608121653
assert meeting.get("url_name") == "JWdYZqDX"
# check two defaults:
assert meeting.get("assignment_poll_default_type") == "analog"
assert meeting.get("assignment_poll_default_method") == "Y"
def test_create_check_users(self) -> None:
meeting = self.basic_test({"user_ids": [2]})
assert meeting.get("user_ids") == [2]
default_group_id = meeting.get("default_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting['id']}_ids": [default_group_id]}
)
def test_create_check_admins(self) -> None:
meeting = self.basic_test({"admin_ids": [2]})
assert meeting.get("user_ids") == [2]
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting['id']}_ids": [admin_group_id]}
)
def test_create_with_same_user_in_users_and_admins(self) -> None:
meeting = self.basic_test({"user_ids": [2], "admin_ids": [2]})
assert meeting.get("user_ids") == [2]
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting['id']}_ids": [admin_group_id]}
)
def test_create_multiple_users(self) -> None:
self.set_models(
{
"organization/1": {"limit_of_meetings": 0, "active_meeting_ids": []},
"committee/1": {"organization_id": 1},
"user/2": {},
"user/3": {},
}
)
response = self.request(
"meeting.create",
{
"name": "test_name",
"committee_id": 1,
"user_ids": [2, 3],
"admin_ids": [1],
},
)
self.assert_status_code(response, 200)
meeting = self.get_model("meeting/1")
default_group_id = meeting.get("default_group_id")
self.assert_model_exists(
"user/2", {"group_$1_ids": [default_group_id], "committee_ids": [1]}
)
self.assert_model_exists(
"user/3", {"group_$1_ids": [default_group_id], "committee_ids": [1]}
)
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/1", {"group_$1_ids": [admin_group_id], "committee_ids": [1]}
)
self.assertCountEqual(meeting.get("user_ids"), [1, 2, 3])
committee = self.get_model("committee/1")
self.assertCountEqual(committee.get("user_ids"), [1, 2, 3])
def test_create_with_admins_empty_array(self) -> None:
meeting = self.basic_test({"admin_ids": []})
assert "admin_ids" not in meeting
def test_create_no_permissions(self) -> None:
self.set_models(
{
"user/1": {
"organization_management_level": OrganizationManagementLevel.CAN_MANAGE_USERS
},
"committee/1": {"name": "test_committee", "user_ids": [1, 2]},
"group/1": {},
"user/2": {},
}
)
response = self.request(
"meeting.create",
{
"name": "test_name",
"committee_id": 1,
},
)
self.assert_status_code(response, 403)
assert (
"Missing CommitteeManagementLevel: can_manage" in response.json["message"]
)
def test_create_permissions(self) -> None:
self.set_models(
{
"user/1": {
"organization_management_level": OrganizationManagementLevel.CAN_MANAGE_USERS,
"committee_$1_management_level": CommitteeManagementLevel.CAN_MANAGE,
}
}
)
self.basic_test({})
def test_create_with_admin_ids_and_permissions_cml(self) -> None:
self.set_models(
{
"user/1": {
"organization_management_level": None,
"committee_$1_management_level": CommitteeManagementLevel.CAN_MANAGE,
}
}
)
meeting = self.basic_test({"admin_ids": [2]})
assert meeting.get("user_ids") == [2]
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting['id']}_ids": [admin_group_id]}
)
def test_create_with_admin_ids_and_permissions_oml(self) -> None:
self.set_models(
{
"user/1": {
"organization_management_level": OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION,
"committee_$1_management_level": None,
}
}
)
meeting = self.basic_test({"admin_ids": [2]})
assert meeting.get("user_ids") == [2]
admin_group_id = meeting.get("admin_group_id")
self.assert_model_exists(
"user/2", {f"group_${meeting['id']}_ids": [admin_group_id]}
)
def test_create_limit_of_meetings_reached(self) -> None:
self.set_models(
{
"organization/1": {"limit_of_meetings": 1, "active_meeting_ids": [1]},
"committee/1": {"organization_id": 1},
}
)
response = self.request(
"meeting.create",
{
"name": "test_name",
"committee_id": 1,
},
)
self.assert_status_code(response, 400)
self.assertIn(
"You cannot create a new meeting, because you reached your limit of 1 active meetings.",
response.json["message"],
)
|
import os
import sqlite3
import traceback
class GeneralDB:
    ''' basic sqlite3 db which can be used for many purposes
    note: table and column names are interpolated straight into the SQL, so this
    isn't safe against injection -- only use it with trusted input'''
def __init__(self, sessionName):
self.connection = None
self.cursor = None
        self.sessionName = sessionName  # unique string per instance of this object (the DB file name, not the table name)
def initCursor(self):
''' create and set the cursor'''
os.makedirs("DBSessions", exist_ok=True)
self.connection = sqlite3.connect("DBSessions/"+self.sessionName+".db")
self.cursor = self.connection.cursor()
def closeCursor(self, save=True):
''' close the connection and maybe save the changes'''
if self.connection is None:
return
if save:
self.connection.commit()
self.connection.close()
self.connection = None
self.cursor = None
def checkCursor(self):
''' shorten the cursor init even more'''
if self.connection is None:
self.initCursor()
def rawExecute(self, statement):
        ''' directly execute a command against the db
        note: prints the statement but not any query results; this is meant for DDL-related queries'''
self.checkCursor()
try:
self.cursor.execute(statement)
print("A statement was run: "+str(statement))
except:
traceback.print_exc()
self.closeCursor()
def rawExecuteAndPrint(self, statement):
''' directly execute a select statement'''
self.checkCursor()
g = ""
try:
self.cursor.execute(statement)
g = self.cursor.fetchall()
print(g)
except:
traceback.print_exc()
self.closeCursor()
return g
def getItem(self, table, column, rowID):
''' get a specific item from a table given a row ID and a column'''
self.checkCursor()
try:
self.cursor.execute("select ? from {} where id=?".format(table), (column, rowID,))
output = self.cursor.fetchone()
except:
traceback.print_exc()
output = None
self.closeCursor()
return output
def getRow(self, table, rowID):
        ''' get a row from a table given a row ID
        returns None if the row doesn't exist, or a tuple if it does'''
self.checkCursor()
try:
self.cursor.execute("select * from {} where id=?".format(table), (rowID,))
output = self.cursor.fetchone()
except:
traceback.print_exc()
output = None
self.closeCursor()
return output
def getColumn(self, table, column):
''' get a column from a table as a list'''
self.checkCursor()
try:
self.cursor.execute("select {} from {}".format(column, table))
output = self.cursor.fetchall()
except:
traceback.print_exc()
output = None
self.closeCursor()
return output
def getTable(self, table):
''' get a whole table'''
self.checkCursor()
try:
self.cursor.execute("select * from {}".format(table))
output = self.cursor.fetchall()
except:
traceback.print_exc()
output = None
self.closeCursor()
return output
def addRow(self, table, values):
''' make a new row in a table with the given list of values'''
self.checkCursor()
try:
self.cursor.execute("insert into {} values ({})".format(table, ",".join(values)))
except:
traceback.print_exc()
self.closeCursor()
def addRows(self, table, values):
''' insert many rows into a table
it is expected that values is a list of rows matching the columns exactly'''
self.checkCursor()
try:
self.cursor.executemany(f"insert into {table} values ({",".join(["?" for _ in values[0]])})",
values
)
except:
traceback.print_exc()
self.closeCursor()
def emptyTable(self, table):
''' delete the contents of a table'''
self.checkCursor()
try:
self.cursor.execute("delete from {}".format(table))
except:
traceback.print_exc()
self.closeCursor()
def delRow(self, table, rowID):
''' delete a row from a table with the given row ID'''
self.checkCursor()
try:
self.cursor.execute("delete from {} where id = ?".format(table), (rowID,))
except:
traceback.print_exc()
self.closeCursor()
def editItem(self, table, rowID, column, newValue):
''' edit a value in a table with a given column and row ID'''
self.checkCursor()
try:
self.cursor.execute("update {} set {} = ? where id = ?".format(table, column), (newValue, rowID,))
except:
traceback.print_exc()
self.closeCursor()
def replaceRow(self, table, rowID, newRowValues):
''' edit a row in a table to replace all of its values with new info
newRowValues should be a list of new values not including the row ID'''
self.checkCursor()
try:
self.delRow(table, rowID)
self.addRow(table, ['"'+rowID+'"']+newRowValues)
except:
traceback.print_exc()
self.closeCursor()
def createTable(self, name, columns, suppress=False):
''' make a new table with these column names'''
self.checkCursor()
try:
self.cursor.execute("create table {} ({})".format(name, ",".join(columns)))
except:
if not suppress:
traceback.print_exc()
else:
pass
self.closeCursor()
def verifyTableExists(self, table):
''' check to see if a table exists and return true or false'''
self.checkCursor()
try:
self.cursor.execute("select * from {}".format(table))
output = True
except:
output = False
self.closeCursor()
return output
def verifyTableExistsWithRows(self, table, rowIDs):
''' check to see if a table exists with the given row IDs
return a list of row IDs that are missing'''
missingRows = []
if not(self.verifyTableExists(table)):
return rowIDs
self.checkCursor()
for rowID in rowIDs:
try:
self.cursor.execute("select * from {} where id = {}".format(table, rowID))
if len(self.cursor.fetchone()) == 0:
missingRows.append(rowID)
except:
missingRows.append(rowID)
self.closeCursor()
return missingRows
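# Illustrative usage sketch (added for clarity; the table name and values are hypothetical).
# GeneralDB keeps one sqlite file per sessionName under ./DBSessions and opens/closes the
# connection around every call, so the caller never has to manage the connection itself.
def _demo_general_db():
    db = GeneralDB("demo_session")
    db.createTable("people", ["id text", "name text"], suppress=True)
    db.addRow("people", ['"p1"', '"Alice"'])  # values are spliced in as quoted SQL literals,
                                              # matching how replaceRow quotes its rowID
    print(db.getRow("people", "p1"))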
|
import os
import sqlite3
import traceback
class GeneralDB:
    ''' basic sqlite3 db which can be used for many purposes
    note: table and column names are interpolated straight into the SQL, so this
    isn't safe against injection -- only use it with trusted input'''
def __init__(self, sessionName):
self.connection = None
self.cursor = None
        self.sessionName = sessionName  # unique string per instance of this object (the DB file name, not the table name)
def initCursor(self):
''' create and set the cursor'''
os.makedirs("DBSessions", exist_ok=True)
self.connection = sqlite3.connect("DBSessions/"+self.sessionName+".db")
self.cursor = self.connection.cursor()
def closeCursor(self, save=True):
''' close the connection and maybe save the changes'''
if self.connection is None:
return
if save:
self.connection.commit()
self.connection.close()
self.connection = None
self.cursor = None
def checkCursor(self):
''' shorten the cursor init even more'''
if self.connection is None:
self.initCursor()
def rawExecute(self, statement):
        ''' directly execute a command against the db
        note: prints the statement but not any query results; this is meant for DDL-related queries'''
self.checkCursor()
try:
self.cursor.execute(statement)
print("A statement was run: "+str(statement))
except:
traceback.print_exc()
self.closeCursor()
def rawExecuteAndPrint(self, statement):
''' directly execute a select statement'''
self.checkCursor()
g = ""
try:
self.cursor.execute(statement)
g = self.cursor.fetchall()
print(g)
except:
traceback.print_exc()
self.closeCursor()
return g
def getItem(self, table, column, rowID):
''' get a specific item from a table given a row ID and a column'''
self.checkCursor()
try:
self.cursor.execute("select ? from {} where id=?".format(table), (column, rowID,))
output = self.cursor.fetchone()
except:
traceback.print_exc()
output = None
self.closeCursor()
return output
def getRow(self, table, rowID):
        ''' get a row from a table given a row ID
        returns None if the row doesn't exist, or a tuple if it does'''
self.checkCursor()
try:
self.cursor.execute("select * from {} where id=?".format(table), (rowID,))
output = self.cursor.fetchone()
except:
traceback.print_exc()
output = None
self.closeCursor()
return output
def getColumn(self, table, column):
''' get a column from a table as a list'''
self.checkCursor()
try:
self.cursor.execute("select {} from {}".format(column, table))
output = self.cursor.fetchall()
except:
traceback.print_exc()
output = None
self.closeCursor()
return output
def getTable(self, table):
''' get a whole table'''
self.checkCursor()
try:
self.cursor.execute("select * from {}".format(table))
output = self.cursor.fetchall()
except:
traceback.print_exc()
output = None
self.closeCursor()
return output
def addRow(self, table, values):
''' make a new row in a table with the given list of values'''
self.checkCursor()
try:
self.cursor.execute("insert into {} values ({})".format(table, ",".join(values)))
except:
traceback.print_exc()
self.closeCursor()
def addRows(self, table, values):
''' insert many rows into a table
it is expected that values is a list of rows matching the columns exactly'''
self.checkCursor()
try:
self.cursor.executemany(f"insert into {table} values ({','.join(['?' for _ in values[0]])})",
values
)
except:
traceback.print_exc()
self.closeCursor()
def emptyTable(self, table):
''' delete the contents of a table'''
self.checkCursor()
try:
self.cursor.execute("delete from {}".format(table))
except:
traceback.print_exc()
self.closeCursor()
def delRow(self, table, rowID):
''' delete a row from a table with the given row ID'''
self.checkCursor()
try:
self.cursor.execute("delete from {} where id = ?".format(table), (rowID,))
except:
traceback.print_exc()
self.closeCursor()
def editItem(self, table, rowID, column, newValue):
''' edit a value in a table with a given column and row ID'''
self.checkCursor()
try:
self.cursor.execute("update {} set {} = ? where id = ?".format(table, column), (newValue, rowID,))
except:
traceback.print_exc()
self.closeCursor()
def replaceRow(self, table, rowID, newRowValues):
''' edit a row in a table to replace all of its values with new info
newRowValues should be a list of new values not including the row ID'''
self.checkCursor()
try:
self.delRow(table, rowID)
self.addRow(table, ['"'+rowID+'"']+newRowValues)
except:
traceback.print_exc()
self.closeCursor()
def createTable(self, name, columns, suppress=False):
''' make a new table with these column names'''
self.checkCursor()
try:
self.cursor.execute("create table {} ({})".format(name, ",".join(columns)))
except:
if not suppress:
traceback.print_exc()
else:
pass
self.closeCursor()
def verifyTableExists(self, table):
''' check to see if a table exists and return true or false'''
self.checkCursor()
try:
self.cursor.execute("select * from {}".format(table))
output = True
except:
output = False
self.closeCursor()
return output
def verifyTableExistsWithRows(self, table, rowIDs):
''' check to see if a table exists with the given row IDs
return a list of row IDs that are missing'''
missingRows = []
if not(self.verifyTableExists(table)):
return rowIDs
self.checkCursor()
for rowID in rowIDs:
try:
self.cursor.execute("select * from {} where id = {}".format(table, rowID))
if len(self.cursor.fetchone()) == 0:
missingRows.append(rowID)
except:
missingRows.append(rowID)
self.closeCursor()
return missingRows
|
#!/usr/bin/env python
'''
Advent of Code 2021 - Day 9: Smoke Basin (Part 1)
https://adventofcode.com/2021/day/9
'''
import numpy as np
class HeightMap():
def __init__(self) -> None:
self._grid = np.array([])
def add_row(self, row):
np_row = np.array(row)
if self._grid.size != 0:
self._grid = np.vstack([self._grid, np_row])
else:
self._grid = np_row
def find_low_points(self, radius=1):
low_points = []
for index, point in np.ndenumerate(self._grid):
neighbor_points = self._neighbors(radius, coordinates=index)
if point < min(neighbor_points):
low_points.append(point)
return low_points
def _neighbors(self, radius, coordinates=(0, 0)):
neighbors = []
row = coordinates[0]
column = coordinates[1]
# Get UP neighbor value
if row >= 1:
neighbors.append(self._grid[row - radius, column])
# Get LEFT neighbor value
if column >= 1:
neighbors.append(self._grid[row, column - radius])
# Get RIGHT neighbor value
if column < len(self._grid[0]) - radius:
neighbors.append(self._grid[row, column + radius])
# Get DOWN neighbor value
if row < len(self._grid) - radius:
neighbors.append(self._grid[row + radius, column])
return neighbors
def __str__(self) -> str:
output = ""
for row in self._grid:
for elem in row:
output = output + f"{elem:>3}"
output = output + "\n"
return output
def calculate_risk(heights):
# Risk is 1 plus the height
return sum([height + 1 for height in heights])
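# Illustrative example (added for clarity; the 3x3 grid is a made-up toy input): a point is
# a low point when it is strictly lower than every orthogonal neighbour, and each low point
# contributes height + 1 to the total risk.
def _demo_height_map():
    area = HeightMap()
    for row in [[2, 1, 2], [1, 0, 1], [2, 1, 2]]:
        area.add_row(row)
    lows = area.find_low_points()
    assert lows == [0]            # only the central 0 is lower than all its neighbours
    assert calculate_risk(lows) == 1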
def main():
filename = input("What is the input file name? ")
try:
with open(filename, "r") as file:
# Create a new board
area = HeightMap()
# Read the rows and setup the HeightMap
for line in file:
line = line.strip()
input_row = [int(x) for x in str(line)]
area.add_row(input_row)
print("The input grid: ")
print(area)
low_points = area.find_low_points()
sum_risk_levels = calculate_risk(
low_points) if low_points else None
if sum_risk_levels:
low_points_str = [str(point) for point in low_points]
print(f"Number of low points: {len(low_points)}")
print(f"Low points: {", ".join(low_points_str)}")
print(
f"\nThe sum of the risk levels of all low points is: {sum_risk_levels}\n")
else:
print("The sum of the risk levels of all low points not found.\n")
except FileNotFoundError:
print(f"No such file or directory: '{filename}'")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
'''
Advent of Code 2021 - Day 9: Smoke Basin (Part 1)
https://adventofcode.com/2021/day/9
'''
import numpy as np
class HeightMap():
def __init__(self) -> None:
self._grid = np.array([])
def add_row(self, row):
np_row = np.array(row)
if self._grid.size != 0:
self._grid = np.vstack([self._grid, np_row])
else:
self._grid = np_row
def find_low_points(self, radius=1):
low_points = []
for index, point in np.ndenumerate(self._grid):
neighbor_points = self._neighbors(radius, coordinates=index)
if point < min(neighbor_points):
low_points.append(point)
return low_points
def _neighbors(self, radius, coordinates=(0, 0)):
neighbors = []
row = coordinates[0]
column = coordinates[1]
# Get UP neighbor value
if row >= 1:
neighbors.append(self._grid[row - radius, column])
# Get LEFT neighbor value
if column >= 1:
neighbors.append(self._grid[row, column - radius])
# Get RIGHT neighbor value
if column < len(self._grid[0]) - radius:
neighbors.append(self._grid[row, column + radius])
# Get DOWN neighbor value
if row < len(self._grid) - radius:
neighbors.append(self._grid[row + radius, column])
return neighbors
def __str__(self) -> str:
output = ""
for row in self._grid:
for elem in row:
output = output + f"{elem:>3}"
output = output + "\n"
return output
def calculate_risk(heights):
# Risk is 1 plus the height
return sum([height + 1 for height in heights])
def main():
filename = input("What is the input file name? ")
try:
with open(filename, "r") as file:
# Create a new board
area = HeightMap()
# Read the rows and setup the HeightMap
for line in file:
line = line.strip()
input_row = [int(x) for x in str(line)]
area.add_row(input_row)
print("The input grid: ")
print(area)
low_points = area.find_low_points()
sum_risk_levels = calculate_risk(
low_points) if low_points else None
if sum_risk_levels:
low_points_str = [str(point) for point in low_points]
print(f"Number of low points: {len(low_points)}")
print(f"Low points: {', '.join(low_points_str)}")
print(
f"\nThe sum of the risk levels of all low points is: {sum_risk_levels}\n")
else:
print("The sum of the risk levels of all low points not found.\n")
except FileNotFoundError:
print(f"No such file or directory: '{filename}'")
if __name__ == "__main__":
main()
|
import argparse
from ast import literal_eval
from astropy.io import fits
from astropy.visualization import (
AsymmetricPercentileInterval,
LinearStretch,
LogStretch,
ImageNormalize,
)
import base64
from bson.json_util import loads
import confluent_kafka
from copy import deepcopy
import dask.distributed
import datetime
import fastavro
import gzip
import io
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import os
import pandas as pd
import pathlib
import requests
from requests.packages.urllib3.util.retry import Retry
import subprocess
import sys
import tensorflow as tf
from tensorflow.keras.models import load_model
import threading
import time
import traceback
from typing import Mapping, Optional, Sequence
from utils import (
deg2dms,
deg2hms,
great_circle_distance,
in_ellipse,
init_db_sync,
load_config,
log,
memoize,
Mongo,
radec2lb,
time_stamp,
timer,
TimeoutHTTPAdapter,
ZTFAlert,
)
tf.config.optimizer.set_jit(True)
""" load config and secrets """
config = load_config(config_file="config.yaml")["kowalski"]
def read_schema_data(bytes_io):
"""
Read data that already has an Avro schema.
:param bytes_io: `_io.BytesIO` Data to be decoded.
:return: `dict` Decoded data.
"""
bytes_io.seek(0)
message = fastavro.reader(bytes_io)
return message
class EopError(Exception):
"""
Exception raised when reaching end of a Kafka topic partition.
"""
def __init__(self, msg):
"""
:param msg: The Kafka message result from consumer.poll()
"""
message = (
f"{time_stamp()}: topic:{msg.topic()}, partition:{msg.partition()}, "
f"status:end, offset:{msg.offset()}, key:{str(msg.key())}\n"
)
self.message = message
def __str__(self):
return self.message
def make_photometry(alert: dict, jd_start: float = None):
"""
Make a de-duplicated pandas.DataFrame with photometry of alert['objectId']
:param alert: ZTF alert packet/dict
    :param jd_start: if provided, only photometry acquired after this Julian Date is returned
    :return: pandas.DataFrame with the de-duplicated, flux-calibrated light curve
"""
alert = deepcopy(alert)
df_candidate = pd.DataFrame(alert["candidate"], index=[0])
df_prv_candidates = pd.DataFrame(alert["prv_candidates"])
df_light_curve = pd.concat(
[df_candidate, df_prv_candidates], ignore_index=True, sort=False
)
ztf_filters = {1: "ztfg", 2: "ztfr", 3: "ztfi"}
df_light_curve["ztf_filter"] = df_light_curve["fid"].apply(lambda x: ztf_filters[x])
df_light_curve["magsys"] = "ab"
df_light_curve["mjd"] = df_light_curve["jd"] - 2400000.5
df_light_curve["mjd"] = df_light_curve["mjd"].apply(lambda x: np.float64(x))
df_light_curve["magpsf"] = df_light_curve["magpsf"].apply(lambda x: np.float32(x))
df_light_curve["sigmapsf"] = df_light_curve["sigmapsf"].apply(
lambda x: np.float32(x)
)
df_light_curve = (
df_light_curve.drop_duplicates(subset=["mjd", "magpsf"])
.reset_index(drop=True)
.sort_values(by=["mjd"])
)
# filter out bad data:
mask_good_diffmaglim = df_light_curve["diffmaglim"] > 0
df_light_curve = df_light_curve.loc[mask_good_diffmaglim]
# convert from mag to flux
# step 1: calculate the coefficient that determines whether the
# flux should be negative or positive
coeff = df_light_curve["isdiffpos"].apply(
lambda x: 1.0 if x in [True, 1, "y", "Y", "t", "1"] else -1.0
)
# step 2: calculate the flux normalized to an arbitrary AB zeropoint of
# 23.9 (results in flux in uJy)
df_light_curve["flux"] = coeff * 10 ** (-0.4 * (df_light_curve["magpsf"] - 23.9))
# step 3: separate detections from non detections
detected = np.isfinite(df_light_curve["magpsf"])
undetected = ~detected
# step 4: calculate the flux error
df_light_curve["fluxerr"] = None # initialize the column
# step 4a: calculate fluxerr for detections using sigmapsf
df_light_curve.loc[detected, "fluxerr"] = np.abs(
df_light_curve.loc[detected, "sigmapsf"]
* df_light_curve.loc[detected, "flux"]
* np.log(10)
/ 2.5
)
# step 4b: calculate fluxerr for non detections using diffmaglim
df_light_curve.loc[undetected, "fluxerr"] = (
10 ** (-0.4 * (df_light_curve.loc[undetected, "diffmaglim"] - 23.9)) / 5.0
) # as diffmaglim is the 5-sigma depth
# step 5: set the zeropoint and magnitude system
df_light_curve["zp"] = 23.9
df_light_curve["zpsys"] = "ab"
# only "new" photometry requested?
if jd_start is not None:
w_after_jd = df_light_curve["jd"] > jd_start
df_light_curve = df_light_curve.loc[w_after_jd]
return df_light_curve
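# Worked example (added for clarity; the numbers are made up, not taken from a real alert):
# with the arbitrary AB zero point of 23.9 used above, magnitudes map to fluxes in uJy.
def _demo_mag_to_flux():
    magpsf, sigmapsf, diffmaglim = 18.9, 0.1, 20.4
    flux = 10 ** (-0.4 * (magpsf - 23.9))                     # -> 100 uJy
    fluxerr_det = abs(sigmapsf * flux * np.log(10) / 2.5)     # -> ~9.2 uJy
    fluxerr_nondet = 10 ** (-0.4 * (diffmaglim - 23.9)) / 5   # -> ~5.0 uJy (5-sigma depth)
    return flux, fluxerr_det, fluxerr_nondet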
def make_thumbnail(alert, ttype: str, ztftype: str):
"""
Convert lossless FITS cutouts from ZTF alerts into PNGs
:param alert: ZTF alert packet/dict
:param ttype: <new|ref|sub>
:param ztftype: <Science|Template|Difference>
:return:
"""
alert = deepcopy(alert)
cutout_data = alert[f"cutout{ztftype}"]["stampData"]
with gzip.open(io.BytesIO(cutout_data), "rb") as f:
with fits.open(io.BytesIO(f.read())) as hdu:
# header = hdu[0].header
data_flipped_y = np.flipud(hdu[0].data)
# fixme: png, switch to fits eventually
buff = io.BytesIO()
plt.close("all")
fig = plt.figure()
fig.set_size_inches(4, 4, forward=False)
ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
ax.set_axis_off()
fig.add_axes(ax)
    # clean up the cutout: mask unphysically large values and replace NaNs with the mean of the finite pixels
img = np.array(data_flipped_y)
# replace dubiously large values
xl = np.greater(np.abs(img), 1e20, where=~np.isnan(img))
if img[xl].any():
img[xl] = np.nan
if np.isnan(img).any():
median = float(np.nanmean(img.flatten()))
img = np.nan_to_num(img, nan=median)
norm = ImageNormalize(
img, stretch=LinearStretch() if ztftype == "Difference" else LogStretch()
)
img_norm = norm(img)
normalizer = AsymmetricPercentileInterval(lower_percentile=1, upper_percentile=100)
vmin, vmax = normalizer.get_limits(img_norm)
ax.imshow(img_norm, cmap="bone", origin="lower", vmin=vmin, vmax=vmax)
plt.savefig(buff, dpi=42)
buff.seek(0)
plt.close("all")
thumb = {
"obj_id": alert["objectId"],
"data": base64.b64encode(buff.read()).decode("utf-8"),
"ttype": ttype,
}
return thumb
""" Alert filters """
def make_triplet(alert, to_tpu: bool = False):
"""
Make an L2-normalized cutout triplet out of a ZTF alert
    :param alert: ZTF alert packet/dict with gzipped FITS cutouts
    :param to_tpu: if True, quantize and flatten the triplet for Edge TPU inference
    :return: numpy array of shape (63, 63, 3), or a flattened uint8 vector when to_tpu is True
"""
cutout_dict = dict()
for cutout in ("science", "template", "difference"):
cutout_data = alert[f"cutout{cutout.capitalize()}"]["stampData"]
# unzip
with gzip.open(io.BytesIO(cutout_data), "rb") as f:
with fits.open(io.BytesIO(f.read())) as hdu:
data = hdu[0].data
# replace nans with zeros
cutout_dict[cutout] = np.nan_to_num(data)
# L2-normalize
cutout_dict[cutout] /= np.linalg.norm(cutout_dict[cutout])
# pad to 63x63 if smaller
shape = cutout_dict[cutout].shape
if shape != (63, 63):
# print(f'Shape of {candid}/{cutout}: {shape}, padding to (63, 63)')
cutout_dict[cutout] = np.pad(
cutout_dict[cutout],
[(0, 63 - shape[0]), (0, 63 - shape[1])],
mode="constant",
constant_values=1e-9,
)
triplet = np.zeros((63, 63, 3))
triplet[:, :, 0] = cutout_dict["science"]
triplet[:, :, 1] = cutout_dict["template"]
triplet[:, :, 2] = cutout_dict["difference"]
if to_tpu:
# Edge TPUs require additional processing
triplet = np.rint(triplet * 128 + 128).astype(np.uint8).flatten()
return triplet
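# Minimal self-check (added for clarity; the stamp below is purely synthetic, not real ZTF data):
# make_triplet L2-normalizes each cutout and pads anything smaller than 63x63.
def _demo_make_triplet_padding(shape=(60, 61)):
    raw = io.BytesIO()
    fits.PrimaryHDU(data=np.random.rand(*shape)).writeto(raw)  # fake FITS stamp
    stamp = gzip.compress(raw.getvalue())                      # alerts carry gzipped cutouts
    fake_alert = {
        f"cutout{kind}": {"stampData": stamp}
        for kind in ("Science", "Template", "Difference")
    }
    assert make_triplet(fake_alert).shape == (63, 63, 3)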
def alert_filter__ml(alert, ml_models: dict = None) -> dict:
"""Execute ML models on a ZTF alert
    :param alert: ZTF alert packet/dict
    :param ml_models: mapping of model name to {"model": ..., "version": ...}
    :return: dict of classifier scores keyed by model name
"""
scores = dict()
if ml_models is not None and len(ml_models) > 0:
try:
with timer("ZTFAlert(alert)"):
ztf_alert = ZTFAlert(alert)
with timer("Prepping features"):
features = np.expand_dims(ztf_alert.data["features"], axis=[0, -1])
triplet = np.expand_dims(ztf_alert.data["triplet"], axis=[0])
# braai
if "braai" in ml_models.keys():
with timer("braai"):
braai = ml_models["braai"]["model"].predict(x=triplet)[0]
scores["braai"] = float(braai)
scores["braai_version"] = ml_models["braai"]["version"]
# acai
for model_name in ("acai_h", "acai_v", "acai_o", "acai_n", "acai_b"):
if model_name in ml_models.keys():
with timer(model_name):
score = ml_models[model_name]["model"].predict(
[features, triplet]
)[0]
scores[model_name] = float(score)
scores[f"{model_name}_version"] = ml_models[model_name][
"version"
]
except Exception as e:
log(str(e))
return scores
# cone search radius:
cone_search_radius = float(config["database"]["xmatch"]["cone_search_radius"])
# convert to rad:
if config["database"]["xmatch"]["cone_search_unit"] == "arcsec":
cone_search_radius *= np.pi / 180.0 / 3600.0
elif config["database"]["xmatch"]["cone_search_unit"] == "arcmin":
cone_search_radius *= np.pi / 180.0 / 60.0
elif config["database"]["xmatch"]["cone_search_unit"] == "deg":
cone_search_radius *= np.pi / 180.0
elif config["database"]["xmatch"]["cone_search_unit"] == "rad":
cone_search_radius *= 1
else:
raise Exception("Unknown cone search unit. Must be in [deg, rad, arcsec, arcmin]")
def alert_filter__xmatch(database, alert) -> dict:
"""
Cross-match alerts
"""
xmatches = dict()
try:
ra_geojson = float(alert["candidate"]["ra"])
# geojson-friendly ra:
ra_geojson -= 180.0
dec_geojson = float(alert["candidate"]["dec"])
""" catalogs """
for catalog in config["database"]["xmatch"]["catalogs"]:
catalog_filter = config["database"]["xmatch"]["catalogs"][catalog]["filter"]
catalog_projection = config["database"]["xmatch"]["catalogs"][catalog][
"projection"
]
object_position_query = dict()
object_position_query["coordinates.radec_geojson"] = {
"$geoWithin": {
"$centerSphere": [[ra_geojson, dec_geojson], cone_search_radius]
}
}
s = database[catalog].find(
{**object_position_query, **catalog_filter}, {**catalog_projection}
)
xmatches[catalog] = list(s)
except Exception as e:
log(str(e))
return xmatches
# cone search radius in deg:
cone_search_radius_clu = 3.0
# convert deg to rad:
cone_search_radius_clu *= np.pi / 180.0
def alert_filter__xmatch_clu(
database, alert, size_margin=3, clu_version="CLU_20190625"
) -> dict:
"""
Run cross-match with the CLU catalog
:param database:
:param alert:
:param size_margin: multiply galaxy size by this much before looking for a match
:param clu_version: CLU catalog version
:return:
"""
xmatches = dict()
try:
ra = float(alert["candidate"]["ra"])
dec = float(alert["candidate"]["dec"])
# geojson-friendly ra:
ra_geojson = float(alert["candidate"]["ra"]) - 180.0
dec_geojson = dec
catalog_filter = {}
catalog_projection = {
"_id": 1,
"name": 1,
"ra": 1,
"dec": 1,
"a": 1,
"b2a": 1,
"pa": 1,
"z": 1,
"sfr_fuv": 1,
"mstar": 1,
"sfr_ha": 1,
"coordinates.radec_str": 1,
}
# first do a coarse search of everything that is around
object_position_query = dict()
object_position_query["coordinates.radec_geojson"] = {
"$geoWithin": {
"$centerSphere": [[ra_geojson, dec_geojson], cone_search_radius_clu]
}
}
galaxies = list(
database[clu_version].find(
{**object_position_query, **catalog_filter}, {**catalog_projection}
)
)
# these guys are very big, so check them separately
M31 = {
"_id": 596900,
"name": "PGC2557",
"ra": 10.6847,
"dec": 41.26901,
"a": 6.35156,
"b2a": 0.32,
"pa": 35.0,
"z": -0.00100100006,
"sfr_fuv": None,
"mstar": 253816876.412914,
"sfr_ha": 0,
"coordinates": {"radec_str": ["00:42:44.3503", "41:16:08.634"]},
}
M33 = {
"_id": 597543,
"name": "PGC5818",
"ra": 23.46204,
"dec": 30.66022,
"a": 2.35983,
"b2a": 0.59,
"pa": 23.0,
"z": -0.000597000006,
"sfr_fuv": None,
"mstar": 4502777.420493,
"sfr_ha": 0,
"coordinates": {"radec_str": ["01:33:50.8900", "30:39:36.800"]},
}
# do elliptical matches
matches = []
for galaxy in galaxies + [M31, M33]:
alpha1, delta01 = galaxy["ra"], galaxy["dec"]
redshift = galaxy["z"]
# By default, set the cross-match radius to 50 kpc at the redshift of the host galaxy
cm_radius = 50.0 * (0.05 / redshift) / 3600
if redshift < 0.01:
# for nearby galaxies and galaxies with negative redshifts, do a 5 arc-minute cross-match
# (cross-match radius would otherwise get un-physically large for nearby galaxies)
cm_radius = 300.0 / 3600
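            # worked example of the scaling above: at z = 0.05 the default formula gives
            # 50 / 3600 deg = 50 arcsec, i.e. roughly 1 kpc per arcsec at that redshift;
            # the radius grows as 1/z until the fixed 5-arcmin value takes over below z = 0.01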
in_galaxy = in_ellipse(ra, dec, alpha1, delta01, cm_radius, 1, 0)
if in_galaxy:
match = galaxy
distance_arcsec = round(
great_circle_distance(ra, dec, alpha1, delta01) * 3600, 2
)
# also add a physical distance parameter for redshifts in the Hubble flow
if redshift > 0.005:
distance_kpc = round(
great_circle_distance(ra, dec, alpha1, delta01)
* 3600
* (redshift / 0.05),
2,
)
else:
distance_kpc = -1
match["coordinates"]["distance_arcsec"] = distance_arcsec
match["coordinates"]["distance_kpc"] = distance_kpc
matches.append(match)
xmatches[clu_version] = matches
except Exception as e:
log(str(e))
return xmatches
def alert_filter__user_defined(
database,
filter_templates,
alert,
catalog: str = "ZTF_alerts",
max_time_ms: int = 500,
) -> list:
"""
Evaluate user-defined filters
:param database:
:param filter_templates:
:param alert:
:param catalog:
:param max_time_ms:
:return:
"""
passed_filters = []
for filter_template in filter_templates:
try:
_filter = deepcopy(filter_template)
# match candid
_filter["pipeline"][0]["$match"]["candid"] = alert["candid"]
filtered_data = list(
database[catalog].aggregate(
_filter["pipeline"], allowDiskUse=False, maxTimeMS=max_time_ms
)
)
# passed filter? then len(passed_filter) must be = 1
if len(filtered_data) == 1:
log(
                    f'{alert["objectId"]} {alert["candid"]} passed filter {_filter["fid"]}'
)
passed_filters.append(
{
"group_id": _filter["group_id"],
"filter_id": _filter["filter_id"],
"group_name": _filter["group_name"],
"filter_name": _filter["filter_name"],
"fid": _filter["fid"],
"permissions": _filter["permissions"],
"autosave": _filter["autosave"],
"update_annotations": _filter["update_annotations"],
"data": filtered_data[0],
}
)
except Exception as e:
log(
                f'Filter {filter_template["fid"]} execution failed on alert {alert["candid"]}: {e}'
)
continue
return passed_filters
def process_alert(record, topic):
"""Alert brokering task run by dask.distributed workers
:param record: decoded alert from IPAC's Kafka stream
:param topic: Kafka stream topic name for bookkeeping
:return:
"""
candid = record["candid"]
objectId = record["objectId"]
# get worker running current task
worker = dask.distributed.get_worker()
alert_worker = worker.plugins["worker-init"].alert_worker
log(f"{topic} {objectId} {candid} {worker.address}")
# return if this alert packet has already been processed and ingested into collection_alerts:
if (
alert_worker.mongo.db[alert_worker.collection_alerts].count_documents(
{"candid": candid}, limit=1
)
== 1
):
return
# candid not in db, ingest decoded avro packet into db
# todo: ?? restructure alerts even further?
# move cutouts to ZTF_alerts_cutouts? reduce the main db size for performance
# group by objectId similar to prv_candidates?? maybe this is too much
with timer(f"Mongification of {objectId} {candid}", alert_worker.verbose > 1):
alert, prv_candidates = alert_worker.alert_mongify(record)
# ML models:
with timer(f"MLing of {objectId} {candid}", alert_worker.verbose > 1):
scores = alert_filter__ml(record, ml_models=alert_worker.ml_models)
alert["classifications"] = scores
with timer(f"Ingesting {objectId} {candid}", alert_worker.verbose > 1):
alert_worker.mongo.insert_one(
collection=alert_worker.collection_alerts, document=alert
)
# prv_candidates: pop nulls - save space
prv_candidates = [
{kk: vv for kk, vv in prv_candidate.items() if vv is not None}
for prv_candidate in prv_candidates
]
# cross-match with external catalogs if objectId not in collection_alerts_aux:
if (
alert_worker.mongo.db[alert_worker.collection_alerts_aux].count_documents(
{"_id": objectId}, limit=1
)
== 0
):
with timer(f"Cross-match of {objectId} {candid}", alert_worker.verbose > 1):
xmatches = alert_filter__xmatch(alert_worker.mongo.db, alert)
# CLU cross-match:
with timer(f"CLU cross-match {objectId} {candid}", alert_worker.verbose > 1):
xmatches = {
**xmatches,
**alert_filter__xmatch_clu(alert_worker.mongo.db, alert),
}
alert_aux = {
"_id": objectId,
"cross_matches": xmatches,
"prv_candidates": prv_candidates,
}
with timer(f"Aux ingesting {objectId} {candid}", alert_worker.verbose > 1):
alert_worker.mongo.insert_one(
collection=alert_worker.collection_alerts_aux, document=alert_aux
)
else:
with timer(f"Aux updating of {objectId} {candid}", alert_worker.verbose > 1):
alert_worker.mongo.db[alert_worker.collection_alerts_aux].update_one(
{"_id": objectId},
{"$addToSet": {"prv_candidates": {"$each": prv_candidates}}},
upsert=True,
)
if config["misc"]["broker"]:
# execute user-defined alert filters
with timer(f"Filtering of {objectId} {candid}", alert_worker.verbose > 1):
passed_filters = alert_filter__user_defined(
alert_worker.mongo.db, alert_worker.filter_templates, alert
)
if alert_worker.verbose > 1:
log(f"{objectId} {candid} number of filters passed: {len(passed_filters)}")
# post to SkyPortal
alert_worker.alert_sentinel_skyportal(alert, prv_candidates, passed_filters)
# clean up after thyself
del record, alert, prv_candidates
class AlertConsumer:
"""
Creates an alert stream Kafka consumer for a given topic.
"""
def __init__(self, topic, dask_client, **kwargs):
self.verbose = kwargs.get("verbose", 2)
self.dask_client = dask_client
# keep track of disconnected partitions
self.num_disconnected_partitions = 0
self.topic = topic
def error_cb(err, _self=self):
log(f"error_cb --------> {err}")
# print(err.code())
if err.code() == -195:
_self.num_disconnected_partitions += 1
if _self.num_disconnected_partitions == _self.num_partitions:
log("All partitions got disconnected, killing thread")
sys.exit()
else:
log(
f"{_self.topic}: disconnected from partition. total: {_self.num_disconnected_partitions}"
)
# 'error_cb': error_cb
kwargs["error_cb"] = error_cb
self.consumer = confluent_kafka.Consumer(**kwargs)
self.num_partitions = 0
def on_assign(consumer, partitions, _self=self):
# force-reset offsets when subscribing to a topic:
for part in partitions:
# -2 stands for beginning and -1 for end
part.offset = -2
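                # (these sentinel values correspond to confluent_kafka.OFFSET_BEGINNING
                # and confluent_kafka.OFFSET_END, respectively)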
# keep number of partitions.
# when reaching end of last partition, kill thread and start from beginning
_self.num_partitions += 1
log(consumer.get_watermark_offsets(part))
self.consumer.subscribe([topic], on_assign=on_assign)
# set up own mongo client
self.collection_alerts = config["database"]["collections"]["alerts_ztf"]
self.mongo = Mongo(
host=config["database"]["host"],
port=config["database"]["port"],
replica_set=config["database"]["replica_set"],
username=config["database"]["username"],
password=config["database"]["password"],
db=config["database"]["db"],
verbose=self.verbose,
)
# create indexes
if config["database"]["build_indexes"]:
for index in config["database"]["indexes"][self.collection_alerts]:
try:
ind = [tuple(ii) for ii in index["fields"]]
self.mongo.db[self.collection_alerts].create_index(
keys=ind,
name=index["name"],
background=True,
unique=index["unique"],
)
except Exception as e:
log(e)
@staticmethod
def decode_message(msg):
"""
Decode Avro message according to a schema.
:param msg: The Kafka message result from consumer.poll()
:return:
"""
message = msg.value()
decoded_msg = message
try:
bytes_io = io.BytesIO(message)
decoded_msg = read_schema_data(bytes_io)
except AssertionError:
decoded_msg = None
except IndexError:
literal_msg = literal_eval(
str(message, encoding="utf-8")
) # works to give bytes
bytes_io = io.BytesIO(literal_msg) # works to give <class '_io.BytesIO'>
decoded_msg = read_schema_data(bytes_io) # yields reader
except Exception:
decoded_msg = message
finally:
return decoded_msg
def poll(self):
"""
Polls Kafka broker to consume a topic.
:return:
"""
msg = self.consumer.poll()
if msg is None:
log("Caught error: msg is None")
if msg.error():
log(f"Caught error: {msg.error()}")
raise EopError(msg)
elif msg is not None:
try:
# decode avro packet
with timer("Decoding alert", self.verbose > 1):
msg_decoded = self.decode_message(msg)
for record in msg_decoded:
# submit only unprocessed alerts:
if (
self.mongo.db[self.collection_alerts].count_documents(
{"candid": record["candid"]}, limit=1
)
== 0
):
with timer(
f"Submitting alert {record["objectId"]} {record["candid"]} for processing",
self.verbose > 1,
):
future = self.dask_client.submit(
process_alert, record, self.topic, pure=True
)
dask.distributed.fire_and_forget(future)
future.release()
del future
except Exception as e:
log(e)
_err = traceback.format_exc()
log(_err)
class AlertWorker:
"""Tools to handle alert processing: database ingestion, filtering, ml'ing, cross-matches, reporting to SP"""
def __init__(self, **kwargs):
self.verbose = kwargs.get("verbose", 2)
self.config = config
# Kowalski version
path_version_file = pathlib.Path(__file__).parent.absolute() / "version.txt"
version = f"v{self.config["server"]["version"]}"
if path_version_file.exists():
with open(
pathlib.Path(__file__).parent.absolute() / "version.txt", "r"
) as version_file:
version = version_file.read().strip()
# MongoDB collections to store the alerts:
self.collection_alerts = self.config["database"]["collections"]["alerts_ztf"]
self.collection_alerts_aux = self.config["database"]["collections"][
"alerts_ztf_aux"
]
self.collection_alerts_filter = self.config["database"]["collections"][
"alerts_ztf_filter"
]
self.mongo = Mongo(
host=config["database"]["host"],
port=config["database"]["port"],
replica_set=config["database"]["replica_set"],
username=config["database"]["username"],
password=config["database"]["password"],
db=config["database"]["db"],
verbose=self.verbose,
)
# ML models
self.ml_models = dict()
for model in config["ml_models"]:
try:
model_version = config["ml_models"][model]["version"]
# todo: allow other formats such as SavedModel
model_filepath = os.path.join(
config["path"]["ml_models"], f"{model}.{model_version}.h5"
)
self.ml_models[model] = {
"model": load_model(model_filepath),
"version": model_version,
}
except Exception as e:
log(f"Error loading ML model {model}: {str(e)}")
_err = traceback.format_exc()
log(_err)
continue
# talking to SkyPortal?
if not config["misc"]["broker"]:
return
# session to talk to SkyPortal
self.session = requests.Session()
self.session_headers = {
"Authorization": f"token {config["skyportal"]["token"]}",
"User-Agent": f"Kowalski {version}",
}
retries = Retry(
total=5,
backoff_factor=2,
status_forcelist=[405, 429, 500, 502, 503, 504],
method_whitelist=["HEAD", "GET", "PUT", "POST", "PATCH"],
)
adapter = TimeoutHTTPAdapter(timeout=5, max_retries=retries)
self.session.mount("https://", adapter)
self.session.mount("http://", adapter)
# get ZTF instrument id
self.instrument_id = 1
with timer("Getting ZTF instrument_id from SkyPortal", self.verbose > 1):
response = self.api_skyportal("GET", "/api/instrument", {"name": "ZTF"})
if response.json()["status"] == "success" and len(response.json()["data"]) > 0:
self.instrument_id = response.json()["data"][0]["id"]
log(f"Got ZTF instrument_id from SkyPortal: {self.instrument_id}")
else:
log("Failed to get ZTF instrument_id from SkyPortal")
raise ValueError("Failed to get ZTF instrument_id from SkyPortal")
# get ZTF alert stream ids to program ids mapping
self.ztf_program_id_to_stream_id = dict()
with timer("Getting ZTF alert stream ids from SkyPortal", self.verbose > 1):
response = self.api_skyportal("GET", "/api/streams")
if response.json()["status"] == "success" and len(response.json()["data"]) > 0:
for stream in response.json()["data"]:
if stream.get("name") == "ZTF Public":
self.ztf_program_id_to_stream_id[1] = stream["id"]
if stream.get("name") == "ZTF Public+Partnership":
self.ztf_program_id_to_stream_id[2] = stream["id"]
if stream.get("name") == "ZTF Public+Partnership+Caltech":
# programid=0 is engineering data
self.ztf_program_id_to_stream_id[0] = stream["id"]
self.ztf_program_id_to_stream_id[3] = stream["id"]
if len(self.ztf_program_id_to_stream_id) != 4:
log("Failed to map ZTF alert stream ids from SkyPortal to program ids")
raise ValueError(
"Failed to map ZTF alert stream ids from SkyPortal to program ids"
)
log(
f"Got ZTF program id to SP stream id mapping: {self.ztf_program_id_to_stream_id}"
)
else:
log("Failed to get ZTF alert stream ids from SkyPortal")
raise ValueError("Failed to get ZTF alert stream ids from SkyPortal")
# filter pipeline upstream: select current alert, ditch cutouts, and merge with aux data
# including archival photometry and cross-matches:
self.filter_pipeline_upstream = config["database"]["filters"][
self.collection_alerts
]
log("Upstream filtering pipeline:")
log(self.filter_pipeline_upstream)
# load *active* user-defined alert filter templates and pre-populate them
active_filters = self.get_active_filters()
self.filter_templates = self.make_filter_templates(active_filters)
# set up watchdog for periodic refresh of the filter templates, in case those change
self.filter_monitor = threading.Thread(target=self.reload_filters)
self.filter_monitor.start()
log("Loaded user-defined filters:")
log(self.filter_templates)
def api_skyportal(self, method: str, endpoint: str, data: Optional[Mapping] = None):
"""Make an API call to a SkyPortal instance
:param method:
:param endpoint:
:param data:
:return:
"""
method = method.lower()
methods = {
"head": self.session.head,
"get": self.session.get,
"post": self.session.post,
"put": self.session.put,
"patch": self.session.patch,
"delete": self.session.delete,
}
if endpoint is None:
raise ValueError("Endpoint not specified")
if method not in ["head", "get", "post", "put", "patch", "delete"]:
raise ValueError(f"Unsupported method: {method}")
if method == "get":
response = methods[method](
f"{config["skyportal"]["protocol"]}://"
f"{config["skyportal"]["host"]}:{config["skyportal"]["port"]}"
f"{endpoint}",
params=data,
headers=self.session_headers,
)
else:
response = methods[method](
f"{config["skyportal"]["protocol"]}://"
f"{config["skyportal"]["host"]}:{config["skyportal"]["port"]}"
f"{endpoint}",
json=data,
headers=self.session_headers,
)
return response
@memoize
def api_skyportal_get_group(self, group_id):
return self.api_skyportal(
"GET", f"/api/groups/{group_id}?includeGroupUsers=False"
)
def get_active_filters(self):
"""
Fetch user-defined filters from own db marked as active
:return:
"""
# todo: query SP to make sure the filters still exist there and we're not out of sync;
# clean up if necessary
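        # the aggregation below yields one document per active filter, projecting only the
        # fields used downstream and selecting the filter version ("fv") whose fid matches
        # the filter's active_fid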
return list(
self.mongo.db[config["database"]["collections"]["filters"]].aggregate(
[
{
"$match": {
"catalog": config["database"]["collections"]["alerts_ztf"],
"active": True,
}
},
{
"$project": {
"group_id": 1,
"filter_id": 1,
"permissions": 1,
"autosave": 1,
"update_annotations": 1,
"fv": {
"$arrayElemAt": [
{
"$filter": {
"input": "$fv",
"as": "fvv",
"cond": {
"$eq": ["$$fvv.fid", "$active_fid"]
},
}
},
0,
]
},
}
},
]
)
)
def make_filter_templates(self, active_filters: Sequence):
"""
Make filter templates by adding metadata, prepending upstream aggregation stages and setting permissions
:param active_filters:
:return:
"""
filter_templates = []
for active_filter in active_filters:
try:
# collect additional info from SkyPortal
with timer(
f"Getting info on group id={active_filter["group_id"]} from SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal_get_group(active_filter["group_id"])
if self.verbose > 1:
log(response.json())
if response.json()["status"] == "success":
group_name = (
response.json()["data"]["nickname"]
if response.json()["data"]["nickname"] is not None
else response.json()["data"]["name"]
)
filter_name = [
filtr["name"]
for filtr in response.json()["data"]["filters"]
if filtr["id"] == active_filter["filter_id"]
][0]
else:
log(
f"Failed to get info on group id={active_filter["group_id"]} from SkyPortal"
)
group_name, filter_name = None, None
# raise ValueError(f"Failed to get info on group id={active_filter["group_id"]} from SkyPortal")
log(f"Group name: {group_name}, filter name: {filter_name}")
# prepend upstream aggregation stages:
pipeline = deepcopy(self.filter_pipeline_upstream) + loads(
active_filter["fv"]["pipeline"]
)
# set permissions
pipeline[0]["$match"]["candidate.programid"]["$in"] = active_filter[
"permissions"
]
pipeline[3]["$project"]["prv_candidates"]["$filter"]["cond"]["$and"][0][
"$in"
][1] = active_filter["permissions"]
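                # note: the hard-coded stage indices above assume the shape of
                # filter_pipeline_upstream (stage 0: a $match on candid/programid,
                # stage 3: a $project carrying the prv_candidates $filter); if that
                # upstream pipeline changes, these indices must be kept in sync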
filter_template = {
"group_id": active_filter["group_id"],
"filter_id": active_filter["filter_id"],
"group_name": group_name,
"filter_name": filter_name,
"fid": active_filter["fv"]["fid"],
"permissions": active_filter["permissions"],
"autosave": active_filter["autosave"],
"update_annotations": active_filter["update_annotations"],
"pipeline": deepcopy(pipeline),
}
filter_templates.append(filter_template)
except Exception as e:
log(
"Failed to generate filter template for "
f"group_id={active_filter["group_id"]} filter_id={active_filter["filter_id"]}: {e}"
)
continue
return filter_templates
def reload_filters(self):
"""
Helper function to periodically reload filters from SkyPortal
:return:
"""
while True:
time.sleep(60 * 5)
active_filters = self.get_active_filters()
self.filter_templates = self.make_filter_templates(active_filters)
@staticmethod
def alert_mongify(alert: Mapping):
"""
Prepare a raw ZTF alert for ingestion into MongoDB:
- add a placeholder for ML-based classifications
- add coordinates for 2D spherical indexing and compute Galactic coordinates
- cut off the prv_candidates section
:param alert:
:return:
"""
doc = dict(alert)
# let mongo create a unique _id
# placeholders for classifications
doc["classifications"] = dict()
# GeoJSON for 2D indexing
doc["coordinates"] = {}
_ra = doc["candidate"]["ra"]
_dec = doc["candidate"]["dec"]
# string format: H:M:S, D:M:S
_radec_str = [deg2hms(_ra), deg2dms(_dec)]
doc["coordinates"]["radec_str"] = _radec_str
# for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. in deg)
_radec_geojson = [_ra - 180.0, _dec]
doc["coordinates"]["radec_geojson"] = {
"type": "Point",
"coordinates": _radec_geojson,
}
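        # e.g. ra = 10.6847 deg becomes a GeoJSON longitude of -169.3153 deg,
        # keeping the coordinates within the lon: [-180, 180], lat: [-90, 90] ranges noted above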
# Galactic coordinates l and b
l, b = radec2lb(doc["candidate"]["ra"], doc["candidate"]["dec"])
doc["coordinates"]["l"] = l
doc["coordinates"]["b"] = b
prv_candidates = deepcopy(doc["prv_candidates"])
doc.pop("prv_candidates", None)
if prv_candidates is None:
prv_candidates = []
return doc, prv_candidates
def alert_post_candidate(self, alert: Mapping, filter_ids: Sequence):
"""
Post a ZTF alert as a candidate for filters on SkyPortal
:param alert:
:param filter_ids:
:return:
"""
# post metadata with all filter_ids in single call to /api/candidates
alert_thin = {
"id": alert["objectId"],
"ra": alert["candidate"].get("ra"),
"dec": alert["candidate"].get("dec"),
"score": alert["candidate"].get("drb", alert["candidate"]["rb"]),
"filter_ids": filter_ids,
"passing_alert_id": alert["candid"],
"passed_at": datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f"),
"origin": "Kowalski",
}
if self.verbose > 1:
log(alert_thin)
with timer(
f"Posting metadata of {alert["objectId"]} {alert["candid"]} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal("POST", "/api/candidates", alert_thin)
if response.json()["status"] == "success":
log(f"Posted {alert["objectId"]} {alert["candid"]} metadata to SkyPortal")
else:
log(
f"Failed to post {alert["objectId"]} {alert["candid"]} metadata to SkyPortal"
)
log(response.json())
def alert_post_source(self, alert: Mapping, group_ids: Sequence):
"""
Save a ZTF alert as a source to groups on SkyPortal
:param alert:
:param group_ids:
:return:
"""
# save source
alert_thin = {
"id": alert["objectId"],
"group_ids": group_ids,
"origin": "Kowalski",
}
if self.verbose > 1:
log(alert_thin)
with timer(
f"Saving {alert["objectId"]} {alert["candid"]} as a Source on SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal("POST", "/api/sources", alert_thin)
if response.json()["status"] == "success":
log(f"Saved {alert["objectId"]} {alert["candid"]} as a Source on SkyPortal")
else:
log(
f"Failed to save {alert["objectId"]} {alert["candid"]} as a Source on SkyPortal"
)
log(response.json())
def alert_post_annotations(self, alert: Mapping, passed_filters: Sequence):
"""
Post annotations to SkyPortal for an alert that passed user-defined filters
:param alert:
:param passed_filters:
:return:
"""
for passed_filter in passed_filters:
annotations = {
"obj_id": alert["objectId"],
"origin": f"{passed_filter.get("group_name")}:{passed_filter.get("filter_name")}",
"data": passed_filter.get("data", dict()).get("annotations", dict()),
"group_ids": [passed_filter.get("group_id")],
}
if len(annotations["data"]) > 0:
with timer(
f"Posting annotation for {alert["objectId"]} {alert["candid"]} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"POST", "/api/annotation", annotations
)
if response.json()["status"] == "success":
log(f"Posted {alert["objectId"]} annotation to SkyPortal")
else:
log(f"Failed to post {alert["objectId"]} annotation to SkyPortal")
log(response.json())
def alert_put_annotations(self, alert: Mapping, passed_filters: Sequence):
"""
Update annotations on SkyPortal for an alert that passed user-defined filters
:param alert:
:param passed_filters:
:return:
"""
# first need to learn existing annotation id's and corresponding author id's to use with the PUT call
with timer(
f"Getting annotations for {alert["objectId"]} from SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"GET", f"/api/sources/{alert["objectId"]}/annotations"
)
if response.json()["status"] == "success":
log(f"Got {alert["objectId"]} annotations from SkyPortal")
else:
log(f"Failed to get {alert["objectId"]} annotations from SkyPortal")
log(response.json())
return False
existing_annotations = {
annotation["origin"]: {
"annotation_id": annotation["id"],
"author_id": annotation["author_id"],
}
for annotation in response.json()["data"]
}
for passed_filter in passed_filters:
origin = (
f"{passed_filter.get("group_name")}:{passed_filter.get("filter_name")}"
)
# no annotation exists on SkyPortal for this object? just post then
if origin not in existing_annotations:
self.alert_post_annotations(alert, [passed_filter])
continue
annotations = {
"author_id": existing_annotations[origin]["author_id"],
"obj_id": alert["objectId"],
"origin": origin,
"data": passed_filter.get("data", dict()).get("annotations", dict()),
"group_ids": [passed_filter.get("group_id")],
}
if len(annotations["data"]) > 0 and passed_filter.get(
"update_annotations", False
):
with timer(
f"Putting annotation for {alert["objectId"]} {alert["candid"]} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"PUT",
f"/api/annotation/{existing_annotations[origin]["annotation_id"]}",
annotations,
)
if response.json()["status"] == "success":
log(f"Posted {alert["objectId"]} annotation to SkyPortal")
else:
log(f"Failed to post {alert["objectId"]} annotation to SkyPortal")
log(response.json())
def alert_post_thumbnails(self, alert: Mapping):
"""
Post ZTF alert thumbnails to SkyPortal
:param alert:
:return:
"""
for ttype, ztftype in [
("new", "Science"),
("ref", "Template"),
("sub", "Difference"),
]:
with timer(
f"Making {ztftype} thumbnail for {alert["objectId"]} {alert["candid"]}",
self.verbose > 1,
):
thumb = make_thumbnail(alert, ttype, ztftype)
with timer(
f"Posting {ztftype} thumbnail for {alert["objectId"]} {alert["candid"]} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal("POST", "/api/thumbnail", thumb)
if response.json()["status"] == "success":
log(
f"Posted {alert["objectId"]} {alert["candid"]} {ztftype} cutout to SkyPortal"
)
else:
log(
f"Failed to post {alert["objectId"]} {alert["candid"]} {ztftype} cutout to SkyPortal"
)
log(response.json())
def alert_put_photometry(self, alert):
"""PUT photometry to SkyPortal
:param alert:
:return:
"""
with timer(
f"Making alert photometry of {alert["objectId"]} {alert["candid"]}",
self.verbose > 1,
):
df_photometry = make_photometry(alert)
df_photometry["stream_id"] = df_photometry["programid"].apply(
lambda programid: self.ztf_program_id_to_stream_id[programid]
)
# post photometry by stream_id
for stream_id in set(df_photometry.stream_id.unique()):
stream_id_mask = df_photometry.stream_id == int(stream_id)
photometry = {
"obj_id": alert["objectId"],
"stream_ids": [int(stream_id)],
"instrument_id": self.instrument_id,
"mjd": df_photometry.loc[stream_id_mask, "mjd"].tolist(),
"flux": df_photometry.loc[stream_id_mask, "flux"].tolist(),
"fluxerr": df_photometry.loc[stream_id_mask, "fluxerr"].tolist(),
"zp": df_photometry.loc[stream_id_mask, "zp"].tolist(),
"magsys": df_photometry.loc[stream_id_mask, "zpsys"].tolist(),
"filter": df_photometry.loc[stream_id_mask, "ztf_filter"].tolist(),
"ra": df_photometry.loc[stream_id_mask, "ra"].tolist(),
"dec": df_photometry.loc[stream_id_mask, "dec"].tolist(),
}
if (len(photometry.get("flux", ())) > 0) or (
len(photometry.get("fluxerr", ())) > 0
):
with timer(
f"Posting photometry of {alert["objectId"]} {alert["candid"]}, "
f"stream_id={stream_id} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal("PUT", "/api/photometry", photometry)
if response.json()["status"] == "success":
log(
f"Posted {alert["objectId"]} photometry stream_id={stream_id} to SkyPortal"
)
else:
log(
f"Failed to post {alert["objectId"]} photometry stream_id={stream_id} to SkyPortal"
)
log(response.json())
def alert_sentinel_skyportal(self, alert, prv_candidates, passed_filters):
"""
Post alerts to SkyPortal, if need be.
Logic:
- check if candidate/source exist on SP
- if candidate does not exist and len(passed_filters) > 0
- post metadata with all filter_ids in single call to /api/candidates
- post full light curve with all group_ids in single call to /api/photometry
- post thumbnails
- if candidate exists:
- get filter_ids of saved candidate from SP
- post to /api/candidates with new_filter_ids, if any
- post alert light curve in single PUT call to /api/photometry specifying stream_ids
- if source exists:
- get groups and check stream access
- decide which points to post to what groups based on permissions
- post alert light curve in single PUT call to /api/photometry specifying stream_ids
:param alert: ZTF_alert with a stripped-off prv_candidates section
:param prv_candidates: could be plain prv_candidates section of an alert, or extended alert history
:param passed_filters: list of filters that alert passed, with their output
:return:
"""
# check if candidate/source exist in SP:
with timer(
f"Checking if {alert["objectId"]} is Candidate in SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"HEAD", f"/api/candidates/{alert["objectId"]}"
)
is_candidate = response.status_code == 200
if self.verbose > 1:
log(
f"{alert["objectId"]} {"is" if is_candidate else "is not"} Candidate in SkyPortal"
)
with timer(
f"Checking if {alert["objectId"]} is Source in SkyPortal", self.verbose > 1
):
response = self.api_skyportal("HEAD", f"/api/sources/{alert["objectId"]}")
is_source = response.status_code == 200
if self.verbose > 1:
log(
f"{alert["objectId"]} {"is" if is_source else "is not"} Source in SkyPortal"
)
        # obj does not exist in SP:
if (not is_candidate) and (not is_source):
# passed at least one filter?
if len(passed_filters) > 0:
# post candidate
filter_ids = [f.get("filter_id") for f in passed_filters]
self.alert_post_candidate(alert, filter_ids)
# post annotations
self.alert_post_annotations(alert, passed_filters)
# post full light curve
try:
alert["prv_candidates"] = list(
self.mongo.db[self.collection_alerts_aux].find(
{"_id": alert["objectId"]}, {"prv_candidates": 1}, limit=1
)
)[0]["prv_candidates"]
except Exception as e:
# this should never happen, but just in case
log(e)
alert["prv_candidates"] = prv_candidates
self.alert_put_photometry(alert)
# post thumbnails
self.alert_post_thumbnails(alert)
# post source if autosave=True
autosave_group_ids = [
f.get("group_id")
for f in passed_filters
if f.get("autosave", False)
]
if len(autosave_group_ids) > 0:
self.alert_post_source(alert, autosave_group_ids)
# obj exists in SP:
else:
if len(passed_filters) > 0:
filter_ids = [f.get("filter_id") for f in passed_filters]
# post candidate with new filter ids
self.alert_post_candidate(alert, filter_ids)
# put annotations
self.alert_put_annotations(alert, passed_filters)
# already saved as a source?
if is_source:
# get info on the corresponding groups:
with timer(
f"Getting source groups info on {alert["objectId"]} from SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"GET", f"/api/sources/{alert["objectId"]}/groups"
)
if response.json()["status"] == "success":
existing_groups = response.json()["data"]
existing_group_ids = [g["id"] for g in existing_groups]
# post source if autosave=True and not already saved
autosave_group_ids = [
f.get("group_id")
for f in passed_filters
if f.get("autosave", False)
and (f.get("group_id") not in existing_group_ids)
]
if len(autosave_group_ids) > 0:
self.alert_post_source(alert, autosave_group_ids)
else:
log(f"Failed to get source groups info on {alert["objectId"]}")
else:
# post source if autosave=True and not is_source
autosave_group_ids = [
f.get("group_id")
for f in passed_filters
if f.get("autosave", False)
]
if len(autosave_group_ids) > 0:
self.alert_post_source(alert, autosave_group_ids)
# post alert photometry in single call to /api/photometry
alert["prv_candidates"] = prv_candidates
self.alert_put_photometry(alert)
class WorkerInitializer(dask.distributed.WorkerPlugin):
def __init__(self, *args, **kwargs):
self.alert_worker = None
def setup(self, worker: dask.distributed.Worker):
self.alert_worker = AlertWorker()
def topic_listener(
topic,
bootstrap_servers: str,
offset_reset: str = "earliest",
group: str = None,
test: bool = False,
):
"""
Listen to a topic
:param topic:
:param bootstrap_servers:
:param offset_reset:
:param group:
:param test: when testing, terminate once reached end of partition
:return:
"""
# Configure dask client
dask_client = dask.distributed.Client(
address=f"{config["dask"]["host"]}:{config["dask"]["scheduler_port"]}"
)
# init each worker with AlertWorker instance
worker_initializer = WorkerInitializer()
dask_client.register_worker_plugin(worker_initializer, name="worker-init")
# Configure consumer connection to Kafka broker
conf = {
"bootstrap.servers": bootstrap_servers,
"default.topic.config": {"auto.offset.reset": offset_reset},
}
if group is not None:
conf["group.id"] = group
else:
conf["group.id"] = os.environ.get("HOSTNAME", "kowalski")
# make it unique:
conf[
"group.id"
] = f"{conf["group.id"]}_{datetime.datetime.utcnow().strftime("%Y-%m-%d_%H:%M:%S.%f")}"
# Start alert stream consumer
stream_reader = AlertConsumer(topic, dask_client, **conf)
while True:
try:
# poll!
stream_reader.poll()
except EopError as e:
# Write when reaching end of partition
log(e.message)
if test:
# when testing, terminate once reached end of partition:
sys.exit()
except IndexError:
log("Data cannot be decoded\n")
except UnicodeDecodeError:
log("Unexpected data format received\n")
except KeyboardInterrupt:
log("Aborted by user\n")
sys.exit()
except Exception as e:
log(str(e))
_err = traceback.format_exc()
log(_err)
sys.exit()
def watchdog(obs_date: str = None, test: bool = False):
"""
Watchdog for topic listeners
:param obs_date: observing date: YYYYMMDD
:param test: test mode
:return:
"""
init_db_sync(config=config, verbose=True)
topics_on_watch = dict()
while True:
try:
# get kafka topic names with kafka-topics command
if not test:
# Production Kafka stream at IPAC
kafka_cmd = [
os.path.join(config["path"]["kafka"], "bin", "kafka-topics.sh"),
"--zookeeper",
config["kafka"]["zookeeper"],
"-list",
]
else:
# Local test stream
kafka_cmd = [
os.path.join(config["path"]["kafka"], "bin", "kafka-topics.sh"),
"--zookeeper",
config["kafka"]["zookeeper.test"],
"-list",
]
topics = (
subprocess.run(kafka_cmd, stdout=subprocess.PIPE)
.stdout.decode("utf-8")
.split("\n")[:-1]
)
if obs_date is None:
datestr = datetime.datetime.utcnow().strftime("%Y%m%d")
else:
datestr = obs_date
# as of 20180403, the naming convention is ztf_%Y%m%d_programidN
# exclude ZUDS, ingest separately
topics_tonight = [
t
for t in topics
if (datestr in t) and ("programid" in t) and ("zuds" not in t)
]
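            # e.g. for datestr = "20200115" this picks up topics such as
            # ztf_20200115_programid1 and ztf_20200115_programid2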
log(f"Topics: {topics_tonight}")
for t in topics_tonight:
if t not in topics_on_watch:
log(f"Starting listener thread for {t}")
offset_reset = config["kafka"]["default.topic.config"][
"auto.offset.reset"
]
if not test:
bootstrap_servers = config["kafka"]["bootstrap.servers"]
else:
bootstrap_servers = config["kafka"]["bootstrap.test.servers"]
group = config["kafka"]["group"]
topics_on_watch[t] = multiprocessing.Process(
target=topic_listener,
args=(t, bootstrap_servers, offset_reset, group, test),
)
topics_on_watch[t].daemon = True
topics_on_watch[t].start()
else:
log(f"Performing thread health check for {t}")
try:
if not topics_on_watch[t].is_alive():
log(f"Thread {t} died, removing")
# topics_on_watch[t].terminate()
topics_on_watch.pop(t, None)
else:
log(f"Thread {t} appears normal")
except Exception as _e:
log(f"Failed to perform health check: {_e}")
pass
if test:
time.sleep(120)
# when testing, wait for topic listeners to pull all the data, then break
# fixme: do this more gracefully
for t in topics_on_watch:
topics_on_watch[t].kill()
break
except Exception as e:
log(str(e))
_err = traceback.format_exc()
log(str(_err))
time.sleep(60)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Kowalski's ZTF Alert Broker")
parser.add_argument("--obsdate", help="observing date YYYYMMDD")
parser.add_argument("--test", help="listen to the test stream", action="store_true")
args = parser.parse_args()
watchdog(obs_date=args.obsdate, test=args.test)
|
import argparse
from ast import literal_eval
from astropy.io import fits
from astropy.visualization import (
AsymmetricPercentileInterval,
LinearStretch,
LogStretch,
ImageNormalize,
)
import base64
from bson.json_util import loads
import confluent_kafka
from copy import deepcopy
import dask.distributed
import datetime
import fastavro
import gzip
import io
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import os
import pandas as pd
import pathlib
import requests
from requests.packages.urllib3.util.retry import Retry
import subprocess
import sys
import tensorflow as tf
from tensorflow.keras.models import load_model
import threading
import time
import traceback
from typing import Mapping, Optional, Sequence
from utils import (
deg2dms,
deg2hms,
great_circle_distance,
in_ellipse,
init_db_sync,
load_config,
log,
memoize,
Mongo,
radec2lb,
time_stamp,
timer,
TimeoutHTTPAdapter,
ZTFAlert,
)
tf.config.optimizer.set_jit(True)
""" load config and secrets """
config = load_config(config_file="config.yaml")["kowalski"]
def read_schema_data(bytes_io):
"""
Read data that already has an Avro schema.
:param bytes_io: `_io.BytesIO` Data to be decoded.
:return: `dict` Decoded data.
"""
bytes_io.seek(0)
message = fastavro.reader(bytes_io)
return message
class EopError(Exception):
"""
Exception raised when reaching end of a Kafka topic partition.
"""
def __init__(self, msg):
"""
:param msg: The Kafka message result from consumer.poll()
"""
message = (
f"{time_stamp()}: topic:{msg.topic()}, partition:{msg.partition()}, "
f"status:end, offset:{msg.offset()}, key:{str(msg.key())}\n"
)
self.message = message
def __str__(self):
return self.message
def make_photometry(alert: dict, jd_start: float = None):
"""
Make a de-duplicated pandas.DataFrame with photometry of alert['objectId']
:param alert: ZTF alert packet/dict
:param jd_start:
:return:
"""
alert = deepcopy(alert)
df_candidate = pd.DataFrame(alert["candidate"], index=[0])
df_prv_candidates = pd.DataFrame(alert["prv_candidates"])
df_light_curve = pd.concat(
[df_candidate, df_prv_candidates], ignore_index=True, sort=False
)
ztf_filters = {1: "ztfg", 2: "ztfr", 3: "ztfi"}
df_light_curve["ztf_filter"] = df_light_curve["fid"].apply(lambda x: ztf_filters[x])
df_light_curve["magsys"] = "ab"
df_light_curve["mjd"] = df_light_curve["jd"] - 2400000.5
df_light_curve["mjd"] = df_light_curve["mjd"].apply(lambda x: np.float64(x))
df_light_curve["magpsf"] = df_light_curve["magpsf"].apply(lambda x: np.float32(x))
df_light_curve["sigmapsf"] = df_light_curve["sigmapsf"].apply(
lambda x: np.float32(x)
)
df_light_curve = (
df_light_curve.drop_duplicates(subset=["mjd", "magpsf"])
.reset_index(drop=True)
.sort_values(by=["mjd"])
)
# filter out bad data:
mask_good_diffmaglim = df_light_curve["diffmaglim"] > 0
df_light_curve = df_light_curve.loc[mask_good_diffmaglim]
# convert from mag to flux
# step 1: calculate the coefficient that determines whether the
# flux should be negative or positive
coeff = df_light_curve["isdiffpos"].apply(
lambda x: 1.0 if x in [True, 1, "y", "Y", "t", "1"] else -1.0
)
# step 2: calculate the flux normalized to an arbitrary AB zeropoint of
# 23.9 (results in flux in uJy)
df_light_curve["flux"] = coeff * 10 ** (-0.4 * (df_light_curve["magpsf"] - 23.9))
# step 3: separate detections from non detections
detected = np.isfinite(df_light_curve["magpsf"])
undetected = ~detected
# step 4: calculate the flux error
df_light_curve["fluxerr"] = None # initialize the column
# step 4a: calculate fluxerr for detections using sigmapsf
df_light_curve.loc[detected, "fluxerr"] = np.abs(
df_light_curve.loc[detected, "sigmapsf"]
* df_light_curve.loc[detected, "flux"]
* np.log(10)
/ 2.5
)
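    # the ln(10) / 2.5 factor comes from propagating the magnitude error through
    # m = zp - 2.5 * log10(F): |dF/dm| = F * ln(10) / 2.5, so sigma_F ≈ F * ln(10) / 2.5 * sigma_m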
# step 4b: calculate fluxerr for non detections using diffmaglim
df_light_curve.loc[undetected, "fluxerr"] = (
10 ** (-0.4 * (df_light_curve.loc[undetected, "diffmaglim"] - 23.9)) / 5.0
) # as diffmaglim is the 5-sigma depth
# step 5: set the zeropoint and magnitude system
df_light_curve["zp"] = 23.9
df_light_curve["zpsys"] = "ab"
# only "new" photometry requested?
if jd_start is not None:
w_after_jd = df_light_curve["jd"] > jd_start
df_light_curve = df_light_curve.loc[w_after_jd]
return df_light_curve
def make_thumbnail(alert, ttype: str, ztftype: str):
"""
Convert lossless FITS cutouts from ZTF alerts into PNGs
:param alert: ZTF alert packet/dict
:param ttype: <new|ref|sub>
:param ztftype: <Science|Template|Difference>
:return:
"""
alert = deepcopy(alert)
cutout_data = alert[f"cutout{ztftype}"]["stampData"]
with gzip.open(io.BytesIO(cutout_data), "rb") as f:
with fits.open(io.BytesIO(f.read())) as hdu:
# header = hdu[0].header
data_flipped_y = np.flipud(hdu[0].data)
# fixme: png, switch to fits eventually
buff = io.BytesIO()
plt.close("all")
fig = plt.figure()
fig.set_size_inches(4, 4, forward=False)
ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
ax.set_axis_off()
fig.add_axes(ax)
    # replace NaNs with a fill value (the nanmean of the image, computed below):
img = np.array(data_flipped_y)
# replace dubiously large values
xl = np.greater(np.abs(img), 1e20, where=~np.isnan(img))
if img[xl].any():
img[xl] = np.nan
if np.isnan(img).any():
median = float(np.nanmean(img.flatten()))
img = np.nan_to_num(img, nan=median)
norm = ImageNormalize(
img, stretch=LinearStretch() if ztftype == "Difference" else LogStretch()
)
img_norm = norm(img)
normalizer = AsymmetricPercentileInterval(lower_percentile=1, upper_percentile=100)
vmin, vmax = normalizer.get_limits(img_norm)
ax.imshow(img_norm, cmap="bone", origin="lower", vmin=vmin, vmax=vmax)
plt.savefig(buff, dpi=42)
buff.seek(0)
plt.close("all")
thumb = {
"obj_id": alert["objectId"],
"data": base64.b64encode(buff.read()).decode("utf-8"),
"ttype": ttype,
}
return thumb
""" Alert filters """
def make_triplet(alert, to_tpu: bool = False):
"""
Make an L2-normalized cutout triplet out of a ZTF alert
:param alert:
:param to_tpu:
:return:
"""
cutout_dict = dict()
for cutout in ("science", "template", "difference"):
cutout_data = alert[f"cutout{cutout.capitalize()}"]["stampData"]
# unzip
with gzip.open(io.BytesIO(cutout_data), "rb") as f:
with fits.open(io.BytesIO(f.read())) as hdu:
data = hdu[0].data
# replace nans with zeros
cutout_dict[cutout] = np.nan_to_num(data)
# L2-normalize
cutout_dict[cutout] /= np.linalg.norm(cutout_dict[cutout])
# pad to 63x63 if smaller
shape = cutout_dict[cutout].shape
if shape != (63, 63):
# print(f'Shape of {candid}/{cutout}: {shape}, padding to (63, 63)')
cutout_dict[cutout] = np.pad(
cutout_dict[cutout],
[(0, 63 - shape[0]), (0, 63 - shape[1])],
mode="constant",
constant_values=1e-9,
)
triplet = np.zeros((63, 63, 3))
triplet[:, :, 0] = cutout_dict["science"]
triplet[:, :, 1] = cutout_dict["template"]
triplet[:, :, 2] = cutout_dict["difference"]
if to_tpu:
# Edge TPUs require additional processing
triplet = np.rint(triplet * 128 + 128).astype(np.uint8).flatten()
return triplet
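# a minimal usage sketch (it mirrors alert_filter__ml below; assumes ml_models is the
# dict of loaded Keras models built in AlertWorker.__init__):
#   triplet = make_triplet(alert)
#   braai_score = ml_models["braai"]["model"].predict(x=np.expand_dims(triplet, axis=0))[0]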
def alert_filter__ml(alert, ml_models: dict = None) -> dict:
"""Execute ML models on a ZTF alert
:param alert:
:param ml_models:
:return:
"""
scores = dict()
if ml_models is not None and len(ml_models) > 0:
try:
with timer("ZTFAlert(alert)"):
ztf_alert = ZTFAlert(alert)
with timer("Prepping features"):
features = np.expand_dims(ztf_alert.data["features"], axis=[0, -1])
triplet = np.expand_dims(ztf_alert.data["triplet"], axis=[0])
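            # shapes after expansion: triplet -> (1, 63, 63, 3) as built by make_triplet,
            # and features -> a batch of one with a trailing channel axis
            # (roughly (1, n_features, 1), assuming ZTFAlert exposes a 1-D feature vector)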
# braai
if "braai" in ml_models.keys():
with timer("braai"):
braai = ml_models["braai"]["model"].predict(x=triplet)[0]
scores["braai"] = float(braai)
scores["braai_version"] = ml_models["braai"]["version"]
# acai
for model_name in ("acai_h", "acai_v", "acai_o", "acai_n", "acai_b"):
if model_name in ml_models.keys():
with timer(model_name):
score = ml_models[model_name]["model"].predict(
[features, triplet]
)[0]
scores[model_name] = float(score)
scores[f"{model_name}_version"] = ml_models[model_name][
"version"
]
except Exception as e:
log(str(e))
return scores
# cone search radius:
cone_search_radius = float(config["database"]["xmatch"]["cone_search_radius"])
# convert to rad:
if config["database"]["xmatch"]["cone_search_unit"] == "arcsec":
cone_search_radius *= np.pi / 180.0 / 3600.0
elif config["database"]["xmatch"]["cone_search_unit"] == "arcmin":
cone_search_radius *= np.pi / 180.0 / 60.0
elif config["database"]["xmatch"]["cone_search_unit"] == "deg":
cone_search_radius *= np.pi / 180.0
elif config["database"]["xmatch"]["cone_search_unit"] == "rad":
cone_search_radius *= 1
else:
raise Exception("Unknown cone search unit. Must be in [deg, rad, arcsec, arcmin]")
def alert_filter__xmatch(database, alert) -> dict:
"""
Cross-match alerts
"""
xmatches = dict()
try:
ra_geojson = float(alert["candidate"]["ra"])
# geojson-friendly ra:
ra_geojson -= 180.0
dec_geojson = float(alert["candidate"]["dec"])
""" catalogs """
for catalog in config["database"]["xmatch"]["catalogs"]:
catalog_filter = config["database"]["xmatch"]["catalogs"][catalog]["filter"]
catalog_projection = config["database"]["xmatch"]["catalogs"][catalog][
"projection"
]
object_position_query = dict()
object_position_query["coordinates.radec_geojson"] = {
"$geoWithin": {
"$centerSphere": [[ra_geojson, dec_geojson], cone_search_radius]
}
}
s = database[catalog].find(
{**object_position_query, **catalog_filter}, {**catalog_projection}
)
xmatches[catalog] = list(s)
except Exception as e:
log(str(e))
return xmatches
# cone search radius in deg:
cone_search_radius_clu = 3.0
# convert deg to rad:
cone_search_radius_clu *= np.pi / 180.0
def alert_filter__xmatch_clu(
database, alert, size_margin=3, clu_version="CLU_20190625"
) -> dict:
"""
Run cross-match with the CLU catalog
:param database:
:param alert:
:param size_margin: multiply galaxy size by this much before looking for a match
:param clu_version: CLU catalog version
:return:
"""
xmatches = dict()
try:
ra = float(alert["candidate"]["ra"])
dec = float(alert["candidate"]["dec"])
# geojson-friendly ra:
ra_geojson = float(alert["candidate"]["ra"]) - 180.0
dec_geojson = dec
catalog_filter = {}
catalog_projection = {
"_id": 1,
"name": 1,
"ra": 1,
"dec": 1,
"a": 1,
"b2a": 1,
"pa": 1,
"z": 1,
"sfr_fuv": 1,
"mstar": 1,
"sfr_ha": 1,
"coordinates.radec_str": 1,
}
# first do a coarse search of everything that is around
object_position_query = dict()
object_position_query["coordinates.radec_geojson"] = {
"$geoWithin": {
"$centerSphere": [[ra_geojson, dec_geojson], cone_search_radius_clu]
}
}
galaxies = list(
database[clu_version].find(
{**object_position_query, **catalog_filter}, {**catalog_projection}
)
)
# these guys are very big, so check them separately
M31 = {
"_id": 596900,
"name": "PGC2557",
"ra": 10.6847,
"dec": 41.26901,
"a": 6.35156,
"b2a": 0.32,
"pa": 35.0,
"z": -0.00100100006,
"sfr_fuv": None,
"mstar": 253816876.412914,
"sfr_ha": 0,
"coordinates": {"radec_str": ["00:42:44.3503", "41:16:08.634"]},
}
M33 = {
"_id": 597543,
"name": "PGC5818",
"ra": 23.46204,
"dec": 30.66022,
"a": 2.35983,
"b2a": 0.59,
"pa": 23.0,
"z": -0.000597000006,
"sfr_fuv": None,
"mstar": 4502777.420493,
"sfr_ha": 0,
"coordinates": {"radec_str": ["01:33:50.8900", "30:39:36.800"]},
}
# do elliptical matches
matches = []
for galaxy in galaxies + [M31, M33]:
alpha1, delta01 = galaxy["ra"], galaxy["dec"]
redshift = galaxy["z"]
# By default, set the cross-match radius to 50 kpc at the redshift of the host galaxy
cm_radius = 50.0 * (0.05 / redshift) / 3600
if redshift < 0.01:
# for nearby galaxies and galaxies with negative redshifts, do a 5 arc-minute cross-match
# (cross-match radius would otherwise get un-physically large for nearby galaxies)
cm_radius = 300.0 / 3600
in_galaxy = in_ellipse(ra, dec, alpha1, delta01, cm_radius, 1, 0)
if in_galaxy:
match = galaxy
distance_arcsec = round(
great_circle_distance(ra, dec, alpha1, delta01) * 3600, 2
)
# also add a physical distance parameter for redshifts in the Hubble flow
if redshift > 0.005:
distance_kpc = round(
great_circle_distance(ra, dec, alpha1, delta01)
* 3600
* (redshift / 0.05),
2,
)
else:
distance_kpc = -1
match["coordinates"]["distance_arcsec"] = distance_arcsec
match["coordinates"]["distance_kpc"] = distance_kpc
matches.append(match)
xmatches[clu_version] = matches
except Exception as e:
log(str(e))
return xmatches
def alert_filter__user_defined(
database,
filter_templates,
alert,
catalog: str = "ZTF_alerts",
max_time_ms: int = 500,
) -> list:
"""
Evaluate user-defined filters
:param database:
:param filter_templates:
:param alert:
:param catalog:
:param max_time_ms:
:return:
"""
passed_filters = []
for filter_template in filter_templates:
try:
_filter = deepcopy(filter_template)
# match candid
_filter["pipeline"][0]["$match"]["candid"] = alert["candid"]
filtered_data = list(
database[catalog].aggregate(
_filter["pipeline"], allowDiskUse=False, maxTimeMS=max_time_ms
)
)
# passed filter? then len(passed_filter) must be = 1
if len(filtered_data) == 1:
log(
f'{alert["objectId"]} {alert["candid"]} passed filter {_filter["fid"]}'
)
passed_filters.append(
{
"group_id": _filter["group_id"],
"filter_id": _filter["filter_id"],
"group_name": _filter["group_name"],
"filter_name": _filter["filter_name"],
"fid": _filter["fid"],
"permissions": _filter["permissions"],
"autosave": _filter["autosave"],
"update_annotations": _filter["update_annotations"],
"data": filtered_data[0],
}
)
except Exception as e:
log(
f'Filter {filter_template["fid"]} execution failed on alert {alert["candid"]}: {e}'
)
continue
return passed_filters
def process_alert(record, topic):
"""Alert brokering task run by dask.distributed workers
:param record: decoded alert from IPAC's Kafka stream
:param topic: Kafka stream topic name for bookkeeping
:return:
"""
candid = record["candid"]
objectId = record["objectId"]
# get worker running current task
worker = dask.distributed.get_worker()
alert_worker = worker.plugins["worker-init"].alert_worker
log(f"{topic} {objectId} {candid} {worker.address}")
# return if this alert packet has already been processed and ingested into collection_alerts:
if (
alert_worker.mongo.db[alert_worker.collection_alerts].count_documents(
{"candid": candid}, limit=1
)
== 1
):
return
# candid not in db, ingest decoded avro packet into db
# todo: ?? restructure alerts even further?
# move cutouts to ZTF_alerts_cutouts? reduce the main db size for performance
# group by objectId similar to prv_candidates?? maybe this is too much
with timer(f"Mongification of {objectId} {candid}", alert_worker.verbose > 1):
alert, prv_candidates = alert_worker.alert_mongify(record)
# ML models:
with timer(f"MLing of {objectId} {candid}", alert_worker.verbose > 1):
scores = alert_filter__ml(record, ml_models=alert_worker.ml_models)
alert["classifications"] = scores
with timer(f"Ingesting {objectId} {candid}", alert_worker.verbose > 1):
alert_worker.mongo.insert_one(
collection=alert_worker.collection_alerts, document=alert
)
# prv_candidates: pop nulls - save space
prv_candidates = [
{kk: vv for kk, vv in prv_candidate.items() if vv is not None}
for prv_candidate in prv_candidates
]
# cross-match with external catalogs if objectId not in collection_alerts_aux:
if (
alert_worker.mongo.db[alert_worker.collection_alerts_aux].count_documents(
{"_id": objectId}, limit=1
)
== 0
):
with timer(f"Cross-match of {objectId} {candid}", alert_worker.verbose > 1):
xmatches = alert_filter__xmatch(alert_worker.mongo.db, alert)
# CLU cross-match:
with timer(f"CLU cross-match {objectId} {candid}", alert_worker.verbose > 1):
xmatches = {
**xmatches,
**alert_filter__xmatch_clu(alert_worker.mongo.db, alert),
}
alert_aux = {
"_id": objectId,
"cross_matches": xmatches,
"prv_candidates": prv_candidates,
}
with timer(f"Aux ingesting {objectId} {candid}", alert_worker.verbose > 1):
alert_worker.mongo.insert_one(
collection=alert_worker.collection_alerts_aux, document=alert_aux
)
else:
with timer(f"Aux updating of {objectId} {candid}", alert_worker.verbose > 1):
alert_worker.mongo.db[alert_worker.collection_alerts_aux].update_one(
{"_id": objectId},
{"$addToSet": {"prv_candidates": {"$each": prv_candidates}}},
upsert=True,
)
if config["misc"]["broker"]:
# execute user-defined alert filters
with timer(f"Filtering of {objectId} {candid}", alert_worker.verbose > 1):
passed_filters = alert_filter__user_defined(
alert_worker.mongo.db, alert_worker.filter_templates, alert
)
if alert_worker.verbose > 1:
log(f"{objectId} {candid} number of filters passed: {len(passed_filters)}")
# post to SkyPortal
alert_worker.alert_sentinel_skyportal(alert, prv_candidates, passed_filters)
# clean up after thyself
del record, alert, prv_candidates
class AlertConsumer:
"""
Creates an alert stream Kafka consumer for a given topic.
"""
def __init__(self, topic, dask_client, **kwargs):
self.verbose = kwargs.get("verbose", 2)
self.dask_client = dask_client
# keep track of disconnected partitions
self.num_disconnected_partitions = 0
self.topic = topic
def error_cb(err, _self=self):
log(f"error_cb --------> {err}")
# print(err.code())
if err.code() == -195:
_self.num_disconnected_partitions += 1
if _self.num_disconnected_partitions == _self.num_partitions:
log("All partitions got disconnected, killing thread")
sys.exit()
else:
log(
f"{_self.topic}: disconnected from partition. total: {_self.num_disconnected_partitions}"
)
# 'error_cb': error_cb
kwargs["error_cb"] = error_cb
self.consumer = confluent_kafka.Consumer(**kwargs)
self.num_partitions = 0
def on_assign(consumer, partitions, _self=self):
# force-reset offsets when subscribing to a topic:
for part in partitions:
# -2 stands for beginning and -1 for end
part.offset = -2
# keep number of partitions.
# when reaching end of last partition, kill thread and start from beginning
_self.num_partitions += 1
log(consumer.get_watermark_offsets(part))
self.consumer.subscribe([topic], on_assign=on_assign)
# set up own mongo client
self.collection_alerts = config["database"]["collections"]["alerts_ztf"]
self.mongo = Mongo(
host=config["database"]["host"],
port=config["database"]["port"],
replica_set=config["database"]["replica_set"],
username=config["database"]["username"],
password=config["database"]["password"],
db=config["database"]["db"],
verbose=self.verbose,
)
# create indexes
if config["database"]["build_indexes"]:
for index in config["database"]["indexes"][self.collection_alerts]:
try:
ind = [tuple(ii) for ii in index["fields"]]
self.mongo.db[self.collection_alerts].create_index(
keys=ind,
name=index["name"],
background=True,
unique=index["unique"],
)
except Exception as e:
log(e)
@staticmethod
def decode_message(msg):
"""
Decode Avro message according to a schema.
:param msg: The Kafka message result from consumer.poll()
:return:
"""
message = msg.value()
decoded_msg = message
try:
bytes_io = io.BytesIO(message)
decoded_msg = read_schema_data(bytes_io)
except AssertionError:
decoded_msg = None
except IndexError:
literal_msg = literal_eval(
str(message, encoding="utf-8")
) # works to give bytes
bytes_io = io.BytesIO(literal_msg) # works to give <class '_io.BytesIO'>
decoded_msg = read_schema_data(bytes_io) # yields reader
except Exception:
decoded_msg = message
finally:
return decoded_msg
def poll(self):
"""
Polls Kafka broker to consume a topic.
:return:
"""
msg = self.consumer.poll()
if msg is None:
log("Caught error: msg is None")
if msg.error():
log(f"Caught error: {msg.error()}")
raise EopError(msg)
elif msg is not None:
try:
# decode avro packet
with timer("Decoding alert", self.verbose > 1):
msg_decoded = self.decode_message(msg)
for record in msg_decoded:
# submit only unprocessed alerts:
if (
self.mongo.db[self.collection_alerts].count_documents(
{"candid": record["candid"]}, limit=1
)
== 0
):
with timer(
f"Submitting alert {record['objectId']} {record['candid']} for processing",
self.verbose > 1,
):
future = self.dask_client.submit(
process_alert, record, self.topic, pure=True
)
dask.distributed.fire_and_forget(future)
future.release()
del future
except Exception as e:
log(e)
_err = traceback.format_exc()
log(_err)
class AlertWorker:
"""Tools to handle alert processing: database ingestion, filtering, ml'ing, cross-matches, reporting to SP"""
def __init__(self, **kwargs):
self.verbose = kwargs.get("verbose", 2)
self.config = config
# Kowalski version
path_version_file = pathlib.Path(__file__).parent.absolute() / "version.txt"
version = f"v{self.config['server']['version']}"
if path_version_file.exists():
with open(
pathlib.Path(__file__).parent.absolute() / "version.txt", "r"
) as version_file:
version = version_file.read().strip()
# MongoDB collections to store the alerts:
self.collection_alerts = self.config["database"]["collections"]["alerts_ztf"]
self.collection_alerts_aux = self.config["database"]["collections"][
"alerts_ztf_aux"
]
self.collection_alerts_filter = self.config["database"]["collections"][
"alerts_ztf_filter"
]
self.mongo = Mongo(
host=config["database"]["host"],
port=config["database"]["port"],
replica_set=config["database"]["replica_set"],
username=config["database"]["username"],
password=config["database"]["password"],
db=config["database"]["db"],
verbose=self.verbose,
)
# ML models
self.ml_models = dict()
for model in config["ml_models"]:
try:
model_version = config["ml_models"][model]["version"]
# todo: allow other formats such as SavedModel
model_filepath = os.path.join(
config["path"]["ml_models"], f"{model}.{model_version}.h5"
)
self.ml_models[model] = {
"model": load_model(model_filepath),
"version": model_version,
}
except Exception as e:
log(f"Error loading ML model {model}: {str(e)}")
_err = traceback.format_exc()
log(_err)
continue
# talking to SkyPortal?
if not config["misc"]["broker"]:
return
# session to talk to SkyPortal
self.session = requests.Session()
self.session_headers = {
"Authorization": f"token {config['skyportal']['token']}",
"User-Agent": f"Kowalski {version}",
}
retries = Retry(
total=5,
backoff_factor=2,
status_forcelist=[405, 429, 500, 502, 503, 504],
method_whitelist=["HEAD", "GET", "PUT", "POST", "PATCH"],
)
adapter = TimeoutHTTPAdapter(timeout=5, max_retries=retries)
self.session.mount("https://", adapter)
self.session.mount("http://", adapter)
# get ZTF instrument id
self.instrument_id = 1
with timer("Getting ZTF instrument_id from SkyPortal", self.verbose > 1):
response = self.api_skyportal("GET", "/api/instrument", {"name": "ZTF"})
if response.json()["status"] == "success" and len(response.json()["data"]) > 0:
self.instrument_id = response.json()["data"][0]["id"]
log(f"Got ZTF instrument_id from SkyPortal: {self.instrument_id}")
else:
log("Failed to get ZTF instrument_id from SkyPortal")
raise ValueError("Failed to get ZTF instrument_id from SkyPortal")
# get ZTF alert stream ids to program ids mapping
self.ztf_program_id_to_stream_id = dict()
with timer("Getting ZTF alert stream ids from SkyPortal", self.verbose > 1):
response = self.api_skyportal("GET", "/api/streams")
if response.json()["status"] == "success" and len(response.json()["data"]) > 0:
for stream in response.json()["data"]:
if stream.get("name") == "ZTF Public":
self.ztf_program_id_to_stream_id[1] = stream["id"]
if stream.get("name") == "ZTF Public+Partnership":
self.ztf_program_id_to_stream_id[2] = stream["id"]
if stream.get("name") == "ZTF Public+Partnership+Caltech":
# programid=0 is engineering data
self.ztf_program_id_to_stream_id[0] = stream["id"]
self.ztf_program_id_to_stream_id[3] = stream["id"]
if len(self.ztf_program_id_to_stream_id) != 4:
log("Failed to map ZTF alert stream ids from SkyPortal to program ids")
raise ValueError(
"Failed to map ZTF alert stream ids from SkyPortal to program ids"
)
log(
f"Got ZTF program id to SP stream id mapping: {self.ztf_program_id_to_stream_id}"
)
else:
log("Failed to get ZTF alert stream ids from SkyPortal")
raise ValueError("Failed to get ZTF alert stream ids from SkyPortal")
# filter pipeline upstream: select current alert, ditch cutouts, and merge with aux data
# including archival photometry and cross-matches:
self.filter_pipeline_upstream = config["database"]["filters"][
self.collection_alerts
]
log("Upstream filtering pipeline:")
log(self.filter_pipeline_upstream)
# load *active* user-defined alert filter templates and pre-populate them
active_filters = self.get_active_filters()
self.filter_templates = self.make_filter_templates(active_filters)
# set up watchdog for periodic refresh of the filter templates, in case those change
self.filter_monitor = threading.Thread(target=self.reload_filters)
self.filter_monitor.start()
log("Loaded user-defined filters:")
log(self.filter_templates)
def api_skyportal(self, method: str, endpoint: str, data: Optional[Mapping] = None):
"""Make an API call to a SkyPortal instance
:param method:
:param endpoint:
:param data:
:return:
"""
method = method.lower()
methods = {
"head": self.session.head,
"get": self.session.get,
"post": self.session.post,
"put": self.session.put,
"patch": self.session.patch,
"delete": self.session.delete,
}
if endpoint is None:
raise ValueError("Endpoint not specified")
if method not in ["head", "get", "post", "put", "patch", "delete"]:
raise ValueError(f"Unsupported method: {method}")
if method == "get":
response = methods[method](
f"{config['skyportal']['protocol']}://"
f"{config['skyportal']['host']}:{config['skyportal']['port']}"
f"{endpoint}",
params=data,
headers=self.session_headers,
)
else:
response = methods[method](
f"{config['skyportal']['protocol']}://"
f"{config['skyportal']['host']}:{config['skyportal']['port']}"
f"{endpoint}",
json=data,
headers=self.session_headers,
)
return response
@memoize
def api_skyportal_get_group(self, group_id):
return self.api_skyportal(
"GET", f"/api/groups/{group_id}?includeGroupUsers=False"
)
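    # Note: `memoize` (defined/imported elsewhere in this module) is assumed to cache the
    # SkyPortal response per group_id, so building many filter templates for the same
    # group does not trigger repeated /api/groups calls.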
def get_active_filters(self):
"""
Fetch user-defined filters from own db marked as active
:return:
"""
# todo: query SP to make sure the filters still exist there and we're not out of sync;
# clean up if necessary
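        # each returned document is expected to look roughly like (illustrative values):
        #   {"group_id": 1, "filter_id": 2, "permissions": [1, 2], "autosave": False,
        #    "update_annotations": False, "fv": {"fid": "...", "pipeline": "<serialized stages>"}}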
return list(
self.mongo.db[config["database"]["collections"]["filters"]].aggregate(
[
{
"$match": {
"catalog": config["database"]["collections"]["alerts_ztf"],
"active": True,
}
},
{
"$project": {
"group_id": 1,
"filter_id": 1,
"permissions": 1,
"autosave": 1,
"update_annotations": 1,
"fv": {
"$arrayElemAt": [
{
"$filter": {
"input": "$fv",
"as": "fvv",
"cond": {
"$eq": ["$$fvv.fid", "$active_fid"]
},
}
},
0,
]
},
}
},
]
)
)
def make_filter_templates(self, active_filters: Sequence):
"""
Make filter templates by adding metadata, prepending upstream aggregation stages and setting permissions
:param active_filters:
:return:
"""
filter_templates = []
for active_filter in active_filters:
try:
# collect additional info from SkyPortal
with timer(
f"Getting info on group id={active_filter['group_id']} from SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal_get_group(active_filter["group_id"])
if self.verbose > 1:
log(response.json())
if response.json()["status"] == "success":
group_name = (
response.json()["data"]["nickname"]
if response.json()["data"]["nickname"] is not None
else response.json()["data"]["name"]
)
filter_name = [
filtr["name"]
for filtr in response.json()["data"]["filters"]
if filtr["id"] == active_filter["filter_id"]
][0]
else:
log(
f"Failed to get info on group id={active_filter['group_id']} from SkyPortal"
)
group_name, filter_name = None, None
# raise ValueError(f"Failed to get info on group id={active_filter['group_id']} from SkyPortal")
log(f"Group name: {group_name}, filter name: {filter_name}")
# prepend upstream aggregation stages:
pipeline = deepcopy(self.filter_pipeline_upstream) + loads(
active_filter["fv"]["pipeline"]
)
# set permissions
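                # (stage 0 of the upstream pipeline is the candidate.programid $match,
                # stage 3 the prv_candidates $project/$filter; both get the filter's
                # programid permissions injected below)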
pipeline[0]["$match"]["candidate.programid"]["$in"] = active_filter[
"permissions"
]
pipeline[3]["$project"]["prv_candidates"]["$filter"]["cond"]["$and"][0][
"$in"
][1] = active_filter["permissions"]
filter_template = {
"group_id": active_filter["group_id"],
"filter_id": active_filter["filter_id"],
"group_name": group_name,
"filter_name": filter_name,
"fid": active_filter["fv"]["fid"],
"permissions": active_filter["permissions"],
"autosave": active_filter["autosave"],
"update_annotations": active_filter["update_annotations"],
"pipeline": deepcopy(pipeline),
}
filter_templates.append(filter_template)
except Exception as e:
log(
"Failed to generate filter template for "
f"group_id={active_filter['group_id']} filter_id={active_filter['filter_id']}: {e}"
)
continue
return filter_templates
def reload_filters(self):
"""
Helper function to periodically reload filters from SkyPortal
:return:
"""
while True:
time.sleep(60 * 5)
active_filters = self.get_active_filters()
self.filter_templates = self.make_filter_templates(active_filters)
@staticmethod
def alert_mongify(alert: Mapping):
"""
Prepare a raw ZTF alert for ingestion into MongoDB:
- add a placeholder for ML-based classifications
- add coordinates for 2D spherical indexing and compute Galactic coordinates
- cut off the prv_candidates section
:param alert:
:return:
"""
doc = dict(alert)
# let mongo create a unique _id
# placeholders for classifications
doc["classifications"] = dict()
# GeoJSON for 2D indexing
doc["coordinates"] = {}
_ra = doc["candidate"]["ra"]
_dec = doc["candidate"]["dec"]
# string format: H:M:S, D:M:S
_radec_str = [deg2hms(_ra), deg2dms(_dec)]
doc["coordinates"]["radec_str"] = _radec_str
# for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. in deg)
_radec_geojson = [_ra - 180.0, _dec]
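        # e.g. ra = 10.5 deg maps to GeoJSON longitude -169.5 deg; dec is already in [-90, 90]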
doc["coordinates"]["radec_geojson"] = {
"type": "Point",
"coordinates": _radec_geojson,
}
# Galactic coordinates l and b
l, b = radec2lb(doc["candidate"]["ra"], doc["candidate"]["dec"])
doc["coordinates"]["l"] = l
doc["coordinates"]["b"] = b
prv_candidates = deepcopy(doc["prv_candidates"])
doc.pop("prv_candidates", None)
if prv_candidates is None:
prv_candidates = []
return doc, prv_candidates
def alert_post_candidate(self, alert: Mapping, filter_ids: Sequence):
"""
Post a ZTF alert as a candidate for filters on SkyPortal
:param alert:
:param filter_ids:
:return:
"""
# post metadata with all filter_ids in single call to /api/candidates
alert_thin = {
"id": alert["objectId"],
"ra": alert["candidate"].get("ra"),
"dec": alert["candidate"].get("dec"),
"score": alert["candidate"].get("drb", alert["candidate"]["rb"]),
"filter_ids": filter_ids,
"passing_alert_id": alert["candid"],
"passed_at": datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f"),
"origin": "Kowalski",
}
if self.verbose > 1:
log(alert_thin)
with timer(
f"Posting metadata of {alert['objectId']} {alert['candid']} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal("POST", "/api/candidates", alert_thin)
if response.json()["status"] == "success":
log(f"Posted {alert['objectId']} {alert['candid']} metadata to SkyPortal")
else:
log(
f"Failed to post {alert['objectId']} {alert['candid']} metadata to SkyPortal"
)
log(response.json())
def alert_post_source(self, alert: Mapping, group_ids: Sequence):
"""
Save a ZTF alert as a source to groups on SkyPortal
:param alert:
:param group_ids:
:return:
"""
# save source
alert_thin = {
"id": alert["objectId"],
"group_ids": group_ids,
"origin": "Kowalski",
}
if self.verbose > 1:
log(alert_thin)
with timer(
f"Saving {alert['objectId']} {alert['candid']} as a Source on SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal("POST", "/api/sources", alert_thin)
if response.json()["status"] == "success":
log(f"Saved {alert['objectId']} {alert['candid']} as a Source on SkyPortal")
else:
log(
f"Failed to save {alert['objectId']} {alert['candid']} as a Source on SkyPortal"
)
log(response.json())
def alert_post_annotations(self, alert: Mapping, passed_filters: Sequence):
"""
Post annotations to SkyPortal for an alert that passed user-defined filters
:param alert:
:param passed_filters:
:return:
"""
for passed_filter in passed_filters:
annotations = {
"obj_id": alert["objectId"],
"origin": f"{passed_filter.get('group_name')}:{passed_filter.get('filter_name')}",
"data": passed_filter.get("data", dict()).get("annotations", dict()),
"group_ids": [passed_filter.get("group_id")],
}
if len(annotations["data"]) > 0:
with timer(
f"Posting annotation for {alert['objectId']} {alert['candid']} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"POST", "/api/annotation", annotations
)
if response.json()["status"] == "success":
log(f"Posted {alert['objectId']} annotation to SkyPortal")
else:
log(f"Failed to post {alert['objectId']} annotation to SkyPortal")
log(response.json())
def alert_put_annotations(self, alert: Mapping, passed_filters: Sequence):
"""
Update annotations on SkyPortal for an alert that passed user-defined filters
:param alert:
:param passed_filters:
:return:
"""
# first need to learn existing annotation id's and corresponding author id's to use with the PUT call
with timer(
f"Getting annotations for {alert['objectId']} from SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"GET", f"/api/sources/{alert['objectId']}/annotations"
)
if response.json()["status"] == "success":
log(f"Got {alert['objectId']} annotations from SkyPortal")
else:
log(f"Failed to get {alert['objectId']} annotations from SkyPortal")
log(response.json())
return False
existing_annotations = {
annotation["origin"]: {
"annotation_id": annotation["id"],
"author_id": annotation["author_id"],
}
for annotation in response.json()["data"]
}
for passed_filter in passed_filters:
origin = (
f"{passed_filter.get('group_name')}:{passed_filter.get('filter_name')}"
)
# no annotation exists on SkyPortal for this object? just post then
if origin not in existing_annotations:
self.alert_post_annotations(alert, [passed_filter])
continue
annotations = {
"author_id": existing_annotations[origin]["author_id"],
"obj_id": alert["objectId"],
"origin": origin,
"data": passed_filter.get("data", dict()).get("annotations", dict()),
"group_ids": [passed_filter.get("group_id")],
}
if len(annotations["data"]) > 0 and passed_filter.get(
"update_annotations", False
):
with timer(
f"Putting annotation for {alert['objectId']} {alert['candid']} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"PUT",
f"/api/annotation/{existing_annotations[origin]['annotation_id']}",
annotations,
)
if response.json()["status"] == "success":
log(f"Posted {alert['objectId']} annotation to SkyPortal")
else:
log(f"Failed to post {alert['objectId']} annotation to SkyPortal")
log(response.json())
def alert_post_thumbnails(self, alert: Mapping):
"""
Post ZTF alert thumbnails to SkyPortal
:param alert:
:return:
"""
for ttype, ztftype in [
("new", "Science"),
("ref", "Template"),
("sub", "Difference"),
]:
with timer(
f"Making {ztftype} thumbnail for {alert['objectId']} {alert['candid']}",
self.verbose > 1,
):
thumb = make_thumbnail(alert, ttype, ztftype)
with timer(
f"Posting {ztftype} thumbnail for {alert['objectId']} {alert['candid']} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal("POST", "/api/thumbnail", thumb)
if response.json()["status"] == "success":
log(
f"Posted {alert['objectId']} {alert['candid']} {ztftype} cutout to SkyPortal"
)
else:
log(
f"Failed to post {alert['objectId']} {alert['candid']} {ztftype} cutout to SkyPortal"
)
log(response.json())
def alert_put_photometry(self, alert):
"""PUT photometry to SkyPortal
:param alert:
:return:
"""
with timer(
f"Making alert photometry of {alert['objectId']} {alert['candid']}",
self.verbose > 1,
):
df_photometry = make_photometry(alert)
df_photometry["stream_id"] = df_photometry["programid"].apply(
lambda programid: self.ztf_program_id_to_stream_id[programid]
)
# post photometry by stream_id
for stream_id in set(df_photometry.stream_id.unique()):
stream_id_mask = df_photometry.stream_id == int(stream_id)
photometry = {
"obj_id": alert["objectId"],
"stream_ids": [int(stream_id)],
"instrument_id": self.instrument_id,
"mjd": df_photometry.loc[stream_id_mask, "mjd"].tolist(),
"flux": df_photometry.loc[stream_id_mask, "flux"].tolist(),
"fluxerr": df_photometry.loc[stream_id_mask, "fluxerr"].tolist(),
"zp": df_photometry.loc[stream_id_mask, "zp"].tolist(),
"magsys": df_photometry.loc[stream_id_mask, "zpsys"].tolist(),
"filter": df_photometry.loc[stream_id_mask, "ztf_filter"].tolist(),
"ra": df_photometry.loc[stream_id_mask, "ra"].tolist(),
"dec": df_photometry.loc[stream_id_mask, "dec"].tolist(),
}
if (len(photometry.get("flux", ())) > 0) or (
len(photometry.get("fluxerr", ())) > 0
):
with timer(
f"Posting photometry of {alert['objectId']} {alert['candid']}, "
f"stream_id={stream_id} to SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal("PUT", "/api/photometry", photometry)
if response.json()["status"] == "success":
log(
f"Posted {alert['objectId']} photometry stream_id={stream_id} to SkyPortal"
)
else:
log(
f"Failed to post {alert['objectId']} photometry stream_id={stream_id} to SkyPortal"
)
log(response.json())
def alert_sentinel_skyportal(self, alert, prv_candidates, passed_filters):
"""
Post alerts to SkyPortal, if need be.
Logic:
- check if candidate/source exist on SP
- if candidate does not exist and len(passed_filters) > 0
- post metadata with all filter_ids in single call to /api/candidates
- post full light curve with all group_ids in single call to /api/photometry
- post thumbnails
- if candidate exists:
- get filter_ids of saved candidate from SP
- post to /api/candidates with new_filter_ids, if any
- post alert light curve in single PUT call to /api/photometry specifying stream_ids
- if source exists:
- get groups and check stream access
- decide which points to post to what groups based on permissions
- post alert light curve in single PUT call to /api/photometry specifying stream_ids
:param alert: ZTF_alert with a stripped-off prv_candidates section
:param prv_candidates: could be plain prv_candidates section of an alert, or extended alert history
:param passed_filters: list of filters that alert passed, with their output
:return:
"""
# check if candidate/source exist in SP:
with timer(
f"Checking if {alert['objectId']} is Candidate in SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"HEAD", f"/api/candidates/{alert['objectId']}"
)
is_candidate = response.status_code == 200
if self.verbose > 1:
log(
f"{alert['objectId']} {'is' if is_candidate else 'is not'} Candidate in SkyPortal"
)
with timer(
f"Checking if {alert['objectId']} is Source in SkyPortal", self.verbose > 1
):
response = self.api_skyportal("HEAD", f"/api/sources/{alert['objectId']}")
is_source = response.status_code == 200
if self.verbose > 1:
log(
f"{alert['objectId']} {'is' if is_source else 'is not'} Source in SkyPortal"
)
        # obj does not exist in SP:
if (not is_candidate) and (not is_source):
# passed at least one filter?
if len(passed_filters) > 0:
# post candidate
filter_ids = [f.get("filter_id") for f in passed_filters]
self.alert_post_candidate(alert, filter_ids)
# post annotations
self.alert_post_annotations(alert, passed_filters)
# post full light curve
try:
alert["prv_candidates"] = list(
self.mongo.db[self.collection_alerts_aux].find(
{"_id": alert["objectId"]}, {"prv_candidates": 1}, limit=1
)
)[0]["prv_candidates"]
except Exception as e:
# this should never happen, but just in case
log(e)
alert["prv_candidates"] = prv_candidates
self.alert_put_photometry(alert)
# post thumbnails
self.alert_post_thumbnails(alert)
# post source if autosave=True
autosave_group_ids = [
f.get("group_id")
for f in passed_filters
if f.get("autosave", False)
]
if len(autosave_group_ids) > 0:
self.alert_post_source(alert, autosave_group_ids)
# obj exists in SP:
else:
if len(passed_filters) > 0:
filter_ids = [f.get("filter_id") for f in passed_filters]
# post candidate with new filter ids
self.alert_post_candidate(alert, filter_ids)
# put annotations
self.alert_put_annotations(alert, passed_filters)
# already saved as a source?
if is_source:
# get info on the corresponding groups:
with timer(
f"Getting source groups info on {alert['objectId']} from SkyPortal",
self.verbose > 1,
):
response = self.api_skyportal(
"GET", f"/api/sources/{alert['objectId']}/groups"
)
if response.json()["status"] == "success":
existing_groups = response.json()["data"]
existing_group_ids = [g["id"] for g in existing_groups]
# post source if autosave=True and not already saved
autosave_group_ids = [
f.get("group_id")
for f in passed_filters
if f.get("autosave", False)
and (f.get("group_id") not in existing_group_ids)
]
if len(autosave_group_ids) > 0:
self.alert_post_source(alert, autosave_group_ids)
else:
log(f"Failed to get source groups info on {alert['objectId']}")
else:
# post source if autosave=True and not is_source
autosave_group_ids = [
f.get("group_id")
for f in passed_filters
if f.get("autosave", False)
]
if len(autosave_group_ids) > 0:
self.alert_post_source(alert, autosave_group_ids)
# post alert photometry in single call to /api/photometry
alert["prv_candidates"] = prv_candidates
self.alert_put_photometry(alert)
class WorkerInitializer(dask.distributed.WorkerPlugin):
def __init__(self, *args, **kwargs):
self.alert_worker = None
def setup(self, worker: dask.distributed.Worker):
self.alert_worker = AlertWorker()
def topic_listener(
topic,
bootstrap_servers: str,
offset_reset: str = "earliest",
group: str = None,
test: bool = False,
):
"""
Listen to a topic
:param topic:
:param bootstrap_servers:
:param offset_reset:
:param group:
:param test: when testing, terminate once reached end of partition
:return:
"""
# Configure dask client
dask_client = dask.distributed.Client(
address=f"{config['dask']['host']}:{config['dask']['scheduler_port']}"
)
# init each worker with AlertWorker instance
worker_initializer = WorkerInitializer()
dask_client.register_worker_plugin(worker_initializer, name="worker-init")
# Configure consumer connection to Kafka broker
conf = {
"bootstrap.servers": bootstrap_servers,
"default.topic.config": {"auto.offset.reset": offset_reset},
}
if group is not None:
conf["group.id"] = group
else:
conf["group.id"] = os.environ.get("HOSTNAME", "kowalski")
# make it unique:
conf[
"group.id"
] = f"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}"
# Start alert stream consumer
stream_reader = AlertConsumer(topic, dask_client, **conf)
while True:
try:
# poll!
stream_reader.poll()
except EopError as e:
# Write when reaching end of partition
log(e.message)
if test:
# when testing, terminate once reached end of partition:
sys.exit()
except IndexError:
log("Data cannot be decoded\n")
except UnicodeDecodeError:
log("Unexpected data format received\n")
except KeyboardInterrupt:
log("Aborted by user\n")
sys.exit()
except Exception as e:
log(str(e))
_err = traceback.format_exc()
log(_err)
sys.exit()
def watchdog(obs_date: str = None, test: bool = False):
"""
Watchdog for topic listeners
:param obs_date: observing date: YYYYMMDD
:param test: test mode
:return:
"""
init_db_sync(config=config, verbose=True)
topics_on_watch = dict()
while True:
try:
# get kafka topic names with kafka-topics command
if not test:
# Production Kafka stream at IPAC
kafka_cmd = [
os.path.join(config["path"]["kafka"], "bin", "kafka-topics.sh"),
"--zookeeper",
config["kafka"]["zookeeper"],
"-list",
]
else:
# Local test stream
kafka_cmd = [
os.path.join(config["path"]["kafka"], "bin", "kafka-topics.sh"),
"--zookeeper",
config["kafka"]["zookeeper.test"],
"-list",
]
topics = (
subprocess.run(kafka_cmd, stdout=subprocess.PIPE)
.stdout.decode("utf-8")
.split("\n")[:-1]
)
if obs_date is None:
datestr = datetime.datetime.utcnow().strftime("%Y%m%d")
else:
datestr = obs_date
# as of 20180403, the naming convention is ztf_%Y%m%d_programidN
# exclude ZUDS, ingest separately
topics_tonight = [
t
for t in topics
if (datestr in t) and ("programid" in t) and ("zuds" not in t)
]
log(f"Topics: {topics_tonight}")
for t in topics_tonight:
if t not in topics_on_watch:
log(f"Starting listener thread for {t}")
offset_reset = config["kafka"]["default.topic.config"][
"auto.offset.reset"
]
if not test:
bootstrap_servers = config["kafka"]["bootstrap.servers"]
else:
bootstrap_servers = config["kafka"]["bootstrap.test.servers"]
group = config["kafka"]["group"]
topics_on_watch[t] = multiprocessing.Process(
target=topic_listener,
args=(t, bootstrap_servers, offset_reset, group, test),
)
topics_on_watch[t].daemon = True
topics_on_watch[t].start()
else:
log(f"Performing thread health check for {t}")
try:
if not topics_on_watch[t].is_alive():
log(f"Thread {t} died, removing")
# topics_on_watch[t].terminate()
topics_on_watch.pop(t, None)
else:
log(f"Thread {t} appears normal")
except Exception as _e:
log(f"Failed to perform health check: {_e}")
pass
if test:
time.sleep(120)
# when testing, wait for topic listeners to pull all the data, then break
# fixme: do this more gracefully
for t in topics_on_watch:
topics_on_watch[t].kill()
break
except Exception as e:
log(str(e))
_err = traceback.format_exc()
log(str(_err))
time.sleep(60)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Kowalski's ZTF Alert Broker")
parser.add_argument("--obsdate", help="observing date YYYYMMDD")
parser.add_argument("--test", help="listen to the test stream", action="store_true")
args = parser.parse_args()
watchdog(obs_date=args.obsdate, test=args.test)
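# --- Illustrative sketch (not part of the original broker) ---------------------------
# The entry point can also be driven programmatically, e.g. from a test harness; the
# observing date below is hypothetical and the flags mirror the argparse options above.
def _example_run_test_broker():  # pragma: no cover
    from types import SimpleNamespace
    # equivalent to: python <this script> --obsdate 20210101 --test
    args = SimpleNamespace(obsdate="20210101", test=True)
    watchdog(obs_date=args.obsdate, test=args.test)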
|
import os
import copy
import ray
import argparse
import tqdm
import logging
import pandas as pd
from pathlib import Path
from datetime import datetime
import shutil
import uuid as uu_id
from typing import ClassVar, Dict, Generator, Tuple, List, Any
from proseco.evaluator.remote import run
from proseco.evaluator.progressBar import ProgressBar
import proseco.utility.io as io
import proseco.utility.ui as ui
from proseco.dashboard.model import (
get_run_directories,
is_successful_result,
load_result,
load_options,
load_scenario,
load_uuid,
)
from proseco.evaluator.util import (
flatten_dictionary,
nest_dictionary,
hash_dict,
get_permutations,
)
class Head:
"""Head node of the Evaluation."""
pack_path: str
args: argparse.Namespace
bulk_config: Dict[str, Any]
time_string: str
number_evaluations: int
iteration_index: int
logger: logging.Logger
c_error: ClassVar[str] = "\033[91m\033[1m"
c_warning: ClassVar[str] = "\33[33m\033["
c_okay: ClassVar[str] = "\033[92m\033[1m"
c_reset: ClassVar[str] = "\033[0m"
def __init__(
self, args: argparse.Namespace, time_string: str = None, init_ray: bool = True
):
"""Constructor.
Parameters
----------
args : argparse.Namespace
Command line arguments for starting the evaluation.
time_string : str
Optional time stamp when this head node was started.
init_ray : bool
When True the evaluation will connect the local ray client to a cluster (default: True).
It is used to prevent multiple ray-inits when running evaluations in sequence, else ray init will throw an exception.
"""
self.logger = ui.get_logger("ProSeCo Evaluator")
# Arguments with flags
self.args = args
        # Opened evaluator config file
self.bulk_config = self.get_file(file_type="evaluator", file=args.config)
# Timestamp of the head node
if time_string == None:
self.time_string = io.create_timestamp()
else:
self.time_string = time_string
# How many evaluations this config will generate
self.number_evaluations = self.determine_number_evaluations()
# The current iteration index the evaluation is at
self.iteration_index = 0
self.path = self.create_evaluation_directory()
# The time in seconds, when a task is considered to be timed out.
self.task_timeout = 120
self.isolate_binary()
self.init_ray()
def __del__(self):
"""Destructor.
Removes the temporary binary for the evaluation.
"""
self.remove_binary()
def determine_number_evaluations(self) -> int:
"""Determines the number of MCTS-Evaluations to be started.
Returns
-------
int
Total number of scenario evaluations.
"""
len_options = len(self.bulk_config["options"])
len_scenarios = len(self.bulk_config["scenarios"])
number_runs = self.bulk_config["number_runs"]
alterations = flatten_dictionary(self.bulk_config["options_alterations"])
scenario_alterations = flatten_dictionary(
self.bulk_config["scenario_alterations"]
)
alterations.update(scenario_alterations)
len_alter = 1
for _, v in alterations.items():
if type(v) is list:
len_alter = len_alter * len(v)
else:
pass
number_evaluations = len_options * len_scenarios * len_alter * number_runs
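        # e.g. 2 options files x 3 scenarios x (2 * 2) alteration values x 5 runs
        # = 120 evaluations (numbers purely illustrative)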
return number_evaluations
def create_evaluation_directory(self) -> Path:
"""Creates a directory for the evaluation.
Returns
-------
Path
The path to the evaluation directory.
"""
path = (
io.get_user_temp_dir()
/ f"proseco_evaluator_output/{self.time_string}_{self.bulk_config["evaluation_name"]}"
)
path.mkdir(parents=True)
self.logger.info(f"Creating evaluation directory {path}")
return path
def isolate_binary(self) -> None:
"""Moves the binary to a unique directory, so that rebuilding the binary does not affect the evaluation."""
devel_binary_path = (Path(io.get_ros_pack_path()).parents[1]).joinpath(
"devel_isolated/ros_proseco_planning/lib/ros_proseco_planning"
)
temp_binary_path = Path(f"~/evaluator_bin_{self.time_string}").expanduser()
shutil.copytree(devel_binary_path, temp_binary_path)
self.logger.debug(f"Created temporary binary path {temp_binary_path}")
self.binary_path = temp_binary_path
def remove_binary(self) -> None:
"""Removes the temporary binary for the evaluation."""
self.logger.debug(f"Removed temporary binary path {self.binary_path}")
shutil.rmtree(self.binary_path)
def init_ray(self) -> None:
"""Initializes the ray cluster, determines the maximum number of workers and creates the workers."""
self.logger.debug(f"Initializing ray cluster")
ray.init(
address=self.args.address or self.bulk_config["ray_cluster"]["address"],
include_dashboard=not self.args.no_dashboard,
dashboard_host="0.0.0.0",
log_to_driver=self.args.debug,
_temp_dir=str(io.get_user_temp_dir() / "ray"),
)
def print_evaluation_scenarios(self) -> None:
"""Prints the different combinations of settings for the evaluation."""
self.logger.debug(f"EVALUATOR CONFIGURATION: {self.bulk_config}")
scenarios = ",".join(self.bulk_config["scenarios"])
self.logger.info(f"Evaluating Scenarios: [{scenarios}]")
@staticmethod
def permute_dictionary(
dictionary: Dict[str, any], permutation: Dict[str, any]
) -> Dict[str, any]:
"""Creates a new dictionary with the given permutation applied.
Parameters
----------
dictionary : Dict[str, any]
The dictionary to apply the permutation to.
permutation : Dict[str, any]
The permutation to apply.
Returns
-------
Dict[str, any]
The new options dictionary.
"""
dictionary = flatten_dictionary(dictionary)
dictionary.update(permutation)
return nest_dictionary(dictionary)
@staticmethod
def remove_random_seed(options: Dict[str, any]) -> Dict[str, any]:
"""Removes the random seed from the options.
Parameters
----------
options : Dict[str, any]
The options to remove the random seed from.
Returns
-------
Dict[str, any]
The options without the random seed.
"""
options = copy.deepcopy(options)
options["compute_options"].pop("random_seed")
return options
@staticmethod
def hash_options_and_scenario(
options: Dict[str, Any], scenario: Dict[str, Any]
) -> str:
"""Hashes the options and the scenario.
Parameters
----------
options : Dict[str, Any]
The options to hash.
scenario : Dict[str, Any]
The scenario to hash.
Returns
-------
str
The combination of the options and the scenario.
"""
options_scenario = {}
options_scenario.update(options)
options_scenario.update(scenario)
return hash_dict(options_scenario)
def get_file(self, file_type: str, file: str) -> Dict[str, Any]:
"""Returns a scenario, config or options dictionary.
The files are loaded via json.
Parameters
----------
file_type : str
String indicating whether the file to load contains options, a scenario or a config for the evaluator.
file : str
Name of the file to load.
Returns
-------
Dict[str, Any]
Loaded options dictionary.
"""
if isinstance(file, dict):
data = file
elif type(file) is str:
if not file.endswith(".json"):
file += ".json"
if (
file_type == "options"
or file_type == "scenarios"
or file_type == "evaluator"
):
path = io.get_ros_pack_path() / "config" / file_type / file
else:
raise Exception(f"Unknown file type {file_type}")
data = io.load_data(path)
return data
    def options_iterator(
        self,
    ) -> Generator[
        Tuple[Dict[str, Any], Dict[str, Any], Dict[str, str]],
        None,
        None,
    ]:
        """Defines an iterator over the option/scenario permutations.
        The iterator yields the files needed to evaluate one scenario with a worker node.
        Yields
        -------
        Tuple[Dict[str, Any], Dict[str, Any], Dict[str, str]]
            Tuple containing everything needed to initiate a single evaluation run:
            - new_options : dictionary containing the (permuted) compute options.
            - new_scenario : the loaded (permuted) scenario configuration.
            - uuids : unique IDs and options/scenario hashes for this run.
        """
# Iterate over all provided compute option files
for options in self.bulk_config["options"]:
options = io.load_options(options)
flat_options = flatten_dictionary(self.bulk_config["options_alterations"])
options_permutations = get_permutations(flat_options)
# Iterate over all permutations of options alterations
for options_permutation in options_permutations:
self.logger.debug(f"Permuting options with: {options_permutation}")
new_options = self.permute_dictionary(options, options_permutation)
# Unique ID for the options file
uuid_options = str(uu_id.uuid4())
new_options_wo_random_seed = self.remove_random_seed(new_options)
options_hash = hash_dict(new_options_wo_random_seed)
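                # the random seed is stripped first so that runs differing only in
                # their seed still share the same options_hash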
# Iterate over every scenario
for scenario in self.bulk_config["scenarios"]:
scenario = io.load_scenario(scenario)
flat_scenario = flatten_dictionary(
self.bulk_config["scenario_alterations"]
)
scenarios_permutations = get_permutations(flat_scenario)
# Iterate over all scenario permutations of the scenario alterations
for scenarios_permutation in scenarios_permutations:
self.logger.debug(
f"Permuting scenario with: {scenarios_permutation}"
)
new_scenario = self.permute_dictionary(
scenario, scenarios_permutation
)
# Unique ID for the options and scenario tuple
uuid_scenario = str(uu_id.uuid4())
options_scenario_hash = self.hash_options_and_scenario(
new_options_wo_random_seed, new_scenario
)
uuids = {
"options_uuid": uuid_options,
"options_scenario_uuid": uuid_scenario,
"options_hash": options_hash,
"options_scenario_hash": options_scenario_hash,
}
# Iterate over number of runs
for _ in range(self.bulk_config["number_runs"]):
yield new_options, new_scenario, uuids
    def run_tasks(self):
        """Starts the tasks and waits for them to finish.
        Returns
        -------
        List[List[Any]]
            The list of results, one entry per evaluation run.
        """
pb = ProgressBar(self.number_evaluations)
results_ref = []
for options, scenario, uuids in self.options_iterator():
results_ref.append(
run.remote(
options,
scenario,
uuids,
self.binary_path,
self.args.debug,
pb.actor,
)
)
pb.print_until_done_or_timeout(self.task_timeout)
results = ray.get(results_ref)
return results
    def start(self) -> List[List[Any]]:
        """Creates all the ray futures according to the configuration file and evaluates them.
        Main method for running the evaluation and delegating tasks to workers.
        Notes
        -----
        All results are also persisted to disk in the evaluation directory, so the
        returned list is not the only copy of the data.
        Returns
        -------
        List[List[Any]]
            The results of this evaluation iteration, one entry per run.
        """
self.print_evaluation_scenarios()
# Starting time of the evaluation
beginning = datetime.now()
results = self.run_tasks()
self.save_results(results)
# create summary json if specified
if not self.args.no_summary:
_ = self.create_summary(True)
self.logger.info(
f"{self.c_okay}The {self.iteration_index}-th evaluation terminated successfully in {datetime.now()-beginning}{self.c_reset}"
)
self.iteration_index += 1
return results
def save_results(self, results: List[List[Any]]) -> None:
"""Saves the results to disk.
Parameters
----------
results : List[List[Any]]
The list containing the results of all runs.
"""
for result_index in range(len(results)):
self.save_result(results[result_index], result_index)
def save_result(self, result: List[Any], result_index: int) -> None:
"""Saves a single result to disk.
Parameters
----------
result : List[Any]
The list containing the partial results of a single run.
result_index : int
The index of the result.
"""
result_path = (
self.path
/ f"iteration_{self.iteration_index}"
/ f"result_{str(result_index).zfill(io.get_number_digits(self.number_evaluations-1))}of{str(self.number_evaluations-1).zfill(io.get_number_digits(self.number_evaluations-1))}"
)
result_path.mkdir(parents=True)
for file_name, data in result:
io.save_data(data, result_path / file_name)
# append the path to the result
result.append(["%PATH%", str(result_path)])
@staticmethod
def _check_alterations_key(key: str, value: Any) -> bool:
"""Checks whether the specified key is being iterated over in the evaluator bulk config.
Parameters
----------
key : str
String dictionary key of an options file.
value : Any
Dictionary value.
Returns
-------
bool
True if the key is a compute_options alteration, False else.
"""
return (
isinstance(value, list)
and key.startswith("options_alterations")
and not key.endswith("seed")
and not key.split("/")[1] == "output_options"
)
def create_summary(self, save_to_disk: bool = False) -> List[Dict]:
"""Returns a summary of the results and saves it in the evaluation directory if the flag argument is True.
Parameters
----------
save_to_disk : bool = False
Flag for saving the summary to disk.
Returns
-------
List[Dict]
List containing summary dicts for each run. The summary contains:
Scenario name, options_uuid, path to the results folder, success flag.
"""
self.logger.info("Summarizing results")
results = []
# Get the config keys that define alterations from the bulk config
        co_keys = [
            # strip the "options_alterations/" prefix (str.lstrip would strip a
            # character set, not a prefix, and can mangle the key)
            key[len("options_alterations/") :]
            for key, value in (flatten_dictionary(self.bulk_config)).items()
            if self._check_alterations_key(key, value)
        ]
for run_dir in get_run_directories(self.path):
data = load_result(run_dir)
options = load_options(run_dir)
scenario = load_scenario(run_dir)
uuid = load_uuid(run_dir)
data["scenario"] = scenario["name"]
# Add uuids and hashes to the results file
data.update(uuid)
# added the path to the results so that the folder can be found for the visualization
data["path"] = str(run_dir.resolve())
# Load the altered options from the options file and append them to the results
if co_keys:
flat_opt = flatten_dictionary(options)
for key in co_keys:
name = key.split("/")[-1]
data[name] = flat_opt[key]
# determine whether the run was successful or not
data["success"] = is_successful_result(data)
results.append(data)
# persist summary to disk
if save_to_disk:
path = self.path / "results.json"
io.save_data(results, path)
return results
def create_result_dataframe(self, save_to_disk: bool = False) -> pd.DataFrame:
"""Returns detailed result information including the compute options and saves them in the evaluation directory if the flag argument is True.
Parameters
----------
save_to_disk : bool = False
Flag for saving the summary to disk.
Returns
-------
pandas.DataFrame
DataFrame generated from the individual run results. Contains for each run:
Complete options, detailed result, uuid, scenario name.
"""
all_data = []
        for result_path, options_path, scenario_path, uuid_path in tqdm.tqdm(
zip(
self.path.rglob("result.json"),
self.path.rglob("options_output.json"),
self.path.rglob("scenario_output.json"),
self.path.rglob("uuid.json"),
),
total=self.number_evaluations,
ascii=True,
desc="Generating DataFrame",
):
# open all files and extract the information
options = pd.json_normalize(io.load_data(options_path))
results = pd.json_normalize(io.load_data(result_path))
uuid = pd.json_normalize(io.load_data(uuid_path))
run_data = pd.concat([options, uuid, results], axis=1)
run_data["scenario"] = io.load_data(scenario_path)["name"]
all_data.append(run_data)
df_all = pd.concat(all_data, axis=0)
df_all.reset_index(drop=True, inplace=True)
if save_to_disk:
df_all.to_pickle(self.path / "run_data.gz", compression="gzip")
return df_all
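# --- Illustrative usage sketch (not part of the original evaluator) -------------------
# Assumes a bulk config named "example" exists under config/evaluator and that a ray
# cluster is reachable; the Namespace fields mirror the argparse flags read by Head
# (config, address, no_dashboard, debug, no_summary).
def _example_evaluation():  # pragma: no cover
    import argparse
    args = argparse.Namespace(
        config="example",  # hypothetical bulk config name
        address=None,  # falls back to bulk_config["ray_cluster"]["address"]
        no_dashboard=True,
        no_summary=False,
        debug=False,
    )
    head = Head(args)
    results = head.start()  # run all permutations and persist results to disk
    df = head.create_result_dataframe(save_to_disk=True)
    return results, df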
|
import os
import copy
import ray
import argparse
import tqdm
import logging
import pandas as pd
from pathlib import Path
from datetime import datetime
import shutil
import uuid as uu_id
from typing import ClassVar, Dict, Generator, Tuple, List, Any
from proseco.evaluator.remote import run
from proseco.evaluator.progressBar import ProgressBar
import proseco.utility.io as io
import proseco.utility.ui as ui
from proseco.dashboard.model import (
get_run_directories,
is_successful_result,
load_result,
load_options,
load_scenario,
load_uuid,
)
from proseco.evaluator.util import (
flatten_dictionary,
nest_dictionary,
hash_dict,
get_permutations,
)
class Head:
"""Head node of the Evaluation."""
pack_path: str
args: argparse.Namespace
bulk_config: Dict[str, Any]
time_string: str
number_evaluations: int
iteration_index: int
logger: logging.Logger
c_error: ClassVar[str] = "\033[91m\033[1m"
c_warning: ClassVar[str] = "\33[33m\033["
c_okay: ClassVar[str] = "\033[92m\033[1m"
c_reset: ClassVar[str] = "\033[0m"
def __init__(
self, args: argparse.Namespace, time_string: str = None, init_ray: bool = True
):
"""Constructor.
Parameters
----------
args : argparse.Namespace
Command line arguments for starting the evaluation.
time_string : str
Optional time stamp when this head node was started.
init_ray : bool
When True the evaluation will connect the local ray client to a cluster (default: True).
It is used to prevent multiple ray-inits when running evaluations in sequence, else ray init will throw an exception.
"""
self.logger = ui.get_logger("ProSeCo Evaluator")
# Arguments with flags
self.args = args
        # Opened evaluator config file
self.bulk_config = self.get_file(file_type="evaluator", file=args.config)
# Timestamp of the head node
if time_string == None:
self.time_string = io.create_timestamp()
else:
self.time_string = time_string
# How many evaluations this config will generate
self.number_evaluations = self.determine_number_evaluations()
# The current iteration index the evaluation is at
self.iteration_index = 0
self.path = self.create_evaluation_directory()
# The time in seconds, when a task is considered to be timed out.
self.task_timeout = 120
self.isolate_binary()
self.init_ray()
def __del__(self):
"""Destructor.
Removes the temporary binary for the evaluation.
"""
self.remove_binary()
def determine_number_evaluations(self) -> int:
"""Determines the number of MCTS-Evaluations to be started.
Returns
-------
int
Total number of scenario evaluations.
"""
len_options = len(self.bulk_config["options"])
len_scenarios = len(self.bulk_config["scenarios"])
number_runs = self.bulk_config["number_runs"]
alterations = flatten_dictionary(self.bulk_config["options_alterations"])
scenario_alterations = flatten_dictionary(
self.bulk_config["scenario_alterations"]
)
alterations.update(scenario_alterations)
len_alter = 1
for _, v in alterations.items():
if type(v) is list:
len_alter = len_alter * len(v)
else:
pass
number_evaluations = len_options * len_scenarios * len_alter * number_runs
return number_evaluations
def create_evaluation_directory(self) -> Path:
"""Creates a directory for the evaluation.
Returns
-------
Path
The path to the evaluation directory.
"""
path = (
io.get_user_temp_dir()
/ f"proseco_evaluator_output/{self.time_string}_{self.bulk_config['evaluation_name']}"
)
path.mkdir(parents=True)
self.logger.info(f"Creating evaluation directory {path}")
return path
def isolate_binary(self) -> None:
"""Moves the binary to a unique directory, so that rebuilding the binary does not affect the evaluation."""
devel_binary_path = (Path(io.get_ros_pack_path()).parents[1]).joinpath(
"devel_isolated/ros_proseco_planning/lib/ros_proseco_planning"
)
temp_binary_path = Path(f"~/evaluator_bin_{self.time_string}").expanduser()
shutil.copytree(devel_binary_path, temp_binary_path)
self.logger.debug(f"Created temporary binary path {temp_binary_path}")
self.binary_path = temp_binary_path
def remove_binary(self) -> None:
"""Removes the temporary binary for the evaluation."""
self.logger.debug(f"Removed temporary binary path {self.binary_path}")
shutil.rmtree(self.binary_path)
def init_ray(self) -> None:
"""Initializes the ray cluster, determines the maximum number of workers and creates the workers."""
self.logger.debug(f"Initializing ray cluster")
ray.init(
address=self.args.address or self.bulk_config["ray_cluster"]["address"],
include_dashboard=not self.args.no_dashboard,
dashboard_host="0.0.0.0",
log_to_driver=self.args.debug,
_temp_dir=str(io.get_user_temp_dir() / "ray"),
)
def print_evaluation_scenarios(self) -> None:
"""Prints the different combinations of settings for the evaluation."""
self.logger.debug(f"EVALUATOR CONFIGURATION: {self.bulk_config}")
scenarios = ",".join(self.bulk_config["scenarios"])
self.logger.info(f"Evaluating Scenarios: [{scenarios}]")
@staticmethod
def permute_dictionary(
dictionary: Dict[str, any], permutation: Dict[str, any]
) -> Dict[str, any]:
"""Creates a new dictionary with the given permutation applied.
Parameters
----------
dictionary : Dict[str, any]
The dictionary to apply the permutation to.
permutation : Dict[str, any]
The permutation to apply.
Returns
-------
Dict[str, any]
The new options dictionary.
"""
dictionary = flatten_dictionary(dictionary)
dictionary.update(permutation)
return nest_dictionary(dictionary)
@staticmethod
def remove_random_seed(options: Dict[str, any]) -> Dict[str, any]:
"""Removes the random seed from the options.
Parameters
----------
options : Dict[str, any]
The options to remove the random seed from.
Returns
-------
Dict[str, any]
The options without the random seed.
"""
options = copy.deepcopy(options)
options["compute_options"].pop("random_seed")
return options
@staticmethod
def hash_options_and_scenario(
options: Dict[str, Any], scenario: Dict[str, Any]
) -> str:
"""Hashes the options and the scenario.
Parameters
----------
options : Dict[str, Any]
The options to hash.
scenario : Dict[str, Any]
The scenario to hash.
Returns
-------
str
The combination of the options and the scenario.
"""
options_scenario = {}
options_scenario.update(options)
options_scenario.update(scenario)
return hash_dict(options_scenario)
def get_file(self, file_type: str, file: str) -> Dict[str, Any]:
"""Returns a scenario, config or options dictionary.
The files are loaded via json.
Parameters
----------
file_type : str
String indicating whether the file to load contains options, a scenario or a config for the evaluator.
file : str
Name of the file to load.
Returns
-------
Dict[str, Any]
Loaded options dictionary.
"""
if isinstance(file, dict):
data = file
elif type(file) is str:
if not file.endswith(".json"):
file += ".json"
if (
file_type == "options"
or file_type == "scenarios"
or file_type == "evaluator"
):
path = io.get_ros_pack_path() / "config" / file_type / file
else:
raise Exception(f"Unknown file type {file_type}")
data = io.load_data(path)
return data
def options_iterator(
self,
) -> Generator[
Tuple[Dict[str, Dict[str, Any]], Dict[str, Any], List[str], Dict[str, Any]],
None,
None,
]:
"""Defines an iterator for the option permutations.
The iterator returns the necessary files for evaluating a scenario with a worker node.
Additionally, it also returns an info dict used to update the progress bar.
Yields
-------
options: Tuple[Dict[str, Dict[str, Any]], Dict[str, Any], List[str], Dict[str, Any]]
Tuple containing all the necessary files to initiate an evaluation run. Contains:
- new_options : Dictionary containing compute options.
- new_scenario : Loaded scenario configuration.
- uuid : Unique IDs and scenario-options hashes.
- info_dict: Information to update the progress bar.
"""
# Iterate over all provided compute option files
for options in self.bulk_config["options"]:
options = io.load_options(options)
flat_options = flatten_dictionary(self.bulk_config["options_alterations"])
options_permutations = get_permutations(flat_options)
# Iterate over all permutations of options alterations
for options_permutation in options_permutations:
self.logger.debug(f"Permuting options with: {options_permutation}")
new_options = self.permute_dictionary(options, options_permutation)
# Unique ID for the options file
uuid_options = str(uu_id.uuid4())
new_options_wo_random_seed = self.remove_random_seed(new_options)
options_hash = hash_dict(new_options_wo_random_seed)
# Iterate over every scenario
for scenario in self.bulk_config["scenarios"]:
scenario = io.load_scenario(scenario)
flat_scenario = flatten_dictionary(
self.bulk_config["scenario_alterations"]
)
scenarios_permutations = get_permutations(flat_scenario)
# Iterate over all scenario permutations of the scenario alterations
for scenarios_permutation in scenarios_permutations:
self.logger.debug(
f"Permuting scenario with: {scenarios_permutation}"
)
new_scenario = self.permute_dictionary(
scenario, scenarios_permutation
)
# Unique ID for the options and scenario tuple
uuid_scenario = str(uu_id.uuid4())
options_scenario_hash = self.hash_options_and_scenario(
new_options_wo_random_seed, new_scenario
)
uuids = {
"options_uuid": uuid_options,
"options_scenario_uuid": uuid_scenario,
"options_hash": options_hash,
"options_scenario_hash": options_scenario_hash,
}
# Iterate over number of runs
for _ in range(self.bulk_config["number_runs"]):
yield new_options, new_scenario, uuids
def run_tasks(self):
"""Starts the tasks and waits for them to finish.
Parameters
----------
results : List[List[Any]]
The list of results.
"""
pb = ProgressBar(self.number_evaluations)
results_ref = []
for options, scenario, uuids in self.options_iterator():
results_ref.append(
run.remote(
options,
scenario,
uuids,
self.binary_path,
self.args.debug,
pb.actor,
)
)
pb.print_until_done_or_timeout(self.task_timeout)
results = ray.get(results_ref)
return results
def start(self) -> Tuple[List[List[Any]], bool]:
"""Creates all the ray futures according to the configuration file and evaluates them.
Main method for running the evaluation and delegating tasks to workers.
Notes
-----
If the result list becomes larger than 5GB, the result list returned by this method does not contain all results.
The return value `complete` indicates whether all results are contained.
Despite the limitation of the returned list, all results are always persisted to disk in the evaluation directory.
Returns
-------
Tuple[List[List[Any]], bool]
Tuple containing the results of the evaluation.
The first element is a list with the current results.
The second element is a boolean flag indicating whether the list contains ALL results.
"""
self.print_evaluation_scenarios()
# Starting time of the evaluation
beginning = datetime.now()
results = self.run_tasks()
self.save_results(results)
# create summary json if specified
if not self.args.no_summary:
_ = self.create_summary(True)
self.logger.info(
f"{self.c_okay}The {self.iteration_index}-th evaluation terminated successfully in {datetime.now()-beginning}{self.c_reset}"
)
self.iteration_index += 1
return results
def save_results(self, results: List[List[Any]]) -> None:
"""Saves the results to disk.
Parameters
----------
results : List[List[Any]]
The list containing the results of all runs.
"""
for result_index in range(len(results)):
self.save_result(results[result_index], result_index)
def save_result(self, result: List[Any], result_index: int) -> None:
"""Saves a single result to disk.
Parameters
----------
result : List[Any]
The list containing the partial results of a single run.
result_index : int
The index of the result.
"""
result_path = (
self.path
/ f"iteration_{self.iteration_index}"
/ f"result_{str(result_index).zfill(io.get_number_digits(self.number_evaluations-1))}of{str(self.number_evaluations-1).zfill(io.get_number_digits(self.number_evaluations-1))}"
)
result_path.mkdir(parents=True)
for file_name, data in result:
io.save_data(data, result_path / file_name)
# append the path to the result
result.append(["%PATH%", str(result_path)])
@staticmethod
def _check_alterations_key(key: str, value: Any) -> bool:
"""Checks whether the specified key is being iterated over in the evaluator bulk config.
Parameters
----------
key : str
String dictionary key of an options file.
value : Any
Dictionary value.
Returns
-------
bool
True if the key is a compute_options alteration, False else.
"""
return (
isinstance(value, list)
and key.startswith("options_alterations")
and not key.endswith("seed")
and not key.split("/")[1] == "output_options"
)
def create_summary(self, save_to_disk: bool = False) -> List[Dict]:
"""Returns a summary of the results and saves it in the evaluation directory if the flag argument is True.
Parameters
----------
save_to_disk : bool = False
Flag for saving the summary to disk.
Returns
-------
List[Dict]
List containing summary dicts for each run. The summary contains:
Scenario name, options_uuid, path to the results folder, success flag.
"""
self.logger.info("Summarizing results")
results = []
# Get the config keys that define alterations from the bulk config
co_keys = [
key.lstrip("options_alterations/")
for key, value in (flatten_dictionary(self.bulk_config)).items()
if self._check_alterations_key(key, value)
]
for run_dir in get_run_directories(self.path):
data = load_result(run_dir)
options = load_options(run_dir)
scenario = load_scenario(run_dir)
uuid = load_uuid(run_dir)
data["scenario"] = scenario["name"]
# Add uuids and hashes to the results file
data.update(uuid)
# added the path to the results so that the folder can be found for the visualization
data["path"] = str(run_dir.resolve())
# Load the altered options from the options file and append them to the results
if co_keys:
flat_opt = flatten_dictionary(options)
for key in co_keys:
name = key.split("/")[-1]
data[name] = flat_opt[key]
# determine whether the run was successful or not
data["success"] = is_successful_result(data)
results.append(data)
# persist summary to disk
if save_to_disk:
path = self.path / "results.json"
io.save_data(results, path)
return results
def create_result_dataframe(self, save_to_disk: bool = False) -> pd.DataFrame:
"""Returns detailed result information including the compute options and saves them in the evaluation directory if the flag argument is True.
Parameters
----------
save_to_disk : bool = False
Flag for saving the summary to disk.
Returns
-------
pandas.DataFrame
DataFrame generated from the individual run results. Contains for each run:
Complete options, detailed result, uuid, scenario name.
"""
all_data = []
        for result_path, options_path, scenario_path, uuid_path in tqdm.tqdm(
zip(
self.path.rglob("result.json"),
self.path.rglob("options_output.json"),
self.path.rglob("scenario_output.json"),
self.path.rglob("uuid.json"),
),
total=self.number_evaluations,
ascii=True,
desc="Generating DataFrame",
):
# open all files and extract the information
options = pd.json_normalize(io.load_data(options_path))
results = pd.json_normalize(io.load_data(result_path))
uuid = pd.json_normalize(io.load_data(uuid_path))
run_data = pd.concat([options, uuid, results], axis=1)
run_data["scenario"] = io.load_data(scenario_path)["name"]
all_data.append(run_data)
df_all = pd.concat(all_data, axis=0)
df_all.reset_index(drop=True, inplace=True)
if save_to_disk:
df_all.to_pickle(self.path / "run_data.gz", compression="gzip")
return df_all
|
"""Support for the Netatmo cameras."""
import logging
import aiohttp
import pyatmo
import voluptuous as vol
from openpeerpower.components.camera import SUPPORT_STREAM, Camera
from openpeerpower.core import callback
from openpeerpower.exceptions import PlatformNotReady
from openpeerpower.helpers import config_validation as cv, entity_platform
from openpeerpower.helpers.dispatcher import async_dispatcher_connect
from .const import (
ATTR_CAMERA_LIGHT_MODE,
ATTR_PERSON,
ATTR_PERSONS,
ATTR_PSEUDO,
CAMERA_LIGHT_MODES,
DATA_CAMERAS,
DATA_EVENTS,
DATA_HANDLER,
DATA_PERSONS,
DOMAIN,
EVENT_TYPE_LIGHT_MODE,
EVENT_TYPE_OFF,
EVENT_TYPE_ON,
MANUFACTURER,
MODELS,
SERVICE_SET_CAMERA_LIGHT,
SERVICE_SET_PERSON_AWAY,
SERVICE_SET_PERSONS_HOME,
SIGNAL_NAME,
WEBHOOK_LIGHT_MODE,
WEBHOOK_NACAMERA_CONNECTION,
WEBHOOK_PUSH_TYPE,
)
from .data_handler import CAMERA_DATA_CLASS_NAME
from .netatmo_entity_base import NetatmoBase
_LOGGER = logging.getLogger(__name__)
DEFAULT_QUALITY = "high"
async def async_setup_entry(opp, entry, async_add_entities):
"""Set up the Netatmo camera platform."""
if "access_camera" not in entry.data["token"]["scope"]:
_LOGGER.info(
"Cameras are currently not supported with this authentication method"
)
data_handler = opp.data[DOMAIN][entry.entry_id][DATA_HANDLER]
await data_handler.register_data_class(
CAMERA_DATA_CLASS_NAME, CAMERA_DATA_CLASS_NAME, None
)
data_class = data_handler.data.get(CAMERA_DATA_CLASS_NAME)
if not data_class or not data_class.raw_data:
raise PlatformNotReady
all_cameras = []
for home in data_class.cameras.values():
for camera in home.values():
all_cameras.append(camera)
entities = [
NetatmoCamera(
data_handler,
camera["id"],
camera["type"],
camera["home_id"],
DEFAULT_QUALITY,
)
for camera in all_cameras
]
for person_id, person_data in data_handler.data[
CAMERA_DATA_CLASS_NAME
].persons.items():
opp.data[DOMAIN][DATA_PERSONS][person_id] = person_data.get(ATTR_PSEUDO)
_LOGGER.debug("Adding cameras %s", entities)
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_PERSONS_HOME,
{vol.Required(ATTR_PERSONS): vol.All(cv.ensure_list, [cv.string])},
"_service_set_persons_home",
)
platform.async_register_entity_service(
SERVICE_SET_PERSON_AWAY,
{vol.Optional(ATTR_PERSON): cv.string},
"_service_set_person_away",
)
platform.async_register_entity_service(
SERVICE_SET_CAMERA_LIGHT,
{vol.Required(ATTR_CAMERA_LIGHT_MODE): vol.In(CAMERA_LIGHT_MODES)},
"_service_set_camera_light",
)
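    # at this point each camera entity exposes three entity services (set persons home,
    # set person away, set camera light); the schemas above validate their payloads via
    # the ATTR_* constants imported from const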
class NetatmoCamera(NetatmoBase, Camera):
"""Representation of a Netatmo camera."""
def __init__(
self,
data_handler,
camera_id,
camera_type,
home_id,
quality,
):
"""Set up for access to the Netatmo camera images."""
Camera.__init__(self)
super().__init__(data_handler)
self._data_classes.append(
{"name": CAMERA_DATA_CLASS_NAME, SIGNAL_NAME: CAMERA_DATA_CLASS_NAME}
)
self._id = camera_id
self._home_id = home_id
self._device_name = self._data.get_camera(camera_id=camera_id).get("name")
self._name = f"{MANUFACTURER} {self._device_name}"
self._model = camera_type
self._unique_id = f"{self._id}-{self._model}"
self._quality = quality
self._vpnurl = None
self._localurl = None
self._status = None
self._sd_status = None
self._alim_status = None
self._is_local = None
self._light_state = None
async def async_added_to_opp(self) -> None:
"""Entity created."""
await super().async_added_to_opp()
for event_type in (EVENT_TYPE_LIGHT_MODE, EVENT_TYPE_OFF, EVENT_TYPE_ON):
self._listeners.append(
async_dispatcher_connect(
self.opp,
f"signal-{DOMAIN}-webhook-{event_type}",
self.handle_event,
)
)
self.opp.data[DOMAIN][DATA_CAMERAS][self._id] = self._device_name
@callback
def handle_event(self, event):
"""Handle webhook events."""
data = event["data"]
if not data.get("camera_id"):
return
if data["home_id"] == self._home_id and data["camera_id"] == self._id:
if data[WEBHOOK_PUSH_TYPE] in ["NACamera-off", "NACamera-disconnection"]:
self.is_streaming = False
self._status = "off"
elif data[WEBHOOK_PUSH_TYPE] in [
"NACamera-on",
WEBHOOK_NACAMERA_CONNECTION,
]:
self.is_streaming = True
self._status = "on"
elif data[WEBHOOK_PUSH_TYPE] == WEBHOOK_LIGHT_MODE:
self._light_state = data["sub_type"]
self.async_write_op_state()
return
async def async_camera_image(self):
"""Return a still image response from the camera."""
try:
return await self._data.async_get_live_snapshot(camera_id=self._id)
except (
aiohttp.ClientPayloadError,
aiohttp.ContentTypeError,
aiohttp.ServerDisconnectedError,
aiohttp.ClientConnectorError,
pyatmo.exceptions.ApiError,
) as err:
_LOGGER.debug("Could not fetch live camera image (%s)", err)
return None
@property
def extra_state_attributes(self):
"""Return the Netatmo-specific camera state attributes."""
return {
"id": self._id,
"status": self._status,
"sd_status": self._sd_status,
"alim_status": self._alim_status,
"is_local": self._is_local,
"vpn_url": self._vpnurl,
"local_url": self._localurl,
"light_state": self._light_state,
}
@property
def available(self):
"""Return True if entity is available."""
return bool(self._alim_status == "on" or self._status == "disconnected")
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_STREAM
@property
def brand(self):
"""Return the camera brand."""
return MANUFACTURER
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return bool(self._status == "on")
@property
def is_on(self):
"""Return true if on."""
return self.is_streaming
async def async_turn_off(self):
"""Turn off camera."""
await self._data.async_set_state(
home_id=self._home_id, camera_id=self._id, monitoring="off"
)
async def async_turn_on(self):
"""Turn on camera."""
await self._data.async_set_state(
home_id=self._home_id, camera_id=self._id, monitoring="on"
)
async def stream_source(self):
"""Return the stream source."""
url = "{0}/live/files/{1}/index.m3u8"
if self._localurl:
return url.format(self._localurl, self._quality)
return url.format(self._vpnurl, self._quality)
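# Illustrative note (hypothetical host): with DEFAULT_QUALITY the resolved source looks
# roughly like "<local or vpn url>/live/files/high/index.m3u8"; the local URL is preferred
# whenever it is available.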
@property
def model(self):
"""Return the camera model."""
return MODELS[self._model]
@callback
def async_update_callback(self):
"""Update the entity's state."""
camera = self._data.get_camera(self._id)
self._vpnurl, self._localurl = self._data.camera_urls(self._id)
self._status = camera.get("status")
self._sd_status = camera.get("sd_status")
self._alim_status = camera.get("alim_status")
self._is_local = camera.get("is_local")
self.is_streaming = bool(self._status == "on")
if self._model == "NACamera": # Smart Indoor Camera
self.opp.data[DOMAIN][DATA_EVENTS][self._id] = self.process_events(
self._data.events.get(self._id, {})
)
elif self._model == "NOC": # Smart Outdoor Camera
self.opp.data[DOMAIN][DATA_EVENTS][self._id] = self.process_events(
self._data.outdoor_events.get(self._id, {})
)
def process_events(self, events):
"""Add meta data to events."""
for event in events.values():
if "video_id" not in event:
continue
if self._is_local:
event[
"media_url"
] = f"{self._localurl}/vod/{event["video_id"]}/files/{self._quality}/index.m3u8"
else:
event[
"media_url"
] = f"{self._vpnurl}/vod/{event["video_id"]}/files/{self._quality}/index.m3u8"
return events
async def _service_set_persons_home(self, **kwargs):
"""Service to change current home schedule."""
persons = kwargs.get(ATTR_PERSONS)
person_ids = []
for person in persons:
for pid, data in self._data.persons.items():
if data.get("pseudo") == person:
person_ids.append(pid)
await self._data.async_set_persons_home(
person_ids=person_ids, home_id=self._home_id
)
_LOGGER.debug("Set %s as at home", persons)
async def _service_set_person_away(self, **kwargs):
"""Service to mark a person as away or set the home as empty."""
person = kwargs.get(ATTR_PERSON)
person_id = None
if person:
for pid, data in self._data.persons.items():
if data.get("pseudo") == person:
person_id = pid
if person_id:
await self._data.async_set_persons_away(
person_id=person_id,
home_id=self._home_id,
)
_LOGGER.debug("Set %s as away", person)
else:
await self._data.async_set_persons_away(
person_id=person_id,
home_id=self._home_id,
)
_LOGGER.debug("Set home as empty")
async def _service_set_camera_light(self, **kwargs):
"""Service to set light mode."""
mode = kwargs.get(ATTR_CAMERA_LIGHT_MODE)
_LOGGER.debug("Turn %s camera light for '%s'", mode, self._name)
await self._data.async_set_state(
home_id=self._home_id,
camera_id=self._id,
floodlight=mode,
)
|
"""Support for the Netatmo cameras."""
import logging
import aiohttp
import pyatmo
import voluptuous as vol
from openpeerpower.components.camera import SUPPORT_STREAM, Camera
from openpeerpower.core import callback
from openpeerpower.exceptions import PlatformNotReady
from openpeerpower.helpers import config_validation as cv, entity_platform
from openpeerpower.helpers.dispatcher import async_dispatcher_connect
from .const import (
ATTR_CAMERA_LIGHT_MODE,
ATTR_PERSON,
ATTR_PERSONS,
ATTR_PSEUDO,
CAMERA_LIGHT_MODES,
DATA_CAMERAS,
DATA_EVENTS,
DATA_HANDLER,
DATA_PERSONS,
DOMAIN,
EVENT_TYPE_LIGHT_MODE,
EVENT_TYPE_OFF,
EVENT_TYPE_ON,
MANUFACTURER,
MODELS,
SERVICE_SET_CAMERA_LIGHT,
SERVICE_SET_PERSON_AWAY,
SERVICE_SET_PERSONS_HOME,
SIGNAL_NAME,
WEBHOOK_LIGHT_MODE,
WEBHOOK_NACAMERA_CONNECTION,
WEBHOOK_PUSH_TYPE,
)
from .data_handler import CAMERA_DATA_CLASS_NAME
from .netatmo_entity_base import NetatmoBase
_LOGGER = logging.getLogger(__name__)
DEFAULT_QUALITY = "high"
async def async_setup_entry(opp, entry, async_add_entities):
"""Set up the Netatmo camera platform."""
if "access_camera" not in entry.data["token"]["scope"]:
_LOGGER.info(
"Cameras are currently not supported with this authentication method"
)
data_handler = opp.data[DOMAIN][entry.entry_id][DATA_HANDLER]
await data_handler.register_data_class(
CAMERA_DATA_CLASS_NAME, CAMERA_DATA_CLASS_NAME, None
)
data_class = data_handler.data.get(CAMERA_DATA_CLASS_NAME)
if not data_class or not data_class.raw_data:
raise PlatformNotReady
all_cameras = []
for home in data_class.cameras.values():
for camera in home.values():
all_cameras.append(camera)
entities = [
NetatmoCamera(
data_handler,
camera["id"],
camera["type"],
camera["home_id"],
DEFAULT_QUALITY,
)
for camera in all_cameras
]
for person_id, person_data in data_handler.data[
CAMERA_DATA_CLASS_NAME
].persons.items():
opp.data[DOMAIN][DATA_PERSONS][person_id] = person_data.get(ATTR_PSEUDO)
_LOGGER.debug("Adding cameras %s", entities)
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_PERSONS_HOME,
{vol.Required(ATTR_PERSONS): vol.All(cv.ensure_list, [cv.string])},
"_service_set_persons_home",
)
platform.async_register_entity_service(
SERVICE_SET_PERSON_AWAY,
{vol.Optional(ATTR_PERSON): cv.string},
"_service_set_person_away",
)
platform.async_register_entity_service(
SERVICE_SET_CAMERA_LIGHT,
{vol.Required(ATTR_CAMERA_LIGHT_MODE): vol.In(CAMERA_LIGHT_MODES)},
"_service_set_camera_light",
)
class NetatmoCamera(NetatmoBase, Camera):
"""Representation of a Netatmo camera."""
def __init__(
self,
data_handler,
camera_id,
camera_type,
home_id,
quality,
):
"""Set up for access to the Netatmo camera images."""
Camera.__init__(self)
super().__init__(data_handler)
self._data_classes.append(
{"name": CAMERA_DATA_CLASS_NAME, SIGNAL_NAME: CAMERA_DATA_CLASS_NAME}
)
self._id = camera_id
self._home_id = home_id
self._device_name = self._data.get_camera(camera_id=camera_id).get("name")
self._name = f"{MANUFACTURER} {self._device_name}"
self._model = camera_type
self._unique_id = f"{self._id}-{self._model}"
self._quality = quality
self._vpnurl = None
self._localurl = None
self._status = None
self._sd_status = None
self._alim_status = None
self._is_local = None
self._light_state = None
async def async_added_to_opp(self) -> None:
"""Entity created."""
await super().async_added_to_opp()
for event_type in (EVENT_TYPE_LIGHT_MODE, EVENT_TYPE_OFF, EVENT_TYPE_ON):
self._listeners.append(
async_dispatcher_connect(
self.opp,
f"signal-{DOMAIN}-webhook-{event_type}",
self.handle_event,
)
)
self.opp.data[DOMAIN][DATA_CAMERAS][self._id] = self._device_name
@callback
def handle_event(self, event):
"""Handle webhook events."""
data = event["data"]
if not data.get("camera_id"):
return
if data["home_id"] == self._home_id and data["camera_id"] == self._id:
if data[WEBHOOK_PUSH_TYPE] in ["NACamera-off", "NACamera-disconnection"]:
self.is_streaming = False
self._status = "off"
elif data[WEBHOOK_PUSH_TYPE] in [
"NACamera-on",
WEBHOOK_NACAMERA_CONNECTION,
]:
self.is_streaming = True
self._status = "on"
elif data[WEBHOOK_PUSH_TYPE] == WEBHOOK_LIGHT_MODE:
self._light_state = data["sub_type"]
self.async_write_op_state()
return
async def async_camera_image(self):
"""Return a still image response from the camera."""
try:
return await self._data.async_get_live_snapshot(camera_id=self._id)
except (
aiohttp.ClientPayloadError,
aiohttp.ContentTypeError,
aiohttp.ServerDisconnectedError,
aiohttp.ClientConnectorError,
pyatmo.exceptions.ApiError,
) as err:
_LOGGER.debug("Could not fetch live camera image (%s)", err)
return None
@property
def extra_state_attributes(self):
"""Return the Netatmo-specific camera state attributes."""
return {
"id": self._id,
"status": self._status,
"sd_status": self._sd_status,
"alim_status": self._alim_status,
"is_local": self._is_local,
"vpn_url": self._vpnurl,
"local_url": self._localurl,
"light_state": self._light_state,
}
@property
def available(self):
"""Return True if entity is available."""
return bool(self._alim_status == "on" or self._status == "disconnected")
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_STREAM
@property
def brand(self):
"""Return the camera brand."""
return MANUFACTURER
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return bool(self._status == "on")
@property
def is_on(self):
"""Return true if on."""
return self.is_streaming
async def async_turn_off(self):
"""Turn off camera."""
await self._data.async_set_state(
home_id=self._home_id, camera_id=self._id, monitoring="off"
)
async def async_turn_on(self):
"""Turn on camera."""
await self._data.async_set_state(
home_id=self._home_id, camera_id=self._id, monitoring="on"
)
async def stream_source(self):
"""Return the stream source."""
url = "{0}/live/files/{1}/index.m3u8"
if self._localurl:
return url.format(self._localurl, self._quality)
return url.format(self._vpnurl, self._quality)
@property
def model(self):
"""Return the camera model."""
return MODELS[self._model]
@callback
def async_update_callback(self):
"""Update the entity's state."""
camera = self._data.get_camera(self._id)
self._vpnurl, self._localurl = self._data.camera_urls(self._id)
self._status = camera.get("status")
self._sd_status = camera.get("sd_status")
self._alim_status = camera.get("alim_status")
self._is_local = camera.get("is_local")
self.is_streaming = bool(self._status == "on")
if self._model == "NACamera": # Smart Indoor Camera
self.opp.data[DOMAIN][DATA_EVENTS][self._id] = self.process_events(
self._data.events.get(self._id, {})
)
elif self._model == "NOC": # Smart Outdoor Camera
self.opp.data[DOMAIN][DATA_EVENTS][self._id] = self.process_events(
self._data.outdoor_events.get(self._id, {})
)
def process_events(self, events):
"""Add meta data to events."""
for event in events.values():
if "video_id" not in event:
continue
if self._is_local:
event[
"media_url"
] = f"{self._localurl}/vod/{event['video_id']}/files/{self._quality}/index.m3u8"
else:
event[
"media_url"
] = f"{self._vpnurl}/vod/{event['video_id']}/files/{self._quality}/index.m3u8"
return events
async def _service_set_persons_home(self, **kwargs):
"""Service to change current home schedule."""
persons = kwargs.get(ATTR_PERSONS)
person_ids = []
for person in persons:
for pid, data in self._data.persons.items():
if data.get("pseudo") == person:
person_ids.append(pid)
await self._data.async_set_persons_home(
person_ids=person_ids, home_id=self._home_id
)
_LOGGER.debug("Set %s as at home", persons)
async def _service_set_person_away(self, **kwargs):
"""Service to mark a person as away or set the home as empty."""
person = kwargs.get(ATTR_PERSON)
person_id = None
if person:
for pid, data in self._data.persons.items():
if data.get("pseudo") == person:
person_id = pid
if person_id:
await self._data.async_set_persons_away(
person_id=person_id,
home_id=self._home_id,
)
_LOGGER.debug("Set %s as away", person)
else:
await self._data.async_set_persons_away(
person_id=person_id,
home_id=self._home_id,
)
_LOGGER.debug("Set home as empty")
async def _service_set_camera_light(self, **kwargs):
"""Service to set light mode."""
mode = kwargs.get(ATTR_CAMERA_LIGHT_MODE)
_LOGGER.debug("Turn %s camera light for '%s'", mode, self._name)
await self._data.async_set_state(
home_id=self._home_id,
camera_id=self._id,
floodlight=mode,
)
|
import os
import re
import math
import json
import tempfile
import zipfile
import bagit
import asyncio
from datetime import datetime
from asyncio import events
from ratelimit import sleep_and_retry
from ratelimit.exception import RateLimitException
from aiohttp import ClientSession, http_exceptions
import internetarchive
from datacite import DataCiteMDSClient
from datacite.errors import DataCiteNotFoundError
from osf_pigeon import settings
async def stream_files_to_dir(from_url, to_dir, name):
async with ClientSession() as session:
async with session.get(from_url) as resp:
with open(os.path.join(to_dir, name), "wb") as fp:
async for chunk in resp.content.iter_any():
fp.write(chunk)
async def dump_json_to_dir(from_url, to_dir, name, parse_json=None):
pages = await get_paginated_data(from_url, parse_json)
with open(os.path.join(to_dir, name), "w") as fp:
json.dump(pages, fp)
return pages
def create_zip(temp_dir):
with zipfile.ZipFile(os.path.join(temp_dir, "bag.zip"), "w") as fp:
for root, dirs, files in os.walk(os.path.join(temp_dir, "bag")):
for file in files:
file_path = os.path.join(root, file)
file_name = re.sub(f"^{temp_dir}", "", file_path)
fp.write(file_path, arcname=file_name)
async def get_relationship_attribute(key, url, func):
data = await get_paginated_data(url)
if "data" in data:
return {key: list(map(func, data["data"]))}
return {key: list(map(func, data))}
async def get_metadata_for_ia_item(json_metadata):
"""
This is meant to take the response JSON metadata and format it for IA buckets; it is not
used to generate JSON to be uploaded as raw data into the buckets.
:param json_metadata: metadata from the OSF registration view; contains attributes and relationship
URLs.
Note: Internet Archive advises that all metadata that points to internal OSF features should
have a specific `osf_` prefix. Example: `registry` should be `osf_registry`; however, metadata
such as affiliated_institutions is self-explanatory and doesn't need a prefix.
:return: ia_metadata, the metadata for an IA bucket. Should include the following if they are
not null:
- publisher
- title
- description
- date
- osf_category
- osf_subjects
- osf_tags
- osf_registration_doi
- osf_registry
- osf_registration_schema
- creator (bibliographic contributors, IA recommended this keyword)
- article_doi
- parent
- children
- source
- affiliated_institutions
- license
"""
relationship_data = [
get_relationship_attribute(
"creator",
f'{settings.OSF_API_URL}v2/registrations/{json_metadata["data"]["id"]}/contributors/'
f"?filter[bibliographic]=true&",
lambda contrib: contrib["embeds"]["users"]["data"]["attributes"][
"full_name"
],
),
get_relationship_attribute(
"affiliated_institutions",
f'{settings.OSF_API_URL}v2/registrations/{json_metadata["data"]["id"]}/institutions/',
lambda institution: institution["attributes"]["name"],
),
get_relationship_attribute(
"osf_subjects",
f'{settings.OSF_API_URL}v2/registrations/{json_metadata["data"]["id"]}/subjects/',
lambda subject: subject["attributes"]["text"],
),
get_relationship_attribute(
"children",
f'{settings.OSF_API_URL}v2/registrations/{json_metadata["data"]["id"]}/children/',
lambda child: f"https://archive.org/details/"
f'{settings.REG_ID_TEMPLATE.format(guid=child["id"])}',
),
]
relationship_data = {
k: v
for pair in await asyncio.gather(*relationship_data)
for k, v in pair.items()
} # merge all the pairs
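# Illustration (hypothetical values): asyncio.gather returns one single-key dict per task,
# e.g. [{"creator": ["Jane Doe"]}, {"osf_subjects": ["Psychology"]}], and the comprehension
# above flattens them into {"creator": ["Jane Doe"], "osf_subjects": ["Psychology"]}.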
parent = json_metadata["data"]["relationships"]["parent"]["data"]
if parent:
relationship_data[
"parent"
] = f"https://archive.org/details/{settings.REG_ID_TEMPLATE.format(guid=parent["id"])}"
embeds = json_metadata["data"]["embeds"]
if not embeds["license"].get(
"errors"
): # The reported error here is just a 404, so ignore if no license
relationship_data["license"] = embeds["license"]["data"]["attributes"]["url"]
doi = next(
(
identifier["attributes"]["value"]
for identifier in embeds["identifiers"]["data"]
if identifier["attributes"]["category"] == "doi"
),
None,
)
osf_url = "/".join(json_metadata["data"]["links"]["html"].split("/")[:3]) + "/"
attributes = json_metadata["data"]["attributes"]
article_doi = json_metadata["data"]["attributes"]["article_doi"]
ia_metadata = {
"publisher": "Center for Open Science",
"osf_registration_doi": doi,
"title": attributes["title"],
"description": attributes["description"],
"osf_category": attributes["category"],
"osf_tags": attributes["tags"],
"date": str(
datetime.strptime(
attributes["date_created"], "%Y-%m-%dT%H:%M:%S.%fZ"
).date()
),
"article_doi": f"urn:doi:{article_doi}" if article_doi else "",
"osf_registry": embeds["provider"]["data"]["attributes"]["name"],
"osf_registration_schema": embeds["registration_schema"]["data"]["attributes"][
"name"
],
"source": osf_url
+ json_metadata["data"]["relationships"]["registered_from"]["data"]["id"],
**relationship_data,
}
return ia_metadata
async def write_datacite_metadata(guid, temp_dir, metadata):
try:
doi = next(
(
identifier["attributes"]["value"]
for identifier in metadata["data"]["embeds"]["identifiers"]["data"]
if identifier["attributes"]["category"] == "doi"
)
)
except StopIteration:
raise DataCiteNotFoundError(
f"Datacite DOI not found for registration {guid} on OSF server."
)
client = DataCiteMDSClient(
url=settings.DATACITE_URL,
username=settings.DATACITE_USERNAME,
password=settings.DATACITE_PASSWORD,
prefix=settings.DATACITE_PREFIX,
)
try:
xml_metadata = client.metadata_get(doi)
except DataCiteNotFoundError:
raise DataCiteNotFoundError(
f"Datacite DOI {doi} not found for registration {guid} on Datacite server."
)
with open(os.path.join(temp_dir, "datacite.xml"), "w") as fp:
fp.write(xml_metadata)
return xml_metadata
@sleep_and_retry
async def get_with_retry(url, retry_on=(), sleep_period=None, headers=None):
if not headers:
headers = {}
if settings.OSF_BEARER_TOKEN:
headers["Authorization"] = f"Bearer {settings.OSF_BEARER_TOKEN}"
async with ClientSession() as session:
async with session.get(url, headers=headers) as resp:
if resp.status in retry_on:
raise RateLimitException(
message="Too many requests, sleeping.",
period_remaining=sleep_period
or int(resp.headers.get("Retry-After") or 0),
) # This will be caught by @sleep_and_retry and retried
resp.raise_for_status()
return await resp.json()
async def get_pages(url, page, result={}, parse_json=None):
url = f"{url}?page={page}&page={page}"
data = await get_with_retry(url, retry_on=(429,))
result[page] = data["data"]
if parse_json:
result[page] = parse_json(data)["data"]
return result
async def get_additional_contributor_info(response):
contributor_data_list = []
for contributor in response["data"]:
contributor_data = {}
embed_data = contributor["embeds"]["users"]["data"]
institution_url = embed_data["relationships"]["institutions"]["links"][
"related"
]["href"]
data = await get_with_retry(institution_url)
institution_data = data["data"]
institution_list = [
institution["attributes"]["name"] for institution in institution_data
]
contributor_data["affiliated_institutions"] = institution_list
contributor.update(contributor_data)
contributor_data_list.append(contributor)
response["data"] = contributor_data_list
return response
async def get_paginated_data(url, parse_json=None):
data = await get_with_retry(url, retry_on=(429,))
tasks = []
is_paginated = data.get("links", {}).get("next")
if parse_json:
data = await parse_json(data)
if is_paginated:
result = {1: data["data"]}
total = data["links"].get("meta", {}).get("total") or data["meta"].get("total")
per_page = data["links"].get("meta", {}).get("per_page") or data["meta"].get(
"per_page"
)
pages = math.ceil(int(total) / int(per_page))
for i in range(1, pages):
task = get_pages(url, i + 1, result)
tasks.append(task)
await asyncio.gather(*tasks)
pages_as_list = []
# through the magic of async all our pages have loaded.
for page in list(result.values()):
pages_as_list += page
return pages_as_list
else:
return data
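# Illustration of the pagination math above (hypothetical counts): with total=45 and
# per_page=10 the code requests math.ceil(45 / 10) = 5 pages, fetching pages 2..5
# concurrently via get_pages() while page 1 comes from the initial request.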
def get_ia_item(guid):
session = internetarchive.get_session(
config={
"s3": {"access": settings.IA_ACCESS_KEY, "secret": settings.IA_SECRET_KEY},
},
)
return session.get_item(guid)
def sync_metadata(guid, metadata):
"""
This is used to sync the metadata of archive.org items with OSF Registrations. The metadata
synced is as follows:
- title
- description
- date
- category
- subjects
- tags
- affiliated_institutions
- license
- article_doi
`moderation_state` is an allowable key, but only to determine a withdrawal status of a
registration.
:param guid:
:param metadata:
:return:
"""
if not metadata:
raise http_exceptions.PayloadEncodingError(
"Metadata Payload not included in request"
)
valid_updatable_metadata_keys = [
"title",
"description",
"date",
"modified",
"osf_category",
"osf_subjects",
"osf_tags",
"article_doi",
"affiliated_institutions",
"license",
"withdrawal_justification",
]
invalid_keys = set(metadata.keys()).difference(set(valid_updatable_metadata_keys))
if invalid_keys:
raise http_exceptions.PayloadEncodingError(
f"Metadata payload contained invalid tag(s): `{", ".join(list(invalid_keys))}`"
f" not included in valid keys: `{", ".join(valid_updatable_metadata_keys)}`.",
)
item_name = settings.REG_ID_TEMPLATE.format(guid=guid)
ia_item = get_ia_item(item_name)
if not metadata.get("withdrawal_justification"): # withdrawn == not searchable
ia_item.modify_metadata(metadata)
else:
description = ia_item.metadata.get("description")
if description:
metadata[
"description"
] = f"Note this registration has been withdrawn: \n{description}"
else:
metadata["description"] = "This registration has been withdrawn"
ia_item.modify_metadata(metadata)
ia_item.modify_metadata({"noindex": True})
return ia_item, list(metadata.keys())
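# Illustrative usage sketch (hypothetical guid and values, not part of the module API):
# sync_metadata("abc12", {"title": "My Registration", "osf_tags": ["replication"]})
# pushes those fields to the archive.org item named via REG_ID_TEMPLATE, while passing a
# "withdrawal_justification" additionally prefixes the description and sets noindex so the
# item is no longer searchable.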
async def upload(item_name, temp_dir, metadata):
ia_item = get_ia_item(item_name)
ia_metadata = await get_metadata_for_ia_item(metadata)
provider_id = metadata["data"]["embeds"]["provider"]["data"]["id"]
ia_item.upload(
os.path.join(temp_dir, "bag.zip"),
metadata={
"collection": settings.PROVIDER_ID_TEMPLATE.format(provider_id=provider_id),
**ia_metadata,
},
access_key=settings.IA_ACCESS_KEY,
secret_key=settings.IA_SECRET_KEY,
)
return ia_item
async def get_registration_metadata(guid, temp_dir, filename):
metadata = await get_paginated_data(
f"{settings.OSF_API_URL}v2/registrations/{guid}/"
f"?embed=parent"
f"&embed=children"
f"&embed=provider"
f"&embed=identifiers"
f"&embed=license"
f"&embed=registration_schema"
f"&related_counts=true"
f"&version=2.20"
)
if metadata["data"]["attributes"]["withdrawn"]:
raise PermissionError(f"Registration {guid} is withdrawn")
with open(os.path.join(temp_dir, filename), "w") as fp:
json.dump(metadata, fp)
return metadata
async def archive(guid):
with tempfile.TemporaryDirectory(
dir=settings.PIGEON_TEMP_DIR, prefix=settings.REG_ID_TEMPLATE.format(guid=guid)
) as temp_dir:
os.mkdir(os.path.join(temp_dir, "bag"))
# await first to check if withdrawn
metadata = await get_registration_metadata(guid, os.path.join(temp_dir, "bag"), "registration.json")
tasks = [
write_datacite_metadata(guid, temp_dir, metadata),
dump_json_to_dir(
from_url=f"{settings.OSF_API_URL}v2/registrations/{guid}/wikis/"
f"?page[size]=100",
to_dir=os.path.join(temp_dir, "bag"),
name="wikis.json",
),
dump_json_to_dir(
from_url=f"{settings.OSF_API_URL}v2/registrations/{guid}/logs/"
f"?page[size]=100",
to_dir=os.path.join(temp_dir, "bag"),
name="logs.json",
),
dump_json_to_dir(
from_url=f"{settings.OSF_API_URL}v2/registrations/{guid}/contributors/"
f"?page[size]=100",
to_dir=os.path.join(temp_dir, "bag"),
name="contributors.json",
parse_json=get_additional_contributor_info,
),
]
# only download archived data if there are files
file_count = metadata["data"]["relationships"]["files"]["links"]["related"][
"meta"
]["count"]
if file_count:
tasks.append(
stream_files_to_dir(
f"{settings.OSF_FILES_URL}v1/resources/{guid}/providers/osfstorage/?zip=",
os.path.join(temp_dir, "bag"),
"archived_files.zip",
)
)
await asyncio.gather(*tasks)
bagit.make_bag(os.path.join(temp_dir, "bag"))
bag = bagit.Bag(os.path.join(temp_dir, "bag"))
assert bag.is_valid()
create_zip(temp_dir)
ia_item = await upload(
settings.REG_ID_TEMPLATE.format(guid=guid), temp_dir, metadata
)
return ia_item, guid
def run(coroutine):
loop = events.new_event_loop()
try:
events.set_event_loop(loop)
return loop.run_until_complete(coroutine)
finally:
try:
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
events.set_event_loop(None)
loop.close()
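# Illustrative usage sketch (hypothetical guid): run() drives a coroutine to completion on a
# fresh event loop, so the archive pipeline can be invoked synchronously, e.g.
# ia_item, guid = run(archive("abc12"))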
|
import os
import re
import math
import json
import tempfile
import zipfile
import bagit
import asyncio
from datetime import datetime
from asyncio import events
from ratelimit import sleep_and_retry
from ratelimit.exception import RateLimitException
from aiohttp import ClientSession, http_exceptions
import internetarchive
from datacite import DataCiteMDSClient
from datacite.errors import DataCiteNotFoundError
from osf_pigeon import settings
async def stream_files_to_dir(from_url, to_dir, name):
async with ClientSession() as session:
async with session.get(from_url) as resp:
with open(os.path.join(to_dir, name), "wb") as fp:
async for chunk in resp.content.iter_any():
fp.write(chunk)
async def dump_json_to_dir(from_url, to_dir, name, parse_json=None):
pages = await get_paginated_data(from_url, parse_json)
with open(os.path.join(to_dir, name), "w") as fp:
json.dump(pages, fp)
return pages
def create_zip(temp_dir):
with zipfile.ZipFile(os.path.join(temp_dir, "bag.zip"), "w") as fp:
for root, dirs, files in os.walk(os.path.join(temp_dir, "bag")):
for file in files:
file_path = os.path.join(root, file)
file_name = re.sub(f"^{temp_dir}", "", file_path)
fp.write(file_path, arcname=file_name)
async def get_relationship_attribute(key, url, func):
data = await get_paginated_data(url)
if "data" in data:
return {key: list(map(func, data["data"]))}
return {key: list(map(func, data))}
async def get_metadata_for_ia_item(json_metadata):
"""
This is meant to take the response JSON metadata and format it for IA buckets; it is not
used to generate JSON to be uploaded as raw data into the buckets.
:param json_metadata: metadata from the OSF registration view; contains attributes and relationship
URLs.
Note: Internet Archive advises that all metadata that points to internal OSF features should
have a specific `osf_` prefix. Example: `registry` should be `osf_registry`; however, metadata
such as affiliated_institutions is self-explanatory and doesn't need a prefix.
:return: ia_metadata, the metadata for an IA bucket. Should include the following if they are
not null:
- publisher
- title
- description
- date
- osf_category
- osf_subjects
- osf_tags
- osf_registration_doi
- osf_registry
- osf_registration_schema
- creator (bibliographic contributors, IA recommended this keyword)
- article_doi
- parent
- children
- source
- affiliated_institutions
- license
"""
relationship_data = [
get_relationship_attribute(
"creator",
f'{settings.OSF_API_URL}v2/registrations/{json_metadata["data"]["id"]}/contributors/'
f"?filter[bibliographic]=true&",
lambda contrib: contrib["embeds"]["users"]["data"]["attributes"][
"full_name"
],
),
get_relationship_attribute(
"affiliated_institutions",
f'{settings.OSF_API_URL}v2/registrations/{json_metadata["data"]["id"]}/institutions/',
lambda institution: institution["attributes"]["name"],
),
get_relationship_attribute(
"osf_subjects",
f'{settings.OSF_API_URL}v2/registrations/{json_metadata["data"]["id"]}/subjects/',
lambda subject: subject["attributes"]["text"],
),
get_relationship_attribute(
"children",
f'{settings.OSF_API_URL}v2/registrations/{json_metadata["data"]["id"]}/children/',
lambda child: f"https://archive.org/details/"
f'{settings.REG_ID_TEMPLATE.format(guid=child["id"])}',
),
]
relationship_data = {
k: v
for pair in await asyncio.gather(*relationship_data)
for k, v in pair.items()
} # merge all the pairs
parent = json_metadata["data"]["relationships"]["parent"]["data"]
if parent:
relationship_data[
"parent"
] = f"https://archive.org/details/{settings.REG_ID_TEMPLATE.format(guid=parent['id'])}"
embeds = json_metadata["data"]["embeds"]
if not embeds["license"].get(
"errors"
): # The reported error here is just a 404, so ignore if no license
relationship_data["license"] = embeds["license"]["data"]["attributes"]["url"]
doi = next(
(
identifier["attributes"]["value"]
for identifier in embeds["identifiers"]["data"]
if identifier["attributes"]["category"] == "doi"
),
None,
)
osf_url = "/".join(json_metadata["data"]["links"]["html"].split("/")[:3]) + "/"
attributes = json_metadata["data"]["attributes"]
article_doi = json_metadata["data"]["attributes"]["article_doi"]
ia_metadata = {
"publisher": "Center for Open Science",
"osf_registration_doi": doi,
"title": attributes["title"],
"description": attributes["description"],
"osf_category": attributes["category"],
"osf_tags": attributes["tags"],
"date": str(
datetime.strptime(
attributes["date_created"], "%Y-%m-%dT%H:%M:%S.%fZ"
).date()
),
"article_doi": f"urn:doi:{article_doi}" if article_doi else "",
"osf_registry": embeds["provider"]["data"]["attributes"]["name"],
"osf_registration_schema": embeds["registration_schema"]["data"]["attributes"][
"name"
],
"source": osf_url
+ json_metadata["data"]["relationships"]["registered_from"]["data"]["id"],
**relationship_data,
}
return ia_metadata
async def write_datacite_metadata(guid, temp_dir, metadata):
try:
doi = next(
(
identifier["attributes"]["value"]
for identifier in metadata["data"]["embeds"]["identifiers"]["data"]
if identifier["attributes"]["category"] == "doi"
)
)
except StopIteration:
raise DataCiteNotFoundError(
f"Datacite DOI not found for registration {guid} on OSF server."
)
client = DataCiteMDSClient(
url=settings.DATACITE_URL,
username=settings.DATACITE_USERNAME,
password=settings.DATACITE_PASSWORD,
prefix=settings.DATACITE_PREFIX,
)
try:
xml_metadata = client.metadata_get(doi)
except DataCiteNotFoundError:
raise DataCiteNotFoundError(
f"Datacite DOI {doi} not found for registration {guid} on Datacite server."
)
with open(os.path.join(temp_dir, "datacite.xml"), "w") as fp:
fp.write(xml_metadata)
return xml_metadata
@sleep_and_retry
async def get_with_retry(url, retry_on=(), sleep_period=None, headers=None):
if not headers:
headers = {}
if settings.OSF_BEARER_TOKEN:
headers["Authorization"] = f"Bearer {settings.OSF_BEARER_TOKEN}"
async with ClientSession() as session:
async with session.get(url, headers=headers) as resp:
if resp.status in retry_on:
raise RateLimitException(
message="Too many requests, sleeping.",
period_remaining=sleep_period
or int(resp.headers.get("Retry-After") or 0),
) # This will be caught by @sleep_and_retry and retried
resp.raise_for_status()
return await resp.json()
async def get_pages(url, page, result={}, parse_json=None):
url = f"{url}?page={page}&page={page}"
data = await get_with_retry(url, retry_on=(429,))
result[page] = data["data"]
if parse_json:
result[page] = parse_json(data)["data"]
return result
async def get_additional_contributor_info(response):
contributor_data_list = []
for contributor in response["data"]:
contributor_data = {}
embed_data = contributor["embeds"]["users"]["data"]
institution_url = embed_data["relationships"]["institutions"]["links"][
"related"
]["href"]
data = await get_with_retry(institution_url)
institution_data = data["data"]
institution_list = [
institution["attributes"]["name"] for institution in institution_data
]
contributor_data["affiliated_institutions"] = institution_list
contributor.update(contributor_data)
contributor_data_list.append(contributor)
response["data"] = contributor_data_list
return response
async def get_paginated_data(url, parse_json=None):
data = await get_with_retry(url, retry_on=(429,))
tasks = []
is_paginated = data.get("links", {}).get("next")
if parse_json:
data = await parse_json(data)
if is_paginated:
result = {1: data["data"]}
total = data["links"].get("meta", {}).get("total") or data["meta"].get("total")
per_page = data["links"].get("meta", {}).get("per_page") or data["meta"].get(
"per_page"
)
pages = math.ceil(int(total) / int(per_page))
for i in range(1, pages):
task = get_pages(url, i + 1, result)
tasks.append(task)
await asyncio.gather(*tasks)
pages_as_list = []
# through the magic of async all our pages have loaded.
for page in list(result.values()):
pages_as_list += page
return pages_as_list
else:
return data
def get_ia_item(guid):
session = internetarchive.get_session(
config={
"s3": {"access": settings.IA_ACCESS_KEY, "secret": settings.IA_SECRET_KEY},
},
)
return session.get_item(guid)
def sync_metadata(guid, metadata):
"""
This is used to sync the metadata of archive.org items with OSF Registrations. The metadata
synced is as follows:
- title
- description
- date
- category
- subjects
- tags
- affiliated_institutions
- license
- article_doi
`moderation_state` is an allowable key, but only to determine a withdrawal status of a
registration.
:param guid:
:param metadata:
:return:
"""
if not metadata:
raise http_exceptions.PayloadEncodingError(
"Metadata Payload not included in request"
)
valid_updatable_metadata_keys = [
"title",
"description",
"date",
"modified",
"osf_category",
"osf_subjects",
"osf_tags",
"article_doi",
"affiliated_institutions",
"license",
"withdrawal_justification",
]
invalid_keys = set(metadata.keys()).difference(set(valid_updatable_metadata_keys))
if invalid_keys:
raise http_exceptions.PayloadEncodingError(
f"Metadata payload contained invalid tag(s): `{', '.join(list(invalid_keys))}`"
f" not included in valid keys: `{', '.join(valid_updatable_metadata_keys)}`.",
)
item_name = settings.REG_ID_TEMPLATE.format(guid=guid)
ia_item = get_ia_item(item_name)
if not metadata.get("withdrawal_justification"): # withdrawn == not searchable
ia_item.modify_metadata(metadata)
else:
description = ia_item.metadata.get("description")
if description:
metadata[
"description"
] = f"Note this registration has been withdrawn: \n{description}"
else:
metadata["description"] = "This registration has been withdrawn"
ia_item.modify_metadata(metadata)
ia_item.modify_metadata({"noindex": True})
return ia_item, list(metadata.keys())
async def upload(item_name, temp_dir, metadata):
ia_item = get_ia_item(item_name)
ia_metadata = await get_metadata_for_ia_item(metadata)
provider_id = metadata["data"]["embeds"]["provider"]["data"]["id"]
ia_item.upload(
os.path.join(temp_dir, "bag.zip"),
metadata={
"collection": settings.PROVIDER_ID_TEMPLATE.format(provider_id=provider_id),
**ia_metadata,
},
access_key=settings.IA_ACCESS_KEY,
secret_key=settings.IA_SECRET_KEY,
)
return ia_item
async def get_registration_metadata(guid, temp_dir, filename):
metadata = await get_paginated_data(
f"{settings.OSF_API_URL}v2/registrations/{guid}/"
f"?embed=parent"
f"&embed=children"
f"&embed=provider"
f"&embed=identifiers"
f"&embed=license"
f"&embed=registration_schema"
f"&related_counts=true"
f"&version=2.20"
)
if metadata["data"]["attributes"]["withdrawn"]:
raise PermissionError(f"Registration {guid} is withdrawn")
with open(os.path.join(temp_dir, filename), "w") as fp:
json.dump(metadata, fp)
return metadata
async def archive(guid):
with tempfile.TemporaryDirectory(
dir=settings.PIGEON_TEMP_DIR, prefix=settings.REG_ID_TEMPLATE.format(guid=guid)
) as temp_dir:
os.mkdir(os.path.join(temp_dir, "bag"))
# await first to check if withdrawn
metadata = await get_registration_metadata(guid, os.path.join(temp_dir, "bag"), "registration.json")
tasks = [
write_datacite_metadata(guid, temp_dir, metadata),
dump_json_to_dir(
from_url=f"{settings.OSF_API_URL}v2/registrations/{guid}/wikis/"
f"?page[size]=100",
to_dir=os.path.join(temp_dir, "bag"),
name="wikis.json",
),
dump_json_to_dir(
from_url=f"{settings.OSF_API_URL}v2/registrations/{guid}/logs/"
f"?page[size]=100",
to_dir=os.path.join(temp_dir, "bag"),
name="logs.json",
),
dump_json_to_dir(
from_url=f"{settings.OSF_API_URL}v2/registrations/{guid}/contributors/"
f"?page[size]=100",
to_dir=os.path.join(temp_dir, "bag"),
name="contributors.json",
parse_json=get_additional_contributor_info,
),
]
# only download archived data if there are files
file_count = metadata["data"]["relationships"]["files"]["links"]["related"][
"meta"
]["count"]
if file_count:
tasks.append(
stream_files_to_dir(
f"{settings.OSF_FILES_URL}v1/resources/{guid}/providers/osfstorage/?zip=",
os.path.join(temp_dir, "bag"),
"archived_files.zip",
)
)
await asyncio.gather(*tasks)
bagit.make_bag(os.path.join(temp_dir, "bag"))
bag = bagit.Bag(os.path.join(temp_dir, "bag"))
assert bag.is_valid()
create_zip(temp_dir)
ia_item = await upload(
settings.REG_ID_TEMPLATE.format(guid=guid), temp_dir, metadata
)
return ia_item, guid
def run(coroutine):
loop = events.new_event_loop()
try:
events.set_event_loop(loop)
return loop.run_until_complete(coroutine)
finally:
try:
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
events.set_event_loop(None)
loop.close()
|
import re
import smtplib
from email.message import EmailMessage
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Union, List, Dict
# for 3.7 we need typing.List, typing.Dict, etc...
RebuiltLogLines = List[str]
ResultData = Union[int, RebuiltLogLines]
PerDayDict = Dict[str, ResultData]
ResultDict = Dict[str, PerDayDict]
FILE_LOCATION = "/home/yman/gandi_logs/access.log"
JUNK_QUERIES = frozenset(["robots.txt", "detectproxy", "wp-",
"preview=true", "internal dummy connection", "xmlrpc", "favicon"])
JUNK_AGENTS = frozenset(["wordpress", "bytespider", "barkrowler",
"go-http-client", "scaninfo@paloaltonetworks.com", "go http package",
"netsystemsresearch.com", "favicon"])
LOG_REGEX = re.compile(r"""(?:\S+)\s # server name
(?P<clip>[0-9a-f:.]+) # remote IP
\s-\s-\s # remote logname + user
\[(?P<ts>.+?)\]\s # timestamp
\(\d+\ss\)\s # processing time
\"(?P<req>.+?)\"\s # request
(?P<status>\d+)\s # status code
(?:-|\d+)\s # response size
\".+?\"\s # referer
\"(?P<uagent>.+)\" # user-agent""", re.VERBOSE)
SENDER = "parser@yman.site"
RECEIVER = "yman@protonmail.ch"
def is_junk_request(request: str, agent: str) -> bool:
"""Filter user-agents that contain line from 'junk_agents' list, i.e. bots
and some misc stuff."""
request = request.lower()
agent = agent.lower()
if any(word in request for word in JUNK_QUERIES):
return True
if re.search(r"bot\W", agent):
return True
if any(word in agent for word in JUNK_AGENTS):
return True
return False
def parse_access_log(file_location: str) -> ResultDict:
"""Parse httpd access log (Gandi simple hosting instance config), filter
out uninteresting requests, bogus, bots, etc. Good lines will be packed
into per-day dict with hits count and altered log lines."""
result = defaultdict(lambda: {"hits": 0, "lines": []})
with open(file_location, "r", encoding="utf-8") as logfile:
for line in logfile:
match_result = LOG_REGEX.match(line)
if not match_result:
print("Can not parse line:", line)
continue
(clip, timestamp, request, status_code,
uagent) = match_result.groups()
status_code = int(status_code)
# filter bots and maintenance requests
if is_junk_request(request, uagent):
continue
# filter not successful status codes
if status_code < 200 or status_code > 299:
continue
full_ts = datetime.strptime(timestamp, "%d/%b/%Y:%H:%M:%S %z")
day = datetime.strftime(full_ts, "%d-%m-%Y")
result[day]["hits"] += 1
rebuilt_line = f"{clip} : {status_code} : {request} : {uagent}"
result[day]["lines"].append(rebuilt_line)
return result
def email_results(date: str, one_day_data: PerDayDict) -> None:
"""Email results of one day."""
report_string = f"Results for {date}\nTotal hits: {one_day_data["hits"]}\n"
for line in one_day_data["lines"]:
report_string = f"{report_string}{line}\n"
msg = EmailMessage()
msg.set_content(report_string[:-1])
msg["Subject"] = "Daily dvjourney access.log parse results"
msg["From"] = SENDER
msg["To"] = RECEIVER
server = smtplib.SMTP("localhost")
server.set_debuglevel(1)
server.send_message(msg)
server.quit()
if __name__ == "__main__":
res = parse_access_log(FILE_LOCATION)
yesterday = datetime.now() - timedelta(days=1)
yesterday_formatted = datetime.strftime(yesterday, "%d-%m-%Y")
email_results(yesterday_formatted, res[yesterday_formatted])
|
import re
import smtplib
from email.message import EmailMessage
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Union, List, Dict
# for 3.7 we need typing.List, typing.Dict, etc...
RebuiltLogLines = List[str]
ResultData = Union[int, RebuiltLogLines]
PerDayDict = Dict[str, ResultData]
ResultDict = Dict[str, PerDayDict]
FILE_LOCATION = "/home/yman/gandi_logs/access.log"
JUNK_QUERIES = frozenset(["robots.txt", "detectproxy", "wp-",
"preview=true", "internal dummy connection", "xmlrpc", "favicon"])
JUNK_AGENTS = frozenset(["wordpress", "bytespider", "barkrowler",
"go-http-client", "scaninfo@paloaltonetworks.com", "go http package",
"netsystemsresearch.com", "favicon"])
LOG_REGEX = re.compile(r"""(?:\S+)\s # server name
(?P<clip>[0-9a-f:.]+) # remote IP
\s-\s-\s # remote logname + user
\[(?P<ts>.+?)\]\s # timestamp
\(\d+\ss\)\s # processing time
\"(?P<req>.+?)\"\s # request
(?P<status>\d+)\s # status code
(?:-|\d+)\s # response size
\".+?\"\s # referer
\"(?P<uagent>.+)\" # user-agent""", re.VERBOSE)
SENDER = "parser@yman.site"
RECEIVER = "yman@protonmail.ch"
def is_junk_request(request: str, agent: str) -> bool:
"""Filter user-agents that contain line from 'junk_agents' list, i.e. bots
and some misc stuff."""
request = request.lower()
agent = agent.lower()
if any(word in request for word in JUNK_QUERIES):
return True
if re.search(r"bot\W", agent):
return True
if any(word in agent for word in JUNK_AGENTS):
return True
return False
def parse_access_log(file_location: str) -> ResultDict:
"""Parse httpd access log (Gandi simple hosting instance config), filter
out uninteresting requests, bogus, bots, etc. Good lines will be packed
into per-day dict with hits count and altered log lines."""
result = defaultdict(lambda: {"hits": 0, "lines": []})
with open(file_location, "r", encoding="utf-8") as logfile:
for line in logfile:
match_result = LOG_REGEX.match(line)
if not match_result:
print("Can not parse line:", line)
continue
(clip, timestamp, request, status_code,
uagent) = match_result.groups()
status_code = int(status_code)
# filter bots and maintenance requests
if is_junk_request(request, uagent):
continue
# filter not successful status codes
if status_code < 200 or status_code > 299:
continue
full_ts = datetime.strptime(timestamp, "%d/%b/%Y:%H:%M:%S %z")
day = datetime.strftime(full_ts, "%d-%m-%Y")
result[day]["hits"] += 1
rebuilt_line = f"{clip} : {status_code} : {request} : {uagent}"
result[day]["lines"].append(rebuilt_line)
return result
def email_results(date: str, one_day_data: PerDayDict) -> None:
"""Email results of one day."""
report_string = f"Results for {date}\nTotal hits: {one_day_data['hits']}\n"
for line in one_day_data["lines"]:
report_string = f"{report_string}{line}\n"
msg = EmailMessage()
msg.set_content(report_string[:-1])
msg["Subject"] = "Daily dvjourney access.log parse results"
msg["From"] = SENDER
msg["To"] = RECEIVER
server = smtplib.SMTP("localhost")
server.set_debuglevel(1)
server.send_message(msg)
server.quit()
if __name__ == "__main__":
res = parse_access_log(FILE_LOCATION)
yesterday = datetime.now() - timedelta(days=1)
yesterday_formatted = datetime.strftime(yesterday, "%d-%m-%Y")
email_results(yesterday_formatted, res[yesterday_formatted])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @NetflixMovieslk
import re
import pyrogram
from pyrogram import (
filters,
Client
)
from pyrogram.types import (
InlineKeyboardButton,
InlineKeyboardMarkup,
Message,
CallbackQuery
)
from bot import Bot
from script import script
from database.mdb import searchquery
from plugins.channel import deleteallfilters
from config import AUTH_USERS
BUTTONS = {}
@Client.on_message(filters.group & filters.text)
async def filter(client: Bot, message: Message):
if re.findall("((^\/|^,|^!|^\.|^[\U0001F600-\U000E007F]).*)", message.text):
return
if 2 < len(message.text) < 50:
btn = []
group_id = message.chat.id
name = message.text
filenames, links = await searchquery(group_id, name)
if filenames and links:
for filename, link in zip(filenames, links):
btn.append(
[InlineKeyboardButton(text=f"{filename}",url=f"{link}")]
)
else:
return
if not btn:
return
if len(btn) > 10:
btns = list(split_list(btn, 10))
keyword = f"{message.chat.id}-{message.message_id}"
BUTTONS[keyword] = {
"total" : len(btns),
"buttons" : btns
}
else:
buttons = btn
buttons.append(
[InlineKeyboardButton(text="📃 Pages 1/1",callback_data="pages")]
)
await message.reply_text(
f"<b> Here is the result for {message.text}</b>",
reply_markup=InlineKeyboardMarkup(buttons)
)
return
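# For reference: callback_data below is "next_<page index>_<chat id>-<message id>", so
# cb_handler() can split on "_" to recover both the page to show and the BUTTONS cache key.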
data = BUTTONS[keyword]
buttons = data['buttons'][0].copy()
buttons.append(
[InlineKeyboardButton(text="NEXT ⏩",callback_data=f"next_0_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(text=f"📃 Pages 1/{data["total"]}",callback_data="pages")]
)
await message.reply_text(
f"<b> Here is the result for {message.text}</b>",
reply_markup=InlineKeyboardMarkup(buttons)
)
@Client.on_callback_query()
async def cb_handler(client: Bot, query: CallbackQuery):
clicked = query.from_user.id
typed = query.message.reply_to_message.from_user.id
if (clicked == typed) or (clicked in AUTH_USERS):
if query.data.startswith("next"):
await query.answer()
ident, index, keyword = query.data.split("_")
data = BUTTONS[keyword]
if int(index) == int(data["total"]) - 2:
buttons = data['buttons'][int(index)+1].copy()
buttons.append(
[InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)+1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(f"📃 Pages {int(index)+2}/{data["total"]}", callback_data="pages")]
)
await query.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
else:
buttons = data['buttons'][int(index)+1].copy()
buttons.append(
[InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)+1}_{keyword}"),InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)+1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(f"📃 Pages {int(index)+2}/{data["total"]}", callback_data="pages")]
)
await query.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
elif query.data.startswith("back"):
await query.answer()
ident, index, keyword = query.data.split("_")
data = BUTTONS[keyword]
if int(index) == 1:
buttons = data['buttons'][int(index)-1].copy()
buttons.append(
[InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)-1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(f"📃 Pages {int(index)}/{data["total"]}", callback_data="pages")]
)
await query.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
else:
buttons = data['buttons'][int(index)-1].copy()
buttons.append(
[InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)-1}_{keyword}"),InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)-1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(f"📃 Pages {int(index)}/{data["total"]}", callback_data="pages")]
)
await query.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
elif query.data == "pages":
await query.answer()
elif query.data == "start_data":
await query.answer()
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton("HELP", callback_data="help_data"),
InlineKeyboardButton("ABOUT", callback_data="about_data")],
[InlineKeyboardButton("⭕️ JOIN OUR CHANNEL ⭕️", url="https://t.me/NetflixMovieslk")]
])
await query.message.edit_text(
script.START_MSG.format(query.from_user.mention),
reply_markup=keyboard,
disable_web_page_preview=True
)
elif query.data == "help_data":
await query.answer()
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton("BACK", callback_data="start_data"),
InlineKeyboardButton("ABOUT", callback_data="about_data")],
[InlineKeyboardButton("⭕️ SUPPORT ⭕️", url="https://t.me/NetflixMovieslk")]
])
await query.message.edit_text(
script.HELP_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
elif query.data == "about_data":
await query.answer()
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("START", callback_data="start_data")],
[InlineKeyboardButton("SOURCE CODE", url="https://github.com/TroJanzHEX/Auto-Filter-Bot-V2")]
])
await query.message.edit_text(
script.ABOUT_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
elif query.data == "delallconfirm":
await query.message.delete()
await deleteallfilters(client, query.message)
elif query.data == "delallcancel":
await query.message.reply_to_message.delete()
await query.message.delete()
else:
await query.answer("Thats not for you!!",show_alert=True)
def split_list(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
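# For illustration: list(split_list([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]],
# i.e. the button rows above are chunked into pages of at most n rows each.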
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @NetflixMovieslk
import re
import pyrogram
from pyrogram import (
filters,
Client
)
from pyrogram.types import (
InlineKeyboardButton,
InlineKeyboardMarkup,
Message,
CallbackQuery
)
from bot import Bot
from script import script
from database.mdb import searchquery
from plugins.channel import deleteallfilters
from config import AUTH_USERS
BUTTONS = {}
@Client.on_message(filters.group & filters.text)
async def filter(client: Bot, message: Message):
if re.findall("((^\/|^,|^!|^\.|^[\U0001F600-\U000E007F]).*)", message.text):
return
if 2 < len(message.text) < 50:
btn = []
group_id = message.chat.id
name = message.text
filenames, links = await searchquery(group_id, name)
if filenames and links:
for filename, link in zip(filenames, links):
btn.append(
[InlineKeyboardButton(text=f"{filename}",url=f"{link}")]
)
else:
return
if not btn:
return
if len(btn) > 10:
btns = list(split_list(btn, 10))
keyword = f"{message.chat.id}-{message.message_id}"
BUTTONS[keyword] = {
"total" : len(btns),
"buttons" : btns
}
else:
buttons = btn
buttons.append(
[InlineKeyboardButton(text="📃 Pages 1/1",callback_data="pages")]
)
await message.reply_text(
f"<b> Here is the result for {message.text}</b>",
reply_markup=InlineKeyboardMarkup(buttons)
)
return
data = BUTTONS[keyword]
buttons = data['buttons'][0].copy()
buttons.append(
[InlineKeyboardButton(text="NEXT ⏩",callback_data=f"next_0_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(text=f"📃 Pages 1/{data['total']}",callback_data="pages")]
)
await message.reply_text(
f"<b> Here is the result for {message.text}</b>",
reply_markup=InlineKeyboardMarkup(buttons)
)
@Client.on_callback_query()
async def cb_handler(client: Bot, query: CallbackQuery):
clicked = query.from_user.id
typed = query.message.reply_to_message.from_user.id
if (clicked == typed) or (clicked in AUTH_USERS):
if query.data.startswith("next"):
await query.answer()
ident, index, keyword = query.data.split("_")
data = BUTTONS[keyword]
if int(index) == int(data["total"]) - 2:
buttons = data['buttons'][int(index)+1].copy()
buttons.append(
[InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)+1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(f"📃 Pages {int(index)+2}/{data['total']}", callback_data="pages")]
)
await query.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
else:
buttons = data['buttons'][int(index)+1].copy()
buttons.append(
[InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)+1}_{keyword}"),InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)+1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(f"📃 Pages {int(index)+2}/{data['total']}", callback_data="pages")]
)
await query.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
elif query.data.startswith("back"):
await query.answer()
ident, index, keyword = query.data.split("_")
data = BUTTONS[keyword]
if int(index) == 1:
buttons = data['buttons'][int(index)-1].copy()
buttons.append(
[InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)-1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(f"📃 Pages {int(index)}/{data['total']}", callback_data="pages")]
)
await query.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
else:
buttons = data['buttons'][int(index)-1].copy()
buttons.append(
[InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)-1}_{keyword}"),InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)-1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(f"📃 Pages {int(index)}/{data['total']}", callback_data="pages")]
)
await query.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
elif query.data == "pages":
await query.answer()
elif query.data == "start_data":
await query.answer()
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton("HELP", callback_data="help_data"),
InlineKeyboardButton("ABOUT", callback_data="about_data")],
[InlineKeyboardButton("⭕️ JOIN OUR CHANNEL ⭕️", url="https://t.me/NetflixMovieslk")]
])
await query.message.edit_text(
script.START_MSG.format(query.from_user.mention),
reply_markup=keyboard,
disable_web_page_preview=True
)
elif query.data == "help_data":
await query.answer()
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton("BACK", callback_data="start_data"),
InlineKeyboardButton("ABOUT", callback_data="about_data")],
[InlineKeyboardButton("⭕️ SUPPORT ⭕️", url="https://t.me/NetflixMovieslk")]
])
await query.message.edit_text(
script.HELP_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
elif query.data == "about_data":
await query.answer()
keyboard = InlineKeyboardMarkup([
[InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("START", callback_data="start_data")],
[InlineKeyboardButton("SOURCE CODE", url="https://github.com/TroJanzHEX/Auto-Filter-Bot-V2")]
])
await query.message.edit_text(
script.ABOUT_MSG,
reply_markup=keyboard,
disable_web_page_preview=True
)
elif query.data == "delallconfirm":
await query.message.delete()
await deleteallfilters(client, query.message)
elif query.data == "delallcancel":
await query.message.reply_to_message.delete()
await query.message.delete()
else:
await query.answer("Thats not for you!!",show_alert=True)
def split_list(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
|
import json
import discord
from discord.ext import commands, tasks
from utils import *
class HelpCmd(commands.HelpCommand):
async def send_bot_help(self, mapping):
ctx = self.context
bot = ctx.bot
commands = bot.commands
result = []
for cmd in commands:
sign = self.get_command_signature(cmd)
result.append(f"`{sign.strip()}`: {cmd.help}")
await ctx.send("\n".join(result))
send_cog_help = send_command_help = send_group_help = send_bot_help
class MsgLeaderBot(commands.Bot):
def __init__(self):
helpattr = {"usage": ""}
super().__init__(
command_prefix="-",
help_command=HelpCmd(command_attrs=helpattr),
allowed_mentions=discord.AllowedMentions.none(),
)
# start json updater and file saver
self.json_updater.start()
self.save.start()
async def on_ready(self):
# runs every time the bot comes online (not only on first boot)
# just a way to know if the bot is online
print("Bot online!")
@tasks.loop(hours=8)
async def json_updater(self):
# update json every 8 hours
print("Updated!")
update_json(bot.msg_dic)
@tasks.loop(hours=24)
async def save(self):
# create/update json for every server every 24 hours
saver()
@json_updater.before_loop
async def before_update(self):
await bot.wait_until_ready()
@save.before_loop
async def before_save(self):
await bot.wait_until_ready()
bot = MsgLeaderBot()
try:
with open("settings.json", "r") as a:
bot.settings = json.loads(a.read())
bot.settings["token"]
except (FileNotFoundError, KeyError, json.decoder.JSONDecodeError):
token = input("input bot token: ")
bot.settings = {"token": token}
with open("settings.json", "w+") as a:
json.dump(bot.settings, a, indent=4)
try:
with open("messages.json", "r") as b:
bot.msg_dic = json.loads(b.read())
except (FileNotFoundError, json.decoder.JSONDecodeError):
bot.msg_dic = {}
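# Shapes inferred from the commands below (field values are illustrative only):
# bot.settings -> {"token": "...", "<guild id>": {"listen_to_all": True, "minimum": 20000}}
# bot.msg_dic  -> {"<guild id>": {"<user id>": {"messages": 0, "name": "...", "alt": None,
#                  "is_alt": False, "is_bot": False}}}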
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def autoupdate(ctx):
"""turns on/off automatic addition of new users to the leaderboard"""
server = str(ctx.message.guild.id)
if bot.settings[server]["listen_to_all"]:
bot.settings[server]["listen_to_all"] = False
update_settings(bot.settings)
return await ctx.send(
"New users **will not** get added to the leaderboard anymore"
)
else:
bot.settings[server]["listen_to_all"] = True
update_settings(bot.settings)
return await ctx.send("New users **will** get added to the leaderboard")
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def edit(ctx, user: discord.User, message_number: int):
"""update a user's message number"""
name = user.name
server = str(ctx.message.guild.id)
if str(user.id) not in bot.msg_dic[server]:
bot.msg_dic[server][str(user.id)] = {
"messages": message_number,
"name": name,
"alt": None,
"is_alt": False,
"is_bot": False,
}
else:
bot.msg_dic[server][str(user.id)]["messages"] = message_number
update_json(bot.msg_dic)
await ctx.send(f"{name} was saved with {message_number} messages")
@edit.error
async def edit_err(ctx, error):
# error handler for edit command
if isinstance(error, commands.BadArgument):
return await ctx.send("Error: you must input a valid number of messages")
await on_command_error(ctx, error, bypass_check=True)
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def alt(ctx, user: discord.User, alt: discord.User):
"""adds up the alt's messages to the user's messages"""
await ctx.send(alt_handler(bot, ctx, user, alt))
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def removealt(ctx, user: discord.User, alt: discord.User):
"""removes alt from user"""
await ctx.send(alt_handler(bot, ctx, user, alt, add=False))
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def addbot(ctx, user: discord.User):
"""saves a user as a bot (displayed on the bottom of the leaderboard)"""
server = str(ctx.message.guild.id)
try:
if bot.msg_dic[server][str(user.id)]["is_bot"]:
await ctx.send(f"{user} is already a bot")
else:
bot.msg_dic[server][str(user.id)]["is_bot"] = True
update_json(bot.msg_dic)
await ctx.send(f"{user} is now a bot")
except KeyError:
await ctx.send(f"Error: {user} is not listed in the leaderboard")
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def rmvbot(ctx, user: discord.User):
"""removes bot tag from a user"""
server = str(ctx.message.guild.id)
try:
if not bot.msg_dic[server][str(user.id)]["is_bot"]:
await ctx.send(f"{user} is already not a bot")
else:
bot.msg_dic[server][str(user.id)]["is_bot"] = False
update_json(bot.msg_dic)
await ctx.send(f"{user} is no longer a bot")
except KeyError:
await ctx.send(f"Error: {user} is not listed in the leaderboard")
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def delete(ctx, user: discord.User):
"""delete a user from the leaderboard"""
server = str(ctx.message.guild.id)
try:
bot.msg_dic[server].pop(str(user.id))
update_json(bot.msg_dic)
await ctx.send(f"{user} was deleted")
except KeyError:
await ctx.send(f"Error: {user} is not listed in the leaderboard")
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def minimum(ctx, value: int):
"""change the minimum amount of messages necessary to appear on the leaderboard (defaults to 20000)"""
server = str(ctx.message.guild.id)
bot.settings[server]["minimum"] = value
update_settings(bot.settings)
if value == 1:
await ctx.send(
f"Every user with more than {value} message will now be displayed on the leadeboard"
)
else:
await ctx.send(
f"Every user with more than {value} messages will now be displayed on the leadeboard"
)
@minimum.error
async def minimum_err(ctx, error):
# error handler for minimum command
if isinstance(error, commands.BadArgument):
return await ctx.send("Error: invalid value")
await on_command_error(ctx, error, bypass_check=True)
@bot.command()
async def source(ctx):
"""prints the source code link"""
await ctx.send("https://github.com/RafaeISilva/Message_LeaderBot")
@bot.command()
async def ping(ctx):
"""Tells the ping of the bot to the discord servers"""
update_json(bot.msg_dic) # because why not
await ctx.send(f"Pong! {round(bot.latency*1000)}ms")
@bot.command()
async def minfo(ctx):
"""prints the current minimum value to appear on the leaderboard"""
await ctx.send(
f"The current minimum is {bot.settings[str(ctx.message.guild.id)]["minimum"]} messages"
)
@bot.command()
async def name(ctx):
"""updates author's name on the leadeboard"""
author = ctx.author
msg_dic = bot.msg_dic[str(ctx.message.guild.id)]
if str(author.id) not in msg_dic:
return
name = author.name
if name == msg_dic[str(author.id)]["name"]:
return await ctx.send("Your name is already up to date")
else:
msg_dic[str(author.id)]["name"] = name
await ctx.send(f"Name updated to {name}")
@bot.command()
async def msglb(ctx):
"""prints the message leaderboard"""
update_json(bot.msg_dic)
server = str(ctx.message.guild.id)
author = str(ctx.author.id)
smgs_dic = {}
msg_lb = ""
bots_lb = ""
top_users = []
msg_dic = bot.msg_dic[server]
if author in msg_dic and msg_dic[author]["is_alt"]:
for id in msg_dic:
if msg_dic[id]["alt"] is not None and author in msg_dic[id]["alt"]:
author = id
break
for id in msg_dic:
        # excludes alt users from the leaderboard
if not msg_dic[id]["is_alt"]:
if not msg_dic[id]["alt"]:
smgs_dic[id] = msg_dic[id]["messages"]
            # adds each alt's message count to its main user's total
if msg_dic[id]["alt"]:
messages = 0
for alt in msg_dic[id]["alt"]:
messages += msg_dic[alt]["messages"]
smgs_dic[id] = msg_dic[id]["messages"] + messages
# sorts the leaderboard
smgs_dic = dict(sorted(smgs_dic.items(), key=lambda item: item[1], reverse=True))
# restricts the leaderboard to only users with more than a certain minimum
for user in smgs_dic:
if int(smgs_dic[user]) >= bot.settings[server]["minimum"]:
top_users.append(user)
# prevents bots from being on the top
if msg_dic[user]["is_bot"]:
bots_lb += f"{smgs_dic[user]}: {msg_dic[user]["name"]}\n"
elif msg_dic[user]["alt"] is not None:
if author == user:
msg_lb += "**"
if len(msg_dic[user]["alt"]) == 1:
msg_lb += f"{smgs_dic[user]}: {msg_dic[user]["name"]} + alt\n"
else:
alts = len(msg_dic[user]["alt"])
msg_lb += (
f"{smgs_dic[user]}: {msg_dic[user]["name"]} +{alts} alts\n"
)
if author == user:
msg_lb += "**"
else:
if author == user:
msg_lb += "**"
msg_lb += f"{smgs_dic[user]}: {msg_dic[user]["name"]}\n"
if author == user:
msg_lb += "**"
# adds bots to the end
msg_lb += "\n" + bots_lb
# adds message author to the end if not already on the leaderboard
if author in msg_dic and author not in top_users:
if msg_dic[author]["alt"]:
if len(msg_dic[author]["alt"]) == 1:
msg_lb += f"**{smgs_dic[author]}: {msg_dic[author]["name"]} + alt**"
else:
alts = len(msg_dic[author]["alt"])
msg_lb += (
f"**{smgs_dic[author]}: {msg_dic[author]["name"]} +{alts} alts**"
)
else:
msg_lb += f"**{smgs_dic[author]}: {msg_dic[author]["name"]}**"
embed = discord.Embed(
title="Message Leaderboard", color=7419530, description=msg_lb
)
await ctx.send(embed=embed)
@bot.command()
async def msg(ctx, username: str = ""):
"""check how many messages a user has"""
msg_dic = bot.msg_dic[str(ctx.message.guild.id)]
success = False
if not username:
username = str(ctx.author.id)
    # checks if input is a user's id on the leaderboard
if username.isdecimal():
try:
msg_dic[username]
success = True
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
# checks if input is a user mention and if user's id is on the leaderboard
elif "<@" in username:
if "!" in username:
username = username.replace("!", "")
username = username.replace("<@", "").replace(">", "")
try:
msg_dic[username]
success = True
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
# checks if input is a username on the leaderboard
else:
for id in msg_dic:
if msg_dic[id]["name"].lower() == username.lower():
username = id
success = True
break
try:
msg_dic[username]
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
if success:
name = msg_dic[username]["name"]
messages = msg_dic[username]["messages"]
if msg_dic[username]["alt"] is None:
await ctx.send(
discord.utils.escape_mentions(f"{name} has {messages} messages")
)
else:
alt_messages = 0
for alt in msg_dic[username]["alt"]:
alt_messages += msg_dic[alt]["messages"]
await ctx.send(
discord.utils.escape_mentions(
f"{name} has {messages} (+{alt_messages}) messages"
)
)
@bot.command()
async def altinfo(ctx, username: str):
"""check the name of a user's alt or vice versa"""
msg_dic = bot.msg_dic[str(ctx.message.guild.id)]
result = ""
success = False
    # checks if input is a user's id on the leaderboard
if username.isdecimal():
try:
msg_dic[username]
success = True
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
# checks if input is a user mention and if user's id is on the leaderboard
elif "<@" in username:
if "!" in username:
username = username.replace("!", "")
username = username.replace("<@", "").replace(">", "")
try:
msg_dic[username]
success = True
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
# checks if input is a username on the leaderboard
else:
for id in msg_dic:
if msg_dic[id]["name"].lower() == username.lower():
username = id
success = True
break
try:
msg_dic[username]
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
if success:
# checks if username is an alt and gets its name
if msg_dic[username]["is_alt"]:
for id in msg_dic:
if msg_dic[id]["alt"] is not None and username in msg_dic[id]["alt"]:
result = f"{msg_dic[username]["name"]} is an alt of {msg_dic[id]["name"]}"
break
# checks if username has an alt and gets its name
elif msg_dic[username]["alt"] is not None:
if len(msg_dic[username]["alt"]) == 1:
result = f"{msg_dic[msg_dic[username]["alt"][0]]["name"]} is an alt of {msg_dic[username]["name"]}"
else:
alt_list = msg_dic[username]["alt"]
result = ", ".join(msg_dic[alt]["name"] for alt in alt_list[0:-1])
result += f" and {msg_dic[alt_list[-1]]["name"]} are alts of {msg_dic[username]["name"]}"
else:
result = f"{msg_dic[username]["name"]} has no alts/is not an alt"
await ctx.send(result)
@bot.event
async def on_message(message):
user = message.author
if user == bot.user:
return
try:
msg_dic = bot.msg_dic[str(message.guild.id)]
except KeyError:
msg_dic = bot.msg_dic[str(message.guild.id)] = {}
try:
bot.settings[str(message.guild.id)]["minimum"]
bot.settings[str(message.guild.id)]["listen_to_all"]
except KeyError:
bot.settings[str(message.guild.id)] = {"minimum": 20000, "listen_to_all": True}
update_settings(bot.settings)
settings = bot.settings[str(message.guild.id)]
    # adds a point to the author every time a message is sent
if str(user.id) not in msg_dic and settings["listen_to_all"]:
if user.bot:
msg_dic[str(user.id)] = {
"messages": 1,
"name": user.name,
"alt": None,
"is_alt": False,
"is_bot": True,
}
else:
msg_dic[str(user.id)] = {
"messages": 1,
"name": user.name,
"alt": None,
"is_alt": False,
"is_bot": False,
}
elif str(user.id) in msg_dic:
msg_dic[str(user.id)]["messages"] += 1
# process a command (if valid)
await bot.process_commands(message)
@bot.event
async def on_message_delete(message):
user = str(message.author.id)
msg_dic = bot.msg_dic[str(message.guild.id)]
if user in msg_dic:
msg_dic[user]["messages"] -= 1
@bot.event
async def on_command_error(
ctx, error: commands.CommandError, *, bypass_check: bool = False
):
# handles command error
if ctx.command and ctx.command.has_error_handler() and not bypass_check:
# already have error handler
return
# get the "real" error
error = getattr(error, "original", error)
if isinstance(error, commands.CommandNotFound):
# command not found is annoying for most bot users, so just return nothing
return
if isinstance(error, commands.UserNotFound):
return await ctx.send(f"Error: user '{error.argument}' not found")
if isinstance(error, commands.MissingRequiredArgument):
return await ctx.send(f"Error: you must input a valid `{error.param.name}`")
if isinstance(error, commands.MissingPermissions):
        # probably over-complicated, but it keeps the message
        # consistent with the other error messages
error = str(error)
return await ctx.send(f"Error: {error[0].lower()}{error[1:-1]}")
try:
raise error
except discord.errors.Forbidden:
await ctx.author.send(f"```\n{error}\n```")
if __name__ == "__main__":
bot.run(bot.settings["token"])
|
import json
import discord
from discord.ext import commands, tasks
from utils import *
class HelpCmd(commands.HelpCommand):
async def send_bot_help(self, mapping):
ctx = self.context
bot = ctx.bot
commands = bot.commands
result = []
for cmd in commands:
sign = self.get_command_signature(cmd)
result.append(f"`{sign.strip()}`: {cmd.help}")
await ctx.send("\n".join(result))
send_cog_help = send_command_help = send_group_help = send_bot_help
class MsgLeaderBot(commands.Bot):
def __init__(self):
helpattr = {"usage": ""}
super().__init__(
command_prefix="-",
help_command=HelpCmd(command_attrs=helpattr),
allowed_mentions=discord.AllowedMentions.none(),
)
# start json updater and file saver
self.json_updater.start()
self.save.start()
async def on_ready(self):
        # runs every time the bot comes online (not only on first boot)
# just a way to know if the bot is online
print("Bot online!")
@tasks.loop(hours=8)
async def json_updater(self):
# update json every 8 hours
print("Updated!")
update_json(bot.msg_dic)
@tasks.loop(hours=24)
async def save(self):
# create/update json for every server every 24 hours
saver()
@json_updater.before_loop
async def before_update(self):
await bot.wait_until_ready()
@save.before_loop
async def before_save(self):
await bot.wait_until_ready()
bot = MsgLeaderBot()
try:
with open("settings.json", "r") as a:
bot.settings = json.loads(a.read())
bot.settings["token"]
except (FileNotFoundError, KeyError, json.decoder.JSONDecodeError):
token = input("input bot token: ")
bot.settings = {"token": token}
with open("settings.json", "w+") as a:
json.dump(bot.settings, a, indent=4)
try:
with open("messages.json", "r") as b:
bot.msg_dic = json.loads(b.read())
except (FileNotFoundError, json.decoder.JSONDecodeError):
bot.msg_dic = {}
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def autoupdate(ctx):
"""turns on/off automatic addition of new users to the leaderboard"""
server = str(ctx.message.guild.id)
if bot.settings[server]["listen_to_all"]:
bot.settings[server]["listen_to_all"] = False
update_settings(bot.settings)
return await ctx.send(
"New users **will not** get added to the leaderboard anymore"
)
else:
bot.settings[server]["listen_to_all"] = True
update_settings(bot.settings)
return await ctx.send("New users **will** get added to the leaderboard")
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def edit(ctx, user: discord.User, message_number: int):
"""update a user's message number"""
name = user.name
server = str(ctx.message.guild.id)
if str(user.id) not in bot.msg_dic[server]:
bot.msg_dic[server][str(user.id)] = {
"messages": message_number,
"name": name,
"alt": None,
"is_alt": False,
"is_bot": False,
}
else:
bot.msg_dic[server][str(user.id)]["messages"] = message_number
update_json(bot.msg_dic)
await ctx.send(f"{name} was saved with {message_number} messages")
@edit.error
async def edit_err(ctx, error):
# error handler for edit command
if isinstance(error, commands.BadArgument):
return await ctx.send("Error: you must input a valid number of messages")
await on_command_error(ctx, error, bypass_check=True)
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def alt(ctx, user: discord.User, alt: discord.User):
"""adds up the alt's messages to the user's messages"""
await ctx.send(alt_handler(bot, ctx, user, alt))
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def removealt(ctx, user: discord.User, alt: discord.User):
"""removes alt from user"""
await ctx.send(alt_handler(bot, ctx, user, alt, add=False))
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def addbot(ctx, user: discord.User):
"""saves a user as a bot (displayed on the bottom of the leaderboard)"""
server = str(ctx.message.guild.id)
try:
if bot.msg_dic[server][str(user.id)]["is_bot"]:
await ctx.send(f"{user} is already a bot")
else:
bot.msg_dic[server][str(user.id)]["is_bot"] = True
update_json(bot.msg_dic)
await ctx.send(f"{user} is now a bot")
except KeyError:
await ctx.send(f"Error: {user} is not listed in the leaderboard")
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def rmvbot(ctx, user: discord.User):
"""removes bot tag from a user"""
server = str(ctx.message.guild.id)
try:
if not bot.msg_dic[server][str(user.id)]["is_bot"]:
await ctx.send(f"{user} is already not a bot")
else:
bot.msg_dic[server][str(user.id)]["is_bot"] = False
update_json(bot.msg_dic)
await ctx.send(f"{user} is no longer a bot")
except KeyError:
await ctx.send(f"Error: {user} is not listed in the leaderboard")
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def delete(ctx, user: discord.User):
"""delete a user from the leaderboard"""
server = str(ctx.message.guild.id)
try:
bot.msg_dic[server].pop(str(user.id))
update_json(bot.msg_dic)
await ctx.send(f"{user} was deleted")
except KeyError:
await ctx.send(f"Error: {user} is not listed in the leaderboard")
@bot.command()
@commands.has_guild_permissions(manage_channels=True)
async def minimum(ctx, value: int):
"""change the minimum amount of messages necessary to appear on the leaderboard (defaults to 20000)"""
server = str(ctx.message.guild.id)
bot.settings[server]["minimum"] = value
update_settings(bot.settings)
if value == 1:
await ctx.send(
f"Every user with more than {value} message will now be displayed on the leadeboard"
)
else:
await ctx.send(
f"Every user with more than {value} messages will now be displayed on the leadeboard"
)
@minimum.error
async def minimum_err(ctx, error):
# error handler for minimum command
if isinstance(error, commands.BadArgument):
return await ctx.send("Error: invalid value")
await on_command_error(ctx, error, bypass_check=True)
@bot.command()
async def source(ctx):
"""prints the source code link"""
await ctx.send("https://github.com/RafaeISilva/Message_LeaderBot")
@bot.command()
async def ping(ctx):
"""Tells the ping of the bot to the discord servers"""
update_json(bot.msg_dic) # because why not
await ctx.send(f"Pong! {round(bot.latency*1000)}ms")
@bot.command()
async def minfo(ctx):
"""prints the current minimum value to appear on the leaderboard"""
await ctx.send(
f"The current minimum is {bot.settings[str(ctx.message.guild.id)]['minimum']} messages"
)
@bot.command()
async def name(ctx):
"""updates author's name on the leadeboard"""
author = ctx.author
msg_dic = bot.msg_dic[str(ctx.message.guild.id)]
if str(author.id) not in msg_dic:
return
name = author.name
if name == msg_dic[str(author.id)]["name"]:
return await ctx.send("Your name is already up to date")
else:
msg_dic[str(author.id)]["name"] = name
await ctx.send(f"Name updated to {name}")
@bot.command()
async def msglb(ctx):
"""prints the message leaderboard"""
update_json(bot.msg_dic)
server = str(ctx.message.guild.id)
author = str(ctx.author.id)
smgs_dic = {}
msg_lb = ""
bots_lb = ""
top_users = []
msg_dic = bot.msg_dic[server]
if author in msg_dic and msg_dic[author]["is_alt"]:
for id in msg_dic:
if msg_dic[id]["alt"] is not None and author in msg_dic[id]["alt"]:
author = id
break
for id in msg_dic:
        # excludes alt users from the leaderboard
if not msg_dic[id]["is_alt"]:
if not msg_dic[id]["alt"]:
smgs_dic[id] = msg_dic[id]["messages"]
            # adds each alt's message count to its main user's total
if msg_dic[id]["alt"]:
messages = 0
for alt in msg_dic[id]["alt"]:
messages += msg_dic[alt]["messages"]
smgs_dic[id] = msg_dic[id]["messages"] + messages
# sorts the leaderboard
smgs_dic = dict(sorted(smgs_dic.items(), key=lambda item: item[1], reverse=True))
# restricts the leaderboard to only users with more than a certain minimum
for user in smgs_dic:
if int(smgs_dic[user]) >= bot.settings[server]["minimum"]:
top_users.append(user)
# prevents bots from being on the top
if msg_dic[user]["is_bot"]:
bots_lb += f"{smgs_dic[user]}: {msg_dic[user]['name']}\n"
elif msg_dic[user]["alt"] is not None:
if author == user:
msg_lb += "**"
if len(msg_dic[user]["alt"]) == 1:
msg_lb += f"{smgs_dic[user]}: {msg_dic[user]['name']} + alt\n"
else:
alts = len(msg_dic[user]["alt"])
msg_lb += (
f"{smgs_dic[user]}: {msg_dic[user]['name']} +{alts} alts\n"
)
if author == user:
msg_lb += "**"
else:
if author == user:
msg_lb += "**"
msg_lb += f"{smgs_dic[user]}: {msg_dic[user]['name']}\n"
if author == user:
msg_lb += "**"
# adds bots to the end
msg_lb += "\n" + bots_lb
# adds message author to the end if not already on the leaderboard
if author in msg_dic and author not in top_users:
if msg_dic[author]["alt"]:
if len(msg_dic[author]["alt"]) == 1:
msg_lb += f"**{smgs_dic[author]}: {msg_dic[author]['name']} + alt**"
else:
alts = len(msg_dic[author]["alt"])
msg_lb += (
f"**{smgs_dic[author]}: {msg_dic[author]['name']} +{alts} alts**"
)
else:
msg_lb += f"**{smgs_dic[author]}: {msg_dic[author]['name']}**"
embed = discord.Embed(
title="Message Leaderboard", color=7419530, description=msg_lb
)
await ctx.send(embed=embed)
@bot.command()
async def msg(ctx, username: str = ""):
"""check how many messages a user has"""
msg_dic = bot.msg_dic[str(ctx.message.guild.id)]
success = False
if not username:
username = str(ctx.author.id)
    # checks if input is a user's id on the leaderboard
if username.isdecimal():
try:
msg_dic[username]
success = True
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
# checks if input is a user mention and if user's id is on the leaderboard
elif "<@" in username:
if "!" in username:
username = username.replace("!", "")
username = username.replace("<@", "").replace(">", "")
try:
msg_dic[username]
success = True
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
# checks if input is a username on the leaderboard
else:
for id in msg_dic:
if msg_dic[id]["name"].lower() == username.lower():
username = id
success = True
break
try:
msg_dic[username]
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
if success:
name = msg_dic[username]["name"]
messages = msg_dic[username]["messages"]
if msg_dic[username]["alt"] is None:
await ctx.send(
discord.utils.escape_mentions(f"{name} has {messages} messages")
)
else:
alt_messages = 0
for alt in msg_dic[username]["alt"]:
alt_messages += msg_dic[alt]["messages"]
await ctx.send(
discord.utils.escape_mentions(
f"{name} has {messages} (+{alt_messages}) messages"
)
)
@bot.command()
async def altinfo(ctx, username: str):
"""check the name of a user's alt or vice versa"""
msg_dic = bot.msg_dic[str(ctx.message.guild.id)]
result = ""
success = False
    # checks if input is a user's id on the leaderboard
if username.isdecimal():
try:
msg_dic[username]
success = True
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
# checks if input is a user mention and if user's id is on the leaderboard
elif "<@" in username:
if "!" in username:
username = username.replace("!", "")
username = username.replace("<@", "").replace(">", "")
try:
msg_dic[username]
success = True
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
# checks if input is a username on the leaderboard
else:
for id in msg_dic:
if msg_dic[id]["name"].lower() == username.lower():
username = id
success = True
break
try:
msg_dic[username]
except KeyError:
await ctx.send(
discord.utils.escape_mentions(f"Error: {username} not found")
)
if success:
# checks if username is an alt and gets its name
if msg_dic[username]["is_alt"]:
for id in msg_dic:
if msg_dic[id]["alt"] is not None and username in msg_dic[id]["alt"]:
result = f"{msg_dic[username]['name']} is an alt of {msg_dic[id]['name']}"
break
# checks if username has an alt and gets its name
elif msg_dic[username]["alt"] is not None:
if len(msg_dic[username]["alt"]) == 1:
result = f"{msg_dic[msg_dic[username]['alt'][0]]['name']} is an alt of {msg_dic[username]['name']}"
else:
alt_list = msg_dic[username]["alt"]
result = ", ".join(msg_dic[alt]["name"] for alt in alt_list[0:-1])
result += f" and {msg_dic[alt_list[-1]]['name']} are alts of {msg_dic[username]['name']}"
else:
result = f"{msg_dic[username]['name']} has no alts/is not an alt"
await ctx.send(result)
@bot.event
async def on_message(message):
user = message.author
if user == bot.user:
return
try:
msg_dic = bot.msg_dic[str(message.guild.id)]
except KeyError:
msg_dic = bot.msg_dic[str(message.guild.id)] = {}
try:
bot.settings[str(message.guild.id)]["minimum"]
bot.settings[str(message.guild.id)]["listen_to_all"]
except KeyError:
bot.settings[str(message.guild.id)] = {"minimum": 20000, "listen_to_all": True}
update_settings(bot.settings)
settings = bot.settings[str(message.guild.id)]
    # adds a point to the author every time a message is sent
if str(user.id) not in msg_dic and settings["listen_to_all"]:
if user.bot:
msg_dic[str(user.id)] = {
"messages": 1,
"name": user.name,
"alt": None,
"is_alt": False,
"is_bot": True,
}
else:
msg_dic[str(user.id)] = {
"messages": 1,
"name": user.name,
"alt": None,
"is_alt": False,
"is_bot": False,
}
elif str(user.id) in msg_dic:
msg_dic[str(user.id)]["messages"] += 1
# process a command (if valid)
await bot.process_commands(message)
@bot.event
async def on_message_delete(message):
user = str(message.author.id)
msg_dic = bot.msg_dic[str(message.guild.id)]
if user in msg_dic:
msg_dic[user]["messages"] -= 1
@bot.event
async def on_command_error(
ctx, error: commands.CommandError, *, bypass_check: bool = False
):
# handles command error
if ctx.command and ctx.command.has_error_handler() and not bypass_check:
# already have error handler
return
# get the "real" error
error = getattr(error, "original", error)
if isinstance(error, commands.CommandNotFound):
# command not found is annoying for most bot users, so just return nothing
return
if isinstance(error, commands.UserNotFound):
return await ctx.send(f"Error: user '{error.argument}' not found")
if isinstance(error, commands.MissingRequiredArgument):
return await ctx.send(f"Error: you must input a valid `{error.param.name}`")
if isinstance(error, commands.MissingPermissions):
        # probably over-complicated, but it keeps the message
        # consistent with the other error messages
error = str(error)
return await ctx.send(f"Error: {error[0].lower()}{error[1:-1]}")
try:
raise error
except discord.errors.Forbidden:
await ctx.author.send(f"```\n{error}\n```")
if __name__ == "__main__":
bot.run(bot.settings["token"])
|
import requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup as bs
from presscontrol.utils import tprint, read_cookies
from presscontrol.config import config
import pandas as pd
import random
import urllib
import newspaper
from urllib.parse import urlparse
import datetime
from googlesearch import search
def process_link(link):
link = get_direct_link(link)
if link != '':
try:
d = process_outer(link)
except requests.exceptions.ConnectionError:
error = '[-] Connection Error '+link
tprint(error, important=False)
d = {'error': 1, 'info': 'ConnectionError'}
except Exception as exc:
error = '[-] Error General '+link+' :' + str(exc)
error = error[:275]
tprint(error, important=False)
d = {'error': 1, 'info': error}
else:
# Mark for deletion if tweet does not contain any links.
error = '[-] Link Vacío en process_link'
tprint(error, important=False)
d = {'error': 1, 'borrar': 1, 'info': error}
return d
def get_direct_link(twitter_link):
'''
Extract direct link from tweet.
'''
if 'https://twitter.com' in twitter_link:
indirect_links = []
try:
page = requests.get(twitter_link)
lxml = bs(page.content, 'lxml')
box = lxml.find_all('p', {"class": "TweetTextSize--jumbo"})[0]
links = [l for l in box.find_all('a') if 'pic.twitter' not in l]
for l in links:
try:
indirect_links.append(l['data-expanded-url'])
except Exception as exc:
pass
if not links:
try:
indirect_links += [x for x in box.text.split() if 'http' in x]
except:
pass
except Exception as exc:
pass
#tprint(str(exc)[:60], important=False)
return indirect_links[0] if len(indirect_links)!=0 else ''
else:
return twitter_link
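# Example (sketch): non-Twitter links pass through unchanged, while Twitter links
# are resolved to the first expanded URL found in the tweet (or '' if none is found).
# >>> get_direct_link('https://www.example.com/nota')  # hypothetical URL
# 'https://www.example.com/nota'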
def process_inner(requests_page):
art = newspaper.Article('')
art.set_html(requests_page.content)
art.parse()
d = {}
d['title'] = art.title
d['description'] = art.meta_description
d['body'] = art.text
d['authors'] = ', '.join(art.authors)
d['date'] = art.publish_date
d['link'] = requests_page.url
d['source'] = urlparse(requests_page.url).netloc.split('.')[-2].capitalize()
d['image'] = art.top_image
d['error'] = 0
try:
        d['year'] = art.publish_date.year
except:
pass
try:
d['date'] = d['date'].replace(tzinfo=None)
except:
pass
return d
def process_outer(link):
page = requests.get(link)
source = urlparse(page.url).netloc.split('.')[-2].capitalize()
# Personalized treatment by source
action = {
'Df': process_Df,
'Emol': process_Emol,
'Latercera': process_Latercera,
'Cooperativa': process_Cooperativa,
'Elmostrador': process_Elmostrador,
'Biobiochile': process_Biobiochile,
}
if source in action.keys():
d = action[source](page)
else:
d = process_inner(page)
# Add year
try:
d['year'] = d['date'].year
except:
pass
if d['error'] == 0:
# Mark for deletion if link isn't from any source in config.txt.
try:
if d['source'] not in config['SOURCES']:
d['borrar'] = 1
d['info'] = f"Borrar, no pertenece a los dominios buscados. ({str(d["link"])[:25]}...)"
except:
pass
# Restrict text field to 50.000 characters
try:
d['body'] = d['body'][:50000]
except:
pass
# Encode Links
try:
d['image'] = urllib.parse.quote(d['image']).replace('%3A', ':')
except:
pass
try:
d['link'] = urllib.parse.quote(d['link']).replace('%3A', ':')
except:
pass
# Encode content and description and title
try:
d['body'] = d['body'].encode('latin1', 'ignore').decode('latin1')
#d['body'] = d['body'].replace('\x80', '')
except:
pass
try:
d['description'] = d['description'].encode('latin1', 'ignore').decode('latin1')
d['description'] = d['description'].replace('\x80', '')
except:
pass
try:
d['title'] = d['title'].encode('latin1', 'ignore').decode('latin1')
d['title'] = d['title'].replace('\x80', '')
except:
pass
try:
if d['title']==None or d['body']==None or d['title']=='' or d['body']=='':
d['error'] = 1
d['borrar'] = 1
d['info'] = 'Title and content blank'
except:
pass
return d
# Add additional steps by source:
def process_Df(page):
cookies = read_cookies()
page = requests.get(page.url, cookies=cookies['df'])
if '¡Página no encontrada!' in page.text:
try:
tprint('[·] df.cl page not found. Searching for title...', important=False)
title_ = page.url.split('/')[3].replace('-', '+')
search_ = f'https://www.df.cl/cgi-bin/prontus_search.cgi?search_texto="{title_}"&search_prontus=noticias&search_tmp=search.html&search_idx=ALL&search_modo=and&search_form=yes'
soup = bs(page.content, 'lxml')
page = requests.get(search_)
soup = bs(page.content, 'lxml')
box = soup.find('div', {'id': 'wrap-noticias'})
new_url = 'https://www.df.cl'+box.find('article').h2.a['href']
tprint('[+] df.cl page found!', important=False)
page = requests.get(new_url, cookies=cookies['df'])
except Exception as exc:
tprint('[-] df.cl page not found', important=False)
d = process_inner(page)
soup = bs(page.content, 'lxml')
try:
d['section'] = soup.find('meta', {'name': 'keywords'})['content'].strip()
except Exception as exc:
tprint('[-] Error parsing section (Df) - ', exc, important=False)
try:
d['body'] = '\n'.join([p for p in d['body'].split('\n') if len(p.split())>4 and p!=d['description']])
except Exception as exc:
tprint('[-] Error parsing body (Df) - ', exc, important=False)
return d
def process_Emol(page):
d = process_inner(page)
soup = bs(page.content, 'lxml')
try:
d['section'] = d['link'].split('/')[4].capitalize()
except Exception as exc:
tprint('[-] Error parsing section (Emol) - ', exc, important=False)
try:
d['authors'] = soup.find('div', {'class', 'info-notaemol-porfecha'}).text.split('|')[-1].strip().replace('Por ','').replace('Redactado por ', '')
except Exception as exc:
        tprint('[-] Error parsing authors (Emol) - ', exc, important=False)
return d
def process_Latercera(page):
d = {}
if 'Lo sentimos, estamos actualizando el sitio' not in page.text:
d = process_inner(page)
else:
        ### Search for the article on Google if necessary.
scraped_link = page.url.strip('/')
tprint('[-] Link Latercera no encontrado', page.url, important=False)
new_link = 'https://www.latercera.com/noticia/'+'-'.join([p for p in scraped_link.split('/')[-1].split('.')[0].split('-') if not p.isdigit()])
#print(new_link)
page = requests.get(new_link)
if 'Lo sentimos, estamos actualizando el sitio' not in page.text:
d = process_inner(page)
tprint('[+] Link Latercera encontrado (intento:1): ', new_link, important=False)
else:
try:
tprint('[·] Google Searching...', important=False)
buscar = ' '.join([p for p in scraped_link.split('/')[-1].split('.')[0].split('-') if not p.isdigit()]) + ' site:latercera.com'
results = search(buscar, stop=5)
rs = []
for r in results:
rs.append(r)
result = [r for r in rs if 'sitemap' not in r][0]
if 'sitemap' not in result:
tprint('[+] Resultado en Google (intento:2):',result, important=False)
page = requests.get(result)
d = process_inner(page)
else:
d['error'] = 1
d['info'] = 'Link Latercera no encontrado en google'
except Exception as exc:
tprint('[-] Link Latercera no encontrado', important=False)
d['error'] = 1
d['info'] = 'Link Latercera no encontrado en google'
soup = bs(page.content, 'lxml')
    ### Recover the image.
try:
d['image'] = soup.find('figure').find('img')['src']
except Exception as exc:
tprint('[-] Error parsing image (Latercera) - ', exc, important=False)
    ### Recover the author.
try:
d['authors'] = [h.text for h in soup.find_all('h4') if 'Autor' in h.text][0].replace('Autor: ', '')
except Exception as exc:
tprint('[-] Error parsing authors (Latercera) - ', exc, important=False)
try:
if d['description'] == None:
d['description'] = soup.find('div', {'class': 'bajada-art'}).text
except Exception as exc:
tprint('[-] Error parsing description (Latercera) - ', exc, important=False)
try:
if d['date'] == None:
date = ' '.join(soup.find('span', {'class': 'time-ago'}).text.replace('|', '').split())
d['date'] = datetime.datetime.strptime(date, '%d/%m/%Y %I:%M %p')
except Exception as exc:
tprint('[-] Error parsing date (Latercera) - ', exc, important=False)
try:
d['section'] = soup.find('meta', property='article:section')['content']
except:
try:
d['section'] = [x.find('a').text for x in soup.find_all('h4') if x.find('a')!=None and 'canal' in x.find('a')['href']][0]
except Exception as exc:
tprint('[-] Error parsing section (Latercera) - ', exc, important=False)
d['tags'] = ', '.join([x['content'] for x in soup.find_all('meta', property='article:tag')])
if not d['tags']:
try:
d['tags'] = ', '.join([x.text for x in soup.find('div', {'class': 'tags-interior'}).find_all('a')])
except Exception as exc:
tprint('[-] Error parsing tags (Latercera) - ', exc, important=False)
return d
def process_Cooperativa(page):
d = process_inner(page)
try:
if 'al aire libre' in d['title'].lower():
            d['borrar'] = 1
            d['info'] = 'Borrar, Al aire libre'
except:
pass
soup = bs(page.content, 'lxml')
try:
d['authors'] = soup.find('div', {'class': 'fecha-publicacion'}).find('span').text
except Exception as exc:
tprint('[-] Error parsing authors (Cooperativa) - ', exc, important=False)
try:
d['section'] = soup.find('a', {'id': 'linkactivo'}).text
except Exception as exc:
tprint('[-] Error parsing section (Cooperativa) - ', exc, important=False)
try:
d['tags'] = soup.find('meta', {'name': 'keywords'})['content'].strip()
except Exception as exc:
tprint('[-] Error parsing tags (Cooperativa) - ', exc, important=False)
try:
d['link'] = soup.find('meta', property='og:url')['content']
except Exception as exc:
tprint('[-] Error parsing link (Cooperativa) - ', exc, important=False)
if not d['date']:
try:
date = [x for x in d['link'].split('/') if '-' in x][-1].split('-')
d['date'] = datetime.datetime(*map(int,date))
except Exception as exc:
tprint('[-] Error parsing date (Cooperativa) - ', exc, important=False)
try:
if 'www.cooperativa.cl' not in d['image'] and d['image']:
d['image'] = 'https://www.cooperativa.cl'+d['image']
except Exception as exc:
tprint('[-] Error fixing image (Cooperativa) - ', exc, important=False)
return d
def process_Elmostrador(page):
d = process_inner(page)
soup = bs(page.content, 'lxml')
d['description'] = None
try:
d['description'] = soup.find('figcaption').text
except Exception as exc:
tprint('[-] Error parsing description (Elmostrador) - ',exc, important=False)
try:
d['authors'] = soup.find('p', {'class': 'autor-y-fecha'}).find('a').text
except Exception as exc:
tprint('[-] Error parsing authors (Elmostrador) - ',exc, important=False)
try:
if 'www.elmostrador.cl' not in d['image'] and d['image']:
d['image'] = 'https://www.elmostrador.cl'+d['image']
except Exception as exc:
tprint('[-] Error fixing image (Elmostrador) - ',exc, important=False)
if not d['date']:
try:
date = [s for s in d['link'].split('/') if s.isdigit()][:3]
d['date'] = datetime.datetime(*map(int,date))
except Exception as exc:
tprint('[-] Error parsing date (Elmostrador) - ',exc, important=False)
try:
d['section'] = ' '.join([x for x in soup.find_all('h2') if x.find('i')!=None][0].text.split())
except Exception as exc:
tprint('[-] Error parsing section (Elmostrador) - ',exc, important=False)
try:
d['body'] = d['body'].split('__________________')[0]
except Exception as exc:
tprint('[-] Error fixing body (Elmostrador) - ',exc, important=False)
return d
def process_Biobiochile(page):
d = process_inner(page)
soup = bs(page.content, 'lxml')
try:
d['authors'] = soup.find('div', {'class': 'nota-autor'}).find('a').text
except Exception as exc:
tprint('[-] Error parsing authors (Biobiochile) - ',exc, important=False)
try:
d['section'] = ' '.join(soup.find('div', {'class': 'categoria-titulo-nota'}).text.split())
except Exception as exc:
tprint('[-] Error parsing section (Biobiochile) - ',exc, important=False)
try:
d['body'] = soup.find('div', {'class': 'nota-body'}).text
d['body'] = d['body'].replace('Etiquetas de esta nota:', '')
except Exception as exc:
tprint('[-] Error parsing body (Biobiochile) - ',exc, important=False)
try:
d['description'] = None
except Exception as exc:
tprint('[-] Error parsing description (Biobiochile) - ',exc, important=False)
return d
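# Minimal usage sketch (assumes network access; the URL is hypothetical):
# process_link returns a dict with article fields ('title', 'body', 'date',
# 'source', ...) plus 'error', and 'borrar'/'info' when the row should be dropped.
# >>> d = process_link('https://www.emol.com/noticias/ejemplo')
# >>> d['error'], d.get('borrar'), d.get('info')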
|
import requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup as bs
from presscontrol.utils import tprint, read_cookies
from presscontrol.config import config
import pandas as pd
import random
import urllib
import newspaper
from urllib.parse import urlparse
import datetime
from googlesearch import search
def process_link(link):
link = get_direct_link(link)
if link != '':
try:
d = process_outer(link)
except requests.exceptions.ConnectionError:
error = '[-] Connection Error '+link
tprint(error, important=False)
d = {'error': 1, 'info': 'ConnectionError'}
except Exception as exc:
error = '[-] Error General '+link+' :' + str(exc)
error = error[:275]
tprint(error, important=False)
d = {'error': 1, 'info': error}
else:
# Mark for deletion if tweet does not contain any links.
error = '[-] Link Vacío en process_link'
tprint(error, important=False)
d = {'error': 1, 'borrar': 1, 'info': error}
return d
def get_direct_link(twitter_link):
'''
Extract direct link from tweet.
'''
if 'https://twitter.com' in twitter_link:
indirect_links = []
try:
page = requests.get(twitter_link)
lxml = bs(page.content, 'lxml')
box = lxml.find_all('p', {"class": "TweetTextSize--jumbo"})[0]
links = [l for l in box.find_all('a') if 'pic.twitter' not in l]
for l in links:
try:
indirect_links.append(l['data-expanded-url'])
except Exception as exc:
pass
if not links:
try:
indirect_links += [x for x in box.text.split() if 'http' in x]
except:
pass
except Exception as exc:
pass
#tprint(str(exc)[:60], important=False)
return indirect_links[0] if len(indirect_links)!=0 else ''
else:
return twitter_link
def process_inner(requests_page):
art = newspaper.Article('')
art.set_html(requests_page.content)
art.parse()
d = {}
d['title'] = art.title
d['description'] = art.meta_description
d['body'] = art.text
d['authors'] = ', '.join(art.authors)
d['date'] = art.publish_date
d['link'] = requests_page.url
d['source'] = urlparse(requests_page.url).netloc.split('.')[-2].capitalize()
d['image'] = art.top_image
d['error'] = 0
try:
        d['year'] = art.publish_date.year
except:
pass
try:
d['date'] = d['date'].replace(tzinfo=None)
except:
pass
return d
def process_outer(link):
page = requests.get(link)
source = urlparse(page.url).netloc.split('.')[-2].capitalize()
# Personalized treatment by source
action = {
'Df': process_Df,
'Emol': process_Emol,
'Latercera': process_Latercera,
'Cooperativa': process_Cooperativa,
'Elmostrador': process_Elmostrador,
'Biobiochile': process_Biobiochile,
}
if source in action.keys():
d = action[source](page)
else:
d = process_inner(page)
# Add year
try:
d['year'] = d['date'].year
except:
pass
if d['error'] == 0:
# Mark for deletion if link isn't from any source in config.txt.
try:
if d['source'] not in config['SOURCES']:
d['borrar'] = 1
d['info'] = f"Borrar, no pertenece a los dominios buscados. ({str(d['link'])[:25]}...)"
except:
pass
# Restrict text field to 50.000 characters
try:
d['body'] = d['body'][:50000]
except:
pass
# Encode Links
try:
d['image'] = urllib.parse.quote(d['image']).replace('%3A', ':')
except:
pass
try:
d['link'] = urllib.parse.quote(d['link']).replace('%3A', ':')
except:
pass
# Encode content and description and title
try:
d['body'] = d['body'].encode('latin1', 'ignore').decode('latin1')
#d['body'] = d['body'].replace('\x80', '')
except:
pass
try:
d['description'] = d['description'].encode('latin1', 'ignore').decode('latin1')
d['description'] = d['description'].replace('\x80', '')
except:
pass
try:
d['title'] = d['title'].encode('latin1', 'ignore').decode('latin1')
d['title'] = d['title'].replace('\x80', '')
except:
pass
try:
if d['title']==None or d['body']==None or d['title']=='' or d['body']=='':
d['error'] = 1
d['borrar'] = 1
d['info'] = 'Title and content blank'
except:
pass
return d
# Add additional steps by source:
def process_Df(page):
cookies = read_cookies()
page = requests.get(page.url, cookies=cookies['df'])
if '¡Página no encontrada!' in page.text:
try:
tprint('[·] df.cl page not found. Searching for title...', important=False)
title_ = page.url.split('/')[3].replace('-', '+')
search_ = f'https://www.df.cl/cgi-bin/prontus_search.cgi?search_texto="{title_}"&search_prontus=noticias&search_tmp=search.html&search_idx=ALL&search_modo=and&search_form=yes'
soup = bs(page.content, 'lxml')
page = requests.get(search_)
soup = bs(page.content, 'lxml')
box = soup.find('div', {'id': 'wrap-noticias'})
new_url = 'https://www.df.cl'+box.find('article').h2.a['href']
tprint('[+] df.cl page found!', important=False)
page = requests.get(new_url, cookies=cookies['df'])
except Exception as exc:
tprint('[-] df.cl page not found', important=False)
d = process_inner(page)
soup = bs(page.content, 'lxml')
try:
d['section'] = soup.find('meta', {'name': 'keywords'})['content'].strip()
except Exception as exc:
tprint('[-] Error parsing section (Df) - ', exc, important=False)
try:
d['body'] = '\n'.join([p for p in d['body'].split('\n') if len(p.split())>4 and p!=d['description']])
except Exception as exc:
tprint('[-] Error parsing body (Df) - ', exc, important=False)
return d
def process_Emol(page):
d = process_inner(page)
soup = bs(page.content, 'lxml')
try:
d['section'] = d['link'].split('/')[4].capitalize()
except Exception as exc:
tprint('[-] Error parsing section (Emol) - ', exc, important=False)
try:
d['authors'] = soup.find('div', {'class', 'info-notaemol-porfecha'}).text.split('|')[-1].strip().replace('Por ','').replace('Redactado por ', '')
except Exception as exc:
        tprint('[-] Error parsing authors (Emol) - ', exc, important=False)
return d
def process_Latercera(page):
d = {}
if 'Lo sentimos, estamos actualizando el sitio' not in page.text:
d = process_inner(page)
else:
        ### Search for the article on Google if necessary.
scraped_link = page.url.strip('/')
tprint('[-] Link Latercera no encontrado', page.url, important=False)
new_link = 'https://www.latercera.com/noticia/'+'-'.join([p for p in scraped_link.split('/')[-1].split('.')[0].split('-') if not p.isdigit()])
#print(new_link)
page = requests.get(new_link)
if 'Lo sentimos, estamos actualizando el sitio' not in page.text:
d = process_inner(page)
tprint('[+] Link Latercera encontrado (intento:1): ', new_link, important=False)
else:
try:
tprint('[·] Google Searching...', important=False)
buscar = ' '.join([p for p in scraped_link.split('/')[-1].split('.')[0].split('-') if not p.isdigit()]) + ' site:latercera.com'
results = search(buscar, stop=5)
rs = []
for r in results:
rs.append(r)
result = [r for r in rs if 'sitemap' not in r][0]
if 'sitemap' not in result:
tprint('[+] Resultado en Google (intento:2):',result, important=False)
page = requests.get(result)
d = process_inner(page)
else:
d['error'] = 1
d['info'] = 'Link Latercera no encontrado en google'
except Exception as exc:
tprint('[-] Link Latercera no encontrado', important=False)
d['error'] = 1
d['info'] = 'Link Latercera no encontrado en google'
soup = bs(page.content, 'lxml')
    ### Recover the image.
try:
d['image'] = soup.find('figure').find('img')['src']
except Exception as exc:
tprint('[-] Error parsing image (Latercera) - ', exc, important=False)
    ### Recover the author.
try:
d['authors'] = [h.text for h in soup.find_all('h4') if 'Autor' in h.text][0].replace('Autor: ', '')
except Exception as exc:
tprint('[-] Error parsing authors (Latercera) - ', exc, important=False)
try:
if d['description'] == None:
d['description'] = soup.find('div', {'class': 'bajada-art'}).text
except Exception as exc:
tprint('[-] Error parsing description (Latercera) - ', exc, important=False)
try:
if d['date'] == None:
date = ' '.join(soup.find('span', {'class': 'time-ago'}).text.replace('|', '').split())
d['date'] = datetime.datetime.strptime(date, '%d/%m/%Y %I:%M %p')
except Exception as exc:
tprint('[-] Error parsing date (Latercera) - ', exc, important=False)
try:
d['section'] = soup.find('meta', property='article:section')['content']
except:
try:
d['section'] = [x.find('a').text for x in soup.find_all('h4') if x.find('a')!=None and 'canal' in x.find('a')['href']][0]
except Exception as exc:
tprint('[-] Error parsing section (Latercera) - ', exc, important=False)
d['tags'] = ', '.join([x['content'] for x in soup.find_all('meta', property='article:tag')])
if not d['tags']:
try:
d['tags'] = ', '.join([x.text for x in soup.find('div', {'class': 'tags-interior'}).find_all('a')])
except Exception as exc:
tprint('[-] Error parsing tags (Latercera) - ', exc, important=False)
return d
def process_Cooperativa(page):
d = process_inner(page)
try:
if 'al aire libre' in d['title'].lower():
            d['borrar'] = 1
            d['info'] = 'Borrar, Al aire libre'
except:
pass
soup = bs(page.content, 'lxml')
try:
d['authors'] = soup.find('div', {'class': 'fecha-publicacion'}).find('span').text
except Exception as exc:
tprint('[-] Error parsing authors (Cooperativa) - ', exc, important=False)
try:
d['section'] = soup.find('a', {'id': 'linkactivo'}).text
except Exception as exc:
tprint('[-] Error parsing section (Cooperativa) - ', exc, important=False)
try:
d['tags'] = soup.find('meta', {'name': 'keywords'})['content'].strip()
except Exception as exc:
tprint('[-] Error parsing tags (Cooperativa) - ', exc, important=False)
try:
d['link'] = soup.find('meta', property='og:url')['content']
except Exception as exc:
tprint('[-] Error parsing link (Cooperativa) - ', exc, important=False)
if not d['date']:
try:
date = [x for x in d['link'].split('/') if '-' in x][-1].split('-')
d['date'] = datetime.datetime(*map(int,date))
except Exception as exc:
tprint('[-] Error parsing date (Cooperativa) - ', exc, important=False)
try:
if 'www.cooperativa.cl' not in d['image'] and d['image']:
d['image'] = 'https://www.cooperativa.cl'+d['image']
except Exception as exc:
tprint('[-] Error fixing image (Cooperativa) - ', exc, important=False)
return d
def process_Elmostrador(page):
d = process_inner(page)
soup = bs(page.content, 'lxml')
d['description'] = None
try:
d['description'] = soup.find('figcaption').text
except Exception as exc:
tprint('[-] Error parsing description (Elmostrador) - ',exc, important=False)
try:
d['authors'] = soup.find('p', {'class': 'autor-y-fecha'}).find('a').text
except Exception as exc:
tprint('[-] Error parsing authors (Elmostrador) - ',exc, important=False)
try:
if 'www.elmostrador.cl' not in d['image'] and d['image']:
d['image'] = 'https://www.elmostrador.cl'+d['image']
except Exception as exc:
tprint('[-] Error fixing image (Elmostrador) - ',exc, important=False)
if not d['date']:
try:
date = [s for s in d['link'].split('/') if s.isdigit()][:3]
d['date'] = datetime.datetime(*map(int,date))
except Exception as exc:
tprint('[-] Error parsing date (Elmostrador) - ',exc, important=False)
try:
d['section'] = ' '.join([x for x in soup.find_all('h2') if x.find('i')!=None][0].text.split())
except Exception as exc:
tprint('[-] Error parsing section (Elmostrador) - ',exc, important=False)
try:
d['body'] = d['body'].split('__________________')[0]
except Exception as exc:
tprint('[-] Error fixing body (Elmostrador) - ',exc, important=False)
return d
def process_Biobiochile(page):
d = process_inner(page)
soup = bs(page.content, 'lxml')
try:
d['authors'] = soup.find('div', {'class': 'nota-autor'}).find('a').text
except Exception as exc:
tprint('[-] Error parsing authors (Biobiochile) - ',exc, important=False)
try:
d['section'] = ' '.join(soup.find('div', {'class': 'categoria-titulo-nota'}).text.split())
except Exception as exc:
tprint('[-] Error parsing section (Biobiochile) - ',exc, important=False)
try:
d['body'] = soup.find('div', {'class': 'nota-body'}).text
d['body'] = d['body'].replace('Etiquetas de esta nota:', '')
except Exception as exc:
tprint('[-] Error parsing body (Biobiochile) - ',exc, important=False)
try:
d['description'] = None
except Exception as exc:
tprint('[-] Error parsing description (Biobiochile) - ',exc, important=False)
return d
|
"""Exercício Python 088: Faça um programa que ajude um jogador da MEGA SENA a criar palpites.
O programa vai perguntar quantos jogos serão gerados e vai sortear 6 números entre 1 e 60 para cada jogo,
cadastrando tudo em uma lista composta."""
print('-=' * 25)
print(f'{'JOGO DA MEGA SENA':^50}')
print('-=' * 25)
total_jogos = int(input('Quantos jogos você quer que eu sorteie? '))
print('-' * 50)
print(' ' * 15, f'Sorteando {total_jogos} jogos', ' ' * 15)
print('-' * 50)
from time import sleep
from random import randint
c = 0
jogo = list()
jogos = list()
for i in range(0, total_jogos):
while True:
        n = randint(1, 60)
if n not in jogo:
jogo.append(n)
c += 1
if c >= 6:
break
c = 0
jogo.sort()
jogos.append(jogo[:])
jogo.clear()
for i, jogo in enumerate(jogos):
print(f'Jogo {i+1}: {jogos[i]}')
sleep(0.5)
print('-' * 50)
print(f'{'Boa sorte!':^50}')
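# Note (optional alternative, not used above): random.sample can draw 6 distinct
# numbers from 1..60 in one call, e.g.:
#   jogo = sorted(random.sample(range(1, 61), 6))  # requires `import random`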
|
"""Exercício Python 088: Faça um programa que ajude um jogador da MEGA SENA a criar palpites.
O programa vai perguntar quantos jogos serão gerados e vai sortear 6 números entre 1 e 60 para cada jogo,
cadastrando tudo em uma lista composta."""
print('-=' * 25)
print(f'{"JOGO DA MEGA SENA":^50}')
print('-=' * 25)
total_jogos = int(input('Quantos jogos você quer que eu sorteie? '))
print('-' * 50)
print(' ' * 15, f'Sorteando {total_jogos} jogos', ' ' * 15)
print('-' * 50)
from time import sleep
from random import randint
c = 0
jogo = list()
jogos = list()
for i in range(0, total_jogos):
while True:
        n = randint(1, 60)
if n not in jogo:
jogo.append(n)
c += 1
if c >= 6:
break
c = 0
jogo.sort()
jogos.append(jogo[:])
jogo.clear()
for i, jogo in enumerate(jogos):
print(f'Jogo {i+1}: {jogos[i]}')
sleep(0.5)
print('-' * 50)
print(f'{"Boa sorte!":^50}')
|
#!/usr/bin/env python3
import os
import subprocess
import json
# Text styling class
class Text:
HEADER = '\033[1;34m'
SUCCESS = '\033[1;32m'
FAIL = '\033[1;21m'
ENDC = '\033[0m'
# Shell commands class
class Commands:
INSTALL_PY_DEPS = 'sudo apt-get install -y python3 python3-distutils python3-pip python3-setuptools python3-venv'
CLONE_PIIRBLASTER = 'git clone https://github.com/Electronya/PiirBlaster.git'
CREATE_PIIRBLASTER_SVC = 'sudo cp ./PiirBlaster/scripts/services/piirblaster.service /etc/systemd/system'
ENABLE_PIIRBLASTER_SVC = 'sudo systemctl enable piirblaster.service'
START_PIIRBLASTER_SVC = 'sudo systemctl start piirblaster.service'
    CREATE_VIRTUAL_ENV = 'python3 -m venv venv'
INSTALL_DEPENDENCIES = 'venv/bin/pip install -r requirements.txt'
DWNLD_PIGPIO = 'wget https://github.com/joan2937/pigpio/archive/master.zip'
UNZIP_PIGPIO = 'unzip master.zip'
BUILD_PIGPIO = 'make'
INSTALL_PIGPIO = 'sudo make install'
CREATE_PIGPIO_SVC = 'sudo cp ./PiirBlaster/scripts/services/pigpiod.service /etc/systemd/system'
ENABLE_PIGPIO_SVC = 'sudo systemctl enable pigpiod.service'
START_PIGPIO_SVC = 'sudo systemctl start pigpiod.service'
# Execute shell command
def execCommand(command):
process = subprocess.run(command.split(' '))
return process.returncode
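# Note: command.split(' ') is enough for the simple commands defined in Commands,
# but a sturdier sketch could tokenize with shlex so quoted arguments survive, e.g.:
#   subprocess.run(shlex.split(command)).returncode  # requires `import shlex`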
# Download PIGPIO
def downloadPIGPIO():
print(f"{Text.HEADER}*** DOWNLOADING PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.DWNLD_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO DOWNLOAD FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO DOWNLOAD DONE{Text.ENDC}")
return True
# Unzip PIGPIO
def unzipPIGPIO():
print(f"{Text.HEADER}*** UNZIPPNG PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.UNZIP_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO UNZIP FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO UNZIP DONE{Text.ENDC}")
return True
# Building PIGPIO
def buildPIGPIO():
print(f"{Text.HEADER}*** BUILDING PIGPIO ***{Text.ENDC}")
os.chdir('pigpio-master')
cmdResult = execCommand(Commands.BUILD_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO BUILD FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO BUILD DONE{Text.ENDC}")
return True
# Install PIGPIO
def installPIGPIO():
print(f"{Text.HEADER}*** INSTALLING PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_PIGPIO)
if cmdResult !=0:
print(f"{Text.FAIL}PIGPIO INSTALL FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO INSTALL DONE{Text.ENDC}")
return True
# Creating PIGPIO service
def createPigpioSvc():
print(f"{Text.HEADER}*** CREATING PIGPIO SERVICE ***{Text.ENDC}")
os.chdir('..')
cmdResult = execCommand(Commands.CREATE_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Enabling PIGPIO service
def enablePigpioSvc():
print(f"{Text.HEADER}*** ENABLING PIGPIO SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.ENABLE_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}ENABLING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}ENABLING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Starting PIGPIO service
def startPigpioSvc():
print(f"{Text.HEADER}*** STARTING PIGPIO SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.START_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}STARTING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}STARTING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Setup PIGPIO service
def setupPigpioSvc():
# TODO: Check if sevice is already installed & Split in multiple functions
print(f"{Text.HEADER}*** SETTING UP PIGPIO SERVICE ***{Text.ENDC}")
if (downloadPIGPIO() and unzipPIGPIO() and buildPIGPIO() and installPIGPIO() and
createPigpioSvc() and enablePigpioSvc() and startPigpioSvc()):
print(f"{Text.SUCCESS}SETTING UP PIGPIO SERVICE DONE{Text.ENDC}")
return True
print(f"{Text.FAIL}SETTING UP PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
# Install Python dependencies
def installPythonDeps():
print(f"{Text.HEADER}*** INSTALLING PYTHON DEPENDENCIES ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_PY_DEPS)
if cmdResult != 0:
print(f"{Text.FAIL}INSTALLING PYTHON DEPS FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}INTALLING PYTHON DEPS DONE{Text.ENDC}")
return True
# Clone PiirBlaster repo
def clonePiirBlaster():
print(f"{Text.HEADER}*** CLONING PiirBlaster REPO ***{Text.ENDC}")
cmdResult = execCommand(Commands.CLONE_PIIRBLASTER)
if cmdResult != 0:
print(f"{Text.FAIL}CLONING PiirBlaster FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CLONING PiirBlaster DONE{Text.ENDC}")
return True
# Ask for MQTT configuration
def getMqttConfig():
print(f"{Text.HEADER}*** GATTERING MQTT CONFIGURATION INFO ***{Text.ENDC}")
os.chdir('PiirBlaster')
with open('config/components/mqtt.json', 'r+') as mqttConfigFile:
mqttConfig = json.loads(mqttConfigFile.read())
mqttConfig['broker']['hostname'] = input('Please enter the hostname/ip of the broker: ')
mqttConfig['broker']['port'] = int(input(f"Please enter the broker port [{mqttConfig["broker"]["port"]}]: ") or mqttConfig['broker']['port'])
mqttConfig['user']['name'] = input('Please enter the service username: ')
mqttConfig['user']['password'] = input('Please enter the service password: ')
        newContent = json.dumps(mqttConfig, sort_keys=True, indent=2)
        # Rewind and truncate before writing so the new config replaces the
        # old file content instead of being appended after it.
        mqttConfigFile.seek(0)
        mqttConfigFile.write(newContent)
        mqttConfigFile.truncate()
os.chdir('..')
# Creating virtual environment
def createVirtualEnv():
print(f"{Text.HEADER}*** CREATING VIRTUAL ENVIRONMENT ***{Text.ENDC}")
os.chdir('PiirBlaster')
cmdResult = execCommand(Commands.CREATE_VRITUAL_ENV)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING VIRTUAL ENVIRONEMENT FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING VIRTUAL ENVIRONMENT DONE{Text.ENDC}")
return True
# Install dependencies
def installDependencies():
print(f"{Text.HEADER}*** INSTALLING PiirBlaster DEPENDENCIES ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_DEPENDENCIES)
if cmdResult != 0:
print(f"{Text.FAIL}INSTALLING PiirBlaster DEPENDENCIES FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}INSTALLING PiirBlaster DEPENDENCIES DONE{Text.ENDC}")
os.chdir('..')
return True
# Create PiirBlaster service
def createPiirBlasterSvc():
print(f"{Text.HEADER}*** CREATING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.CREATE_PIIRBLASTER_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Enabling PiirBlaster Service
def enablePiirBlasterSvc():
print(f"{Text.HEADER}*** ENABLING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.ENABLE_PIIRBLASTER_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}ENALBING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}ENABLING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Start PiirBlaster Service
def startPiirBlasterSvc():
print(f"{Text.HEADER}*** STARTING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.START_PIIRBLASTER_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}STARTING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}STARTING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Setup PiirBlaster service
def setupPiirBlasterSvc():
    # TODO: Check if the service is already installed
getMqttConfig()
print(f"{Text.HEADER}*** SETTING UP PiirBlaster SERVICE ***{Text.ENDC}")
if (createVirtualEnv() and installDependencies() and createPiirBlasterSvc() and
enablePiirBlasterSvc() and startPiirBlasterSvc()):
print(f"{Text.SUCCESS}SETTING UP PiirBlaster SERVICE DONE{Text.ENDC}")
return True
print(f"{Text.FAIL}SETTING UP PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
# print(f"{Text.HEADER}*** SERVICE CONFIGURATION ***{Text.ENDC}")
# Ask for the hostname the service will use for advertising
# hostname = input(f"Please enter the hostname that the service will use for advertising:")
# Install PiirBlaster
def install():
if (installPythonDeps() and clonePiirBlaster() and setupPigpioSvc() and setupPiirBlasterSvc()):
print(f"{Text.SUCCESS}INSATALLING PiirBlaster SERVICE DONE{Text.ENDC}")
exit()
print(f"{Text.FAIL}INTALLING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
if __name__ == '__main__':
install()
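# Usage sketch (assumptions: the script is saved as install.py on a Raspberry
# Pi OS host with network access, and the invoking user can run sudo):
#   python3 install.py
# It shells out to apt-get, git, wget, make and systemctl, so it is meant to be
# run interactively rather than from an unattended job.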
|
#!/usr/bin/env python3
import os
import subprocess
import json
# Text styling class
class Text:
HEADER = '\033[1;34m'
SUCCESS = '\033[1;32m'
FAIL = '\033[1;21m'
ENDC = '\033[0m'
# Shell commands class
class Commands:
INSTALL_PY_DEPS = 'sudo apt-get install -y python3 python3-distutils python3-pip python3-setuptools python3-venv'
CLONE_PIIRBLASTER = 'git clone https://github.com/Electronya/PiirBlaster.git'
CREATE_PIIRBLASTER_SVC = 'sudo cp ./PiirBlaster/scripts/services/piirblaster.service /etc/systemd/system'
ENABLE_PIIRBLASTER_SVC = 'sudo systemctl enable piirblaster.service'
START_PIIRBLASTER_SVC = 'sudo systemctl start piirblaster.service'
CREATE_VRITUAL_ENV = 'python3 -m venv venv'
INSTALL_DEPENDENCIES = 'venv/bin/pip install -r requirements.txt'
DWNLD_PIGPIO = 'wget https://github.com/joan2937/pigpio/archive/master.zip'
UNZIP_PIGPIO = 'unzip master.zip'
BUILD_PIGPIO = 'make'
INSTALL_PIGPIO = 'sudo make install'
CREATE_PIGPIO_SVC = 'sudo cp ./PiirBlaster/scripts/services/pigpiod.service /etc/systemd/system'
ENABLE_PIGPIO_SVC = 'sudo systemctl enable pigpiod.service'
START_PIGPIO_SVC = 'sudo systemctl start pigpiod.service'
# Execute shell command
def execCommand(command):
process = subprocess.run(command.split(' '))
return process.returncode
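# A more defensive variant (hypothetical helper, not called by this installer):
# command.split(' ') breaks on quoted arguments or paths containing spaces, so
# this sketch tokenizes with the standard-library shlex module instead.
def execCommandSafe(command):
    import shlex  # local import keeps the sketch self-contained
    process = subprocess.run(shlex.split(command))
    return process.returncode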
# Download PIGPIO
def downloadPIGPIO():
print(f"{Text.HEADER}*** DOWNLOADING PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.DWNLD_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO DOWNLOAD FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO DOWNLOAD DONE{Text.ENDC}")
return True
# Unzip PIGPIO
def unzipPIGPIO():
print(f"{Text.HEADER}*** UNZIPPNG PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.UNZIP_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO UNZIP FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO UNZIP DONE{Text.ENDC}")
return True
# Building PIGPIO
def buildPIGPIO():
print(f"{Text.HEADER}*** BUILDING PIGPIO ***{Text.ENDC}")
os.chdir('pigpio-master')
cmdResult = execCommand(Commands.BUILD_PIGPIO)
if cmdResult != 0:
print(f"{Text.FAIL}PIGPIO BUILD FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO BUILD DONE{Text.ENDC}")
return True
# Install PIGPIO
def installPIGPIO():
print(f"{Text.HEADER}*** INSTALLING PIGPIO ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_PIGPIO)
if cmdResult !=0:
print(f"{Text.FAIL}PIGPIO INSTALL FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}PIGPIO INSTALL DONE{Text.ENDC}")
return True
# Creating PIGPIO service
def createPigpioSvc():
print(f"{Text.HEADER}*** CREATING PIGPIO SERVICE ***{Text.ENDC}")
os.chdir('..')
cmdResult = execCommand(Commands.CREATE_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Enabling PIGPIO service
def enablePigpioSvc():
print(f"{Text.HEADER}*** ENABLING PIGPIO SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.ENABLE_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}ENABLING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}ENABLING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Starting PIGPIO service
def startPigpioSvc():
print(f"{Text.HEADER}*** STARTING PIGPIO SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.START_PIGPIO_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}STARTING PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}STARTING PIGPIO SERVICE DONE{Text.ENDC}")
return True
# Setup PIGPIO service
def setupPigpioSvc():
    # TODO: Check if the service is already installed & split into multiple functions
print(f"{Text.HEADER}*** SETTING UP PIGPIO SERVICE ***{Text.ENDC}")
if (downloadPIGPIO() and unzipPIGPIO() and buildPIGPIO() and installPIGPIO() and
createPigpioSvc() and enablePigpioSvc() and startPigpioSvc()):
print(f"{Text.SUCCESS}SETTING UP PIGPIO SERVICE DONE{Text.ENDC}")
return True
print(f"{Text.FAIL}SETTING UP PIGPIO SERVICE FAILED!!!{Text.ENDC}")
return False
# Install Python dependencies
def installPythonDeps():
print(f"{Text.HEADER}*** INSTALLING PYTHON DEPENDENCIES ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_PY_DEPS)
if cmdResult != 0:
print(f"{Text.FAIL}INSTALLING PYTHON DEPS FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}INTALLING PYTHON DEPS DONE{Text.ENDC}")
return True
# Clone PiirBlaster repo
def clonePiirBlaster():
print(f"{Text.HEADER}*** CLONING PiirBlaster REPO ***{Text.ENDC}")
cmdResult = execCommand(Commands.CLONE_PIIRBLASTER)
if cmdResult != 0:
print(f"{Text.FAIL}CLONING PiirBlaster FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CLONING PiirBlaster DONE{Text.ENDC}")
return True
# Ask for MQTT configuration
def getMqttConfig():
print(f"{Text.HEADER}*** GATTERING MQTT CONFIGURATION INFO ***{Text.ENDC}")
os.chdir('PiirBlaster')
with open('config/components/mqtt.json', 'r+') as mqttConfigFile:
mqttConfig = json.loads(mqttConfigFile.read())
mqttConfig['broker']['hostname'] = input('Please enter the hostname/ip of the broker: ')
mqttConfig['broker']['port'] = int(input(f"Please enter the broker port [{mqttConfig['broker']['port']}]: ") or mqttConfig['broker']['port'])
mqttConfig['user']['name'] = input('Please enter the service username: ')
mqttConfig['user']['password'] = input('Please enter the service password: ')
        newContent = json.dumps(mqttConfig, sort_keys=True, indent=2)
        # Rewind and truncate before writing so the new config replaces the
        # old file content instead of being appended after it.
        mqttConfigFile.seek(0)
        mqttConfigFile.write(newContent)
        mqttConfigFile.truncate()
os.chdir('..')
# Creating virtual environment
def createVirtualEnv():
print(f"{Text.HEADER}*** CREATING VIRTUAL ENVIRONMENT ***{Text.ENDC}")
os.chdir('PiirBlaster')
cmdResult = execCommand(Commands.CREATE_VRITUAL_ENV)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING VIRTUAL ENVIRONEMENT FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING VIRTUAL ENVIRONMENT DONE{Text.ENDC}")
return True
# Install dependencies
def installDependencies():
print(f"{Text.HEADER}*** INSTALLING PiirBlaster DEPENDENCIES ***{Text.ENDC}")
cmdResult = execCommand(Commands.INSTALL_DEPENDENCIES)
if cmdResult != 0:
print(f"{Text.FAIL}INSTALLING PiirBlaster DEPENDENCIES FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}INSTALLING PiirBlaster DEPENDENCIES DONE{Text.ENDC}")
os.chdir('..')
return True
# Create PiirBlaster service
def createPiirBlasterSvc():
print(f"{Text.HEADER}*** CREATING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.CREATE_PIIRBLASTER_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}CREATING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}CREATING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Enabling PiirBlaster Service
def enablePiirBlasterSvc():
print(f"{Text.HEADER}*** ENABLING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.ENABLE_PIIRBLASTER_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}ENALBING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}ENABLING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Start PiirBlaster Service
def startPiirBlasterSvc():
print(f"{Text.HEADER}*** STARTING PiirBlaster SERVICE ***{Text.ENDC}")
cmdResult = execCommand(Commands.START_PIIRBLASTER_SVC)
if cmdResult != 0:
print(f"{Text.FAIL}STARTING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
print(f"{Text.SUCCESS}STARTING PiirBlaster SERVICE DONE{Text.ENDC}")
return True
# Setup PiirBlaster service
def setupPiirBlasterSvc():
    # TODO: Check if the service is already installed
getMqttConfig()
print(f"{Text.HEADER}*** SETTING UP PiirBlaster SERVICE ***{Text.ENDC}")
if (createVirtualEnv() and installDependencies() and createPiirBlasterSvc() and
enablePiirBlasterSvc() and startPiirBlasterSvc()):
print(f"{Text.SUCCESS}SETTING UP PiirBlaster SERVICE DONE{Text.ENDC}")
return True
print(f"{Text.FAIL}SETTING UP PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
return False
# print(f"{Text.HEADER}*** SERVICE CONFIGURATION ***{Text.ENDC}")
# Ask for the hostname the service will use for advertising
# hostname = input(f"Please enter the hostname that the service will use for advertising:")
# Install PiirBlaster
def install():
if (installPythonDeps() and clonePiirBlaster() and setupPigpioSvc() and setupPiirBlasterSvc()):
print(f"{Text.SUCCESS}INSATALLING PiirBlaster SERVICE DONE{Text.ENDC}")
exit()
print(f"{Text.FAIL}INTALLING PiirBlaster SERVICE FAILED!!!{Text.ENDC}")
if __name__ == '__main__':
install()
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# ReCode by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
import random
import time
from datetime import datetime
from speedtest import Speedtest
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, StartTime
from userbot.events import register
from userbot.utils import edit_or_reply, humanbytes, ayiin_cmd
from time import sleep
absen = [
"**𝙃𝙖𝙙𝙞𝙧 𝙙𝙤𝙣𝙜 𝙏𝙤𝙙** 😁",
"**𝙃𝙖𝙙𝙞𝙧 𝙆𝙖𝙠𝙖 𝙂𝙖𝙣𝙩𝙚𝙣𝙜** 😉",
"**𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝘾𝙤𝙣𝙩𝙤𝙡** 😁",
"**𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝙂𝙖𝙣𝙩𝙚𝙣𝙜** 🥵",
"**𝙃𝙖𝙙𝙞𝙧 𝙉𝙜𝙖𝙗** 😎",
"**𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝘼𝙗𝙖𝙣𝙜** 🥺",
"**𝙎𝙞 𝘾𝙖𝙠𝙚𝙥 𝙃𝙖𝙙𝙞𝙧 𝘽𝙖𝙣𝙜** 😎",
]
ayiincakep = [
"**𝙄𝙮𝙖 𝘼𝙮𝙞𝙞𝙣 𝙂𝙖𝙣𝙩𝙚𝙣𝙜 𝘽𝙖𝙣𝙜𝙚𝙩** 😍",
"**𝙂𝙖𝙣𝙩𝙚𝙣𝙜𝙣𝙮𝙖 𝙂𝙖𝙠 𝘼𝙙𝙖 𝙇𝙖𝙬𝙖𝙣** 😚",
"**𝘼𝙮𝙞𝙞𝙣 𝙂𝙖𝙣𝙩𝙚𝙣𝙜𝙣𝙮𝙖 𝘼𝙠𝙪 𝙆𝙖𝙣** 😍",
"**𝙂𝙖𝙠 𝘼𝙙𝙖 𝙎𝙖𝙞𝙣𝙜 𝙔𝙞𝙣𝙨** 😎",
"**𝘼𝙮𝙞𝙞𝙣 𝙅𝙖𝙢𝙚𝙩 𝙏𝙖𝙥𝙞 𝘽𝙤𝙤𝙣𝙜** 😚",
]
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "Jam", "Hari"]
while count < 4:
count += 1
remainder, result = divmod(
seconds, 60) if count < 3 else divmod(
seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
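# Minimal usage sketch (assumption: called standalone, outside the userbot's
# running event loop):
#   import asyncio
#   asyncio.run(get_readable_time(3661))  # -> "1Jam:1m:1s"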
@ayiin_cmd(pattern="ping$")
async def _(ping):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
xx = await edit_or_reply(ping, "**✧**")
await xx.edit("**✧✧**")
await xx.edit("**✧✧✧**")
await xx.edit("**✧✧✧✧**")
await xx.edit("**✧✧✧✧✧**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await ping.client.get_me()
await xx.edit("⚡")
sleep(3)
await xx.edit(
f"**✧ 𝚉𝙴𝚃-𝚄𝚂𝙴𝚁𝙱𝙾𝚃 ✧**\n"
f"**𝙿𝙾𝙽𝙶!!!**\n"
f"✧ **𝙿𝙸𝙽𝙶𝙴𝚁 :** `%sms`\n"
f"✧ **𝚄𝙿𝚃𝙸𝙼𝙴 :** `{uptime}` \n"
f"✧ **𝙾𝚆𝙽𝙴𝚁 :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@ayiin_cmd(pattern="xping$")
async def _(ping):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
xping = await edit_or_reply(ping, "`Pinging....`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await xping.edit(
f"**PONG!! 🍭**\n**Pinger** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration)
)
@ayiin_cmd(pattern="lping$")
async def _(ping):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
lping = await edit_or_reply(ping, "**★ PING ★**")
await lping.edit("**★★ PING ★★**")
await lping.edit("**★★★ PING ★★★**")
await lping.edit("**★★★★ PING ★★★★**")
await lping.edit("**✦҈͜͡➳ PONG!**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await ping.client.get_me()
await lping.edit(
f"❃ **Ping !!** "
f"`%sms` \n"
f"❃ **Uptime -** "
f"`{uptime}` \n"
f"**✦҈͜͡➳ Master :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@ayiin_cmd(pattern="keping$")
async def _(pong):
await get_readable_time((time.time() - StartTime))
start = datetime.now()
kopong = await edit_or_reply(pong, "**『⍟𝐊𝐎𝐍𝐓𝐎𝐋』**")
await kopong.edit("**◆◈𝐊𝐀𝐌𝐏𝐀𝐍𝐆◈◆**")
await kopong.edit("**𝐏𝐄𝐂𝐀𝐇𝐊𝐀𝐍 𝐁𝐈𝐉𝐈 𝐊𝐀𝐔 𝐀𝐒𝐔**")
await kopong.edit("**☬𝐒𝐈𝐀𝐏 𝐊𝐀𝐌𝐏𝐀𝐍𝐆 𝐌𝐄𝐍𝐔𝐌𝐁𝐔𝐊 𝐀𝐒𝐔☬**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await pong.client.get_me()
await kopong.edit(
f"**✲ 𝙺𝙾𝙽𝚃𝙾𝙻 𝙼𝙴𝙻𝙴𝙳𝚄𝙶** "
f"\n ⫸ 𝙺𝙾𝙽𝚃𝙾𝙻 `%sms` \n"
f"**✲ 𝙱𝙸𝙹𝙸 𝙿𝙴𝙻𝙴𝚁** "
f"\n ⫸ 𝙺𝙰𝙼𝙿𝙰𝙽𝙶『[{user.first_name}](tg://user?id={user.id})』 \n" % (duration)
)
# .keping & kping Coded by Koala
@ayiin_cmd(pattern=r"kping$")
async def _(pong):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
kping = await edit_or_reply(pong, "8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8==✊=D")
await kping.edit("8=✊==D")
await kping.edit("8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8==✊=D")
await kping.edit("8=✊==D")
await kping.edit("8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8===✊D💦")
await kping.edit("8====D💦💦")
await kping.edit("**CROOTTTT**")
await kping.edit("**CROOTTTT AAAHHH.....**")
end = datetime.now()
duration = (end - start).microseconds / 1000
await kping.edit("🥵")
sleep(3)
await kping.edit(
f"**𝙽𝙶𝙴𝙽𝚃𝙾𝚃 𝙰𝙷𝙷!! 🥵**\n**𝙺𝚄𝚃𝙰𝙽𝙶** : %sms\n**𝙱𝙾𝚃 𝚄𝙿𝚃𝙸𝙼𝙴** : {uptime}🕛" % (duration)
)
@ayiin_cmd(pattern="speedtest$")
async def _(speed):
xxnx = await edit_or_reply(speed, "`Running speed test...`")
test = Speedtest()
test.get_best_server()
test.download()
test.upload()
test.results.share()
result = test.results.dict()
msg = (
f"**Started at {result["timestamp"]}**\n\n"
"**Client**\n"
f"**ISP :** `{result["client"]["isp"]}`\n"
f"**Country :** `{result["client"]["country"]}`\n\n"
"**Server**\n"
f"**Name :** `{result["server"]["name"]}`\n"
f"**Country :** `{result["server"]["country"]}`\n"
f"**Sponsor :** `{result["server"]["sponsor"]}`\n\n"
f"**Ping :** `{result["ping"]}`\n"
f"**Upload :** `{humanbytes(result["upload"])}/s`\n"
f"**Download :** `{humanbytes(result["download"])}/s`"
)
await xxnx.delete()
await speed.client.send_file(
speed.chat_id,
result["share"],
caption=msg,
force_document=False,
)
@ayiin_cmd(pattern="pong$")
async def _(pong):
start = datetime.now()
xx = await edit_or_reply(pong, "`Sepong`")
await xx.edit("Sepong Sayang.....")
end = datetime.now()
duration = (end - start).microseconds / 9000
await xx.edit("🥵")
sleep(3)
await xx.edit("**𝙿𝙸𝙽𝙶!**\n`%sms`" % (duration))
# If you fork this, please do not remove this "absen" (roll call) handler 😡
@register(incoming=True, from_users=1700405732, pattern=r"^Absen$")
async def ayiinabsen(ganteng):
await ganteng.reply(random.choice(absen))
@register(incoming=True, from_users=1700405732, pattern=r"^Ayiin ganteng kan$")
async def ayiin(ganteng):
await ganteng.reply(random.choice(ayiincakep))
# Do not delete this 😡 just copy it and add your own entries
# If you delete it you will be gbanned 🥴 and your Telegram account flagged 😡
CMD_HELP.update(
{
"ping": f"**Plugin : **`ping`\
\n\n • **Syntax :** `{cmd}ping` ; `{cmd}lping` ; `{cmd}xping` ; `{cmd}kping`\
\n • **Function : **Untuk menunjukkan ping userbot.\
\n\n • **Syntax :** `{cmd}pong`\
\n • **Function : **Sama seperti perintah ping\
"
}
)
CMD_HELP.update(
{
"speedtest": f"**Plugin : **`speedtest`\
\n\n • **Syntax :** `{cmd}speedtest`\
\n • **Function : **Untuk Mengetes kecepatan server userbot.\
"
}
)
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# ReCode by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
import random
import time
from datetime import datetime
from speedtest import Speedtest
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, StartTime
from userbot.events import register
from userbot.utils import edit_or_reply, humanbytes, ayiin_cmd
from time import sleep
absen = [
"**𝙃𝙖𝙙𝙞𝙧 𝙙𝙤𝙣𝙜 𝙏𝙤𝙙** 😁",
"**𝙃𝙖𝙙𝙞𝙧 𝙆𝙖𝙠𝙖 𝙂𝙖𝙣𝙩𝙚𝙣𝙜** 😉",
"**𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝘾𝙤𝙣𝙩𝙤𝙡** 😁",
"**𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝙂𝙖𝙣𝙩𝙚𝙣𝙜** 🥵",
"**𝙃𝙖𝙙𝙞𝙧 𝙉𝙜𝙖𝙗** 😎",
"**𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝘼𝙗𝙖𝙣𝙜** 🥺",
"**𝙎𝙞 𝘾𝙖𝙠𝙚𝙥 𝙃𝙖𝙙𝙞𝙧 𝘽𝙖𝙣𝙜** 😎",
]
ayiincakep = [
"**𝙄𝙮𝙖 𝘼𝙮𝙞𝙞𝙣 𝙂𝙖𝙣𝙩𝙚𝙣𝙜 𝘽𝙖𝙣𝙜𝙚𝙩** 😍",
"**𝙂𝙖𝙣𝙩𝙚𝙣𝙜𝙣𝙮𝙖 𝙂𝙖𝙠 𝘼𝙙𝙖 𝙇𝙖𝙬𝙖𝙣** 😚",
"**𝘼𝙮𝙞𝙞𝙣 𝙂𝙖𝙣𝙩𝙚𝙣𝙜𝙣𝙮𝙖 𝘼𝙠𝙪 𝙆𝙖𝙣** 😍",
"**𝙂𝙖𝙠 𝘼𝙙𝙖 𝙎𝙖𝙞𝙣𝙜 𝙔𝙞𝙣𝙨** 😎",
"**𝘼𝙮𝙞𝙞𝙣 𝙅𝙖𝙢𝙚𝙩 𝙏𝙖𝙥𝙞 𝘽𝙤𝙤𝙣𝙜** 😚",
]
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "Jam", "Hari"]
while count < 4:
count += 1
remainder, result = divmod(
seconds, 60) if count < 3 else divmod(
seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
@ayiin_cmd(pattern="ping$")
async def _(ping):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
xx = await edit_or_reply(ping, "**✧**")
await xx.edit("**✧✧**")
await xx.edit("**✧✧✧**")
await xx.edit("**✧✧✧✧**")
await xx.edit("**✧✧✧✧✧**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await ping.client.get_me()
await xx.edit("⚡")
sleep(3)
await xx.edit(
f"**✧ 𝚉𝙴𝚃-𝚄𝚂𝙴𝚁𝙱𝙾𝚃 ✧**\n"
f"**𝙿𝙾𝙽𝙶!!!**\n"
f"✧ **𝙿𝙸𝙽𝙶𝙴𝚁 :** `%sms`\n"
f"✧ **𝚄𝙿𝚃𝙸𝙼𝙴 :** `{uptime}` \n"
f"✧ **𝙾𝚆𝙽𝙴𝚁 :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@ayiin_cmd(pattern="xping$")
async def _(ping):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
xping = await edit_or_reply(ping, "`Pinging....`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await xping.edit(
f"**PONG!! 🍭**\n**Pinger** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration)
)
@ayiin_cmd(pattern="lping$")
async def _(ping):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
lping = await edit_or_reply(ping, "**★ PING ★**")
await lping.edit("**★★ PING ★★**")
await lping.edit("**★★★ PING ★★★**")
await lping.edit("**★★★★ PING ★★★★**")
await lping.edit("**✦҈͜͡➳ PONG!**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await ping.client.get_me()
await lping.edit(
f"❃ **Ping !!** "
f"`%sms` \n"
f"❃ **Uptime -** "
f"`{uptime}` \n"
f"**✦҈͜͡➳ Master :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@ayiin_cmd(pattern="keping$")
async def _(pong):
await get_readable_time((time.time() - StartTime))
start = datetime.now()
kopong = await edit_or_reply(pong, "**『⍟𝐊𝐎𝐍𝐓𝐎𝐋』**")
await kopong.edit("**◆◈𝐊𝐀𝐌𝐏𝐀𝐍𝐆◈◆**")
await kopong.edit("**𝐏𝐄𝐂𝐀𝐇𝐊𝐀𝐍 𝐁𝐈𝐉𝐈 𝐊𝐀𝐔 𝐀𝐒𝐔**")
await kopong.edit("**☬𝐒𝐈𝐀𝐏 𝐊𝐀𝐌𝐏𝐀𝐍𝐆 𝐌𝐄𝐍𝐔𝐌𝐁𝐔𝐊 𝐀𝐒𝐔☬**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await pong.client.get_me()
await kopong.edit(
f"**✲ 𝙺𝙾𝙽𝚃𝙾𝙻 𝙼𝙴𝙻𝙴𝙳𝚄𝙶** "
f"\n ⫸ 𝙺𝙾𝙽𝚃𝙾𝙻 `%sms` \n"
f"**✲ 𝙱𝙸𝙹𝙸 𝙿𝙴𝙻𝙴𝚁** "
f"\n ⫸ 𝙺𝙰𝙼𝙿𝙰𝙽𝙶『[{user.first_name}](tg://user?id={user.id})』 \n" % (duration)
)
# .keping & kping Coded by Koala
@ayiin_cmd(pattern=r"kping$")
async def _(pong):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
kping = await edit_or_reply(pong, "8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8==✊=D")
await kping.edit("8=✊==D")
await kping.edit("8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8==✊=D")
await kping.edit("8=✊==D")
await kping.edit("8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8===✊D💦")
await kping.edit("8====D💦💦")
await kping.edit("**CROOTTTT**")
await kping.edit("**CROOTTTT AAAHHH.....**")
end = datetime.now()
duration = (end - start).microseconds / 1000
await kping.edit("🥵")
sleep(3)
await kping.edit(
f"**𝙽𝙶𝙴𝙽𝚃𝙾𝚃 𝙰𝙷𝙷!! 🥵**\n**𝙺𝚄𝚃𝙰𝙽𝙶** : %sms\n**𝙱𝙾𝚃 𝚄𝙿𝚃𝙸𝙼𝙴** : {uptime}🕛" % (duration)
)
@ayiin_cmd(pattern="speedtest$")
async def _(speed):
xxnx = await edit_or_reply(speed, "`Running speed test...`")
test = Speedtest()
test.get_best_server()
test.download()
test.upload()
test.results.share()
result = test.results.dict()
msg = (
f"**Started at {result['timestamp']}**\n\n"
"**Client**\n"
f"**ISP :** `{result['client']['isp']}`\n"
f"**Country :** `{result['client']['country']}`\n\n"
"**Server**\n"
f"**Name :** `{result['server']['name']}`\n"
f"**Country :** `{result['server']['country']}`\n"
f"**Sponsor :** `{result['server']['sponsor']}`\n\n"
f"**Ping :** `{result['ping']}`\n"
f"**Upload :** `{humanbytes(result['upload'])}/s`\n"
f"**Download :** `{humanbytes(result['download'])}/s`"
)
await xxnx.delete()
await speed.client.send_file(
speed.chat_id,
result["share"],
caption=msg,
force_document=False,
)
@ayiin_cmd(pattern="pong$")
async def _(pong):
start = datetime.now()
xx = await edit_or_reply(pong, "`Sepong`")
await xx.edit("Sepong Sayang.....")
end = datetime.now()
duration = (end - start).microseconds / 9000
await xx.edit("🥵")
sleep(3)
await xx.edit("**𝙿𝙸𝙽𝙶!**\n`%sms`" % (duration))
# If you fork this, please do not remove this "absen" (roll call) handler 😡
@register(incoming=True, from_users=1700405732, pattern=r"^Absen$")
async def ayiinabsen(ganteng):
await ganteng.reply(random.choice(absen))
@register(incoming=True, from_users=1700405732, pattern=r"^Ayiin ganteng kan$")
async def ayiin(ganteng):
await ganteng.reply(random.choice(ayiincakep))
# Do not delete this 😡 just copy it and add your own entries
# If you delete it you will be gbanned 🥴 and your Telegram account flagged 😡
CMD_HELP.update(
{
"ping": f"**Plugin : **`ping`\
\n\n • **Syntax :** `{cmd}ping` ; `{cmd}lping` ; `{cmd}xping` ; `{cmd}kping`\
\n • **Function : **Untuk menunjukkan ping userbot.\
\n\n • **Syntax :** `{cmd}pong`\
\n • **Function : **Sama seperti perintah ping\
"
}
)
CMD_HELP.update(
{
"speedtest": f"**Plugin : **`speedtest`\
\n\n • **Syntax :** `{cmd}speedtest`\
\n • **Function : **Untuk Mengetes kecepatan server userbot.\
"
}
)
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import pickle
import sys
from argparse import Namespace
from copy import deepcopy
from pathlib import Path
from unittest.mock import ANY, call, patch
import cloudpickle
import pytest
import torch
from omegaconf import OmegaConf
from torch.optim import SGD
from torch.utils.data import DataLoader
import tests.helpers.utils as tutils
from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler
from pytorch_lightning.plugins import DDPSpawnPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import DeviceType, DistributedType
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.seed import seed_everything
from tests.base import EvalModelTemplate
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_no_val_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", str(tmpdir))
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
# fit model
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# assert ckpt has hparams
ckpt = torch.load(new_weights_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), "hyper_parameters missing from checkpoints"
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
model_2 = EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
model_2.eval()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_no_val_end_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
model_2 = EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
model_2.eval()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_strict_model_load(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
model = EvalModelTemplate()
# Extra layer
model.c_d3 = torch.nn.Linear(model.hidden_dim, model.hidden_dim)
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
try:
EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
# todo: specify the possible exception
except Exception:
failed = True
else:
failed = False
assert failed, "Model should not been loaded since the extra layer added."
failed = False
try:
EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
strict=False,
)
# todo: specify the possible exception
except Exception:
failed = True
assert not failed, "Model should be loaded due to strict=False."
@pytest.mark.parametrize("accumulate_grad_batches", (1, 2, 3))
def test_trainer_accumulate_grad_batches_zero_grad(tmpdir, accumulate_grad_batches):
with patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
accumulate_grad_batches=accumulate_grad_batches,
)
trainer.fit(model)
assert sgd_zero_grad.call_count == math.ceil(trainer.limit_train_batches / accumulate_grad_batches)
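# Worked example for the assertion above: with limit_train_batches=20,
# zero_grad() is expected to run ceil(20 / accumulate_grad_batches) times,
# i.e. 20, 10 and 7 calls for accumulate_grad_batches of 1, 2 and 3.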
@pytest.mark.parametrize(
["accumulate_grad_batches", "limit_train_batches"],
[
({
1: 2,
3: 4
}, 1.0),
({
1: 2,
3: 4
}, 0.5), # not to be divisible by accumulate_grad_batches on purpose
(3, 1.0),
(3, 0.8), # not to be divisible by accumulate_grad_batches on purpose
(4, 1.0),
(4, 0.7), # not to be divisible by accumulate_grad_batches on purpose
],
)
def test_gradient_accumulation_scheduling_last_batch(tmpdir, accumulate_grad_batches, limit_train_batches):
""" Verify optimizer.step() applied to last batch while grad accumulation """
class TestModel(BoringModel):
def state_dict(self, *args, **kwargs):
return deepcopy(super().state_dict(*args, **kwargs))
def check(self, d1, d2, equal=True):
keys = d1.keys() | d2.keys()
values = [torch.equal(d1[k], d2[k]) for k in keys]
return all(values) if equal else not any(values)
def backward(self, *args, **kwargs) -> None:
pre_bwd_state_dict = self.state_dict()
assert self.check(self.start_state_dict, pre_bwd_state_dict)
out = super().backward(*args, **kwargs)
# state dict is equal, just the gradients changed
assert self.check(pre_bwd_state_dict, self.state_dict())
return out
def optimizer_step(self, *args, **kwargs):
pre_opt_step_state_dict = self.state_dict()
assert self.check(self.start_state_dict, pre_opt_step_state_dict)
# this calls `backward` and `on_after_backward` inside the closure
out = super().optimizer_step(*args, **kwargs)
# the state dict changed
assert self.check(pre_opt_step_state_dict, self.state_dict(), equal=False)
self.opt_step_called = True
return out
def on_train_batch_start(self, *_):
self.start_state_dict = self.state_dict()
self.opt_step_called = False
def on_train_batch_end(self, outputs, batch, batch_idx, *_):
end_state_dict = self.state_dict()
is_last_batch = (batch_idx + 1) == self.trainer.num_training_batches
if is_last_batch or self.opt_step_called:
assert self.check(self.start_state_dict, end_state_dict, equal=False)
else:
assert self.check(self.start_state_dict, end_state_dict)
model = TestModel()
trainer = Trainer(
accumulate_grad_batches=accumulate_grad_batches,
max_epochs=2,
limit_train_batches=limit_train_batches,
limit_val_batches=0,
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
def test_loading_meta_tags(tmpdir):
""" test for backward compatibility to meta_tags.csv """
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
# save tags
logger = tutils.get_default_logger(tmpdir)
logger.log_hyperparams(Namespace(some_str="a_str", an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
# load hparams
path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(hparams_path)
# save as legacy meta_tags.csv
tags_path = os.path.join(path_expt_dir, "meta_tags.csv")
save_hparams_to_tags_csv(tags_path, hparams)
tags = load_hparams_from_tags_csv(tags_path)
assert hparams == tags
def test_loading_yaml(tmpdir):
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
# save tags
logger = tutils.get_default_logger(tmpdir)
logger.log_hyperparams(Namespace(some_str="a_str", an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
# load hparams
path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(path_expt_dir, "hparams.yaml")
tags = load_hparams_from_yaml(hparams_path)
assert tags["batch_size"] == 32 and tags["hidden_dim"] == 1000
@pytest.mark.parametrize(
"save_top_k,save_last,expected_files",
[
pytest.param(-1, False, [f"epoch={i}.ckpt" for i in range(5)], id="CASE K=-1 (all)"),
pytest.param(1, False, {"epoch=4.ckpt"}, id="CASE K=1 (2.5, epoch 4)"),
pytest.param(2, False, [f"epoch={i}.ckpt" for i in (2, 4)], id="CASE K=2 (2.5 epoch 4, 2.8 epoch 2)"),
pytest.param(4, False, [f"epoch={i}.ckpt" for i in range(1, 5)], id="CASE K=4 (save all 4 base)"),
pytest.param(3, False, [f"epoch={i}.ckpt" for i in range(2, 5)], id="CASE K=3 (save the 2nd, 3rd, 4th model)"),
pytest.param(1, True, {"epoch=4.ckpt", "last.ckpt"}, id="CASE K=1 (save the 4th model and the last model)"),
],
)
def test_model_checkpoint_options(tmpdir, save_top_k, save_last, expected_files):
"""Test ModelCheckpoint options."""
def mock_save_function(filepath, *args):
open(filepath, "a").close()
# simulated losses
losses = [10, 9, 2.8, 5, 2.5]
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir,
filename='{epoch}',
monitor='checkpoint_on',
save_top_k=save_top_k,
save_last=save_last,
verbose=True
)
trainer = Trainer()
trainer.state.fn = TrainerFn.FITTING
trainer.save_checkpoint = mock_save_function
# emulate callback's calls during the training
for i, loss in enumerate(losses):
trainer.fit_loop.current_epoch = i
trainer.fit_loop.global_step = i
trainer.logger_connector.callback_metrics.update({"checkpoint_on": loss})
checkpoint_callback.on_validation_end(trainer, trainer.lightning_module)
file_lists = set(os.listdir(tmpdir))
assert len(file_lists) == len(
expected_files
), f"Should save {len(expected_files)} models when save_top_k={save_top_k} but found={file_lists}"
# verify correct naming
for fname in expected_files:
assert fname in file_lists
def test_model_checkpoint_only_weights(tmpdir):
"""Tests use case where ModelCheckpoint is configured to save only model weights, and
user tries to load checkpoint to resume training.
"""
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_weights_only=True)],
)
# fit model
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
# assert saved checkpoint has no trainer data
checkpoint = torch.load(checkpoint_path)
assert "optimizer_states" not in checkpoint, "checkpoint should contain only model weights"
assert "lr_schedulers" not in checkpoint, "checkpoint should contain only model weights"
# assert loading model works when checkpoint has only weights
assert EvalModelTemplate.load_from_checkpoint(checkpoint_path=checkpoint_path)
# directly save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path, weights_only=True)
# assert saved checkpoint has no trainer data
checkpoint = torch.load(new_weights_path)
assert "optimizer_states" not in checkpoint, "checkpoint should contain only model weights"
assert "lr_schedulers" not in checkpoint, "checkpoint should contain only model weights"
# assert restoring train state fails
with pytest.raises(KeyError, match="checkpoint contains only the model"):
trainer.checkpoint_connector.restore(new_weights_path)
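# Note on the behaviour exercised above: saving with weights_only=True drops
# the "optimizer_states" and "lr_schedulers" entries from the checkpoint, so
# such a file can still seed load_from_checkpoint() but cannot resume training,
# and checkpoint_connector.restore() raises a KeyError for it.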
def test_model_freeze_unfreeze():
model = EvalModelTemplate()
model.freeze()
model.unfreeze()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_resume_from_checkpoint_epoch_restored(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Verify resuming from checkpoint runs the right number of epochs"""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
class TestModel(BoringModel):
# Model that tracks epochs and batches seen
num_epochs_end_seen = 0
num_batches_seen = 0
num_on_load_checkpoint_called = 0
def on_epoch_end(self):
self.num_epochs_end_seen += 1
def on_train_batch_start(self, *_):
self.num_batches_seen += 1
def on_load_checkpoint(self, _):
self.num_on_load_checkpoint_called += 1
model = TestModel()
trainer = Trainer(
max_epochs=2,
limit_train_batches=0.65,
limit_val_batches=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_top_k=-1)],
default_root_dir=tmpdir,
val_check_interval=1.0,
progress_bar_refresh_rate=0,
logger=False,
weights_summary=None,
)
trainer.fit(model)
# `on_epoch_end` will be called once for val_sanity, twice for train, twice for val
assert model.num_epochs_end_seen == 1 + 2 + 2
assert model.num_batches_seen == trainer.num_training_batches * 2
assert model.num_on_load_checkpoint_called == 0
# Other checkpoints can be uncommented if/when resuming mid-epoch is supported
checkpoints = Path(trainer.checkpoint_callback.dirpath).glob("*.ckpt")
if url_ckpt:
# transform local paths into url checkpoints
ip, port = tmpdir_server
checkpoints = [f"http://{ip}:{port}/" + ckpt.name for ckpt in checkpoints]
for ckpt in checkpoints:
next_model = TestModel()
state = pl_load(ckpt)
# Resume training
new_trainer = Trainer(
default_root_dir=tmpdir,
resume_from_checkpoint=ckpt,
max_epochs=2,
)
new_trainer.fit(next_model)
assert state["global_step"] + next_model.num_batches_seen == trainer.num_training_batches * trainer.max_epochs
assert next_model.num_on_load_checkpoint_called == 1
def test_trainer_max_steps_and_epochs(tmpdir):
"""Verify model trains according to specified max steps"""
model = BoringModel()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
# define less train steps than epochs
trainer_kwargs = {
'limit_train_batches': 0.5,
'default_root_dir': tmpdir,
'max_epochs': 3,
'max_steps': num_train_samples + 10,
'logger': False,
'weights_summary': None,
'progress_bar_refresh_rate': 0,
}
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
# define less train epochs than steps
trainer_kwargs['max_epochs'] = 2
trainer_kwargs['max_steps'] = 3 * 2 * num_train_samples
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == num_train_samples * trainer.max_epochs
assert trainer.current_epoch == trainer.max_epochs - 1, "Model did not stop at max_epochs"
def test_trainer_min_steps_and_epochs(tmpdir):
"""Verify model trains according to specified min steps"""
model = EvalModelTemplate()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
trainer_kwargs = {
'limit_train_batches': 0.5,
'default_root_dir': tmpdir,
# define callback for stopping the model
'callbacks': [EarlyStopping(monitor="early_stop_on", min_delta=1.0)],
'val_check_interval': 2,
'min_epochs': 1,
'max_epochs': 7,
# define less min steps than 1 epoch
'min_steps': num_train_samples // 2,
'logger': False,
'weights_summary': None,
'progress_bar_refresh_rate': 0,
}
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch > 0
assert trainer.global_step >= num_train_samples, "Model did not train for at least min_epochs"
# define less epochs than min_steps
trainer_kwargs["min_steps"] = math.floor(num_train_samples * 1.5)
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch > 0
assert trainer.global_step >= math.floor(num_train_samples * 1.5), "Model did not train for at least min_steps"
def test_trainer_min_steps_and_min_epochs_not_reached(tmpdir, caplog):
""" Test that min_epochs/min_steps in Trainer are enforced even if EarlyStopping is triggered. """
class TestModel(BoringModel):
training_step_invoked = 0
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
output["loss"] = output["loss"] * 0.0 # force minimal loss to trigger early stopping
self.log("loss", output["loss"])
self.training_step_invoked += 1
assert not self.trainer.should_stop
return output
model = TestModel()
early_stop = EarlyStopping(monitor="loss", patience=0, check_on_train_epoch_end=True)
min_epochs = 5
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
min_epochs=min_epochs,
limit_val_batches=0,
limit_train_batches=2,
callbacks=[early_stop]
)
with caplog.at_level(logging.INFO, logger="pytorch_lightning.trainer.trainer"):
trainer.fit(model)
message = f"minimum epochs ({min_epochs}) or minimum steps (None) has not been met. Training will continue"
num_messages = len([record.message for record in caplog.records if message in record.message])
assert num_messages == min_epochs - 2
assert model.training_step_invoked == min_epochs * 2
def test_trainer_max_steps_accumulate_batches(tmpdir):
"""Verify model trains according to specified max steps with grad accumulated batches"""
model = BoringModel()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
# define less train steps than epochs
trainer = Trainer(
limit_train_batches=0.5,
default_root_dir=tmpdir,
max_steps=num_train_samples + 10,
accumulate_grad_batches=10,
logger=False,
weights_summary=None,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
def test_benchmark_option(tmpdir):
"""Verify benchmark option."""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple
# verify torch.backends.cudnn.benchmark is not turned on
assert not torch.backends.cudnn.benchmark
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
benchmark=True,
)
trainer.fit(model)
# verify training completed
assert trainer.state.finished, f"Training failed with {trainer.state}"
# verify torch.backends.cudnn.benchmark is not turned off
assert torch.backends.cudnn.benchmark
@pytest.mark.parametrize("ckpt_path", (None, "best", "specific"))
@pytest.mark.parametrize("save_top_k", (-1, 0, 1, 2))
@pytest.mark.parametrize("fn", ("validate", "test", "predict"))
def test_tested_checkpoint_path(tmpdir, ckpt_path, save_top_k, fn):
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
self.log("foo", -batch_idx)
return super().validation_step(batch, batch_idx)
def test_step(self, *args):
return self.validation_step(*args)
def predict_step(self, batch, *_):
return self(batch)
model = TestModel()
model.test_epoch_end = None
trainer = Trainer(
max_epochs=2,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
progress_bar_refresh_rate=0,
default_root_dir=tmpdir,
callbacks=[ModelCheckpoint(monitor="foo", save_top_k=save_top_k)],
)
trainer.fit(model)
trainer_fn = getattr(trainer, fn)
path_attr = f"{fn}{"d" if fn == "validate" else "ed"}_ckpt_path"
assert getattr(trainer, path_attr) is None
if ckpt_path == "best":
# ckpt_path is 'best', meaning we load the best weights
if save_top_k == 0:
with pytest.raises(MisconfigurationException, match=".*is not configured to save the best.*"):
trainer_fn(ckpt_path=ckpt_path)
else:
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path
elif ckpt_path is None:
# ckpt_path is None, meaning we don't load any checkpoints and
# use the weights from the end of training
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) is None
else:
# specific checkpoint, pick one from saved ones
if save_top_k == 0:
with pytest.raises(FileNotFoundError):
trainer_fn(ckpt_path="random.ckpt")
else:
ckpt_path = str(
list((Path(tmpdir) / f"lightning_logs/version_{trainer.logger.version}/checkpoints").iterdir()
)[0].absolute()
)
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == ckpt_path
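# Summary of the ckpt_path modes covered above: "best" resolves to
# checkpoint_callback.best_model_path (or raises MisconfigurationException when
# save_top_k=0), None keeps the in-memory weights from the end of fit(), and an
# explicit path is used verbatim; the resolved value is stored on
# trainer.validated_ckpt_path / tested_ckpt_path / predicted_ckpt_path.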
def test_disabled_training(tmpdir):
"""Verify that `limit_train_batches=0` disables the training loop unless `fast_dev_run=True`."""
class CurrentModel(BoringModel):
training_step_invoked = False
training_epoch_end_invoked = False
def training_step(self, *args, **kwargs):
self.training_step_invoked = True
return super().training_step(*args, **kwargs)
def training_epoch_end(self, *args, **kwargs):
self.training_epoch_end_invoked = True
return super().training_epoch_end(*args, **kwargs)
model = CurrentModel()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.0,
limit_val_batches=0.2,
fast_dev_run=False,
)
before_state_dict = deepcopy(model.state_dict())
trainer = Trainer(**trainer_options)
trainer.fit(model)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))
# check that limit_train_batches=0 turns off training
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert not model.training_step_invoked, "`training_step` should not run when `limit_train_batches=0`"
assert not model.training_epoch_end_invoked, "`training_epoch_end` should not run when `limit_train_batches=0`"
# check that limit_train_batches has no influence when fast_dev_run is turned on
model = CurrentModel()
trainer_options.update(fast_dev_run=True)
before_state_dict = deepcopy(model.state_dict())
trainer = Trainer(**trainer_options)
trainer.fit(model)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert not torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert model.training_step_invoked, "did not run `training_step` with `fast_dev_run=True`"
assert model.training_epoch_end_invoked, "did not run `training_epoch_end` with `fast_dev_run=True`"
def test_disabled_validation(tmpdir):
"""Verify that `limit_val_batches=0` disables the validation loop unless `fast_dev_run=True`."""
class CurrentModel(EvalModelTemplate):
validation_step_invoked = False
validation_epoch_end_invoked = False
def validation_step(self, *args, **kwargs):
self.validation_step_invoked = True
return super().validation_step(*args, **kwargs)
def validation_epoch_end(self, *args, **kwargs):
self.validation_epoch_end_invoked = True
return super().validation_epoch_end(*args, **kwargs)
hparams = EvalModelTemplate.get_default_hparams()
model = CurrentModel(**hparams)
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.0,
fast_dev_run=False,
)
trainer = Trainer(**trainer_options)
trainer.fit(model)
# check that limit_val_batches=0 turns off validation
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 1
assert not model.validation_step_invoked, "`validation_step` should not run when `limit_val_batches=0`"
assert not model.validation_epoch_end_invoked, "`validation_epoch_end` should not run when `limit_val_batches=0`"
# check that limit_val_batches has no influence when fast_dev_run is turned on
model = CurrentModel(**hparams)
trainer_options.update(fast_dev_run=True)
trainer = Trainer(**trainer_options)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert model.validation_step_invoked, "did not run `validation_step` with `fast_dev_run=True`"
assert model.validation_epoch_end_invoked, "did not run `validation_epoch_end` with `fast_dev_run=True`"
def test_nan_loss_detection(tmpdir):
class CurrentModel(BoringModel):
test_batch_inf = 3
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
if batch_idx == self.test_batch_inf:
if isinstance(output, dict):
output["loss"] *= torch.tensor(math.inf) # make loss infinite
else:
output /= 0
return output
model = CurrentModel()
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=(model.test_batch_inf + 1),
terminate_on_nan=True,
)
with pytest.raises(ValueError, match=r".*The loss returned in `training_step` is.*"):
trainer.fit(model)
assert trainer.global_step == model.test_batch_inf
for param in model.parameters():
assert torch.isfinite(param).all()
def test_nan_params_detection(tmpdir):
class CurrentModel(BoringModel):
test_batch_nan = 3
def on_after_backward(self):
if self.global_step == self.test_batch_nan:
# simulate parameter that became nan
torch.nn.init.constant_(self.layer.bias, math.nan)
model = CurrentModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=(model.test_batch_nan + 1),
terminate_on_nan=True,
)
with pytest.raises(ValueError, match=r".*Detected nan and/or inf values in `layer.bias`.*"):
trainer.fit(model)
assert trainer.global_step == model.test_batch_nan
# after aborting the training loop, model still has nan-valued params
params = torch.cat([param.view(-1) for param in model.parameters()])
assert not torch.isfinite(params).all()
def test_trainer_interrupted_flag(tmpdir):
"""Test the flag denoting that a user interrupted training."""
model = EvalModelTemplate()
class InterruptCallback(Callback):
def __init__(self):
super().__init__()
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
raise KeyboardInterrupt
class HandleInterruptCallback(Callback):
def __init__(self):
super().__init__()
self.exc_info = None
def on_keyboard_interrupt(self, trainer, pl_module):
self.exc_info = sys.exc_info()
interrupt_callback = InterruptCallback()
handle_interrupt_callback = HandleInterruptCallback()
trainer = Trainer(
callbacks=[interrupt_callback, handle_interrupt_callback],
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
progress_bar_refresh_rate=0,
logger=False,
default_root_dir=tmpdir,
)
assert not trainer.interrupted
assert handle_interrupt_callback.exc_info is None
trainer.fit(model)
assert trainer.interrupted
assert isinstance(handle_interrupt_callback.exc_info[1], KeyboardInterrupt)
def test_gradient_clipping(tmpdir):
"""
Test gradient clipping
"""
tutils.reset_seed()
model = EvalModelTemplate()
trainer = Trainer(
max_steps=1,
max_epochs=1,
gradient_clip_val=1.0,
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
# for the test
model.prev_called_batch_idx = 0
trainer.fit(model)
def test_gradient_clipping_by_value(tmpdir):
"""
Test gradient clipping by value
"""
tutils.reset_seed()
model = BoringModel()
grad_clip_val = 1e-10
trainer = Trainer(
max_steps=1,
max_epochs=1,
gradient_clip_val=grad_clip_val,
gradient_clip_algorithm='value',
default_root_dir=tmpdir
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]
grad_max = torch.max(torch.stack(grad_max_list))
assert abs(grad_max.item() - grad_clip_val) < 1e-11, \
f"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} ."
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
# for the test
model.prev_called_batch_idx = 0
trainer.fit(model)
@RunIf(min_gpus=1, amp_native=True)
def test_gradient_clipping_fp16(tmpdir):
"""
Test gradient clipping with fp16
"""
tutils.reset_seed()
model = EvalModelTemplate()
trainer = Trainer(
max_steps=1,
max_epochs=1,
precision=16,
gpus=1,
gradient_clip_val=1.0,
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
model.prev_called_batch_idx = 0
trainer.fit(model)
@RunIf(min_gpus=1, amp_native=True)
def test_gradient_clipping_by_value_fp16(tmpdir):
"""
Test gradient clipping by value with fp16
"""
tutils.reset_seed()
model = BoringModel()
grad_clip_val = 1e-10
trainer = Trainer(
max_steps=1,
max_epochs=1,
precision=16,
gpus=1,
gradient_clip_val=grad_clip_val,
gradient_clip_algorithm='value',
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]
grad_max = torch.max(torch.stack(grad_max_list))
assert abs(grad_max.item() - grad_clip_val) < 1e-11, \
f"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} ."
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
model.prev_called_batch_idx = 0
trainer.fit(model)
def test_gpu_choice(tmpdir):
trainer_options = dict(default_root_dir=tmpdir)
# Only run if CUDA is available
if not torch.cuda.is_available():
return
num_gpus = torch.cuda.device_count()
Trainer(**trainer_options, gpus=num_gpus, auto_select_gpus=True)
with pytest.raises(RuntimeError, match=r".*No GPUs available.*"):
Trainer(**trainer_options, gpus=num_gpus + 1, auto_select_gpus=True)
@pytest.mark.parametrize(
"limit_val_batches",
[0.0, 1, 1.0, 0.5, 5],
)
def test_num_sanity_val_steps(tmpdir, limit_val_batches):
"""
Test that the number of sanity check batches is clipped to `limit_val_batches`.
"""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
num_sanity_val_steps = 4
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=num_sanity_val_steps,
limit_val_batches=limit_val_batches,
max_steps=1,
)
assert trainer.num_sanity_val_steps == num_sanity_val_steps
with patch.object(
trainer.fit_loop.validation_loop.epoch_loop,
"evaluation_step",
wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step
) as mocked:
val_dataloaders = model.val_dataloader__multiple_mixed_length()
trainer.fit(model, val_dataloaders=val_dataloaders)
assert mocked.call_count == sum(
min(num_sanity_val_steps, num_batches) for num_batches in trainer.num_val_batches
)
@pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.3])
def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):
"""
Test that `num_sanity_val_steps=-1` runs through all validation data once, i.e. as many batches
as allowed by the `limit_val_batches` Trainer argument.
"""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=-1,
limit_val_batches=limit_val_batches,
max_steps=1,
)
assert trainer.num_sanity_val_steps == float("inf")
with patch.object(
trainer.fit_loop.validation_loop.epoch_loop,
"evaluation_step",
wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step
) as mocked:
val_dataloaders = model.val_dataloader__multiple()
trainer.fit(model, val_dataloaders=val_dataloaders)
assert mocked.call_count == sum(trainer.num_val_batches)
@pytest.mark.parametrize(
"trainer_kwargs,expected",
[
(
dict(accelerator=None, gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="dp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp", num_nodes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp_cpu", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator=None, gpus=1),
dict(_distrib_type=None, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="dp", gpus=1),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="ddp", gpus=1),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="ddp_cpu", num_processes=2, gpus=1),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=1),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator=None, gpus=2),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(accelerator="dp", gpus=2),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
(
dict(accelerator="ddp", gpus=2),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=2),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
],
)
def test_trainer_config(trainer_kwargs, expected, monkeypatch):
if trainer_kwargs["gpus"] is not None:
monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
monkeypatch.setattr(torch.cuda, "device_count", lambda: trainer_kwargs["gpus"])
trainer = Trainer(**trainer_kwargs)
assert len(expected) == 4
for k, v in expected.items():
assert getattr(trainer, k) == v, f"Failed {k}: {v}"
def test_trainer_subclassing():
model = EvalModelTemplate()
# First way of pulling out args from signature is to list them
class TrainerSubclass(Trainer):
def __init__(self, custom_arg, *args, custom_kwarg="test", **kwargs):
super().__init__(*args, **kwargs)
self.custom_arg = custom_arg
self.custom_kwarg = custom_kwarg
trainer = TrainerSubclass(123, custom_kwarg="custom", fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.custom_arg == 123
assert trainer.custom_kwarg == "custom"
assert trainer.fast_dev_run
# Second way is to pop from the dict
# It's a special case because Trainer does not have any positional args
class TrainerSubclass(Trainer):
def __init__(self, **kwargs):
self.custom_arg = kwargs.pop("custom_arg", 0)
self.custom_kwarg = kwargs.pop("custom_kwarg", "test")
super().__init__(**kwargs)
trainer = TrainerSubclass(custom_kwarg="custom", fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.custom_kwarg == "custom"
assert trainer.fast_dev_run
# when we pass in an unknown arg, the base class should complain
with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'abcdefg'"):
TrainerSubclass(abcdefg="unknown_arg")
@pytest.mark.parametrize(
"trainer_params", [
OmegaConf.create(dict(max_epochs=1, gpus=1)),
OmegaConf.create(dict(max_epochs=1, gpus=[0])),
]
)
@RunIf(min_gpus=1)
def test_trainer_omegaconf(trainer_params):
Trainer(**trainer_params)
def test_trainer_pickle(tmpdir):
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
)
pickle.dumps(trainer)
cloudpickle.dumps(trainer)
@pytest.mark.parametrize("stage", ("fit", "validate", "test"))
def test_trainer_setup_call(tmpdir, stage):
"""Test setup call gets the correct stage"""
class CurrentModel(BoringModel):
def setup(self, stage):
self.stage = stage
class TrainerSubclass(Trainer):
def setup(self, model, stage):
assert model is not None
self.stage = stage
model = CurrentModel()
# fit model
trainer = TrainerSubclass(default_root_dir=tmpdir, max_epochs=1, checkpoint_callback=False)
if stage == "fit":
trainer.fit(model)
elif stage == "validate":
trainer.validate(model, ckpt_path=None)
else:
trainer.test(model, ckpt_path=None)
assert trainer.stage == stage
assert trainer.lightning_module.stage == stage
@pytest.mark.parametrize(
"train_batches, max_steps, log_interval",
[
(10, 10, 1),
(3, 10, 1),
(3, 10, 5),
],
)
@patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics")
def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval):
class TestModel(BoringModel):
def training_step(self, *args, **kwargs):
self.log("foo", -1)
return super().training_step(*args, **kwargs)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
log_every_n_steps=log_interval,
flush_logs_every_n_steps=log_interval,
limit_train_batches=train_batches,
limit_val_batches=0,
max_steps=max_steps,
)
trainer.fit(model)
expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)]
log_metrics_mock.assert_has_calls(expected_calls)
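# A worked example (illustrative only, not exercised by the suite) of the `expected_calls`
# computed in `test_log_every_n_steps` above: logging happens on every global step whose
# 1-based index is a multiple of `log_every_n_steps`. The default arguments mirror one of the
# parametrizations above (log_interval=5, max_steps=10) and are assumptions for illustration.
def _logged_steps_worked_example(log_interval=5, max_steps=10):
    logged = [step for step in range(max_steps) if (step + 1) % log_interval == 0]
    assert logged == list(range(log_interval - 1, max_steps, log_interval)) == [4, 9]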
class TestLightningDataModule(LightningDataModule):
def __init__(self, dataloaders):
super().__init__()
self._dataloaders = dataloaders
def test_dataloader(self):
return self._dataloaders
def predict_dataloader(self):
return self._dataloaders
class CustomPredictionWriter(BasePredictionWriter):
write_on_batch_end_called = False
write_on_epoch_end_called = False
def __init__(self, output_dir: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_dir = output_dir
def write_on_batch_end(self, trainer, pl_module, prediction, batch_indices, *args, **kwargs):
assert prediction.shape == torch.Size([1, 2])
if trainer.accelerator_connector.is_distributed:
assert len(batch_indices) == 1
else:
assert batch_indices is None
self.write_on_batch_end_called = True
def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):
expected = 1 if trainer.accelerator_connector.is_distributed else 2
assert len(predictions) == 2
assert len(predictions[0]) == expected
if trainer.accelerator_connector.is_distributed:
assert len(batch_indices) == 2
assert len(batch_indices[0]) == expected
else:
assert batch_indices is None
self.write_on_epoch_end_called = True
def on_predict_epoch_end(self, trainer, pl_module, outputs):
if trainer.accelerator_connector.is_distributed:
for idx in range(2):
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler.sampler, UnrepeatedDistributedSampler)
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler, IndexBatchSamplerWrapper)
super().on_predict_epoch_end(trainer, pl_module, outputs)
def predict(
tmpdir, accelerator, gpus, num_processes, model=None, plugins=None, datamodule=True, pbrr=None, use_callbacks=True
):
dataloaders = [torch.utils.data.DataLoader(RandomDataset(32, 2)), torch.utils.data.DataLoader(RandomDataset(32, 2))]
model = model or BoringModel()
dm = TestLightningDataModule(dataloaders)
cb = CustomPredictionWriter(tmpdir, write_interval="batch")
cb_1 = CustomPredictionWriter(tmpdir, write_interval="epoch")
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
accelerator=accelerator,
gpus=gpus,
num_processes=num_processes,
plugins=plugins,
progress_bar_refresh_rate=pbrr,
callbacks=[cb, cb_1] if use_callbacks else []
)
if accelerator == "ddp_spawn":
with pytest.raises(MisconfigurationException):
trainer.predict(model, datamodule=dm, return_predictions=True)
if datamodule:
results = trainer.predict(model, datamodule=dm)
else:
results = trainer.predict(model, dataloaders=dataloaders)
if not isinstance(trainer.training_type_plugin, DDPSpawnPlugin):
if use_callbacks:
assert cb.write_on_batch_end_called
assert not cb.write_on_epoch_end_called
assert not cb_1.write_on_batch_end_called
assert cb_1.write_on_epoch_end_called
num_samples = 1 if accelerator == "ddp" else 2
assert len(results) == 2
assert len(results[0]) == num_samples
assert results[0][0].shape == torch.Size([1, 2])
def test_trainer_predict_no_return(tmpdir):
"""
Test trainer.predict warns when nothing is returned
"""
class CustomBoringModel(BoringModel):
def predict_step(self, batch, batch_idx, dataloader_idx=None):
if (batch_idx + 1) % 2 == 0:
return
return super().predict_step(batch, batch_idx, dataloader_idx)
with pytest.warns(UserWarning, match='predict returned None'):
predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)
def test_trainer_predict_grad(tmpdir):
class CustomBoringModel(BoringModel):
def predict_step(self, batch, batch_idx, dataloader_idx=None):
assert batch.expand_as(batch).grad_fn is None
return super().predict_step(batch, batch_idx, dataloader_idx)
predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)
x = torch.zeros(1, requires_grad=True)
assert x.expand_as(x).grad_fn is not None
@pytest.mark.parametrize('progress_bar_refresh_rate', [0, 5, None])
@pytest.mark.parametrize('datamodule', [False, True])
def test_trainer_predict_cpu(tmpdir, datamodule, progress_bar_refresh_rate):
predict(tmpdir, None, None, 1, datamodule=datamodule, pbrr=progress_bar_refresh_rate)
@RunIf(min_gpus=2, special=True)
@pytest.mark.parametrize('num_gpus', [1, 2])
def test_trainer_predict_dp(tmpdir, num_gpus):
predict(tmpdir, "dp", num_gpus, None)
@RunIf(min_gpus=2, special=True, fairscale=True)
def test_trainer_predict_ddp(tmpdir):
predict(tmpdir, "ddp", 2, None)
@RunIf(min_gpus=2, skip_windows=True, special=True)
def test_trainer_predict_ddp_spawn(tmpdir):
predict(tmpdir, "ddp_spawn", 2, None)
@RunIf(min_gpus=2, special=True)
def test_trainer_predict_1_gpu(tmpdir):
predict(tmpdir, None, 1, None)
@RunIf(skip_windows=True)
def test_trainer_predict_ddp_cpu(tmpdir):
predict(tmpdir, "ddp_cpu", 0, 2)
@patch('torch.cuda.device_count', return_value=2)
@patch('torch.cuda.is_available', return_value=True)
def test_spawn_predict_return_predictions(*_):
"""
Test that `return_predictions=True` raises a MisconfigurationException with spawn training type plugins.
"""
model = BoringModel()
def run(expected_plugin, **trainer_kwargs):
trainer = Trainer(**trainer_kwargs, fast_dev_run=True)
assert isinstance(trainer.training_type_plugin, expected_plugin)
with pytest.raises(MisconfigurationException, match="`return_predictions` should be set to `False`"):
trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=True)
run(DDPSpawnPlugin, accelerator="ddp_spawn", gpus=2)
run(DDPSpawnPlugin, accelerator="ddp_cpu", num_processes=2)
@pytest.mark.parametrize("return_predictions", [None, False, True])
@pytest.mark.parametrize("precision", [32, 64])
def test_predict_return_predictions_cpu(return_predictions, precision, tmpdir):
"""
Test that `trainer.predict` returns the predictions when `return_predictions=True`
(the default behaviour on CPU), with the dtype matching the configured precision.
"""
seed_everything(42)
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, precision=precision)
preds = trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=return_predictions)
if return_predictions or return_predictions is None:
assert len(preds) == 1
assert preds[0].shape == torch.Size([1, 2])
assert preds[0].dtype == (torch.float64 if precision == 64 else torch.float32)
@pytest.mark.parametrize(
["limit_train_batches", "global_step", "num_training_batches", "current_epoch", "should_train"],
[(0.2, 0, 0, 0, False), (0.5, 10, 2, 4, True)],
)
def test_disabled_training_for_insufficient_limit_train_batches(
tmpdir, limit_train_batches, global_step, num_training_batches, current_epoch, should_train
):
"""
Verify that when `limit_train_batches` is a float in [0.0, 1.0] and
`int(self.num_training_batches * self.limit_train_batches) == 0`, the training loop is disabled
(see the worked example after this test).
"""
class CurrentModel(BoringModel):
training_step_invoked = False
training_epoch_end_invoked = False
def training_step(self, *args, **kwargs):
self.training_step_invoked = True
return super().training_step(*args, **kwargs)
def training_epoch_end(self, *args, **kwargs):
self.training_epoch_end_invoked = True
return super().training_epoch_end(*args, **kwargs)
dataset_len = 100
batch_size = 25
train = RandomDataset(32, length=dataset_len)
train_loader = DataLoader(train, batch_size=batch_size)
model = CurrentModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=5,
limit_train_batches=limit_train_batches,
)
trainer.fit(model, train_loader)
params_string = f"""`limit_train_batches={limit_train_batches}`, `dataset_len={dataset_len}`
& `batch_size={batch_size}` as
`num_training_batches={num_training_batches}`"""
if should_train:
error_string = f"should run with {params_string}"
else:
error_string = f"should not run with {params_string}"
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == global_step
assert trainer.num_training_batches == num_training_batches
assert trainer.current_epoch == current_epoch
assert model.training_step_invoked == should_train, f"`training_step` {error_string}"
assert model.training_epoch_end_invoked == should_train, f"`training_epoch_end` {error_string}"
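# The worked example referenced in the docstring above (illustrative only, not exercised by the
# suite): the disabling rule spelled out with the parametrized values, where dataset_len=100 and
# batch_size=25 give 4 training batches per epoch.
def _limit_train_batches_worked_example():
    num_training_batches = 100 // 25  # 4
    assert int(num_training_batches * 0.2) == 0  # -> training loop disabled
    assert int(num_training_batches * 0.5) == 2  # -> 2 batches/epoch, 5 epochs -> global_step == 10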
@pytest.mark.parametrize(["max_steps", "max_epochs", "global_step"], [(10, 5, 10), (20, None, 20)])
def test_repeated_fit_calls_with_max_epochs_and_steps(tmpdir, max_steps, max_epochs, global_step):
"""
Ensure that the training loop is bound by `max_steps` and
`max_epochs` across repeated calls to `trainer.fit`, and
disabled once the limit is reached.
"""
dataset_len = 200
batch_size = 10
train_data = DataLoader(RandomDataset(32, dataset_len), batch_size=batch_size)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=max_steps,
max_epochs=max_epochs,
)
trainer.fit(model, train_data)
assert trainer.global_step == global_step
trainer.fit(model, train_data)
assert trainer.global_step == global_step
def test_trainer_access_in_configure_optimizers(tmpdir):
"""
Verify that the configure optimizer function can reference the trainer.
"""
class TestModel(BoringModel):
def configure_optimizers(self):
assert self.trainer is not None, "Expect to have access to the trainer within `configure_optimizers`"
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model, train_data)
@RunIf(min_gpus=1)
def test_setup_hook_move_to_device_correctly(tmpdir):
"""
Verify that if a user defines a layer in the setup hook function, this is moved to the correct device.
"""
class TestModel(BoringModel):
def setup(self, stage: str) -> None:
self.new_layer = torch.nn.Linear(2, 2)
def training_step(self, batch, batch_idx):
output = self.layer(batch)
# will crash if not moved to correct device
output = self.new_layer(output)
loss = self.loss(batch, output)
return {"loss": loss}
# fake data
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
# model
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, gpus=1)
trainer.fit(model, train_data)
def test_train_loop_system(tmpdir):
"""
Test that the following methods are called in this order during automatic optimization.
1. optimizer.step (skipped while gradients are being accumulated)
2. model.training_step
3. optimizer.zero_grad (runs on the first batch of each accumulation window)
4. model.backward
Note that the order is NOT `training_step`->`zero_grad`->`backward`->`step`.
This is because `optimizer.step(closure)` calls `closure()` which then calls
the three remaining methods `training_step`, `zero_grad` and `backward` inside.
"""
called_methods = []
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=5,
limit_val_batches=1,
limit_test_batches=1,
progress_bar_refresh_rate=0,
)
class TestOptimizer(SGD):
def step(self, *args, **kwargs):
called_methods.append("step")
return super().step(*args, **kwargs)
def zero_grad(self, *args, **kwargs):
called_methods.append("zero_grad")
return super().zero_grad(*args, **kwargs)
class TestModel(BoringModel):
def configure_optimizers(self):
return TestOptimizer(self.parameters(), lr=0.1)
def training_step(self, *args, **kwargs):
called_methods.append("training_step")
return super().training_step(*args, **kwargs)
def backward(self, *args, **kwargs):
called_methods.append("backward")
return super().backward(*args, **kwargs)
model = TestModel()
trainer = Trainer(**trainer_options)
# No methods are called yet.
assert called_methods == []
trainer.fit(model)
assert called_methods == [
"step",
"training_step",
"zero_grad",
"backward",
] * trainer.limit_train_batches
called_methods.clear()
trainer = Trainer(**trainer_options, accumulate_grad_batches=3)
# No methods are called yet.
assert called_methods == []
trainer.fit(model)
assert called_methods == [
# 0
"training_step",
"zero_grad",
"backward",
# 1
"training_step",
"backward",
# 2
"step",
"training_step",
"backward",
# 3
"training_step",
"zero_grad",
"backward",
# 4
"step",
"training_step",
"backward",
]
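# A minimal self-contained sketch (illustrative only, not exercised by the suite) of why the
# order recorded in `test_train_loop_system` starts with `step`: in automatic optimization the
# trainer hands `optimizer.step` a closure, and it is the closure that runs `training_step`,
# `zero_grad` and `backward`. The closure body below only mimics that behaviour; the names used
# are assumptions made for illustration.
def _closure_call_order_sketch():
    calls = []
    class RecordingSGD(SGD):
        def step(self, closure=None):
            calls.append("step")  # entered before the closure body runs
            return super().step(closure)
    layer = torch.nn.Linear(2, 2)
    optimizer = RecordingSGD(layer.parameters(), lr=0.1)
    def closure():
        calls.append("training_step")
        optimizer.zero_grad()
        calls.append("zero_grad")
        loss = layer(torch.ones(1, 2)).sum()
        loss.backward()
        calls.append("backward")
        return loss
    optimizer.step(closure)
    assert calls == ["step", "training_step", "zero_grad", "backward"]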
def test_init_optimizers_resets_lightning_optimizers(tmpdir):
""" Test that the Trainer resets the `lightning_optimizers` list everytime new optimizers get initialized. """
def compare_optimizers():
assert trainer.lightning_optimizers[0].optimizer is trainer.optimizers[0]
model = BoringModel()
model.lr = 0.2
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
auto_lr_find=True,
)
trainer.tune(model)
compare_optimizers()
trainer.fit(model)
compare_optimizers()
trainer.fit_loop.max_epochs = 2 # simulate multiple fit calls
trainer.fit(model)
compare_optimizers()
def test_check_val_every_n_epoch_exception(tmpdir):
with pytest.raises(MisconfigurationException, match="should be an integer."):
Trainer(
default_root_dir=tmpdir,
max_epochs=1,
check_val_every_n_epoch=1.2,
)
def test_trainer_attach_data_pipeline_to_model(tmpdir):
class DataPipeline:
pass
class TestDataModule(LightningDataModule):
data_pipeline = DataPipeline()
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return DataLoader(RandomDataset(32, 64))
class TestCallback(Callback):
def on_fit_start(self, trainer, pl_module: LightningModule) -> None:
"""Called when fit begins"""
assert isinstance(pl_module.data_pipeline, DataPipeline)
model = BoringModel()
dm = TestDataModule()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[TestCallback()])
trainer.fit(model, datamodule=dm)
def test_exception_when_testing_or_validating_with_fast_dev_run(tmpdir):
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = BoringModel()
trainer.fit(model)
with pytest.raises(MisconfigurationException, match=r"\.validate\(\)` with `fast_dev_run=True"):
trainer.validate()
with pytest.raises(MisconfigurationException, match=r"\.test\(\)` with `fast_dev_run=True"):
trainer.test()
class TrainerStagesModel(BoringModel):
def on_train_start(self) -> None:
assert self.trainer.model.training
assert self.training
def on_validation_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
def on_test_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
def on_predict_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
@pytest.mark.parametrize(
'accelerator,num_processes', [(None, 1), pytest.param('ddp', 2, marks=RunIf(skip_windows=True))]
)
def test_model_in_correct_mode_during_stages(tmpdir, accelerator, num_processes):
model = TrainerStagesModel()
trainer = Trainer(default_root_dir=tmpdir, accelerator=accelerator, num_processes=num_processes, fast_dev_run=True)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model, model.val_dataloader())
class TestDummyModelForCheckpoint(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('x', loss)
def validation_epoch_end(self, outputs) -> None:
pass
@RunIf(skip_windows=True)
def test_fit_test_synchronization(tmpdir):
"""Test that the trainer synchronizes processes before returning control back to the caller. """
tutils.set_random_master_port()
model = TestDummyModelForCheckpoint()
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor='x', mode='min', save_top_k=1)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
accelerator='ddp_cpu',
num_processes=2,
callbacks=[checkpoint],
)
trainer.fit(model)
assert os.path.exists(checkpoint.best_model_path), f'Could not find checkpoint at rank {trainer.global_rank}'
trainer.test()
class CustomCallbackOnLoadCheckpoint(Callback):
def on_save_checkpoint(self, trainer, pl_module, checkpoint) -> dict:
return {"a": None}
def test_on_load_checkpoint_missing_callbacks(tmpdir):
""" Test a warning appears when callbacks in the checkpoint don't match callbacks provided when resuming. """
model = BoringModel()
chk = ModelCheckpoint(dirpath=tmpdir, save_last=True)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, callbacks=[chk, CustomCallbackOnLoadCheckpoint()])
trainer.fit(model)
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=5, resume_from_checkpoint=chk.last_model_path, progress_bar_refresh_rate=1
)
with pytest.warns(UserWarning, match="CustomCallbackOnLoadCheckpoint"):
trainer.fit(model)
def test_module_current_fx_attributes_reset(tmpdir):
""" Ensure that lightning module's attributes related to current fx are reset at the end of execution. """
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=1,
checkpoint_callback=False,
logger=False,
)
trainer.fit(model)
assert model._current_fx_name is None
assert model._current_dataloader_idx is None
trainer.test(model)
assert model._current_fx_name is None
assert model._current_dataloader_idx is None
def test_exception_when_lightning_module_is_not_set_on_trainer():
trainer = Trainer()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*validate"):
trainer.validate()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*test"):
trainer.test()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*predict"):
trainer.predict()
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import pickle
import sys
from argparse import Namespace
from copy import deepcopy
from pathlib import Path
from unittest.mock import ANY, call, patch
import cloudpickle
import pytest
import torch
from omegaconf import OmegaConf
from torch.optim import SGD
from torch.utils.data import DataLoader
import tests.helpers.utils as tutils
from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler
from pytorch_lightning.plugins import DDPSpawnPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import DeviceType, DistributedType
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.seed import seed_everything
from tests.base import EvalModelTemplate
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_no_val_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", str(tmpdir))
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
# fit model
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# assert ckpt has hparams
ckpt = torch.load(new_weights_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), "hyper_parameters missing from checkpoints"
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
model_2 = EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
model_2.eval()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_no_val_end_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", str(tmpdir))
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
model_2 = EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
model_2.eval()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_strict_model_load(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", str(tmpdir))
model = EvalModelTemplate()
# Extra layer
model.c_d3 = torch.nn.Linear(model.hidden_dim, model.hidden_dim)
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
try:
EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
# todo: specify the possible exception
except Exception:
failed = True
else:
failed = False
assert failed, "Model should not have been loaded since an extra layer was added."
failed = False
try:
EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
strict=False,
)
# todo: specify the possible exception
except Exception:
failed = True
assert not failed, "Model should have been loaded with strict=False."
@pytest.mark.parametrize("accumulate_grad_batches", (1, 2, 3))
def test_trainer_accumulate_grad_batches_zero_grad(tmpdir, accumulate_grad_batches):
with patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
accumulate_grad_batches=accumulate_grad_batches,
)
trainer.fit(model)
assert sgd_zero_grad.call_count == math.ceil(trainer.limit_train_batches / accumulate_grad_batches)
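# A worked example (illustrative only, not exercised by the suite) of the `zero_grad` count
# asserted above: `zero_grad` runs once per accumulation window (on the first batch of each
# window), so with 20 training batches there are ceil(20 / accumulate_grad_batches) calls.
def _zero_grad_call_count_worked_example():
    batches = 20
    assert [math.ceil(batches / acc) for acc in (1, 2, 3)] == [20, 10, 7]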
@pytest.mark.parametrize(
["accumulate_grad_batches", "limit_train_batches"],
[
({
1: 2,
3: 4
}, 1.0),
({
1: 2,
3: 4
}, 0.5), # deliberately not divisible by accumulate_grad_batches
(3, 1.0),
(3, 0.8), # deliberately not divisible by accumulate_grad_batches
(4, 1.0),
(4, 0.7), # deliberately not divisible by accumulate_grad_batches
],
)
def test_gradient_accumulation_scheduling_last_batch(tmpdir, accumulate_grad_batches, limit_train_batches):
""" Verify optimizer.step() applied to last batch while grad accumulation """
class TestModel(BoringModel):
def state_dict(self, *args, **kwargs):
return deepcopy(super().state_dict(*args, **kwargs))
def check(self, d1, d2, equal=True):
keys = d1.keys() | d2.keys()
values = [torch.equal(d1[k], d2[k]) for k in keys]
return all(values) if equal else not any(values)
def backward(self, *args, **kwargs) -> None:
pre_bwd_state_dict = self.state_dict()
assert self.check(self.start_state_dict, pre_bwd_state_dict)
out = super().backward(*args, **kwargs)
# state dict is equal, just the gradients changed
assert self.check(pre_bwd_state_dict, self.state_dict())
return out
def optimizer_step(self, *args, **kwargs):
pre_opt_step_state_dict = self.state_dict()
assert self.check(self.start_state_dict, pre_opt_step_state_dict)
# this calls `backward` and `on_after_backward` inside the closure
out = super().optimizer_step(*args, **kwargs)
# the state dict changed
assert self.check(pre_opt_step_state_dict, self.state_dict(), equal=False)
self.opt_step_called = True
return out
def on_train_batch_start(self, *_):
self.start_state_dict = self.state_dict()
self.opt_step_called = False
def on_train_batch_end(self, outputs, batch, batch_idx, *_):
end_state_dict = self.state_dict()
is_last_batch = (batch_idx + 1) == self.trainer.num_training_batches
if is_last_batch or self.opt_step_called:
assert self.check(self.start_state_dict, end_state_dict, equal=False)
else:
assert self.check(self.start_state_dict, end_state_dict)
model = TestModel()
trainer = Trainer(
accumulate_grad_batches=accumulate_grad_batches,
max_epochs=2,
limit_train_batches=limit_train_batches,
limit_val_batches=0,
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
def test_loading_meta_tags(tmpdir):
""" test for backward compatibility to meta_tags.csv """
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
# save tags
logger = tutils.get_default_logger(tmpdir)
logger.log_hyperparams(Namespace(some_str="a_str", an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
# load hparams
path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(hparams_path)
# save as legacy meta_tags.csv
tags_path = os.path.join(path_expt_dir, "meta_tags.csv")
save_hparams_to_tags_csv(tags_path, hparams)
tags = load_hparams_from_tags_csv(tags_path)
assert hparams == tags
def test_loading_yaml(tmpdir):
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
# save tags
logger = tutils.get_default_logger(tmpdir)
logger.log_hyperparams(Namespace(some_str="a_str", an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
# load hparams
path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(path_expt_dir, "hparams.yaml")
tags = load_hparams_from_yaml(hparams_path)
assert tags["batch_size"] == 32 and tags["hidden_dim"] == 1000
@pytest.mark.parametrize(
"save_top_k,save_last,expected_files",
[
pytest.param(-1, False, [f"epoch={i}.ckpt" for i in range(5)], id="CASE K=-1 (all)"),
pytest.param(1, False, {"epoch=4.ckpt"}, id="CASE K=1 (2.5, epoch 4)"),
pytest.param(2, False, [f"epoch={i}.ckpt" for i in (2, 4)], id="CASE K=2 (2.5 epoch 4, 2.8 epoch 2)"),
pytest.param(4, False, [f"epoch={i}.ckpt" for i in range(1, 5)], id="CASE K=4 (save all 4 base)"),
pytest.param(3, False, [f"epoch={i}.ckpt" for i in range(2, 5)], id="CASE K=3 (save the 2nd, 3rd, 4th model)"),
pytest.param(1, True, {"epoch=4.ckpt", "last.ckpt"}, id="CASE K=1 (save the 4th model and the last model)"),
],
)
def test_model_checkpoint_options(tmpdir, save_top_k, save_last, expected_files):
"""Test ModelCheckpoint options."""
def mock_save_function(filepath, *args):
open(filepath, "a").close()
# simulated losses
losses = [10, 9, 2.8, 5, 2.5]
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir,
filename='{epoch}',
monitor='checkpoint_on',
save_top_k=save_top_k,
save_last=save_last,
verbose=True
)
trainer = Trainer()
trainer.state.fn = TrainerFn.FITTING
trainer.save_checkpoint = mock_save_function
# emulate callback's calls during the training
for i, loss in enumerate(losses):
trainer.fit_loop.current_epoch = i
trainer.fit_loop.global_step = i
trainer.logger_connector.callback_metrics.update({"checkpoint_on": loss})
checkpoint_callback.on_validation_end(trainer, trainer.lightning_module)
file_lists = set(os.listdir(tmpdir))
assert len(file_lists) == len(
expected_files
), f"Should save {len(expected_files)} models when save_top_k={save_top_k} but found={file_lists}"
# verify correct naming
for fname in expected_files:
assert fname in file_lists
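# A minimal sketch (illustrative only, not exercised by the suite) of how the `save_top_k`
# selection above works when the monitored value is minimized. The losses are indexed by epoch
# and mirror the simulated `losses` list in the test; the helper name is an assumption.
def _save_top_k_selection_sketch():
    losses = [10, 9, 2.8, 5, 2.5]  # monitored value per epoch 0..4
    ranked = sorted(range(len(losses)), key=lambda epoch: losses[epoch])  # best epoch first
    assert ranked[:1] == [4]  # K=1 keeps epoch 4 (2.5)
    assert sorted(ranked[:2]) == [2, 4]  # K=2 keeps epochs 2 and 4 (2.8, 2.5)
    assert sorted(ranked[:3]) == [2, 3, 4]  # K=3 keeps the 2nd, 3rd and 4th models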
def test_model_checkpoint_only_weights(tmpdir):
"""Tests use case where ModelCheckpoint is configured to save only model weights, and
user tries to load checkpoint to resume training.
"""
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_weights_only=True)],
)
# fit model
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
# assert saved checkpoint has no trainer data
checkpoint = torch.load(checkpoint_path)
assert "optimizer_states" not in checkpoint, "checkpoint should contain only model weights"
assert "lr_schedulers" not in checkpoint, "checkpoint should contain only model weights"
# assert loading model works when checkpoint has only weights
assert EvalModelTemplate.load_from_checkpoint(checkpoint_path=checkpoint_path)
# directly save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path, weights_only=True)
# assert saved checkpoint has no trainer data
checkpoint = torch.load(new_weights_path)
assert "optimizer_states" not in checkpoint, "checkpoint should contain only model weights"
assert "lr_schedulers" not in checkpoint, "checkpoint should contain only model weights"
# assert restoring train state fails
with pytest.raises(KeyError, match="checkpoint contains only the model"):
trainer.checkpoint_connector.restore(new_weights_path)
def test_model_freeze_unfreeze():
model = EvalModelTemplate()
model.freeze()
model.unfreeze()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_resume_from_checkpoint_epoch_restored(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Verify resuming from checkpoint runs the right number of epochs"""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", str(tmpdir))
class TestModel(BoringModel):
# Model that tracks epochs and batches seen
num_epochs_end_seen = 0
num_batches_seen = 0
num_on_load_checkpoint_called = 0
def on_epoch_end(self):
self.num_epochs_end_seen += 1
def on_train_batch_start(self, *_):
self.num_batches_seen += 1
def on_load_checkpoint(self, _):
self.num_on_load_checkpoint_called += 1
model = TestModel()
trainer = Trainer(
max_epochs=2,
limit_train_batches=0.65,
limit_val_batches=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_top_k=-1)],
default_root_dir=tmpdir,
val_check_interval=1.0,
progress_bar_refresh_rate=0,
logger=False,
weights_summary=None,
)
trainer.fit(model)
# `on_epoch_end` will be called once for val_sanity, twice for train, twice for val
assert model.num_epochs_end_seen == 1 + 2 + 2
assert model.num_batches_seen == trainer.num_training_batches * 2
assert model.num_on_load_checkpoint_called == 0
# Other checkpoints can be uncommented if/when resuming mid-epoch is supported
checkpoints = Path(trainer.checkpoint_callback.dirpath).glob("*.ckpt")
if url_ckpt:
# transform local paths into url checkpoints
ip, port = tmpdir_server
checkpoints = [f"http://{ip}:{port}/" + ckpt.name for ckpt in checkpoints]
for ckpt in checkpoints:
next_model = TestModel()
state = pl_load(ckpt)
# Resume training
new_trainer = Trainer(
default_root_dir=tmpdir,
resume_from_checkpoint=ckpt,
max_epochs=2,
)
new_trainer.fit(next_model)
assert state["global_step"] + next_model.num_batches_seen == trainer.num_training_batches * trainer.max_epochs
assert next_model.num_on_load_checkpoint_called == 1
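# A minimal sketch (illustrative only, not exercised by the suite) of the resume invariant
# asserted above: a checkpoint saved after epoch `e` stores global_step == (e + 1) *
# num_training_batches, and resuming with the same `max_epochs` runs exactly the remaining
# epochs, so the two terms always sum to num_training_batches * max_epochs. The helper name
# and default values are assumptions made for illustration.
def _resume_step_budget_sketch(num_training_batches=13, max_epochs=2):
    for ckpt_epoch in range(max_epochs):
        ckpt_global_step = (ckpt_epoch + 1) * num_training_batches
        remaining_batches = (max_epochs - (ckpt_epoch + 1)) * num_training_batches
        assert ckpt_global_step + remaining_batches == num_training_batches * max_epochs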
def test_trainer_max_steps_and_epochs(tmpdir):
"""Verify model trains according to specified max steps"""
model = BoringModel()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
# define fewer train steps than epochs
trainer_kwargs = {
'limit_train_batches': 0.5,
'default_root_dir': tmpdir,
'max_epochs': 3,
'max_steps': num_train_samples + 10,
'logger': False,
'weights_summary': None,
'progress_bar_refresh_rate': 0,
}
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
# define fewer train epochs than steps
trainer_kwargs['max_epochs'] = 2
trainer_kwargs['max_steps'] = 3 * 2 * num_train_samples
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == num_train_samples * trainer.max_epochs
assert trainer.current_epoch == trainer.max_epochs - 1, "Model did not stop at max_epochs"
def test_trainer_min_steps_and_epochs(tmpdir):
"""Verify model trains according to specified min steps"""
model = EvalModelTemplate()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
trainer_kwargs = {
'limit_train_batches': 0.5,
'default_root_dir': tmpdir,
# define callback for stopping the model
'callbacks': [EarlyStopping(monitor="early_stop_on", min_delta=1.0)],
'val_check_interval': 2,
'min_epochs': 1,
'max_epochs': 7,
# define fewer min steps than 1 epoch
'min_steps': num_train_samples // 2,
'logger': False,
'weights_summary': None,
'progress_bar_refresh_rate': 0,
}
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch > 0
assert trainer.global_step >= num_train_samples, "Model did not train for at least min_epochs"
# define fewer epochs than min_steps
trainer_kwargs["min_steps"] = math.floor(num_train_samples * 1.5)
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch > 0
assert trainer.global_step >= math.floor(num_train_samples * 1.5), "Model did not train for at least min_steps"
def test_trainer_min_steps_and_min_epochs_not_reached(tmpdir, caplog):
""" Test that min_epochs/min_steps in Trainer are enforced even if EarlyStopping is triggered. """
class TestModel(BoringModel):
training_step_invoked = 0
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
output["loss"] = output["loss"] * 0.0 # force minimal loss to trigger early stopping
self.log("loss", output["loss"])
self.training_step_invoked += 1
assert not self.trainer.should_stop
return output
model = TestModel()
early_stop = EarlyStopping(monitor="loss", patience=0, check_on_train_epoch_end=True)
min_epochs = 5
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
min_epochs=min_epochs,
limit_val_batches=0,
limit_train_batches=2,
callbacks=[early_stop]
)
with caplog.at_level(logging.INFO, logger="pytorch_lightning.trainer.trainer"):
trainer.fit(model)
message = f"minimum epochs ({min_epochs}) or minimum steps (None) has not been met. Training will continue"
num_messages = len([record.message for record in caplog.records if message in record.message])
assert num_messages == min_epochs - 2
assert model.training_step_invoked == min_epochs * 2
def test_trainer_max_steps_accumulate_batches(tmpdir):
"""Verify model trains according to specified max steps with grad accumulated batches"""
model = BoringModel()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
# define fewer train steps than epochs
trainer = Trainer(
limit_train_batches=0.5,
default_root_dir=tmpdir,
max_steps=num_train_samples + 10,
accumulate_grad_batches=10,
logger=False,
weights_summary=None,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
def test_benchmark_option(tmpdir):
"""Verify benchmark option."""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple
# verify torch.backends.cudnn.benchmark is not turned on
assert not torch.backends.cudnn.benchmark
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
benchmark=True,
)
trainer.fit(model)
# verify training completed
assert trainer.state.finished, f"Training failed with {trainer.state}"
# verify torch.backends.cudnn.benchmark is not turned off
assert torch.backends.cudnn.benchmark
@pytest.mark.parametrize("ckpt_path", (None, "best", "specific"))
@pytest.mark.parametrize("save_top_k", (-1, 0, 1, 2))
@pytest.mark.parametrize("fn", ("validate", "test", "predict"))
def test_tested_checkpoint_path(tmpdir, ckpt_path, save_top_k, fn):
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
self.log("foo", -batch_idx)
return super().validation_step(batch, batch_idx)
def test_step(self, *args):
return self.validation_step(*args)
def predict_step(self, batch, *_):
return self(batch)
model = TestModel()
model.test_epoch_end = None
trainer = Trainer(
max_epochs=2,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
progress_bar_refresh_rate=0,
default_root_dir=tmpdir,
callbacks=[ModelCheckpoint(monitor="foo", save_top_k=save_top_k)],
)
trainer.fit(model)
trainer_fn = getattr(trainer, fn)
path_attr = f"{fn}{'d' if fn == 'validate' else 'ed'}_ckpt_path"
assert getattr(trainer, path_attr) is None
if ckpt_path == "best":
# ckpt_path is 'best', meaning we load the best weights
if save_top_k == 0:
with pytest.raises(MisconfigurationException, match=".*is not configured to save the best.*"):
trainer_fn(ckpt_path=ckpt_path)
else:
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path
elif ckpt_path is None:
# ckpt_path is None, meaning we don't load any checkpoints and
# use the weights from the end of training
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) is None
else:
# specific checkpoint, pick one from saved ones
if save_top_k == 0:
with pytest.raises(FileNotFoundError):
trainer_fn(ckpt_path="random.ckpt")
else:
ckpt_path = str(
list((Path(tmpdir) / f"lightning_logs/version_{trainer.logger.version}/checkpoints").iterdir()
)[0].absolute()
)
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == ckpt_path
def test_disabled_training(tmpdir):
"""Verify that `limit_train_batches=0` disables the training loop unless `fast_dev_run=True`."""
class CurrentModel(BoringModel):
training_step_invoked = False
training_epoch_end_invoked = False
def training_step(self, *args, **kwargs):
self.training_step_invoked = True
return super().training_step(*args, **kwargs)
def training_epoch_end(self, *args, **kwargs):
self.training_epoch_end_invoked = True
return super().training_epoch_end(*args, **kwargs)
model = CurrentModel()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.0,
limit_val_batches=0.2,
fast_dev_run=False,
)
before_state_dict = deepcopy(model.state_dict())
trainer = Trainer(**trainer_options)
trainer.fit(model)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))
# check that limit_train_batches=0 turns off training
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert not model.training_step_invoked, "`training_step` should not run when `limit_train_batches=0`"
assert not model.training_epoch_end_invoked, "`training_epoch_end` should not run when `limit_train_batches=0`"
# check that limit_train_batches has no influence when fast_dev_run is turned on
model = CurrentModel()
trainer_options.update(fast_dev_run=True)
before_state_dict = deepcopy(model.state_dict())
trainer = Trainer(**trainer_options)
trainer.fit(model)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert not torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert model.training_step_invoked, "did not run `training_step` with `fast_dev_run=True`"
assert model.training_epoch_end_invoked, "did not run `training_epoch_end` with `fast_dev_run=True`"
def test_disabled_validation(tmpdir):
"""Verify that `limit_val_batches=0` disables the validation loop unless `fast_dev_run=True`."""
class CurrentModel(EvalModelTemplate):
validation_step_invoked = False
validation_epoch_end_invoked = False
def validation_step(self, *args, **kwargs):
self.validation_step_invoked = True
return super().validation_step(*args, **kwargs)
def validation_epoch_end(self, *args, **kwargs):
self.validation_epoch_end_invoked = True
return super().validation_epoch_end(*args, **kwargs)
hparams = EvalModelTemplate.get_default_hparams()
model = CurrentModel(**hparams)
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.0,
fast_dev_run=False,
)
trainer = Trainer(**trainer_options)
trainer.fit(model)
# check that limit_val_batches=0 turns off validation
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 1
assert not model.validation_step_invoked, "`validation_step` should not run when `limit_val_batches=0`"
assert not model.validation_epoch_end_invoked, "`validation_epoch_end` should not run when `limit_val_batches=0`"
# check that limit_val_batches has no influence when fast_dev_run is turned on
model = CurrentModel(**hparams)
trainer_options.update(fast_dev_run=True)
trainer = Trainer(**trainer_options)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert model.validation_step_invoked, "did not run `validation_step` with `fast_dev_run=True`"
assert model.validation_epoch_end_invoked, "did not run `validation_epoch_end` with `fast_dev_run=True`"
def test_nan_loss_detection(tmpdir):
class CurrentModel(BoringModel):
test_batch_inf = 3
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
if batch_idx == self.test_batch_inf:
if isinstance(output, dict):
output["loss"] *= torch.tensor(math.inf) # make loss infinite
else:
output /= 0
return output
model = CurrentModel()
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=(model.test_batch_inf + 1),
terminate_on_nan=True,
)
with pytest.raises(ValueError, match=r".*The loss returned in `training_step` is.*"):
trainer.fit(model)
assert trainer.global_step == model.test_batch_inf
for param in model.parameters():
assert torch.isfinite(param).all()
def test_nan_params_detection(tmpdir):
class CurrentModel(BoringModel):
test_batch_nan = 3
def on_after_backward(self):
if self.global_step == self.test_batch_nan:
# simulate parameter that became nan
torch.nn.init.constant_(self.layer.bias, math.nan)
model = CurrentModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=(model.test_batch_nan + 1),
terminate_on_nan=True,
)
with pytest.raises(ValueError, match=r".*Detected nan and/or inf values in `layer.bias`.*"):
trainer.fit(model)
assert trainer.global_step == model.test_batch_nan
# after aborting the training loop, model still has nan-valued params
params = torch.cat([param.view(-1) for param in model.parameters()])
assert not torch.isfinite(params).all()
def test_trainer_interrupted_flag(tmpdir):
"""Test the flag denoting that a user interrupted training."""
model = EvalModelTemplate()
class InterruptCallback(Callback):
def __init__(self):
super().__init__()
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
raise KeyboardInterrupt
class HandleInterruptCallback(Callback):
def __init__(self):
super().__init__()
self.exc_info = None
def on_keyboard_interrupt(self, trainer, pl_module):
self.exc_info = sys.exc_info()
interrupt_callback = InterruptCallback()
handle_interrupt_callback = HandleInterruptCallback()
trainer = Trainer(
callbacks=[interrupt_callback, handle_interrupt_callback],
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
progress_bar_refresh_rate=0,
logger=False,
default_root_dir=tmpdir,
)
assert not trainer.interrupted
assert handle_interrupt_callback.exc_info is None
trainer.fit(model)
assert trainer.interrupted
assert isinstance(handle_interrupt_callback.exc_info[1], KeyboardInterrupt)
def test_gradient_clipping(tmpdir):
"""
Test gradient clipping
"""
tutils.reset_seed()
model = EvalModelTemplate()
trainer = Trainer(
max_steps=1,
max_epochs=1,
gradient_clip_val=1.0,
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
# for the test
model.prev_called_batch_idx = 0
trainer.fit(model)
def test_gradient_clipping_by_value(tmpdir):
"""
Test gradient clipping by value
"""
tutils.reset_seed()
model = BoringModel()
grad_clip_val = 1e-10
trainer = Trainer(
max_steps=1,
max_epochs=1,
gradient_clip_val=grad_clip_val,
gradient_clip_algorithm='value',
default_root_dir=tmpdir
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]
grad_max = torch.max(torch.stack(grad_max_list))
assert abs(grad_max.item() - grad_clip_val) < 1e-11, \
f"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} ."
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
# for the test
model.prev_called_batch_idx = 0
trainer.fit(model)
@RunIf(min_gpus=1, amp_native=True)
def test_gradient_clipping_fp16(tmpdir):
"""
Test gradient clipping with fp16
"""
tutils.reset_seed()
model = EvalModelTemplate()
trainer = Trainer(
max_steps=1,
max_epochs=1,
precision=16,
gpus=1,
gradient_clip_val=1.0,
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
model.prev_called_batch_idx = 0
trainer.fit(model)
@RunIf(min_gpus=1, amp_native=True)
def test_gradient_clipping_by_value_fp16(tmpdir):
"""
Test gradient clipping by value with fp16
"""
tutils.reset_seed()
model = BoringModel()
grad_clip_val = 1e-10
trainer = Trainer(
max_steps=1,
max_epochs=1,
precision=16,
gpus=1,
gradient_clip_val=grad_clip_val,
gradient_clip_algorithm='value',
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]
grad_max = torch.max(torch.stack(grad_max_list))
assert abs(grad_max.item() - grad_clip_val) < 1e-11, \
f"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} ."
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
model.prev_called_batch_idx = 0
trainer.fit(model)
def test_gpu_choice(tmpdir):
trainer_options = dict(default_root_dir=tmpdir)
# Only run if CUDA is available
if not torch.cuda.is_available():
return
num_gpus = torch.cuda.device_count()
Trainer(**trainer_options, gpus=num_gpus, auto_select_gpus=True)
with pytest.raises(RuntimeError, match=r".*No GPUs available.*"):
Trainer(**trainer_options, gpus=num_gpus + 1, auto_select_gpus=True)
@pytest.mark.parametrize(
"limit_val_batches",
[0.0, 1, 1.0, 0.5, 5],
)
def test_num_sanity_val_steps(tmpdir, limit_val_batches):
"""
Test that the number of sanity check batches is clipped to `limit_val_batches`.
"""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
num_sanity_val_steps = 4
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=num_sanity_val_steps,
limit_val_batches=limit_val_batches,
max_steps=1,
)
assert trainer.num_sanity_val_steps == num_sanity_val_steps
with patch.object(
trainer.fit_loop.validation_loop.epoch_loop,
"evaluation_step",
wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step
) as mocked:
val_dataloaders = model.val_dataloader__multiple_mixed_length()
trainer.fit(model, val_dataloaders=val_dataloaders)
assert mocked.call_count == sum(
min(num_sanity_val_steps, num_batches) for num_batches in trainer.num_val_batches
)
@pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.3])
def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):
"""
    Test that `num_sanity_val_steps=-1` runs through all validation data once, capped at the number of
    batches allowed by the `limit_val_batches` Trainer argument.
"""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=-1,
limit_val_batches=limit_val_batches,
max_steps=1,
)
assert trainer.num_sanity_val_steps == float("inf")
with patch.object(
trainer.fit_loop.validation_loop.epoch_loop,
"evaluation_step",
wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step
) as mocked:
val_dataloaders = model.val_dataloader__multiple()
trainer.fit(model, val_dataloaders=val_dataloaders)
assert mocked.call_count == sum(trainer.num_val_batches)
@pytest.mark.parametrize(
"trainer_kwargs,expected",
[
(
dict(accelerator=None, gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="dp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp", num_nodes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp_cpu", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator=None, gpus=1),
dict(_distrib_type=None, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="dp", gpus=1),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="ddp", gpus=1),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="ddp_cpu", num_processes=2, gpus=1),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=1),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator=None, gpus=2),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(accelerator="dp", gpus=2),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
(
dict(accelerator="ddp", gpus=2),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=2),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
],
)
def test_trainer_config(trainer_kwargs, expected, monkeypatch):
if trainer_kwargs["gpus"] is not None:
monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
monkeypatch.setattr(torch.cuda, "device_count", lambda: trainer_kwargs["gpus"])
trainer = Trainer(**trainer_kwargs)
assert len(expected) == 4
for k, v in expected.items():
assert getattr(trainer, k) == v, f"Failed {k}: {v}"
def test_trainer_subclassing():
model = EvalModelTemplate()
# First way of pulling out args from signature is to list them
class TrainerSubclass(Trainer):
def __init__(self, custom_arg, *args, custom_kwarg="test", **kwargs):
super().__init__(*args, **kwargs)
self.custom_arg = custom_arg
self.custom_kwarg = custom_kwarg
trainer = TrainerSubclass(123, custom_kwarg="custom", fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.custom_arg == 123
assert trainer.custom_kwarg == "custom"
assert trainer.fast_dev_run
# Second way is to pop from the dict
# It's a special case because Trainer does not have any positional args
class TrainerSubclass(Trainer):
def __init__(self, **kwargs):
self.custom_arg = kwargs.pop("custom_arg", 0)
self.custom_kwarg = kwargs.pop("custom_kwarg", "test")
super().__init__(**kwargs)
trainer = TrainerSubclass(custom_kwarg="custom", fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.custom_kwarg == "custom"
assert trainer.fast_dev_run
# when we pass in an unknown arg, the base class should complain
with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'abcdefg'"):
TrainerSubclass(abcdefg="unknown_arg")
@pytest.mark.parametrize(
"trainer_params", [
OmegaConf.create(dict(max_epochs=1, gpus=1)),
OmegaConf.create(dict(max_epochs=1, gpus=[0])),
]
)
@RunIf(min_gpus=1)
def test_trainer_omegaconf(trainer_params):
Trainer(**trainer_params)
def test_trainer_pickle(tmpdir):
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
)
pickle.dumps(trainer)
cloudpickle.dumps(trainer)
@pytest.mark.parametrize("stage", ("fit", "validate", "test"))
def test_trainer_setup_call(tmpdir, stage):
"""Test setup call gets the correct stage"""
class CurrentModel(BoringModel):
def setup(self, stage):
self.stage = stage
class TrainerSubclass(Trainer):
def setup(self, model, stage):
assert model is not None
self.stage = stage
model = CurrentModel()
# fit model
trainer = TrainerSubclass(default_root_dir=tmpdir, max_epochs=1, checkpoint_callback=False)
if stage == "fit":
trainer.fit(model)
elif stage == "validate":
trainer.validate(model, ckpt_path=None)
else:
trainer.test(model, ckpt_path=None)
assert trainer.stage == stage
assert trainer.lightning_module.stage == stage
@pytest.mark.parametrize(
"train_batches, max_steps, log_interval",
[
(10, 10, 1),
(3, 10, 1),
(3, 10, 5),
],
)
@patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics")
def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval):
class TestModel(BoringModel):
def training_step(self, *args, **kwargs):
self.log("foo", -1)
return super().training_step(*args, **kwargs)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
log_every_n_steps=log_interval,
flush_logs_every_n_steps=log_interval,
limit_train_batches=train_batches,
limit_val_batches=0,
max_steps=max_steps,
)
trainer.fit(model)
expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)]
log_metrics_mock.assert_has_calls(expected_calls)
class TestLightningDataModule(LightningDataModule):
def __init__(self, dataloaders):
super().__init__()
self._dataloaders = dataloaders
def test_dataloader(self):
return self._dataloaders
def predict_dataloader(self):
return self._dataloaders
class CustomPredictionWriter(BasePredictionWriter):
write_on_batch_end_called = False
write_on_epoch_end_called = False
def __init__(self, output_dir: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_dir = output_dir
def write_on_batch_end(self, trainer, pl_module, prediction, batch_indices, *args, **kwargs):
assert prediction.shape == torch.Size([1, 2])
if trainer.accelerator_connector.is_distributed:
assert len(batch_indices) == 1
else:
assert batch_indices is None
self.write_on_batch_end_called = True
def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):
expected = 1 if trainer.accelerator_connector.is_distributed else 2
assert len(predictions) == 2
assert len(predictions[0]) == expected
if trainer.accelerator_connector.is_distributed:
assert len(batch_indices) == 2
assert len(batch_indices[0]) == expected
else:
assert batch_indices is None
self.write_on_epoch_end_called = True
def on_predict_epoch_end(self, trainer, pl_module, outputs):
if trainer.accelerator_connector.is_distributed:
for idx in range(2):
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler.sampler, UnrepeatedDistributedSampler)
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler, IndexBatchSamplerWrapper)
super().on_predict_epoch_end(trainer, pl_module, outputs)
def predict(
tmpdir, accelerator, gpus, num_processes, model=None, plugins=None, datamodule=True, pbrr=None, use_callbacks=True
):
dataloaders = [torch.utils.data.DataLoader(RandomDataset(32, 2)), torch.utils.data.DataLoader(RandomDataset(32, 2))]
model = model or BoringModel()
dm = TestLightningDataModule(dataloaders)
cb = CustomPredictionWriter(tmpdir, write_interval="batch")
cb_1 = CustomPredictionWriter(tmpdir, write_interval="epoch")
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
accelerator=accelerator,
gpus=gpus,
num_processes=num_processes,
plugins=plugins,
progress_bar_refresh_rate=pbrr,
callbacks=[cb, cb_1] if use_callbacks else []
)
if accelerator == "ddp_spawn":
with pytest.raises(MisconfigurationException):
trainer.predict(model, datamodule=dm, return_predictions=True)
if datamodule:
results = trainer.predict(model, datamodule=dm)
else:
results = trainer.predict(model, dataloaders=dataloaders)
if not isinstance(trainer.training_type_plugin, DDPSpawnPlugin):
if use_callbacks:
assert cb.write_on_batch_end_called
assert not cb.write_on_epoch_end_called
assert not cb_1.write_on_batch_end_called
assert cb_1.write_on_epoch_end_called
num_samples = 1 if accelerator == "ddp" else 2
assert len(results) == 2
assert len(results[0]) == num_samples
assert results[0][0].shape == torch.Size([1, 2])
def test_trainer_predict_no_return(tmpdir):
"""
Test trainer.predict warns when nothing is returned
"""
class CustomBoringModel(BoringModel):
def predict_step(self, batch, batch_idx, dataloader_idx=None):
if (batch_idx + 1) % 2 == 0:
return
return super().predict_step(batch, batch_idx, dataloader_idx)
with pytest.warns(UserWarning, match='predict returned None'):
predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)
def test_trainer_predict_grad(tmpdir):
class CustomBoringModel(BoringModel):
def predict_step(self, batch, batch_idx, dataloader_idx=None):
assert batch.expand_as(batch).grad_fn is None
return super().predict_step(batch, batch_idx, dataloader_idx)
predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)
x = torch.zeros(1, requires_grad=True)
assert x.expand_as(x).grad_fn is not None
@pytest.mark.parametrize('progress_bar_refresh_rate', [0, 5, None])
@pytest.mark.parametrize('datamodule', [False, True])
def test_trainer_predict_cpu(tmpdir, datamodule, progress_bar_refresh_rate):
predict(tmpdir, None, None, 1, datamodule=datamodule, pbrr=progress_bar_refresh_rate)
@RunIf(min_gpus=2, special=True)
@pytest.mark.parametrize('num_gpus', [1, 2])
def test_trainer_predict_dp(tmpdir, num_gpus):
predict(tmpdir, "dp", num_gpus, None)
@RunIf(min_gpus=2, special=True, fairscale=True)
def test_trainer_predict_ddp(tmpdir):
predict(tmpdir, "ddp", 2, None)
@RunIf(min_gpus=2, skip_windows=True, special=True)
def test_trainer_predict_ddp_spawn(tmpdir):
predict(tmpdir, "ddp_spawn", 2, None)
@RunIf(min_gpus=2, special=True)
def test_trainer_predict_1_gpu(tmpdir):
predict(tmpdir, None, 1, None)
@RunIf(skip_windows=True)
def test_trainer_predict_ddp_cpu(tmpdir):
predict(tmpdir, "ddp_cpu", 0, 2)
@patch('torch.cuda.device_count', return_value=2)
@patch('torch.cuda.is_available', return_value=True)
def test_spawn_predict_return_predictions(*_):
"""
    Test that `return_predictions=True` raises a MisconfigurationException with spawn training type plugins.
"""
model = BoringModel()
def run(expected_plugin, **trainer_kwargs):
trainer = Trainer(**trainer_kwargs, fast_dev_run=True)
assert isinstance(trainer.training_type_plugin, expected_plugin)
with pytest.raises(MisconfigurationException, match="`return_predictions` should be set to `False`"):
trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=True)
run(DDPSpawnPlugin, accelerator="ddp_spawn", gpus=2)
run(DDPSpawnPlugin, accelerator="ddp_cpu", num_processes=2)
@pytest.mark.parametrize("return_predictions", [None, False, True])
@pytest.mark.parametrize("precision", [32, 64])
def test_predict_return_predictions_cpu(return_predictions, precision, tmpdir):
"""
    Test that `trainer.predict` returns predictions on CPU when `return_predictions` is True or left as None.
"""
seed_everything(42)
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, precision=precision)
preds = trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=return_predictions)
if return_predictions or return_predictions is None:
assert len(preds) == 1
assert preds[0].shape == torch.Size([1, 2])
assert preds[0].dtype == (torch.float64 if precision == 64 else torch.float32)
@pytest.mark.parametrize(
["limit_train_batches", "global_step", "num_training_batches", "current_epoch", "should_train"],
[(0.2, 0, 0, 0, False), (0.5, 10, 2, 4, True)],
)
def test_disabled_training_for_insufficient_limit_train_batches(
tmpdir, limit_train_batches, global_step, num_training_batches, current_epoch, should_train
):
"""
Verify when `limit_train_batches` is float & between [0.0, 1.0] and
`int(self.num_training_batches * self.limit_train_batches) == 0`, the training loop is disabled.
"""
class CurrentModel(BoringModel):
training_step_invoked = False
training_epoch_end_invoked = False
def training_step(self, *args, **kwargs):
self.training_step_invoked = True
return super().training_step(*args, **kwargs)
def training_epoch_end(self, *args, **kwargs):
self.training_epoch_end_invoked = True
return super().training_epoch_end(*args, **kwargs)
dataset_len = 100
batch_size = 25
train = RandomDataset(32, length=dataset_len)
train_loader = DataLoader(train, batch_size=batch_size)
model = CurrentModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=5,
limit_train_batches=limit_train_batches,
)
trainer.fit(model, train_loader)
params_string = f"""`limit_train_batches={limit_train_batches}`, `dataset_len={dataset_len}`
& `batch_size={batch_size}` as
`num_training_batches={num_training_batches}`"""
if should_train:
error_string = f"should run with {params_string}"
else:
error_string = f"should not run with {params_string}"
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == global_step
assert trainer.num_training_batches == num_training_batches
assert trainer.current_epoch == current_epoch
assert model.training_step_invoked == should_train, f"`training_step` {error_string}"
assert model.training_epoch_end_invoked == should_train, f"`training_epoch_end` {error_string}"
@pytest.mark.parametrize(["max_steps", "max_epochs", "global_step"], [(10, 5, 10), (20, None, 20)])
def test_repeated_fit_calls_with_max_epochs_and_steps(tmpdir, max_steps, max_epochs, global_step):
"""
Ensure that the training loop is bound by `max_steps` and
`max_epochs` for repeated calls of `trainer.fit`, and
disabled if the limit is reached
"""
dataset_len = 200
batch_size = 10
train_data = DataLoader(RandomDataset(32, dataset_len), batch_size=batch_size)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=max_steps,
max_epochs=max_epochs,
)
trainer.fit(model, train_data)
assert trainer.global_step == global_step
trainer.fit(model, train_data)
assert trainer.global_step == global_step
def test_trainer_access_in_configure_optimizers(tmpdir):
"""
Verify that the configure optimizer function can reference the trainer.
"""
class TestModel(BoringModel):
def configure_optimizers(self):
assert self.trainer is not None, "Expect to have access to the trainer within `configure_optimizers`"
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model, train_data)
@RunIf(min_gpus=1)
def test_setup_hook_move_to_device_correctly(tmpdir):
"""
Verify that if a user defines a layer in the setup hook function, this is moved to the correct device.
"""
class TestModel(BoringModel):
def setup(self, stage: str) -> None:
self.new_layer = torch.nn.Linear(2, 2)
def training_step(self, batch, batch_idx):
output = self.layer(batch)
# will crash if not moved to correct device
output = self.new_layer(output)
loss = self.loss(batch, output)
return {"loss": loss}
# fake data
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
# model
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, gpus=1)
trainer.fit(model, train_data)
def test_train_loop_system(tmpdir):
"""
    Test that the following methods are called in this order in automatic optimization:
    1. optimizer.step (skipped while gradients are being accumulated)
    2. model.training_step
    3. optimizer.zero_grad (run on the first batch of a gradient accumulation window)
4. model.backward
Note that the order is NOT `training_step`->`zero_grad`->`backward`->`step`.
This is because `optimizer.step(closure)` calls `closure()` which then calls
the three remaining methods `training_step`, `zero_grad` and `backward` inside.
"""
called_methods = []
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=5,
limit_val_batches=1,
limit_test_batches=1,
progress_bar_refresh_rate=0,
)
class TestOptimizer(SGD):
def step(self, *args, **kwargs):
called_methods.append("step")
return super().step(*args, **kwargs)
def zero_grad(self, *args, **kwargs):
called_methods.append("zero_grad")
return super().zero_grad(*args, **kwargs)
class TestModel(BoringModel):
def configure_optimizers(self):
return TestOptimizer(self.parameters(), lr=0.1)
def training_step(self, *args, **kwargs):
called_methods.append("training_step")
return super().training_step(*args, **kwargs)
def backward(self, *args, **kwargs):
called_methods.append("backward")
return super().backward(*args, **kwargs)
model = TestModel()
trainer = Trainer(**trainer_options)
# No methods are called yet.
assert called_methods == []
trainer.fit(model)
assert called_methods == [
"step",
"training_step",
"zero_grad",
"backward",
] * trainer.limit_train_batches
called_methods.clear()
trainer = Trainer(**trainer_options, accumulate_grad_batches=3)
# No methods are called yet.
assert called_methods == []
trainer.fit(model)
assert called_methods == [
# 0
"training_step",
"zero_grad",
"backward",
# 1
"training_step",
"backward",
# 2
"step",
"training_step",
"backward",
# 3
"training_step",
"zero_grad",
"backward",
# 4
"step",
"training_step",
"backward",
]
def test_init_optimizers_resets_lightning_optimizers(tmpdir):
""" Test that the Trainer resets the `lightning_optimizers` list everytime new optimizers get initialized. """
def compare_optimizers():
assert trainer.lightning_optimizers[0].optimizer is trainer.optimizers[0]
model = BoringModel()
model.lr = 0.2
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
auto_lr_find=True,
)
trainer.tune(model)
compare_optimizers()
trainer.fit(model)
compare_optimizers()
trainer.fit_loop.max_epochs = 2 # simulate multiple fit calls
trainer.fit(model)
compare_optimizers()
def test_check_val_every_n_epoch_exception(tmpdir):
with pytest.raises(MisconfigurationException, match="should be an integer."):
Trainer(
default_root_dir=tmpdir,
max_epochs=1,
check_val_every_n_epoch=1.2,
)
def test_trainer_attach_data_pipeline_to_model(tmpdir):
class DataPipeline:
pass
class TestDataModule(LightningDataModule):
data_pipeline = DataPipeline()
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return DataLoader(RandomDataset(32, 64))
class TestCallback(Callback):
def on_fit_start(self, trainer, pl_module: LightningModule) -> None:
"""Called when fit begins"""
assert isinstance(pl_module.data_pipeline, DataPipeline)
model = BoringModel()
dm = TestDataModule()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[TestCallback()])
trainer.fit(model, datamodule=dm)
def test_exception_when_testing_or_validating_with_fast_dev_run(tmpdir):
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = BoringModel()
trainer.fit(model)
with pytest.raises(MisconfigurationException, match=r"\.validate\(\)` with `fast_dev_run=True"):
trainer.validate()
with pytest.raises(MisconfigurationException, match=r"\.test\(\)` with `fast_dev_run=True"):
trainer.test()
class TrainerStagesModel(BoringModel):
def on_train_start(self) -> None:
assert self.trainer.model.training
assert self.training
def on_validation_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
def on_test_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
def on_predict_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
@pytest.mark.parametrize(
'accelerator,num_processes', [(None, 1), pytest.param('ddp', 2, marks=RunIf(skip_windows=True))]
)
def test_model_in_correct_mode_during_stages(tmpdir, accelerator, num_processes):
model = TrainerStagesModel()
trainer = Trainer(default_root_dir=tmpdir, accelerator=accelerator, num_processes=num_processes, fast_dev_run=True)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model, model.val_dataloader())
class TestDummyModelForCheckpoint(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('x', loss)
def validation_epoch_end(self, outputs) -> None:
pass
@RunIf(skip_windows=True)
def test_fit_test_synchronization(tmpdir):
"""Test that the trainer synchronizes processes before returning control back to the caller. """
tutils.set_random_master_port()
model = TestDummyModelForCheckpoint()
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor='x', mode='min', save_top_k=1)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
accelerator='ddp_cpu',
num_processes=2,
callbacks=[checkpoint],
)
trainer.fit(model)
assert os.path.exists(checkpoint.best_model_path), f'Could not find checkpoint at rank {trainer.global_rank}'
trainer.test()
class CustomCallbackOnLoadCheckpoint(Callback):
def on_save_checkpoint(self, trainer, pl_module, checkpoint) -> dict:
return {"a": None}
def test_on_load_checkpoint_missing_callbacks(tmpdir):
""" Test a warning appears when callbacks in the checkpoint don't match callbacks provided when resuming. """
model = BoringModel()
chk = ModelCheckpoint(dirpath=tmpdir, save_last=True)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, callbacks=[chk, CustomCallbackOnLoadCheckpoint()])
trainer.fit(model)
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=5, resume_from_checkpoint=chk.last_model_path, progress_bar_refresh_rate=1
)
with pytest.warns(UserWarning, match="CustomCallbackOnLoadCheckpoint"):
trainer.fit(model)
def test_module_current_fx_attributes_reset(tmpdir):
""" Ensure that lightning module's attributes related to current fx are reset at the end of execution. """
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=1,
checkpoint_callback=False,
logger=False,
)
trainer.fit(model)
assert model._current_fx_name is None
assert model._current_dataloader_idx is None
trainer.test(model)
assert model._current_fx_name is None
assert model._current_dataloader_idx is None
def test_exception_when_lightning_module_is_not_set_on_trainer():
trainer = Trainer()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*validate"):
trainer.validate()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*test"):
trainer.test()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*predict"):
trainer.predict()
|
"""OData service implementation
Details regarding batch requests and changesets:
http://www.odata.org/documentation/odata-version-2-0/batch-processing/
"""
# pylint: disable=too-many-lines
import logging
from functools import partial
import json
import random
from email.parser import Parser
from http.client import HTTPResponse
from io import BytesIO
from urllib.parse import urlencode
from pyodata.exceptions import HttpError, PyODataException, ExpressionError, ProgramError
from . import model
LOGGER_NAME = 'pyodata.service'
HTTP_CODE_OK = 200
HTTP_CODE_CREATED = 201
def urljoin(*path):
"""Joins the passed string parts into a one string url"""
return '/'.join((part.strip('/') for part in path))
def encode_multipart(boundary, http_requests):
"""Encode list of requests into multipart body"""
lines = []
lines.append('')
for req in http_requests:
lines.append(f'--{boundary}')
if not isinstance(req, MultipartRequest):
lines.extend(('Content-Type: application/http ', 'Content-Transfer-Encoding:binary'))
lines.append('')
# request line (method + path + query params)
line = f'{req.get_method()} {req.get_path()}'
query_params = urlencode(req.get_query_params())
if query_params:
line += '?' + query_params
line += ' HTTP/1.1'
lines.append(line)
# request specific headers
for hdr, hdr_val in req.get_headers().items():
lines.append(f'{hdr}: {hdr_val}')
lines.append('')
body = req.get_body()
if body is not None:
lines.append(req.get_body())
else:
            # this is very important since SAP gateway rejects requests without this line. It seems
            # a blank line must be provided to represent an empty body, otherwise we get
            # 400 Bad format from SAP gateway
lines.append('')
lines.append(f'--{boundary}--')
return '\r\n'.join(lines)
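# A rough sketch of the body produced by encode_multipart() for a single plain
# request (the boundary, path and headers below are made-up values shown only
# to illustrate the wire format; blank separator lines are part of the output):
#
#   --batch_abc123
#   Content-Type: application/http
#   Content-Transfer-Encoding:binary
#
#   GET Employees(1)?$select=FirstName HTTP/1.1
#   Accept: application/json
#
#   --batch_abc123--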
def decode_multipart(data, content_type):
"""Decode parts of the multipart mime content"""
def decode(message):
"""Decode tree of messages for specific message"""
messages = []
        for part in message.walk():
if part.get_content_type() == 'multipart/mixed':
for submessage in part.get_payload():
messages.append(decode(submessage))
break
messages.append(part.get_payload())
return messages
data = f"Content-Type: {content_type}\n" + data
parser = Parser()
parsed = parser.parsestr(data)
decoded = decode(parsed)
return decoded
class ODataHttpResponse:
"""Representation of http response"""
def __init__(self, headers, status_code, content=None):
self.headers = headers
self.status_code = status_code
self.content = content
@staticmethod
def from_string(data):
"""Parse http response to status code, headers and body
Based on: https://stackoverflow.com/questions/24728088/python-parse-http-response-string
"""
class FakeSocket:
"""Fake socket to simulate received http response content"""
def __init__(self, response_str):
self._file = BytesIO(response_str.encode('utf-8'))
def makefile(self, *args, **kwargs):
"""Fake file that provides string content"""
# pylint: disable=unused-argument
return self._file
source = FakeSocket(data)
response = HTTPResponse(source)
response.begin()
response.length = response.fp.__sizeof__()
return ODataHttpResponse(
dict(response.getheaders()),
response.status,
response.read(len(data)) # the len here will give a 'big enough' value to read the whole content
)
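    # Usage sketch: parsing one part of a decoded batch response (the raw
    # response string below is a made-up example):
    #   raw = 'HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n{"d": {}}'
    #   part = ODataHttpResponse.from_string(raw)
    #   part.status_code   # 200
    #   part.json()        # {'d': {}}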
def json(self):
"""Return response as decoded json"""
# TODO: see implementation in python requests, our simple
# approach can bring issues with encoding
# https://github.com/requests/requests/blob/master/requests/models.py#L868
if self.content:
return json.loads(self.content.decode('utf-8'))
return None
class EntityKey:
"""An immutable entity-key, made up of either a single value (single)
or multiple key-value pairs (complex).
Every entity must have an entity-key. The entity-key must be unique
within the entity-set, and thus defines an entity's identity.
The string representation of an entity-key is wrapped with parentheses,
such as (2), ('foo') or (a=1,foo='bar').
Entity-keys are equal if their string representations are equal.
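    Example (a sketch only; ``employee_type`` and ``address_type`` stand for
    EntityType instances obtained from the service schema and are not defined
    in this module):
      EntityKey(employee_type, 23).to_key_string()                 # -> '(23)'
      EntityKey(address_type, ID=1, Type='home').to_key_string()   # -> "(ID=1,Type='home')"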
"""
TYPE_SINGLE = 0
TYPE_COMPLEX = 1
def __init__(self, entity_type, single_key=None, **args):
self._logger = logging.getLogger(LOGGER_NAME)
self._proprties = args
self._entity_type = entity_type
self._key = entity_type.key_proprties
# single key does not need property name
if single_key is not None:
# check that entity type key consists of exactly one property
if len(self._key) != 1:
raise PyODataException(('Key of entity type {} consists of multiple properties {} '
'and cannot be initialized by single value').format(
self._entity_type.name, ', '.join([prop.name for prop in self._key])))
# get single key property and format key string
key_prop = self._key[0]
args[key_prop.name] = single_key
self._type = EntityKey.TYPE_SINGLE
            self._logger.debug('Detected single property key, adding pair %s->%s to key properties',
                               key_prop.name, single_key)
else:
for key_prop in self._key:
if key_prop.name not in args:
raise PyODataException(f'Missing value for key property {key_prop.name}')
self._type = EntityKey.TYPE_COMPLEX
@property
def key_properties(self):
"""Key properties"""
return self._key
def to_key_string_without_parentheses(self):
"""Gets the string representation of the key without parentheses"""
if self._type == EntityKey.TYPE_SINGLE:
# first property is the key property
key_prop = self._key[0]
return key_prop.to_literal(self._proprties[key_prop.name])
key_pairs = []
for key_prop in self._key:
# if key_prop.name not in self.__dict__['_cache']:
# raise RuntimeError('Entity key is not complete, missing value of property: {0}'.format(key_prop.name))
key_pairs.append(
f'{key_prop.name}={key_prop.to_literal(self._proprties[key_prop.name])}')
return ','.join(key_pairs)
def to_key_string(self):
"""Gets the string representation of the key, including parentheses"""
return f'({self.to_key_string_without_parentheses()})'
def __repr__(self):
return self.to_key_string()
class ODataHttpRequest:
"""Deferred HTTP Request"""
def __init__(self, url, connection, handler, headers=None):
self._connection = connection
self._url = url
self._handler = handler
self._headers = headers or dict()
self._logger = logging.getLogger(LOGGER_NAME)
self._customs = {} # string -> string hash
self._next_url = None
@property
def handler(self):
"""Getter for handler"""
return self._handler
def get_path(self):
"""Get path of the HTTP request"""
# pylint: disable=no-self-use
return ''
def get_query_params(self):
"""Get query params"""
# pylint: disable=no-self-use
return dict(self._customs)
def get_method(self):
"""Get HTTP method"""
# pylint: disable=no-self-use
return 'GET'
def get_body(self):
"""Get HTTP body or None if not applicable"""
# pylint: disable=no-self-use
return None
def get_default_headers(self):
"""Get dict of Child specific HTTP headers"""
# pylint: disable=no-self-use
return dict()
def get_headers(self):
"""Get dict of HTTP headers which is union of return value
of the method get_default_headers() and the headers
added via the method add_headers() where the latter
headers have priority - same keys get value of the latter.
"""
headers = self.get_default_headers()
headers.update(self._headers)
return headers
def add_headers(self, value):
"""Add the give dictionary of HTTP headers to
HTTP request sent by this ODataHttpRequest instance.
"""
if not isinstance(value, dict):
raise TypeError(f"Headers must be of type 'dict' not {type(value)}")
self._headers.update(value)
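    # Usage sketch (``request`` is any ODataHttpRequest instance; the header
    # values are illustrative only):
    #   request.add_headers({'X-CSRF-Token': 'Fetch', 'Accept': 'application/json'})
    #   request.get_headers()  # defaults merged with, and overridden by, the added headers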
def execute(self):
"""Fetches HTTP response and returns processed result
Sends the query-request to the OData service, returning a client-side Enumerable for
subsequent in-memory operations.
Fetches HTTP response and returns processed result"""
if self._next_url:
url = self._next_url
else:
url = urljoin(self._url, self.get_path())
# pylint: disable=assignment-from-none
body = self.get_body()
headers = self.get_headers()
self._logger.debug('Send (execute) %s request to %s', self.get_method(), url)
self._logger.debug(' query params: %s', self.get_query_params())
self._logger.debug(' headers: %s', headers)
if body:
self._logger.debug(' body: %s', body)
params = urlencode(self.get_query_params())
response = self._connection.request(
self.get_method(), url, headers=headers, params=params, data=body)
self._logger.debug('Received response')
self._logger.debug(' url: %s', response.url)
self._logger.debug(' headers: %s', response.headers)
self._logger.debug(' status code: %d', response.status_code)
try:
self._logger.debug(' body: %s', response.content.decode('utf-8'))
except UnicodeDecodeError:
self._logger.debug(' body: <cannot be decoded>')
return self._handler(response)
def custom(self, name, value):
"""Adds a custom name-value pair."""
# returns QueryRequest
self._customs[name] = value
return self
class EntityGetRequest(ODataHttpRequest):
"""Used for GET operations of a single entity"""
def __init__(self, handler, entity_key, entity_set_proxy):
super(EntityGetRequest, self).__init__(entity_set_proxy.service.url, entity_set_proxy.service.connection,
handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._entity_key = entity_key
self._entity_set_proxy = entity_set_proxy
self._select = None
self._expand = None
self._logger.debug('New instance of EntityGetRequest for last segment: %s', self._entity_set_proxy.last_segment)
def nav(self, nav_property):
"""Navigates to given navigation property and returns the EntitySetProxy"""
return self._entity_set_proxy.nav(nav_property, self._entity_key)
def select(self, select):
"""Specifies a subset of properties to return.
@param select a comma-separated list of selection clauses
"""
self._select = select
return self
def expand(self, expand):
"""Specifies related entities to expand inline as part of the response.
@param expand a comma-separated list of navigation properties
"""
self._expand = expand
return self
def get_path(self):
return self._entity_set_proxy.last_segment + self._entity_key.to_key_string()
def get_default_headers(self):
return {'Accept': 'application/json'}
def get_query_params(self):
qparams = super(EntityGetRequest, self).get_query_params()
if self._select is not None:
qparams['$select'] = self._select
if self._expand is not None:
qparams['$expand'] = self._expand
return qparams
def get_value(self, connection=None):
"""Returns Value of Media EntityTypes also known as the $value URL suffix."""
if connection is None:
connection = self._connection
def stream_handler(response):
"""Returns $value from HTTP Response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for $value failed with status code {}'
.format(response.status_code), response)
return response
return ODataHttpRequest(
urljoin(self._url, self.get_path(), '/$value'),
connection,
stream_handler)
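# Usage sketch for EntityGetRequest (``service`` and the ``get_entity`` method
# of the entity set proxy are assumptions of this sketch and are not defined
# in this module):
#   employee = (service.entity_sets.Employees.get_entity(1)
#               .select('FirstName,LastName')
#               .expand('Addresses')
#               .execute())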
class NavEntityGetRequest(EntityGetRequest):
"""Used for GET operations of a single entity accessed via a Navigation property"""
def __init__(self, handler, master_key, entity_set_proxy, nav_property):
super(NavEntityGetRequest, self).__init__(handler, master_key, entity_set_proxy)
self._nav_property = nav_property
def get_path(self):
return f"{super(NavEntityGetRequest, self).get_path()}/{self._nav_property}"
class EntityCreateRequest(ODataHttpRequest):
"""Used for creating entities (POST operations of a single entity)
Call execute() to send the create-request to the OData service
and get the newly created entity."""
def __init__(self, url, connection, handler, entity_set, last_segment=None):
super(EntityCreateRequest, self).__init__(url, connection, handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._entity_set = entity_set
self._entity_type = entity_set.entity_type
if last_segment is None:
self._last_segment = self._entity_set.name
else:
self._last_segment = last_segment
self._values = {}
# get all properties declared by entity type
self._type_props = self._entity_type.proprties()
self._logger.debug('New instance of EntityCreateRequest for entity type: %s on path %s', self._entity_type.name,
self._last_segment)
def get_path(self):
return self._last_segment
def get_method(self):
# pylint: disable=no-self-use
return 'POST'
def _get_body(self):
"""Recursively builds a dictionary of values where some of the values
        might be other entities.
"""
body = {}
for key, val in self._values.items():
# The value is either an entity or a scalar
if isinstance(val, EntityProxy):
body[key] = val._get_body() # pylint: disable=protected-access
else:
body[key] = val
return body
def get_body(self):
return json.dumps(self._get_body())
def get_default_headers(self):
return {'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Requested-With': 'X'}
@staticmethod
def _build_values(entity_type, entity):
"""Recursively converts a dictionary of values where some of the values
        might be other entities (navigation properties) into the internal
representation.
"""
if isinstance(entity, list):
return [EntityCreateRequest._build_values(entity_type, item) for item in entity]
values = {}
for key, val in entity.items():
try:
val = entity_type.proprty(key).to_json(val)
except KeyError:
try:
nav_prop = entity_type.nav_proprty(key)
val = EntityCreateRequest._build_values(nav_prop.typ, val)
except KeyError:
raise PyODataException('Property {} is not declared in {} entity type'.format(
key, entity_type.name))
values[key] = val
return values
def set(self, **kwargs):
"""Set properties on the new entity."""
self._logger.info(kwargs)
# TODO: consider use of attset for setting properties
self._values = EntityCreateRequest._build_values(self._entity_type, kwargs)
return self
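# Usage sketch for EntityCreateRequest (``create_entity`` on the entity set
# proxy is an assumption of this sketch and is not defined in this module):
#   request = service.entity_sets.Employees.create_entity()
#   employee = request.set(FirstName='Tim', Addresses=[{'Suburb': 'Chatswood'}]).execute()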
class EntityDeleteRequest(ODataHttpRequest):
"""Used for deleting entity (DELETE operations on a single entity)"""
def __init__(self, url, connection, handler, entity_set, entity_key):
super(EntityDeleteRequest, self).__init__(url, connection, handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._entity_set = entity_set
self._entity_key = entity_key
self._logger.debug('New instance of EntityDeleteRequest for entity type: %s', entity_set.entity_type.name)
def get_path(self):
return self._entity_set.name + self._entity_key.to_key_string()
def get_method(self):
# pylint: disable=no-self-use
return 'DELETE'
class EntityModifyRequest(ODataHttpRequest):
"""Used for modyfing entities (UPDATE/MERGE operations on a single entity)
Call execute() to send the update-request to the OData service
and get the modified entity."""
ALLOWED_HTTP_METHODS = ['PATCH', 'PUT', 'MERGE']
def __init__(self, url, connection, handler, entity_set, entity_key, method="PATCH"):
super(EntityModifyRequest, self).__init__(url, connection, handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._entity_set = entity_set
self._entity_type = entity_set.entity_type
self._entity_key = entity_key
self._method = method.upper()
if self._method not in EntityModifyRequest.ALLOWED_HTTP_METHODS:
raise ValueError('The value "{}" is not on the list of allowed Entity Update HTTP Methods: {}'
.format(method, ', '.join(EntityModifyRequest.ALLOWED_HTTP_METHODS)))
self._values = {}
# get all properties declared by entity type
self._type_props = self._entity_type.proprties()
self._logger.debug('New instance of EntityModifyRequest for entity type: %s', self._entity_type.name)
def get_path(self):
return self._entity_set.name + self._entity_key.to_key_string()
def get_method(self):
# pylint: disable=no-self-use
return self._method
def get_body(self):
# pylint: disable=no-self-use
body = {}
for key, val in self._values.items():
body[key] = val
return json.dumps(body)
def get_default_headers(self):
return {'Accept': 'application/json', 'Content-Type': 'application/json'}
def set(self, **kwargs):
"""Set properties to be changed."""
self._logger.info(kwargs)
for key, val in kwargs.items():
try:
val = self._entity_type.proprty(key).to_json(val)
except KeyError:
raise PyODataException(
f'Property {key} is not declared in {self._entity_type.name} entity type')
self._values[key] = val
return self
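# Usage sketch for EntityModifyRequest (``update_entity`` on the entity set
# proxy is an assumption of this sketch and is not defined in this module):
#   service.entity_sets.Employees.update_entity(1).set(NickName='Timmy').execute()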
class QueryRequest(ODataHttpRequest):
"""INTERFACE A consumer-side query-request builder. Call execute() to issue the request."""
# pylint: disable=too-many-instance-attributes
def __init__(self, url, connection, handler, last_segment):
super(QueryRequest, self).__init__(url, connection, handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._count = None
self._inlinecount = None
self._top = None
self._skip = None
self._order_by = None
self._filter = None
self._select = None
self._expand = None
self._last_segment = last_segment
self._logger.debug('New instance of QueryRequest for last segment: %s', self._last_segment)
def count(self, inline=False):
"""Sets a flag to return the number of items. Can be inline with results or just the count."""
if inline:
self._inlinecount = True
else:
self._count = True
return self
def next_url(self, next_url):
"""
Sets URL which identifies the next partial set of entities from the originally identified complete set. Once
set, this URL takes precedence over all query parameters.
For details, see section "6. Representing Collections of Entries" on
https://www.odata.org/documentation/odata-version-2-0/json-format/
"""
self._next_url = next_url
return self
def expand(self, expand):
"""Sets the expand expressions."""
self._expand = expand
return self
def filter(self, filter_val):
"""Sets the filter expression."""
# returns QueryRequest
self._filter = filter_val
return self
# def nav(self, key_value, nav_property):
# """Navigates to a referenced collection using a collection-valued navigation property."""
# # returns QueryRequest
# raise NotImplementedError
def order_by(self, order_by):
"""Sets the ordering expressions."""
self._order_by = order_by
return self
def select(self, select):
"""Sets the selection clauses."""
self._select = select
return self
def skip(self, skip):
"""Sets the number of items to skip."""
self._skip = skip
return self
def top(self, top):
"""Sets the number of items to return."""
self._top = top
return self
def get_path(self):
if self._count:
return urljoin(self._last_segment, '/$count')
return self._last_segment
def get_default_headers(self):
if self._count:
return {}
return {
'Accept': 'application/json',
}
def get_query_params(self):
if self._next_url:
return {}
qparams = super(QueryRequest, self).get_query_params()
if self._top is not None:
qparams['$top'] = self._top
if self._skip is not None:
qparams['$skip'] = self._skip
if self._order_by is not None:
qparams['$orderby'] = self._order_by
if self._filter is not None:
qparams['$filter'] = self._filter
if self._select is not None:
qparams['$select'] = self._select
if self._expand is not None:
qparams['$expand'] = self._expand
if self._inlinecount:
qparams['$inlinecount'] = 'allpages'
return qparams
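# Usage sketch for QueryRequest (``get_entities`` on the entity set proxy is an
# assumption of this sketch and is not defined in this module):
#   employees = (service.entity_sets.Employees.get_entities()
#                .filter("startswith(FirstName, 'Tim') eq true")
#                .order_by('LastName asc')
#                .skip(10)
#                .top(5)
#                .execute())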
class FunctionRequest(QueryRequest):
"""Function import request (Service call)"""
def __init__(self, url, connection, handler, function_import):
super(FunctionRequest, self).__init__(url, connection, handler, function_import.name)
self._function_import = function_import
self._logger.debug('New instance of FunctionRequest for %s', self._function_import.name)
def parameter(self, name, value):
        """Sets the value of a parameter."""
# check if param is valid (is declared in metadata)
try:
param = self._function_import.get_parameter(name)
# add parameter as custom query argument
self.custom(param.name, param.to_literal(value))
except KeyError:
            raise PyODataException('Function import {0} does not have parameter {1}'
.format(self._function_import.name, name))
return self
def get_method(self):
return self._function_import.http_method
def get_default_headers(self):
return {
'Accept': 'application/json'
}
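# Usage sketch for FunctionRequest (``function_request`` is assumed to be built
# by the service proxy for a made-up function import; the parameter name and
# value are illustrative only):
#   result = function_request.parameter('ActiveOnly', True).execute()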
# pylint: disable=too-many-instance-attributes
class EntityProxy:
"""An immutable OData entity instance, consisting of an identity (an
entity-set and a unique entity-key within that set), properties (typed,
named values), and links (references to other entities).
"""
# pylint: disable=too-many-branches,too-many-nested-blocks,too-many-statements
def __init__(self, service, entity_set, entity_type, proprties=None, entity_key=None, etag=None):
self._logger = logging.getLogger(LOGGER_NAME)
self._service = service
self._entity_set = entity_set
self._entity_type = entity_type
self._key_props = entity_type.key_proprties
self._cache = dict()
self._entity_key = entity_key
self._etag = etag
self._logger.debug('New entity proxy instance of type %s from properties: %s', entity_type.name, proprties)
# cache values of individual properties if provided
if proprties is not None:
etag_body = proprties.get('__metadata', dict()).get('etag', None)
if etag is not None and etag_body is not None and etag_body != etag:
raise PyODataException('Etag from header does not match the Etag from response body')
if etag_body is not None:
self._etag = etag_body
# first, cache values of direct properties
for type_proprty in self._entity_type.proprties():
if type_proprty.name in proprties:
# Property value available
if proprties[type_proprty.name] is not None:
self._cache[type_proprty.name] = type_proprty.from_json(proprties[type_proprty.name])
continue
# Property value missing and user wants a type specific default value filled in
if not self._service.retain_null:
# null value is in literal form for now, convert it to python representation
self._cache[type_proprty.name] = type_proprty.from_literal(type_proprty.typ.null_value)
continue
# Property is nullable - save it as such
if type_proprty.nullable:
self._cache[type_proprty.name] = None
continue
raise PyODataException(f'Value of non-nullable Property {type_proprty.name} is null')
# then, assign all navigation properties
for prop in self._entity_type.nav_proprties:
if prop.name in proprties:
# entity type of navigation property
prop_etype = prop.to_role.entity_type
# cache value according to multiplicity
if prop.to_role.multiplicity in \
[model.EndRole.MULTIPLICITY_ONE,
model.EndRole.MULTIPLICITY_ZERO_OR_ONE]:
# cache None in case we receive nothing (null) instead of entity data
if proprties[prop.name] is None:
self._cache[prop.name] = None
else:
self._cache[prop.name] = EntityProxy(service, None, prop_etype, proprties[prop.name])
elif prop.to_role.multiplicity == model.EndRole.MULTIPLICITY_ZERO_OR_MORE:
# default value is empty array
self._cache[prop.name] = []
# if there are no entities available, received data consists of
# metadata properties only.
if 'results' in proprties[prop.name]:
# available entities are serialized in results array
for entity in proprties[prop.name]['results']:
self._cache[prop.name].append(EntityProxy(service, None, prop_etype, entity))
else:
raise PyODataException('Unknown multiplicity {0} of association role {1}'
.format(prop.to_role.multiplicity, prop.to_role.name))
# build entity key if not provided
if self._entity_key is None:
# try to build key from available property values
try:
# if key seems to be simple (consists of single property)
if len(self._key_props) == 1:
self._entity_key = EntityKey(entity_type, self._cache[self._key_props[0].name])
else:
# build complex key
self._entity_key = EntityKey(entity_type, **self._cache)
except KeyError:
pass
except PyODataException:
pass
def __repr__(self):
return self._entity_key.to_key_string()
def __getattr__(self, attr):
try:
return self._cache[attr]
except KeyError:
try:
value = self.get_proprty(attr).execute()
self._cache[attr] = value
return value
except KeyError as ex:
raise AttributeError('EntityType {0} does not have Property {1}: {2}'
.format(self._entity_type.name, attr, str(ex)))
def nav(self, nav_property):
"""Navigates to given navigation property and returns the EntitySetProxy"""
        # for now duplicated with a similar method in the entity set proxy class
try:
navigation_property = self._entity_type.nav_proprty(nav_property)
except KeyError:
raise PyODataException('Navigation property {} is not declared in {} entity type'.format(
nav_property, self._entity_type))
# Get entity set of navigation property
association_info = navigation_property.association_info
association_set = self._service.schema.association_set_by_association(
association_info.name,
association_info.namespace)
navigation_entity_set = None
for end in association_set.end_roles:
if association_set.end_by_entity_set(end.entity_set_name).role == navigation_property.to_role.role:
navigation_entity_set = self._service.schema.entity_set(end.entity_set_name, association_info.namespace)
if not navigation_entity_set:
raise PyODataException(f'No association set for role {navigation_property.to_role}')
roles = navigation_property.association.end_roles
if all((role.multiplicity != model.EndRole.MULTIPLICITY_ZERO_OR_MORE for role in roles)):
return NavEntityProxy(self, nav_property, navigation_entity_set.entity_type, {})
return EntitySetProxy(
self._service,
self._service.schema.entity_set(navigation_entity_set.name),
nav_property,
self._entity_set.name + self._entity_key.to_key_string())
def get_path(self):
"""Returns this entity's relative path - e.g. EntitySet(KEY)"""
return self._entity_set._name + self._entity_key.to_key_string() # pylint: disable=protected-access
def get_proprty(self, name, connection=None):
"""Returns value of the property"""
self._logger.info('Initiating property request for %s', name)
def proprty_get_handler(key, proprty, response):
"""Gets property value from HTTP Response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for Attribute {0} of Entity {1} failed with status code {2}'
.format(proprty.name, key, response.status_code), response)
data = response.json()['d']
return proprty.from_json(data[proprty.name])
path = urljoin(self.get_path(), name)
return self._service.http_get_odata(
path,
partial(proprty_get_handler, path, self._entity_type.proprty(name)),
connection=connection)
def get_value(self, connection=None):
"Returns $value of Stream entities"
def value_get_handler(key, response):
"""Gets property value from HTTP Response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for $value of Entity {0} failed with status code {1}'
.format(key, response.status_code), response)
return response
path = urljoin(self.get_path(), '/$value')
return self._service.http_get_odata(path,
partial(value_get_handler, self.entity_key),
connection=connection)
@property
def entity_set(self):
"""Entity set related to this entity"""
return self._entity_set
@property
def entity_key(self):
"""Key of entity"""
return self._entity_key
@property
def url(self):
"""URL of the real entity"""
service_url = self._service.url.rstrip('/')
entity_path = self.get_path()
return urljoin(service_url, entity_path)
@property
def etag(self):
"""ETag generated by service"""
return self._etag
def equals(self, other):
"""Returns true if the self and the other contains the same data"""
# pylint: disable=W0212
return self._cache == other._cache
class NavEntityProxy(EntityProxy):
"""Special case of an Entity access via 1 to 1 Navigation property"""
def __init__(self, parent_entity, prop_name, entity_type, entity):
# pylint: disable=protected-access
super(NavEntityProxy, self).__init__(parent_entity._service, parent_entity._entity_set, entity_type, entity)
self._parent_entity = parent_entity
self._prop_name = prop_name
def get_path(self):
"""Returns URL of the entity"""
return urljoin(self._parent_entity.get_path(), self._prop_name)
class GetEntitySetFilter:
"""Create filters for humans"""
def __init__(self, proprty):
self._proprty = proprty
@staticmethod
def build_expression(operator, operands):
"""Creates a expression by joining the operands with the operator"""
if len(operands) < 2:
raise ExpressionError('The $filter operator \'{}\' needs at least two operands'.format(operator))
return f"({" {} ".format(operator).join(operands)})"
@staticmethod
def and_(*operands):
"""Creates logical AND expression from the operands"""
return GetEntitySetFilter.build_expression('and', operands)
@staticmethod
def or_(*operands):
"""Creates logical OR expression from the operands"""
return GetEntitySetFilter.build_expression('or', operands)
@staticmethod
def format_filter(proprty, operator, value):
"""Creates a filter expression """
return f'{proprty.name} {operator} {proprty.to_literal(value)}'
def __eq__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'eq', value)
def __ne__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'ne', value)
def __lt__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'lt', value)
def __le__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'le', value)
def __ge__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'ge', value)
def __gt__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'gt', value)
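# Sketch: GetEntitySetFilter turns Python comparisons on model properties into
# raw $filter fragments (``first_name_prop`` and ``age_prop`` are hypothetical
# model property objects taken from an entity type):
#   expr = GetEntitySetFilter.and_(
#       GetEntitySetFilter(first_name_prop) == 'Tim',
#       GetEntitySetFilter(age_prop) > 30)
#   # -> "(FirstName eq 'Tim' and Age gt 30)"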
class FilterExpression:
"""A class representing named expression of OData $filter"""
def __init__(self, **kwargs):
self._expressions = kwargs
self._other = None
self._operator = None
@property
def expressions(self):
"""Get expressions where key is property name with the operator suffix
and value is the left hand side operand.
"""
return self._expressions.items()
@property
def other(self):
"""Get an instance of the other operand"""
return self._other
@property
def operator(self):
"""The other operand"""
return self._operator
def __or__(self, other):
if self._other is not None:
            raise RuntimeError('The FilterExpression is already initialized')
self._other = other
self._operator = "or"
return self
def __and__(self, other):
if self._other is not None:
            raise RuntimeError('The FilterExpression is already initialized')
self._other = other
self._operator = "and"
return self
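# Sketch: FilterExpression objects carry Django-style lookups and can be
# combined with ``|`` or ``&`` before being passed to GetEntitySetRequest.filter()
# (``request`` is a hypothetical GetEntitySetRequest instance):
#   request.filter(FilterExpression(FirstName='Tim') | FilterExpression(Age__gt=30))
#   # resulting $filter: "(FirstName eq 'Tim') or (Age gt 30)"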
class GetEntitySetFilterChainable:
"""
Example expressions
FirstName='Tim'
FirstName__contains='Tim'
Age__gt=56
Age__gte=6
Age__lt=78
Age__lte=90
Age__range=(5,9)
FirstName__in=['Tim', 'Bob', 'Sam']
FirstName__startswith='Tim'
FirstName__endswith='mothy'
Addresses__Suburb='Chatswood'
Addresses__Suburb__contains='wood'
"""
OPERATORS = [
'startswith',
'endswith',
'lt',
'lte',
'gt',
'gte',
'contains',
'range',
'in',
'length',
'eq'
]
def __init__(self, entity_type, filter_expressions, exprs):
self._entity_type = entity_type
self._filter_expressions = filter_expressions
self._expressions = exprs
@property
def expressions(self):
"""Get expressions as a list of tuples where the first item
is a property name with the operator suffix and the second item
        is the right-hand side value.
"""
return self._expressions.items()
def proprty_obj(self, name):
"""Returns a model property for a particular property"""
return self._entity_type.proprty(name)
def _decode_and_combine_filter_expression(self, filter_expression):
filter_expressions = [self._decode_expression(expr, val) for expr, val in filter_expression.expressions]
return self._combine_expressions(filter_expressions)
def _process_query_objects(self):
"""Processes FilterExpression objects to OData lookups"""
filter_expressions = []
for expr in self._filter_expressions:
lhs_expressions = self._decode_and_combine_filter_expression(expr)
if expr.other is not None:
rhs_expressions = self._decode_and_combine_filter_expression(expr.other)
filter_expressions.append(f'({lhs_expressions}) {expr.operator} ({rhs_expressions})')
else:
filter_expressions.append(lhs_expressions)
return filter_expressions
def _process_expressions(self):
filter_expressions = [self._decode_expression(expr, val) for expr, val in self.expressions]
filter_expressions.extend(self._process_query_objects())
return filter_expressions
def _decode_expression(self, expr, val):
field = None
# field_heirarchy = []
operator = 'eq'
exprs = expr.split('__')
for part in exprs:
if self._entity_type.has_proprty(part):
field = part
# field_heirarchy.append(part)
elif part in self.__class__.OPERATORS:
operator = part
else:
raise ValueError(f'"{part}" is not a valid property or operator')
# field = '/'.join(field_heirarchy)
# target_field = self.proprty_obj(field_heirarchy[-1])
expression = self._build_expression(field, operator, val)
return expression
# pylint: disable=no-self-use
def _combine_expressions(self, expressions):
return ' and '.join(expressions)
# pylint: disable=too-many-return-statements, too-many-branches
def _build_expression(self, field_name, operator, value):
target_field = self.proprty_obj(field_name)
if operator not in ['length', 'in', 'range']:
value = target_field.to_literal(value)
if operator == 'lt':
return f'{field_name} lt {value}'
if operator == 'lte':
return f'{field_name} le {value}'
if operator == 'gte':
return f'{field_name} ge {value}'
if operator == 'gt':
return f'{field_name} gt {value}'
if operator == 'startswith':
return f'startswith({field_name}, {value}) eq true'
if operator == 'endswith':
return f'endswith({field_name}, {value}) eq true'
if operator == 'length':
value = int(value)
return f'length({field_name}) eq {value}'
if operator in ['contains']:
return f'substringof({value}, {field_name}) eq true'
if operator == 'range':
if not isinstance(value, (tuple, list)):
raise TypeError(f'Range must be tuple or list not {type(value)}')
if len(value) != 2:
raise ValueError('Only two items can be passed in a range.')
low_bound = target_field.to_literal(value[0])
high_bound = target_field.to_literal(value[1])
return f'{field_name} ge {low_bound} and {field_name} le {high_bound}'
if operator == 'in':
literal_values = (f'{field_name} eq {target_field.to_literal(item)}' for item in value)
return ' or '.join(literal_values)
if operator == 'eq':
return f'{field_name} eq {value}'
raise ValueError(f'Invalid expression {operator}')
def __str__(self):
expressions = self._process_expressions()
result = self._combine_expressions(expressions)
return result
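# Illustrative sketch (not part of the library): str() on the chainable filter
# renders Django-style lookups as an OData $filter expression. `entity_type` is
# assumed to declare properties FirstName and Age; the exact literal rendering
# depends on the property types.
#
#   chain = GetEntitySetFilterChainable(entity_type, (), {'FirstName': 'Tim', 'Age__gt': 30})
#   str(chain)  # -> something like "FirstName eq 'Tim' and Age gt 30"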
class GetEntitySetRequest(QueryRequest):
"""GET on EntitySet"""
def __init__(self, url, connection, handler, last_segment, entity_type):
super(GetEntitySetRequest, self).__init__(url, connection, handler, last_segment)
self._entity_type = entity_type
def __getattr__(self, name):
proprty = self._entity_type.proprty(name)
return GetEntitySetFilter(proprty)
def _set_filter(self, filter_val):
filter_text = self._filter + ' and ' if self._filter else ''
filter_text += filter_val
self._filter = filter_text
def filter(self, *args, **kwargs):
if args and len(args) == 1 and isinstance(args[0], str):
self._filter = args[0]
else:
self._set_filter(str(GetEntitySetFilterChainable(self._entity_type, args, kwargs)))
return self
class ListWithTotalCount(list):
"""
A list with the additional properties total_count and next_url.
If next_url is set, use it to fetch the next batch of entities.
"""
def __init__(self, total_count, next_url):
super(ListWithTotalCount, self).__init__()
self._total_count = total_count
self._next_url = next_url
@property
def next_url(self):
"""
URL which identifies the next partial set of entities from the originally identified complete set. None if no
entities remaining.
"""
return self._next_url
@property
def total_count(self):
"""Count of all entities"""
if self._total_count is None:
raise ProgramError('The collection does not include Total Count '
'of items because the request was made without '
'specifying "count(inline=True)".')
return self._total_count
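# Illustrative paging sketch (not part of the library): total_count is populated
# only when the request was built with count(inline=True); next_url drives
# server-side paging. The entity set name is an assumed example.
#
#   request = service.entity_sets.Employees.get_entities().count(inline=True)
#   page = request.execute()
#   print(page.total_count)
#   while page.next_url:
#       page = service.entity_sets.Employees.get_entities().next_url(page.next_url).execute()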
class EntitySetProxy:
"""EntitySet Proxy"""
def __init__(self, service, entity_set, alias=None, parent_last_segment=None):
"""Creates new Entity Set object
@param alias in case the entity set is access via assossiation
@param parent_last_segment in case of association also parent key must be used
"""
self._service = service
self._entity_set = entity_set
self._alias = alias
if parent_last_segment is None:
self._parent_last_segment = ''
else:
if parent_last_segment.endswith('/'):
self._parent_last_segment = parent_last_segment
else:
self._parent_last_segment = parent_last_segment + '/'
self._name = entity_set.name
self._key = entity_set.entity_type.key_proprties
self._logger = logging.getLogger(LOGGER_NAME)
self._logger.debug('New entity set proxy instance for %s', self._name)
@property
def service(self):
"""Return service"""
return self._service
@property
def last_segment(self):
"""Return last segment of url"""
entity_set_name = self._alias if self._alias is not None else self._entity_set.name
return self._parent_last_segment + entity_set_name
def nav(self, nav_property, key):
"""Navigates to given navigation property and returns the EntitySetProxy"""
try:
navigation_property = self._entity_set.entity_type.nav_proprty(nav_property)
except KeyError:
raise PyODataException('Navigation property {} is not declared in {} entity type'.format(
nav_property, self._entity_set.entity_type))
# Get entity set of navigation property
association_info = navigation_property.association_info
association_set = self._service.schema.association_set_by_association(
association_info.name)
navigation_entity_set = None
for end in association_set.end_roles:
if association_set.end_by_entity_set(end.entity_set_name).role == navigation_property.to_role.role:
navigation_entity_set = self._service.schema.entity_set(end.entity_set_name)
if not navigation_entity_set:
raise PyODataException(
f'No association set for role {navigation_property.to_role} {association_set.end_roles}')
roles = navigation_property.association.end_roles
if all((role.multiplicity != model.EndRole.MULTIPLICITY_ZERO_OR_MORE for role in roles)):
return self._get_nav_entity(key, nav_property, navigation_entity_set)
return EntitySetProxy(
self._service,
navigation_entity_set,
nav_property,
self._entity_set.name + key.to_key_string())
def _get_nav_entity(self, master_key, nav_property, navigation_entity_set):
"""Get entity based on provided key of the master and Navigation property name"""
def get_entity_handler(parent, nav_property, navigation_entity_set, response):
"""Gets entity from HTTP response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for Entity {0} failed with status code {1}'
.format(self._name, response.status_code), response)
entity = response.json()['d']
return NavEntityProxy(parent, nav_property, navigation_entity_set.entity_type, entity)
self._logger.info(
'Getting the nav property %s of the entity %s for the key %s',
nav_property,
self._entity_set.entity_type.name,
master_key)
parent = EntityProxy(self._service, self, self._entity_set.entity_type, entity_key=master_key)
return NavEntityGetRequest(
partial(get_entity_handler, parent, nav_property, navigation_entity_set),
master_key,
self,
nav_property)
def get_entity(self, key=None, **args):
"""Get entity based on provided key properties"""
def get_entity_handler(response):
"""Gets entity from HTTP response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for Entity {0} failed with status code {1}'
.format(self._name, response.status_code), response)
entity = response.json()['d']
etag = response.headers.get('ETag', None)
return EntityProxy(self._service, self._entity_set, self._entity_set.entity_type, entity, etag=etag)
if key is not None and isinstance(key, EntityKey):
entity_key = key
else:
entity_key = EntityKey(self._entity_set.entity_type, key, **args)
self._logger.info('Getting entity %s for key %s and args %s', self._entity_set.entity_type.name, key, args)
return EntityGetRequest(get_entity_handler, entity_key, self)
def get_entities(self):
"""Get some, potentially all entities"""
def get_entities_handler(response):
"""Gets entity set from HTTP Response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for Entity Set {0} failed with status code {1}'
.format(self._name, response.status_code), response)
content = response.json()
if isinstance(content, int):
return content
entities = content['d']
total_count = None
next_url = None
if isinstance(entities, dict):
if '__count' in entities:
total_count = int(entities['__count'])
if '__next' in entities:
next_url = entities['__next']
entities = entities['results']
self._logger.info('Fetched %d entities', len(entities))
result = ListWithTotalCount(total_count, next_url)
for props in entities:
entity = EntityProxy(self._service, self._entity_set, self._entity_set.entity_type, props)
result.append(entity)
return result
entity_set_name = self._alias if self._alias is not None else self._entity_set.name
return GetEntitySetRequest(self._service.url, self._service.connection, get_entities_handler,
self._parent_last_segment + entity_set_name, self._entity_set.entity_type)
def create_entity(self, return_code=HTTP_CODE_CREATED):
"""Creates a new entity in the given entity-set."""
def create_entity_handler(response):
"""Gets newly created entity encoded in HTTP Response"""
if response.status_code != return_code:
raise HttpError('HTTP POST for Entity Set {0} failed with status code {1}'
.format(self._name, response.status_code), response)
entity_props = response.json()['d']
etag = response.headers.get('ETag', None)
return EntityProxy(self._service, self._entity_set, self._entity_set.entity_type, entity_props, etag=etag)
return EntityCreateRequest(self._service.url, self._service.connection, create_entity_handler, self._entity_set,
self.last_segment)
def update_entity(self, key=None, method=None, **kwargs):
"""Updates an existing entity in the given entity-set."""
def update_entity_handler(response):
"""Gets modified entity encoded in HTTP Response"""
if response.status_code != 204:
raise HttpError('HTTP modify request for Entity Set {} failed with status code {}'
.format(self._name, response.status_code), response)
if key is not None and isinstance(key, EntityKey):
entity_key = key
else:
entity_key = EntityKey(self._entity_set.entity_type, key, **kwargs)
self._logger.info('Updating entity %s for key %s and args %s', self._entity_set.entity_type.name, key, kwargs)
if method is None:
method = self._service.config['http']['update_method']
return EntityModifyRequest(self._service.url, self._service.connection, update_entity_handler, self._entity_set,
entity_key, method=method)
def delete_entity(self, key: EntityKey = None, **kwargs):
"""Delete the entity"""
def delete_entity_handler(response):
"""Check if entity deletion was successful"""
if response.status_code != 204:
raise HttpError(f'HTTP POST for Entity delete {self._name} '
f'failed with status code {response.status_code}',
response)
if key is not None and isinstance(key, EntityKey):
entity_key = key
else:
entity_key = EntityKey(self._entity_set.entity_type, key, **kwargs)
return EntityDeleteRequest(self._service.url, self._service.connection, delete_entity_handler, self._entity_set,
entity_key)
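# Illustrative CRUD sketch (not part of the library); `service` is assumed to be
# an already initialized Service instance and Employees/Name are example names.
#
#   proxy = service.entity_sets.Employees
#   employee = proxy.get_entity(1).execute()
#   created = proxy.create_entity().set(Name='New Employee').execute()
#   proxy.update_entity(created.entity_key).set(Name='Renamed').execute()
#   proxy.delete_entity(created.entity_key).execute()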
# pylint: disable=too-few-public-methods
class EntityContainer:
"""Set of EntitSet proxies"""
def __init__(self, service):
self._service = service
self._entity_sets = dict()
for entity_set in self._service.schema.entity_sets:
self._entity_sets[entity_set.name] = EntitySetProxy(self._service, entity_set)
def __getattr__(self, name):
try:
return self._entity_sets[name]
except KeyError:
raise AttributeError(
f"EntitySet {name} not defined in {",".join(list(self._entity_sets.keys()))}.")
class FunctionContainer:
"""Set of Function proxies
Calls server-side functions (also known as service operations).
"""
def __init__(self, service):
self._service = service
self._functions = dict()
for fimport in self._service.schema.function_imports:
self._functions[fimport.name] = fimport
def __getattr__(self, name):
if name not in self._functions:
raise AttributeError(
f"Function {name} not defined in {",".join(list(self._functions.keys()))}.")
fimport = self._service.schema.function_import(name)
def function_import_handler(fimport, response):
"""Get function call response from HTTP Response"""
if 300 <= response.status_code < 400:
raise HttpError(f'Function Import {fimport.name} requires Redirection which is not supported',
response)
if response.status_code == 401:
raise HttpError(f'Not authorized to call Function Import {fimport.name}',
response)
if response.status_code == 403:
raise HttpError(f'Missing privileges to call Function Import {fimport.name}',
response)
if response.status_code == 405:
raise HttpError(
f'Despite definition Function Import {fimport.name} does not support HTTP {fimport.http_method}',
response)
if 400 <= response.status_code < 500:
raise HttpError(
f'Function Import {fimport.name} call has failed with status code {response.status_code}',
response)
if response.status_code >= 500:
raise HttpError(f'Server has encountered an error while processing Function Import {fimport.name}',
response)
if fimport.return_type is None:
if response.status_code != 204:
logging.getLogger(LOGGER_NAME).warning(
'The No Return Function Import %s has replied with HTTP Status Code %d instead of 204',
fimport.name, response.status_code)
if response.text:
logging.getLogger(LOGGER_NAME).warning(
'The No Return Function Import %s has returned content:\n%s', fimport.name, response.text)
return None
if response.status_code != 200:
logging.getLogger(LOGGER_NAME).warning(
'The Function Import %s has replied with HTTP Status Code %d instead of 200',
fimport.name, response.status_code)
response_data = response.json()['d']
# 1. if return types is "entity type", return instance of appropriate entity proxy
if isinstance(fimport.return_type, model.EntityType):
entity_set = self._service.schema.entity_set(fimport.entity_set_name)
return EntityProxy(self._service, entity_set, fimport.return_type, response_data)
# 2. return raw data for all other return types (primitives, complex types encoded in dicts, etc.)
return response_data
return FunctionRequest(self._service.url, self._service.connection,
partial(function_import_handler, fimport), fimport)
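# Illustrative function import sketch (not part of the library); the function
# name and parameter below are assumed examples.
#
#   result = service.functions.GetEmployeeCount.parameter('Department', 'Sales').execute()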
class Service:
"""OData service"""
def __init__(self, url, schema, connection, config=None):
self._url = url
self._schema = schema
self._connection = connection
self._retain_null = config.retain_null if config else False
self._entity_container = EntityContainer(self)
self._function_container = FunctionContainer(self)
self._config = {'http': {'update_method': 'PATCH'}}
@property
def schema(self):
"""Parsed metadata"""
return self._schema
@property
def url(self):
"""Service url"""
return self._url
@property
def connection(self):
"""Service connection"""
return self._connection
@property
def retain_null(self):
"""Whether to respect null-ed values or to substitute them with type specific default values"""
return self._retain_null
@property
def entity_sets(self):
"""EntitySet proxy"""
return self._entity_container
@property
def functions(self):
"""Functions proxy"""
return self._function_container
@property
def config(self):
"""Service specific configuration"""
return self._config
def http_get(self, path, connection=None):
"""HTTP GET response for the passed path in the service"""
conn = connection
if conn is None:
conn = self._connection
return conn.get(urljoin(self._url, path))
def http_get_odata(self, path, handler, connection=None):
"""HTTP GET request proxy for the passed path in the service"""
conn = connection
if conn is None:
conn = self._connection
return ODataHttpRequest(
urljoin(self._url, path),
conn,
handler,
headers={'Accept': 'application/json'})
def create_batch(self, batch_id=None):
"""Create instance of OData batch request"""
def batch_handler(batch, parts):
"""Process parsed multipart request (parts)"""
logging.getLogger(LOGGER_NAME).debug('Batch handler called for batch %s', batch.id)
result = []
for part, req in zip(parts, batch.requests):
logging.getLogger(LOGGER_NAME).debug('Batch handler is processing part %s for request %s', part, req)
# if part represents multiple requests, don't parse the body and
# let the appropriate request instance process the parts
if isinstance(req, MultipartRequest):
result.append(req.handler(req, part))
else:
# part represents single request, we have to parse
# content (without checking Content type for binary/http)
response = ODataHttpResponse.from_string(part[0])
result.append(req.handler(response))
return result
return BatchRequest(self._url, self._connection, batch_handler, batch_id)
def create_changeset(self, changeset_id=None):
"""Create instance of OData changeset"""
def changeset_handler(changeset, parts):
"""Gets changeset response from HTTP response"""
logging.getLogger(LOGGER_NAME).debug('Changeset handler called for changeset %s', changeset.id)
result = []
# check if changeset response consists of parts, this is important
# to distinguish cases when server responds with single HTTP response
# for whole request
if not isinstance(parts[0], list):
# raise an error (even for successful status codes) since such a changeset response
# always means something went wrong on the server
response = ODataHttpResponse.from_string(parts[0])
raise HttpError('Changeset cannot be processed due to single response received, status code: {}'.format(
response.status_code), response)
for part, req in zip(parts, changeset.requests):
logging.getLogger(LOGGER_NAME).debug('Changeset handler is processing part %s for request %s', part,
req)
if isinstance(req, MultipartRequest):
raise PyODataException('Changeset cannot contain nested multipart content')
# part represents single request, we have to parse
# content (without checking Content type for binary/http)
response = ODataHttpResponse.from_string(part[0])
result.append(req.handler(response))
return result
return Changeset(self._url, self._connection, changeset_handler, changeset_id)
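# Illustrative batch/changeset sketch (not part of the library); entity set and
# property names are assumed examples.
#
#   batch = service.create_batch()
#   batch.add_request(service.entity_sets.Employees.get_entity(1))
#   changeset = service.create_changeset()
#   changeset.add_request(service.entity_sets.Employees.update_entity(1).set(Name='Renamed'))
#   batch.add_request(changeset)
#   responses = batch.execute()   # list of per-request results, in order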
class MultipartRequest(ODataHttpRequest):
"""HTTP Batch request"""
def __init__(self, url, connection, handler, request_id=None):
super(MultipartRequest, self).__init__(url, connection, partial(MultipartRequest.http_response_handler, self))
self.requests = []
self._handler_decoded = handler
# generate a random id of the form dddd_dddd_dddd
# pylint: disable=invalid-name
self.id = request_id if request_id is not None else '{}_{}_{}'.format(
random.randint(1000, 9999), random.randint(1000, 9999), random.randint(1000, 9999))
self._logger.debug('New multipart %s request initialized, id=%s', self.__class__.__name__, self.id)
@property
def handler(self):
return self._handler_decoded
def get_boundary(self):
"""Get boundary used for request parts"""
return self.id
def get_default_headers(self):
# pylint: disable=no-self-use
return {'Content-Type': f'multipart/mixed;boundary={self.get_boundary()}'}
def get_body(self):
return encode_multipart(self.get_boundary(), self.requests)
def add_request(self, request):
"""Add request to be sent in batch"""
self.requests.append(request)
self._logger.debug('New %s request added to multipart request %s', request.get_method(), self.id)
@staticmethod
def http_response_handler(request, response):
"""Process HTTP response to mutipart HTTP request"""
if response.status_code != 202: # 202 Accepted
raise HttpError('HTTP POST for multipart request {0} failed with status code {1}'
.format(request.id, response.status_code), response)
logging.getLogger(LOGGER_NAME).debug('Generic multipart http response request handler called')
# get list of all parts (headers + body)
decoded = decode_multipart(response.content.decode('utf-8'), response.headers['Content-Type'])
return request.handler(request, decoded)
class BatchRequest(MultipartRequest):
"""HTTP Batch request"""
def get_boundary(self):
return 'batch_' + self.id
def get_path(self):
# pylint: disable=no-self-use
return '$batch'
def get_method(self):
# pylint: disable=no-self-use
return 'POST'
class Changeset(MultipartRequest):
"""Representation of changeset (unsorted group of requests)"""
def get_boundary(self):
return 'changeset_' + self.id
"""OData service implementation
Details regarding batch requests and changesets:
http://www.odata.org/documentation/odata-version-2-0/batch-processing/
"""
# pylint: disable=too-many-lines
import logging
from functools import partial
import json
import random
from email.parser import Parser
from http.client import HTTPResponse
from io import BytesIO
from urllib.parse import urlencode
from pyodata.exceptions import HttpError, PyODataException, ExpressionError, ProgramError
from . import model
LOGGER_NAME = 'pyodata.service'
HTTP_CODE_OK = 200
HTTP_CODE_CREATED = 201
def urljoin(*path):
"""Joins the passed string parts into a one string url"""
return '/'.join((part.strip('/') for part in path))
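# Example (illustrative): urljoin normalizes stray slashes between parts, e.g.
#   urljoin('http://host/odata/', '/Employees(1)')  # -> 'http://host/odata/Employees(1)'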
def encode_multipart(boundary, http_requests):
"""Encode list of requests into multipart body"""
lines = []
lines.append('')
for req in http_requests:
lines.append(f'--{boundary}')
if not isinstance(req, MultipartRequest):
lines.extend(('Content-Type: application/http ', 'Content-Transfer-Encoding:binary'))
lines.append('')
# request line (method + path + query params)
line = f'{req.get_method()} {req.get_path()}'
query_params = urlencode(req.get_query_params())
if query_params:
line += '?' + query_params
line += ' HTTP/1.1'
lines.append(line)
# request specific headers
for hdr, hdr_val in req.get_headers().items():
lines.append(f'{hdr}: {hdr_val}')
lines.append('')
body = req.get_body()
if body is not None:
lines.append(req.get_body())
else:
# this is very important since SAP gateway rejects the request without this line. It seems
# a blank line must be provided as a representation of an empty body, otherwise we get
# 400 Bad format from SAP gateway
lines.append('')
lines.append(f'--{boundary}--')
return '\r\n'.join(lines)
def decode_multipart(data, content_type):
"""Decode parts of the multipart mime content"""
def decode(message):
"""Decode tree of messages for specific message"""
messages = []
for i, part in enumerate(message.walk()): # pylint: disable=unused-variable
if part.get_content_type() == 'multipart/mixed':
for submessage in part.get_payload():
messages.append(decode(submessage))
break
messages.append(part.get_payload())
return messages
data = f"Content-Type: {content_type}\n" + data
parser = Parser()
parsed = parser.parsestr(data)
decoded = decode(parsed)
return decoded
class ODataHttpResponse:
"""Representation of http response"""
def __init__(self, headers, status_code, content=None):
self.headers = headers
self.status_code = status_code
self.content = content
@staticmethod
def from_string(data):
"""Parse http response to status code, headers and body
Based on: https://stackoverflow.com/questions/24728088/python-parse-http-response-string
"""
class FakeSocket:
"""Fake socket to simulate received http response content"""
def __init__(self, response_str):
self._file = BytesIO(response_str.encode('utf-8'))
def makefile(self, *args, **kwargs):
"""Fake file that provides string content"""
# pylint: disable=unused-argument
return self._file
source = FakeSocket(data)
response = HTTPResponse(source)
response.begin()
response.length = response.fp.__sizeof__()
return ODataHttpResponse(
dict(response.getheaders()),
response.status,
response.read(len(data)) # the len here will give a 'big enough' value to read the whole content
)
def json(self):
"""Return response as decoded json"""
# TODO: see implementation in python requests, our simple
# approach can bring issues with encoding
# https://github.com/requests/requests/blob/master/requests/models.py#L868
if self.content:
return json.loads(self.content.decode('utf-8'))
return None
class EntityKey:
"""An immutable entity-key, made up of either a single value (single)
or multiple key-value pairs (complex).
Every entity must have an entity-key. The entity-key must be unique
within the entity-set, and thus defines an entity's identity.
The string representation of an entity-key is wrapped with parentheses,
such as (2), ('foo') or (a=1,foo='bar').
Entity-keys are equal if their string representations are equal.
"""
TYPE_SINGLE = 0
TYPE_COMPLEX = 1
def __init__(self, entity_type, single_key=None, **args):
self._logger = logging.getLogger(LOGGER_NAME)
self._proprties = args
self._entity_type = entity_type
self._key = entity_type.key_proprties
# single key does not need property name
if single_key is not None:
# check that entity type key consists of exactly one property
if len(self._key) != 1:
raise PyODataException(('Key of entity type {} consists of multiple properties {} '
'and cannot be initialized by single value').format(
self._entity_type.name, ', '.join([prop.name for prop in self._key])))
# get single key property and format key string
key_prop = self._key[0]
args[key_prop.name] = single_key
self._type = EntityKey.TYPE_SINGLE
self._logger.debug(('Detected single property key, adding pair %s->%s to key '
'properties'), key_prop.name, single_key)
else:
for key_prop in self._key:
if key_prop.name not in args:
raise PyODataException(f'Missing value for key property {key_prop.name}')
self._type = EntityKey.TYPE_COMPLEX
@property
def key_properties(self):
"""Key properties"""
return self._key
def to_key_string_without_parentheses(self):
"""Gets the string representation of the key without parentheses"""
if self._type == EntityKey.TYPE_SINGLE:
# first property is the key property
key_prop = self._key[0]
return key_prop.to_literal(self._proprties[key_prop.name])
key_pairs = []
for key_prop in self._key:
# if key_prop.name not in self.__dict__['_cache']:
# raise RuntimeError('Entity key is not complete, missing value of property: {0}'.format(key_prop.name))
key_pairs.append(
f'{key_prop.name}={key_prop.to_literal(self._proprties[key_prop.name])}')
return ','.join(key_pairs)
def to_key_string(self):
"""Gets the string representation of the key, including parentheses"""
return f'({self.to_key_string_without_parentheses()})'
def __repr__(self):
return self.to_key_string()
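# Illustrative sketch (not part of the library): single vs. complex keys, assuming
# a hypothetical entity type whose key is Id, respectively (Name, Language).
#
#   EntityKey(entity_type, 23).to_key_string()                         # -> e.g. '(23)'
#   EntityKey(entity_type, Name='Foo', Language='EN').to_key_string()
#   # -> e.g. "(Name='Foo',Language='EN')" (exact literals depend on the property types)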
class ODataHttpRequest:
"""Deferred HTTP Request"""
def __init__(self, url, connection, handler, headers=None):
self._connection = connection
self._url = url
self._handler = handler
self._headers = headers or dict()
self._logger = logging.getLogger(LOGGER_NAME)
self._customs = {} # string -> string hash
self._next_url = None
@property
def handler(self):
"""Getter for handler"""
return self._handler
def get_path(self):
"""Get path of the HTTP request"""
# pylint: disable=no-self-use
return ''
def get_query_params(self):
"""Get query params"""
# pylint: disable=no-self-use
return dict(self._customs)
def get_method(self):
"""Get HTTP method"""
# pylint: disable=no-self-use
return 'GET'
def get_body(self):
"""Get HTTP body or None if not applicable"""
# pylint: disable=no-self-use
return None
def get_default_headers(self):
"""Get dict of Child specific HTTP headers"""
# pylint: disable=no-self-use
return dict()
def get_headers(self):
"""Get dict of HTTP headers which is union of return value
of the method get_default_headers() and the headers
added via the method add_headers() where the latter
headers have priority - same keys get value of the latter.
"""
headers = self.get_default_headers()
headers.update(self._headers)
return headers
def add_headers(self, value):
"""Add the give dictionary of HTTP headers to
HTTP request sent by this ODataHttpRequest instance.
"""
if not isinstance(value, dict):
raise TypeError(f"Headers must be of type 'dict' not {type(value)}")
self._headers.update(value)
def execute(self):
"""Fetches HTTP response and returns processed result
Sends the query-request to the OData service, returning a client-side Enumerable for
subsequent in-memory operations.
Fetches HTTP response and returns processed result"""
if self._next_url:
url = self._next_url
else:
url = urljoin(self._url, self.get_path())
# pylint: disable=assignment-from-none
body = self.get_body()
headers = self.get_headers()
self._logger.debug('Send (execute) %s request to %s', self.get_method(), url)
self._logger.debug(' query params: %s', self.get_query_params())
self._logger.debug(' headers: %s', headers)
if body:
self._logger.debug(' body: %s', body)
params = urlencode(self.get_query_params())
response = self._connection.request(
self.get_method(), url, headers=headers, params=params, data=body)
self._logger.debug('Received response')
self._logger.debug(' url: %s', response.url)
self._logger.debug(' headers: %s', response.headers)
self._logger.debug(' status code: %d', response.status_code)
try:
self._logger.debug(' body: %s', response.content.decode('utf-8'))
except UnicodeDecodeError:
self._logger.debug(' body: <cannot be decoded>')
return self._handler(response)
def custom(self, name, value):
"""Adds a custom name-value pair."""
# returns QueryRequest
self._customs[name] = value
return self
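# Illustrative sketch (not part of the library): custom() adds extra query
# parameters and add_headers() extra HTTP headers to any deferred request;
# 'sap-client' and the header below are assumed examples.
#
#   request = service.entity_sets.Employees.get_entities()
#   request.custom('sap-client', '100').add_headers({'X-Correlation-ID': 'abc'})
#   employees = request.execute()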
class EntityGetRequest(ODataHttpRequest):
"""Used for GET operations of a single entity"""
def __init__(self, handler, entity_key, entity_set_proxy):
super(EntityGetRequest, self).__init__(entity_set_proxy.service.url, entity_set_proxy.service.connection,
handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._entity_key = entity_key
self._entity_set_proxy = entity_set_proxy
self._select = None
self._expand = None
self._logger.debug('New instance of EntityGetRequest for last segment: %s', self._entity_set_proxy.last_segment)
def nav(self, nav_property):
"""Navigates to given navigation property and returns the EntitySetProxy"""
return self._entity_set_proxy.nav(nav_property, self._entity_key)
def select(self, select):
"""Specifies a subset of properties to return.
@param select a comma-separated list of selection clauses
"""
self._select = select
return self
def expand(self, expand):
"""Specifies related entities to expand inline as part of the response.
@param expand a comma-separated list of navigation properties
"""
self._expand = expand
return self
def get_path(self):
return self._entity_set_proxy.last_segment + self._entity_key.to_key_string()
def get_default_headers(self):
return {'Accept': 'application/json'}
def get_query_params(self):
qparams = super(EntityGetRequest, self).get_query_params()
if self._select is not None:
qparams['$select'] = self._select
if self._expand is not None:
qparams['$expand'] = self._expand
return qparams
def get_value(self, connection=None):
"""Returns Value of Media EntityTypes also known as the $value URL suffix."""
if connection is None:
connection = self._connection
def stream_handler(response):
"""Returns $value from HTTP Response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for $value failed with status code {}'
.format(response.status_code), response)
return response
return ODataHttpRequest(
urljoin(self._url, self.get_path(), '/$value'),
connection,
stream_handler)
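# Illustrative sketch (not part of the library): projecting and expanding a
# single entity, plus fetching the raw $value of a media entity; the entity set
# and property names are assumed examples.
#
#   emp = (service.entity_sets.Employees.get_entity(1)
#          .select('FirstName,LastName').expand('Address').execute())
#   photo = service.entity_sets.EmployeePhotos.get_entity(1).get_value().execute()  # raw HTTP response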
class NavEntityGetRequest(EntityGetRequest):
"""Used for GET operations of a single entity accessed via a Navigation property"""
def __init__(self, handler, master_key, entity_set_proxy, nav_property):
super(NavEntityGetRequest, self).__init__(handler, master_key, entity_set_proxy)
self._nav_property = nav_property
def get_path(self):
return f"{super(NavEntityGetRequest, self).get_path()}/{self._nav_property}"
class EntityCreateRequest(ODataHttpRequest):
"""Used for creating entities (POST operations of a single entity)
Call execute() to send the create-request to the OData service
and get the newly created entity."""
def __init__(self, url, connection, handler, entity_set, last_segment=None):
super(EntityCreateRequest, self).__init__(url, connection, handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._entity_set = entity_set
self._entity_type = entity_set.entity_type
if last_segment is None:
self._last_segment = self._entity_set.name
else:
self._last_segment = last_segment
self._values = {}
# get all properties declared by entity type
self._type_props = self._entity_type.proprties()
self._logger.debug('New instance of EntityCreateRequest for entity type: %s on path %s', self._entity_type.name,
self._last_segment)
def get_path(self):
return self._last_segment
def get_method(self):
# pylint: disable=no-self-use
return 'POST'
def _get_body(self):
"""Recursively builds a dictionary of values where some of the values
might be other entities.
"""
body = {}
for key, val in self._values.items():
# The value is either an entity or a scalar
if isinstance(val, EntityProxy):
body[key] = val._get_body() # pylint: disable=protected-access
else:
body[key] = val
return body
def get_body(self):
return json.dumps(self._get_body())
def get_default_headers(self):
return {'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Requested-With': 'X'}
@staticmethod
def _build_values(entity_type, entity):
"""Recursively converts a dictionary of values where some of the values
might be other entities (navigation properties) into the internal
representation.
"""
if isinstance(entity, list):
return [EntityCreateRequest._build_values(entity_type, item) for item in entity]
values = {}
for key, val in entity.items():
try:
val = entity_type.proprty(key).to_json(val)
except KeyError:
try:
nav_prop = entity_type.nav_proprty(key)
val = EntityCreateRequest._build_values(nav_prop.typ, val)
except KeyError:
raise PyODataException('Property {} is not declared in {} entity type'.format(
key, entity_type.name))
values[key] = val
return values
def set(self, **kwargs):
"""Set properties on the new entity."""
self._logger.info(kwargs)
# TODO: consider use of attset for setting properties
self._values = EntityCreateRequest._build_values(self._entity_type, kwargs)
return self
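# Illustrative sketch (not part of the library): set() also accepts nested
# dictionaries (or lists of them) for navigation properties declared on the
# entity type; the names below are assumed examples.
#
#   request = service.entity_sets.Employees.create_entity()
#   request.set(Name='New Employee', Address={'City': 'London', 'Street': 'Baker St'})
#   created = request.execute()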
class EntityDeleteRequest(ODataHttpRequest):
"""Used for deleting entity (DELETE operations on a single entity)"""
def __init__(self, url, connection, handler, entity_set, entity_key):
super(EntityDeleteRequest, self).__init__(url, connection, handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._entity_set = entity_set
self._entity_key = entity_key
self._logger.debug('New instance of EntityDeleteRequest for entity type: %s', entity_set.entity_type.name)
def get_path(self):
return self._entity_set.name + self._entity_key.to_key_string()
def get_method(self):
# pylint: disable=no-self-use
return 'DELETE'
class EntityModifyRequest(ODataHttpRequest):
"""Used for modyfing entities (UPDATE/MERGE operations on a single entity)
Call execute() to send the update-request to the OData service
and get the modified entity."""
ALLOWED_HTTP_METHODS = ['PATCH', 'PUT', 'MERGE']
def __init__(self, url, connection, handler, entity_set, entity_key, method="PATCH"):
super(EntityModifyRequest, self).__init__(url, connection, handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._entity_set = entity_set
self._entity_type = entity_set.entity_type
self._entity_key = entity_key
self._method = method.upper()
if self._method not in EntityModifyRequest.ALLOWED_HTTP_METHODS:
raise ValueError('The value "{}" is not on the list of allowed Entity Update HTTP Methods: {}'
.format(method, ', '.join(EntityModifyRequest.ALLOWED_HTTP_METHODS)))
self._values = {}
# get all properties declared by entity type
self._type_props = self._entity_type.proprties()
self._logger.debug('New instance of EntityModifyRequest for entity type: %s', self._entity_type.name)
def get_path(self):
return self._entity_set.name + self._entity_key.to_key_string()
def get_method(self):
# pylint: disable=no-self-use
return self._method
def get_body(self):
# pylint: disable=no-self-use
body = {}
for key, val in self._values.items():
body[key] = val
return json.dumps(body)
def get_default_headers(self):
return {'Accept': 'application/json', 'Content-Type': 'application/json'}
def set(self, **kwargs):
"""Set properties to be changed."""
self._logger.info(kwargs)
for key, val in kwargs.items():
try:
val = self._entity_type.proprty(key).to_json(val)
except KeyError:
raise PyODataException(
f'Property {key} is not declared in {self._entity_type.name} entity type')
self._values[key] = val
return self
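# Illustrative sketch (not part of the library): the HTTP method defaults to the
# service configuration (PATCH) but can be overridden per request; names assumed.
#
#   service.entity_sets.Employees.update_entity(1, method='PUT').set(LastName='Smith').execute()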
class QueryRequest(ODataHttpRequest):
"""INTERFACE A consumer-side query-request builder. Call execute() to issue the request."""
# pylint: disable=too-many-instance-attributes
def __init__(self, url, connection, handler, last_segment):
super(QueryRequest, self).__init__(url, connection, handler)
self._logger = logging.getLogger(LOGGER_NAME)
self._count = None
self._inlinecount = None
self._top = None
self._skip = None
self._order_by = None
self._filter = None
self._select = None
self._expand = None
self._last_segment = last_segment
self._logger.debug('New instance of QueryRequest for last segment: %s', self._last_segment)
def count(self, inline=False):
"""Sets a flag to return the number of items. Can be inline with results or just the count."""
if inline:
self._inlinecount = True
else:
self._count = True
return self
def next_url(self, next_url):
"""
Sets URL which identifies the next partial set of entities from the originally identified complete set. Once
set, this URL takes precedence over all query parameters.
For details, see section "6. Representing Collections of Entries" on
https://www.odata.org/documentation/odata-version-2-0/json-format/
"""
self._next_url = next_url
return self
def expand(self, expand):
"""Sets the expand expressions."""
self._expand = expand
return self
def filter(self, filter_val):
"""Sets the filter expression."""
# returns QueryRequest
self._filter = filter_val
return self
# def nav(self, key_value, nav_property):
# """Navigates to a referenced collection using a collection-valued navigation property."""
# # returns QueryRequest
# raise NotImplementedError
def order_by(self, order_by):
"""Sets the ordering expressions."""
self._order_by = order_by
return self
def select(self, select):
"""Sets the selection clauses."""
self._select = select
return self
def skip(self, skip):
"""Sets the number of items to skip."""
self._skip = skip
return self
def top(self, top):
"""Sets the number of items to return."""
self._top = top
return self
def get_path(self):
if self._count:
return urljoin(self._last_segment, '/$count')
return self._last_segment
def get_default_headers(self):
if self._count:
return {}
return {
'Accept': 'application/json',
}
def get_query_params(self):
if self._next_url:
return {}
qparams = super(QueryRequest, self).get_query_params()
if self._top is not None:
qparams['$top'] = self._top
if self._skip is not None:
qparams['$skip'] = self._skip
if self._order_by is not None:
qparams['$orderby'] = self._order_by
if self._filter is not None:
qparams['$filter'] = self._filter
if self._select is not None:
qparams['$select'] = self._select
if self._expand is not None:
qparams['$expand'] = self._expand
if self._inlinecount:
qparams['$inlinecount'] = 'allpages'
return qparams
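# Illustrative sketch (not part of the library): client-side paging and ordering
# translate to the $skip, $top and $orderby query options; names assumed.
#
#   page = (service.entity_sets.Employees.get_entities()
#           .order_by('LastName desc').skip(20).top(10).execute())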
class FunctionRequest(QueryRequest):
"""Function import request (Service call)"""
def __init__(self, url, connection, handler, function_import):
super(FunctionRequest, self).__init__(url, connection, handler, function_import.name)
self._function_import = function_import
self._logger.debug('New instance of FunctionRequest for %s', self._function_import.name)
def parameter(self, name, value):
"""Sets the value of a parameter."""
# check if param is valid (is declared in metadata)
try:
param = self._function_import.get_parameter(name)
# add parameter as custom query argument
self.custom(param.name, param.to_literal(value))
except KeyError:
raise PyODataException('Function import {0} does not have parameter {1}'
.format(self._function_import.name, name))
return self
def get_method(self):
return self._function_import.http_method
def get_default_headers(self):
return {
'Accept': 'application/json'
}
# pylint: disable=too-many-instance-attributes
class EntityProxy:
"""An immutable OData entity instance, consisting of an identity (an
entity-set and a unique entity-key within that set), properties (typed,
named values), and links (references to other entities).
"""
# pylint: disable=too-many-branches,too-many-nested-blocks,too-many-statements
def __init__(self, service, entity_set, entity_type, proprties=None, entity_key=None, etag=None):
self._logger = logging.getLogger(LOGGER_NAME)
self._service = service
self._entity_set = entity_set
self._entity_type = entity_type
self._key_props = entity_type.key_proprties
self._cache = dict()
self._entity_key = entity_key
self._etag = etag
self._logger.debug('New entity proxy instance of type %s from properties: %s', entity_type.name, proprties)
# cache values of individual properties if provided
if proprties is not None:
etag_body = proprties.get('__metadata', dict()).get('etag', None)
if etag is not None and etag_body is not None and etag_body != etag:
raise PyODataException('Etag from header does not match the Etag from response body')
if etag_body is not None:
self._etag = etag_body
# first, cache values of direct properties
for type_proprty in self._entity_type.proprties():
if type_proprty.name in proprties:
# Property value available
if proprties[type_proprty.name] is not None:
self._cache[type_proprty.name] = type_proprty.from_json(proprties[type_proprty.name])
continue
# Property value missing and user wants a type specific default value filled in
if not self._service.retain_null:
# null value is in literal form for now, convert it to python representation
self._cache[type_proprty.name] = type_proprty.from_literal(type_proprty.typ.null_value)
continue
# Property is nullable - save it as such
if type_proprty.nullable:
self._cache[type_proprty.name] = None
continue
raise PyODataException(f'Value of non-nullable Property {type_proprty.name} is null')
# then, assign all navigation properties
for prop in self._entity_type.nav_proprties:
if prop.name in proprties:
# entity type of navigation property
prop_etype = prop.to_role.entity_type
# cache value according to multiplicity
if prop.to_role.multiplicity in \
[model.EndRole.MULTIPLICITY_ONE,
model.EndRole.MULTIPLICITY_ZERO_OR_ONE]:
# cache None in case we receive nothing (null) instead of entity data
if proprties[prop.name] is None:
self._cache[prop.name] = None
else:
self._cache[prop.name] = EntityProxy(service, None, prop_etype, proprties[prop.name])
elif prop.to_role.multiplicity == model.EndRole.MULTIPLICITY_ZERO_OR_MORE:
# default value is empty array
self._cache[prop.name] = []
# if there are no entities available, received data consists of
# metadata properties only.
if 'results' in proprties[prop.name]:
# available entities are serialized in results array
for entity in proprties[prop.name]['results']:
self._cache[prop.name].append(EntityProxy(service, None, prop_etype, entity))
else:
raise PyODataException('Unknown multiplicity {0} of association role {1}'
.format(prop.to_role.multiplicity, prop.to_role.name))
# build entity key if not provided
if self._entity_key is None:
# try to build key from available property values
try:
# if key seems to be simple (consists of single property)
if len(self._key_props) == 1:
self._entity_key = EntityKey(entity_type, self._cache[self._key_props[0].name])
else:
# build complex key
self._entity_key = EntityKey(entity_type, **self._cache)
except KeyError:
pass
except PyODataException:
pass
def __repr__(self):
return self._entity_key.to_key_string()
def __getattr__(self, attr):
try:
return self._cache[attr]
except KeyError:
try:
value = self.get_proprty(attr).execute()
self._cache[attr] = value
return value
except KeyError as ex:
raise AttributeError('EntityType {0} does not have Property {1}: {2}'
.format(self._entity_type.name, attr, str(ex)))
def nav(self, nav_property):
"""Navigates to given navigation property and returns the EntitySetProxy"""
# for now duplicated with a similar method in the entity set proxy class
try:
navigation_property = self._entity_type.nav_proprty(nav_property)
except KeyError:
raise PyODataException('Navigation property {} is not declared in {} entity type'.format(
nav_property, self._entity_type))
# Get entity set of navigation property
association_info = navigation_property.association_info
association_set = self._service.schema.association_set_by_association(
association_info.name,
association_info.namespace)
navigation_entity_set = None
for end in association_set.end_roles:
if association_set.end_by_entity_set(end.entity_set_name).role == navigation_property.to_role.role:
navigation_entity_set = self._service.schema.entity_set(end.entity_set_name, association_info.namespace)
if not navigation_entity_set:
raise PyODataException(f'No association set for role {navigation_property.to_role}')
roles = navigation_property.association.end_roles
if all((role.multiplicity != model.EndRole.MULTIPLICITY_ZERO_OR_MORE for role in roles)):
return NavEntityProxy(self, nav_property, navigation_entity_set.entity_type, {})
return EntitySetProxy(
self._service,
self._service.schema.entity_set(navigation_entity_set.name),
nav_property,
self._entity_set.name + self._entity_key.to_key_string())
def get_path(self):
"""Returns this entity's relative path - e.g. EntitySet(KEY)"""
return self._entity_set._name + self._entity_key.to_key_string() # pylint: disable=protected-access
def get_proprty(self, name, connection=None):
"""Returns value of the property"""
self._logger.info('Initiating property request for %s', name)
def proprty_get_handler(key, proprty, response):
"""Gets property value from HTTP Response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for Attribute {0} of Entity {1} failed with status code {2}'
.format(proprty.name, key, response.status_code), response)
data = response.json()['d']
return proprty.from_json(data[proprty.name])
path = urljoin(self.get_path(), name)
return self._service.http_get_odata(
path,
partial(proprty_get_handler, path, self._entity_type.proprty(name)),
connection=connection)
def get_value(self, connection=None):
"Returns $value of Stream entities"
def value_get_handler(key, response):
"""Gets property value from HTTP Response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for $value of Entity {0} failed with status code {1}'
.format(key, response.status_code), response)
return response
path = urljoin(self.get_path(), '/$value')
return self._service.http_get_odata(path,
partial(value_get_handler, self.entity_key),
connection=connection)
@property
def entity_set(self):
"""Entity set related to this entity"""
return self._entity_set
@property
def entity_key(self):
"""Key of entity"""
return self._entity_key
@property
def url(self):
"""URL of the real entity"""
service_url = self._service.url.rstrip('/')
entity_path = self.get_path()
return urljoin(service_url, entity_path)
@property
def etag(self):
"""ETag generated by service"""
return self._etag
def equals(self, other):
"""Returns true if the self and the other contains the same data"""
# pylint: disable=W0212
return self._cache == other._cache
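# Illustrative sketch (not part of the library): cached properties are exposed as
# attributes, missing ones are fetched lazily, and nav() follows associations;
# the names below are assumed examples.
#
#   emp = service.entity_sets.Employees.get_entity(1).execute()
#   print(emp.FirstName, emp.entity_key, emp.etag)
#   address = emp.nav('Address')   # NavEntityProxy or EntitySetProxy depending on multiplicity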
class NavEntityProxy(EntityProxy):
"""Special case of an Entity access via 1 to 1 Navigation property"""
def __init__(self, parent_entity, prop_name, entity_type, entity):
# pylint: disable=protected-access
super(NavEntityProxy, self).__init__(parent_entity._service, parent_entity._entity_set, entity_type, entity)
self._parent_entity = parent_entity
self._prop_name = prop_name
def get_path(self):
"""Returns URL of the entity"""
return urljoin(self._parent_entity.get_path(), self._prop_name)
class GetEntitySetFilter:
"""Create filters for humans"""
def __init__(self, proprty):
self._proprty = proprty
@staticmethod
def build_expression(operator, operands):
"""Creates a expression by joining the operands with the operator"""
if len(operands) < 2:
raise ExpressionError('The $filter operator \'{}\' needs at least two operands'.format(operator))
return f"({' {} '.format(operator).join(operands)})"
@staticmethod
def and_(*operands):
"""Creates logical AND expression from the operands"""
return GetEntitySetFilter.build_expression('and', operands)
@staticmethod
def or_(*operands):
"""Creates logical OR expression from the operands"""
return GetEntitySetFilter.build_expression('or', operands)
@staticmethod
def format_filter(proprty, operator, value):
"""Creates a filter expression """
return f'{proprty.name} {operator} {proprty.to_literal(value)}'
def __eq__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'eq', value)
def __ne__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'ne', value)
def __lt__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'lt', value)
def __le__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'le', value)
def __ge__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'ge', value)
def __gt__(self, value):
return GetEntitySetFilter.format_filter(self._proprty, 'gt', value)
class FilterExpression:
"""A class representing named expression of OData $filter"""
def __init__(self, **kwargs):
self._expressions = kwargs
self._other = None
self._operator = None
@property
def expressions(self):
"""Get expressions where key is property name with the operator suffix
and value is the left hand side operand.
"""
return self._expressions.items()
@property
def other(self):
"""Get an instance of the other operand"""
return self._other
@property
def operator(self):
"""The other operand"""
return self._operator
def __or__(self, other):
if self._other is not None:
raise RuntimeError('The FilterExpression is already initialized')
self._other = other
self._operator = "or"
return self
def __and__(self, other):
if self._other is not None:
raise RuntimeError('The FilterExpression is already initialized')
self._other = other
self._operator = "and"
return self
class GetEntitySetFilterChainable:
"""
Example expressions
FirstName='Tim'
FirstName__contains='Tim'
Age__gt=56
Age__gte=6
Age__lt=78
Age__lte=90
Age__range=(5,9)
FirstName__in=['Tim', 'Bob', 'Sam']
FirstName__startswith='Tim'
FirstName__endswith='mothy'
Addresses__Suburb='Chatswood'
Addresses__Suburb__contains='wood'
"""
OPERATORS = [
'startswith',
'endswith',
'lt',
'lte',
'gt',
'gte',
'contains',
'range',
'in',
'length',
'eq'
]
def __init__(self, entity_type, filter_expressions, exprs):
self._entity_type = entity_type
self._filter_expressions = filter_expressions
self._expressions = exprs
@property
def expressions(self):
"""Get expressions as a list of tuples where the first item
is a property name with the operator suffix and the second item
is a left hand side value.
"""
return self._expressions.items()
def proprty_obj(self, name):
"""Returns a model property for a particular property"""
return self._entity_type.proprty(name)
def _decode_and_combine_filter_expression(self, filter_expression):
filter_expressions = [self._decode_expression(expr, val) for expr, val in filter_expression.expressions]
return self._combine_expressions(filter_expressions)
def _process_query_objects(self):
"""Processes FilterExpression objects to OData lookups"""
filter_expressions = []
for expr in self._filter_expressions:
lhs_expressions = self._decode_and_combine_filter_expression(expr)
if expr.other is not None:
rhs_expressions = self._decode_and_combine_filter_expression(expr.other)
filter_expressions.append(f'({lhs_expressions}) {expr.operator} ({rhs_expressions})')
else:
filter_expressions.append(lhs_expressions)
return filter_expressions
def _process_expressions(self):
filter_expressions = [self._decode_expression(expr, val) for expr, val in self.expressions]
filter_expressions.extend(self._process_query_objects())
return filter_expressions
def _decode_expression(self, expr, val):
field = None
# field_heirarchy = []
operator = 'eq'
exprs = expr.split('__')
for part in exprs:
if self._entity_type.has_proprty(part):
field = part
# field_heirarchy.append(part)
elif part in self.__class__.OPERATORS:
operator = part
else:
raise ValueError(f'"{part}" is not a valid property or operator')
# field = '/'.join(field_heirarchy)
# target_field = self.proprty_obj(field_heirarchy[-1])
expression = self._build_expression(field, operator, val)
return expression
# pylint: disable=no-self-use
def _combine_expressions(self, expressions):
return ' and '.join(expressions)
# pylint: disable=too-many-return-statements, too-many-branches
def _build_expression(self, field_name, operator, value):
target_field = self.proprty_obj(field_name)
if operator not in ['length', 'in', 'range']:
value = target_field.to_literal(value)
if operator == 'lt':
return f'{field_name} lt {value}'
if operator == 'lte':
return f'{field_name} le {value}'
if operator == 'gte':
return f'{field_name} ge {value}'
if operator == 'gt':
return f'{field_name} gt {value}'
if operator == 'startswith':
return f'startswith({field_name}, {value}) eq true'
if operator == 'endswith':
return f'endswith({field_name}, {value}) eq true'
if operator == 'length':
value = int(value)
return f'length({field_name}) eq {value}'
if operator in ['contains']:
return f'substringof({value}, {field_name}) eq true'
if operator == 'range':
if not isinstance(value, (tuple, list)):
raise TypeError(f'Range must be tuple or list not {type(value)}')
if len(value) != 2:
raise ValueError('Only two items can be passed in a range.')
low_bound = target_field.to_literal(value[0])
high_bound = target_field.to_literal(value[1])
return f'{field_name} ge {low_bound} and {field_name} le {high_bound}'
if operator == 'in':
literal_values = (f'{field_name} eq {target_field.to_literal(item)}' for item in value)
return ' or '.join(literal_values)
if operator == 'eq':
return f'{field_name} eq {value}'
raise ValueError(f'Invalid expression {operator}')
def __str__(self):
expressions = self._process_expressions()
result = self._combine_expressions(expressions)
return result
class GetEntitySetRequest(QueryRequest):
"""GET on EntitySet"""
def __init__(self, url, connection, handler, last_segment, entity_type):
super(GetEntitySetRequest, self).__init__(url, connection, handler, last_segment)
self._entity_type = entity_type
def __getattr__(self, name):
proprty = self._entity_type.proprty(name)
return GetEntitySetFilter(proprty)
def _set_filter(self, filter_val):
filter_text = self._filter + ' and ' if self._filter else ''
filter_text += filter_val
self._filter = filter_text
def filter(self, *args, **kwargs):
if args and len(args) == 1 and isinstance(args[0], str):
self._filter = args[0]
else:
self._set_filter(str(GetEntitySetFilterChainable(self._entity_type, args, kwargs)))
return self
class ListWithTotalCount(list):
"""
A list with the additional properties total_count and next_url.
If next_url is set, use it to fetch the next batch of entities.
"""
def __init__(self, total_count, next_url):
super(ListWithTotalCount, self).__init__()
self._total_count = total_count
self._next_url = next_url
@property
def next_url(self):
"""
URL which identifies the next partial set of entities from the originally identified complete set. None if no
entities remaining.
"""
return self._next_url
@property
def total_count(self):
"""Count of all entities"""
if self._total_count is None:
raise ProgramError('The collection does not include Total Count '
'of items because the request was made without '
'specifying "count(inline=True)".')
return self._total_count
class EntitySetProxy:
"""EntitySet Proxy"""
def __init__(self, service, entity_set, alias=None, parent_last_segment=None):
"""Creates new Entity Set object
@param alias in case the entity set is access via assossiation
@param parent_last_segment in case of association also parent key must be used
"""
self._service = service
self._entity_set = entity_set
self._alias = alias
if parent_last_segment is None:
self._parent_last_segment = ''
else:
if parent_last_segment.endswith('/'):
self._parent_last_segment = parent_last_segment
else:
self._parent_last_segment = parent_last_segment + '/'
self._name = entity_set.name
self._key = entity_set.entity_type.key_proprties
self._logger = logging.getLogger(LOGGER_NAME)
self._logger.debug('New entity set proxy instance for %s', self._name)
@property
def service(self):
"""Return service"""
return self._service
@property
def last_segment(self):
"""Return last segment of url"""
entity_set_name = self._alias if self._alias is not None else self._entity_set.name
return self._parent_last_segment + entity_set_name
def nav(self, nav_property, key):
"""Navigates to given navigation property and returns the EntitySetProxy"""
try:
navigation_property = self._entity_set.entity_type.nav_proprty(nav_property)
except KeyError:
raise PyODataException('Navigation property {} is not declared in {} entity type'.format(
nav_property, self._entity_set.entity_type))
# Get entity set of navigation property
association_info = navigation_property.association_info
association_set = self._service.schema.association_set_by_association(
association_info.name)
navigation_entity_set = None
for end in association_set.end_roles:
if association_set.end_by_entity_set(end.entity_set_name).role == navigation_property.to_role.role:
navigation_entity_set = self._service.schema.entity_set(end.entity_set_name)
if not navigation_entity_set:
raise PyODataException(
f'No association set for role {navigation_property.to_role} {association_set.end_roles}')
roles = navigation_property.association.end_roles
if all((role.multiplicity != model.EndRole.MULTIPLICITY_ZERO_OR_MORE for role in roles)):
return self._get_nav_entity(key, nav_property, navigation_entity_set)
return EntitySetProxy(
self._service,
navigation_entity_set,
nav_property,
self._entity_set.name + key.to_key_string())
def _get_nav_entity(self, master_key, nav_property, navigation_entity_set):
"""Get entity based on provided key of the master and Navigation property name"""
def get_entity_handler(parent, nav_property, navigation_entity_set, response):
"""Gets entity from HTTP response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for Entity {0} failed with status code {1}'
.format(self._name, response.status_code), response)
entity = response.json()['d']
return NavEntityProxy(parent, nav_property, navigation_entity_set.entity_type, entity)
self._logger.info(
'Getting the nav property %s of the entity %s for the key %s',
nav_property,
self._entity_set.entity_type.name,
master_key)
parent = EntityProxy(self._service, self, self._entity_set.entity_type, entity_key=master_key)
return NavEntityGetRequest(
partial(get_entity_handler, parent, nav_property, navigation_entity_set),
master_key,
self,
nav_property)
def get_entity(self, key=None, **args):
"""Get entity based on provided key properties"""
def get_entity_handler(response):
"""Gets entity from HTTP response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for Entity {0} failed with status code {1}'
.format(self._name, response.status_code), response)
entity = response.json()['d']
etag = response.headers.get('ETag', None)
return EntityProxy(self._service, self._entity_set, self._entity_set.entity_type, entity, etag=etag)
if key is not None and isinstance(key, EntityKey):
entity_key = key
else:
entity_key = EntityKey(self._entity_set.entity_type, key, **args)
self._logger.info('Getting entity %s for key %s and args %s', self._entity_set.entity_type.name, key, args)
return EntityGetRequest(get_entity_handler, entity_key, self)
def get_entities(self):
"""Get some, potentially all entities"""
def get_entities_handler(response):
"""Gets entity set from HTTP Response"""
if response.status_code != HTTP_CODE_OK:
raise HttpError('HTTP GET for Entity Set {0} failed with status code {1}'
.format(self._name, response.status_code), response)
content = response.json()
if isinstance(content, int):
return content
entities = content['d']
total_count = None
next_url = None
if isinstance(entities, dict):
if '__count' in entities:
total_count = int(entities['__count'])
if '__next' in entities:
next_url = entities['__next']
entities = entities['results']
self._logger.info('Fetched %d entities', len(entities))
result = ListWithTotalCount(total_count, next_url)
for props in entities:
entity = EntityProxy(self._service, self._entity_set, self._entity_set.entity_type, props)
result.append(entity)
return result
entity_set_name = self._alias if self._alias is not None else self._entity_set.name
return GetEntitySetRequest(self._service.url, self._service.connection, get_entities_handler,
self._parent_last_segment + entity_set_name, self._entity_set.entity_type)
def create_entity(self, return_code=HTTP_CODE_CREATED):
"""Creates a new entity in the given entity-set."""
def create_entity_handler(response):
"""Gets newly created entity encoded in HTTP Response"""
if response.status_code != return_code:
raise HttpError('HTTP POST for Entity Set {0} failed with status code {1}'
.format(self._name, response.status_code), response)
entity_props = response.json()['d']
etag = response.headers.get('ETag', None)
return EntityProxy(self._service, self._entity_set, self._entity_set.entity_type, entity_props, etag=etag)
return EntityCreateRequest(self._service.url, self._service.connection, create_entity_handler, self._entity_set,
self.last_segment)
def update_entity(self, key=None, method=None, **kwargs):
"""Updates an existing entity in the given entity-set."""
def update_entity_handler(response):
"""Gets modified entity encoded in HTTP Response"""
if response.status_code != 204:
raise HttpError('HTTP modify request for Entity Set {} failed with status code {}'
.format(self._name, response.status_code), response)
if key is not None and isinstance(key, EntityKey):
entity_key = key
else:
entity_key = EntityKey(self._entity_set.entity_type, key, **kwargs)
self._logger.info('Updating entity %s for key %s and args %s', self._entity_set.entity_type.name, key, kwargs)
if method is None:
method = self._service.config['http']['update_method']
return EntityModifyRequest(self._service.url, self._service.connection, update_entity_handler, self._entity_set,
entity_key, method=method)
def delete_entity(self, key: EntityKey = None, **kwargs):
"""Delete the entity"""
def delete_entity_handler(response):
"""Check if entity deletion was successful"""
if response.status_code != 204:
raise HttpError(f'HTTP POST for Entity delete {self._name} '
f'failed with status code {response.status_code}',
response)
if key is not None and isinstance(key, EntityKey):
entity_key = key
else:
entity_key = EntityKey(self._entity_set.entity_type, key, **kwargs)
return EntityDeleteRequest(self._service.url, self._service.connection, delete_entity_handler, self._entity_set,
entity_key)
# pylint: disable=too-few-public-methods
class EntityContainer:
"""Set of EntitSet proxies"""
def __init__(self, service):
self._service = service
self._entity_sets = dict()
for entity_set in self._service.schema.entity_sets:
self._entity_sets[entity_set.name] = EntitySetProxy(self._service, entity_set)
def __getattr__(self, name):
try:
return self._entity_sets[name]
except KeyError:
raise AttributeError(
f"EntitySet {name} not defined in {','.join(list(self._entity_sets.keys()))}.")
class FunctionContainer:
"""Set of Function proxies
Call a server-side functions (also known as a service operation).
"""
def __init__(self, service):
self._service = service
self._functions = dict()
for fimport in self._service.schema.function_imports:
self._functions[fimport.name] = fimport
def __getattr__(self, name):
if name not in self._functions:
raise AttributeError(
f"Function {name} not defined in {','.join(list(self._functions.keys()))}.")
fimport = self._service.schema.function_import(name)
def function_import_handler(fimport, response):
"""Get function call response from HTTP Response"""
if 300 <= response.status_code < 400:
raise HttpError(f'Function Import {fimport.name} requires Redirection which is not supported',
response)
if response.status_code == 401:
raise HttpError(f'Not authorized to call Function Import {fimport.name}',
response)
if response.status_code == 403:
raise HttpError(f'Missing privileges to call Function Import {fimport.name}',
response)
if response.status_code == 405:
raise HttpError(
f'Despite definition Function Import {fimport.name} does not support HTTP {fimport.http_method}',
response)
if 400 <= response.status_code < 500:
raise HttpError(
f'Function Import {fimport.name} call has failed with status code {response.status_code}',
response)
if response.status_code >= 500:
raise HttpError(f'Server has encountered an error while processing Function Import {fimport.name}',
response)
if fimport.return_type is None:
if response.status_code != 204:
logging.getLogger(LOGGER_NAME).warning(
'The No Return Function Import %s has replied with HTTP Status Code %d instead of 204',
fimport.name, response.status_code)
if response.text:
logging.getLogger(LOGGER_NAME).warning(
'The No Return Function Import %s has returned content:\n%s', fimport.name, response.text)
return None
if response.status_code != 200:
logging.getLogger(LOGGER_NAME).warning(
'The Function Import %s has replied with HTTP Status Code %d instead of 200',
fimport.name, response.status_code)
response_data = response.json()['d']
# 1. if return types is "entity type", return instance of appropriate entity proxy
if isinstance(fimport.return_type, model.EntityType):
entity_set = self._service.schema.entity_set(fimport.entity_set_name)
return EntityProxy(self._service, entity_set, fimport.return_type, response_data)
# 2. return raw data for all other return types (primitives, complex types encoded in dicts, etc.)
return response_data
return FunctionRequest(self._service.url, self._service.connection,
partial(function_import_handler, fimport), fimport)
class Service:
"""OData service"""
def __init__(self, url, schema, connection, config=None):
self._url = url
self._schema = schema
self._connection = connection
self._retain_null = config.retain_null if config else False
self._entity_container = EntityContainer(self)
self._function_container = FunctionContainer(self)
self._config = {'http': {'update_method': 'PATCH'}}
@property
def schema(self):
"""Parsed metadata"""
return self._schema
@property
def url(self):
"""Service url"""
return self._url
@property
def connection(self):
"""Service connection"""
return self._connection
@property
def retain_null(self):
"""Whether to respect null-ed values or to substitute them with type specific default values"""
return self._retain_null
@property
def entity_sets(self):
"""EntitySet proxy"""
return self._entity_container
@property
def functions(self):
"""Functions proxy"""
return self._function_container
@property
def config(self):
"""Service specific configuration"""
return self._config
def http_get(self, path, connection=None):
"""HTTP GET response for the passed path in the service"""
conn = connection
if conn is None:
conn = self._connection
return conn.get(urljoin(self._url, path))
def http_get_odata(self, path, handler, connection=None):
"""HTTP GET request proxy for the passed path in the service"""
conn = connection
if conn is None:
conn = self._connection
return ODataHttpRequest(
urljoin(self._url, path),
conn,
handler,
headers={'Accept': 'application/json'})
def create_batch(self, batch_id=None):
"""Create instance of OData batch request"""
def batch_handler(batch, parts):
"""Process parsed multipart request (parts)"""
logging.getLogger(LOGGER_NAME).debug('Batch handler called for batch %s', batch.id)
result = []
for part, req in zip(parts, batch.requests):
logging.getLogger(LOGGER_NAME).debug('Batch handler is processing part %s for request %s', part, req)
# if part represents multiple requests, don't parse the body and
# let the appropriate request instance process the parts
if isinstance(req, MultipartRequest):
result.append(req.handler(req, part))
else:
# part represents single request, we have to parse
# content (without checking Content type for binary/http)
response = ODataHttpResponse.from_string(part[0])
result.append(req.handler(response))
return result
return BatchRequest(self._url, self._connection, batch_handler, batch_id)
def create_changeset(self, changeset_id=None):
"""Create instance of OData changeset"""
def changeset_handler(changeset, parts):
"""Gets changeset response from HTTP response"""
logging.getLogger(LOGGER_NAME).debug('Changeset handler called for changeset %s', changeset.id)
result = []
# check if changeset response consists of parts, this is important
# to distinguish cases when server responds with single HTTP response
# for whole request
if not isinstance(parts[0], list):
# raise an error (even for successful status codes) since such a changeset response
# always means something went wrong on the server
response = ODataHttpResponse.from_string(parts[0])
raise HttpError('Changeset cannot be processed due to single response received, status code: {}'.format(
response.status_code), response)
for part, req in zip(parts, changeset.requests):
logging.getLogger(LOGGER_NAME).debug('Changeset handler is processing part %s for request %s', part,
req)
if isinstance(req, MultipartRequest):
raise PyODataException('Changeset cannot contain nested multipart content')
# part represents single request, we have to parse
# content (without checking Content type for binary/http)
response = ODataHttpResponse.from_string(part[0])
result.append(req.handler(response))
return result
return Changeset(self._url, self._connection, changeset_handler, changeset_id)
class MultipartRequest(ODataHttpRequest):
"""HTTP Batch request"""
def __init__(self, url, connection, handler, request_id=None):
super(MultipartRequest, self).__init__(url, connection, partial(MultipartRequest.http_response_handler, self))
self.requests = []
self._handler_decoded = handler
# generate a random id of the form dddd_dddd_dddd
# pylint: disable=invalid-name
self.id = request_id if request_id is not None else '{}_{}_{}'.format(
random.randint(1000, 9999), random.randint(1000, 9999), random.randint(1000, 9999))
self._logger.debug('New multipart %s request initialized, id=%s', self.__class__.__name__, self.id)
@property
def handler(self):
return self._handler_decoded
def get_boundary(self):
"""Get boundary used for request parts"""
return self.id
def get_default_headers(self):
# pylint: disable=no-self-use
return {'Content-Type': f'multipart/mixed;boundary={self.get_boundary()}'}
def get_body(self):
return encode_multipart(self.get_boundary(), self.requests)
def add_request(self, request):
"""Add request to be sent in batch"""
self.requests.append(request)
self._logger.debug('New %s request added to multipart request %s', request.get_method(), self.id)
@staticmethod
def http_response_handler(request, response):
"""Process HTTP response to mutipart HTTP request"""
if response.status_code != 202: # 202 Accepted
raise HttpError('HTTP POST for multipart request {0} failed with status code {1}'
.format(request.id, response.status_code), response)
logging.getLogger(LOGGER_NAME).debug('Generic multipart http response request handler called')
# get list of all parts (headers + body)
decoded = decode_multipart(response.content.decode('utf-8'), response.headers['Content-Type'])
return request.handler(request, decoded)
class BatchRequest(MultipartRequest):
"""HTTP Batch request"""
def get_boundary(self):
return 'batch_' + self.id
def get_path(self):
# pylint: disable=no-self-use
return '$batch'
def get_method(self):
# pylint: disable=no-self-use
return 'POST'
class Changeset(MultipartRequest):
"""Representation of changeset (unsorted group of requests)"""
def get_boundary(self):
return 'changeset_' + self.id
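# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). It assumes the
# upstream pyodata.Client entry point wires a Service instance to the proxies
# defined here; the service URL, the 'Employees' entity set and the property
# names are placeholders for illustration only.
if __name__ == '__main__':
    import requests
    import pyodata

    SERVICE_URL = 'http://services.example.com/odata/'  # placeholder URL
    client = pyodata.Client(SERVICE_URL, requests.Session())

    # EntityContainer exposes one EntitySetProxy per entity set; get_entities()
    # returns a GetEntitySetRequest that supports chained filtering.
    employees = client.entity_sets.Employees.get_entities().filter("FirstName eq 'John'").execute()
    for employee in employees:
        print(employee.LastName)  # placeholder property name

    # FunctionContainer exposes one FunctionRequest per function import.
    # result = client.functions.GetFirstEmployee.execute()  # hypothetical function import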
|
from dataclasses import dataclass, field, asdict
import yaml
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
@dataclass
class ComposeFile:
version: str = "3.9"
services: dict[str, dict] = field(default_factory=dict)
volumes: dict[str, None] = field(default_factory=dict)
_use_env = False
def add_service(self, name, **kwargs):
if self._use_env:
kwargs["env_file"] = [".env"]
for volume in kwargs.get("volumes", []):
volume = volume.split(":", 0)
if volume[0].isalpha():
self.volumes[volume[0]] = None
self.services[name] = kwargs
def add_django(self, name="web", **kwargs):
self._django = name
defaults = {
"build": ".",
"image": name,
# "command": "sh -c 'python manage.py migrate && gunicorn debt_admin.wsgi:application --bind 0.0.0.0:8000'",
"command": "sh -c 'python manage.py migrate && python manage.py runserver 0.0.0.0:8000'",
"volumes": [".:/app"],
# "expose": [8000],
"ports": ["8000:8000"],
"depends_on": getattr(self, "_databases", []),
}
defaults.update(kwargs)
return self.add_service(name, **defaults)
def add_database(self, name, **kwargs):
setattr(self, "_databases", getattr(self, "_databases", []) + [name])
if hasattr(self, "_django"):
self._django["depends_on"].append(name)
return self.add_service(name, **kwargs)
def save(self, filename):
with open(filename, "w") as f:
f.write(yaml.dump(asdict(self), Dumper=yaml.dumper.Dumper))
def from_settings(self, settings):
db = settings["DATABASES"]["default"]
if "postgres" in db["ENGINE"]:
db = {
"image": "postgres",
"volumes": ["postgres:/var/lib/postgresql/data"],
"environment": [
f'POSTGRES_DB={db["NAME"]}',
f'POSTGRES_USER={db["USER"]}',
f'POSTGRES_PASSWORD={db["PASSWORD"]}',
],
}
self.add_database(settings["DATABASES"]["default"]["NAME"], **db)
if "redis" in settings.CACHES["default"]["BACKEND"].lower() or hasattr(
settings, "RQ_QUEUES"
):
self.add_database("redis", image="redis:alpine")
class Command(BaseCommand):
help = "Generate docker-compose files for django project"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.compose = {
"version": "3.9",
"services": {
"db": {
"image": "postgres",
"volumes": ["postgres:/var/lib/postgresql/data"],
"environment": [
"POSTGRES_DB=postgres",
"POSTGRES_USER=postgres",
"POSTGRES_PASSWORD=postgres",
],
},
"redis": {"image": "redis:alpine"},
"admin": {
"build": ".",
"image": "web",
"command": "sh -c 'python manage.py migrate && gunicorn debt_admin.wsgi:application --bind 0.0.0.0:8000'",
"volumes": [".:/app"],
"expose": [8000],
"depends_on": ["db", "redis"],
"env_file": [".env"],
},
"bot": {
"build": ".",
"env_file": [".env"],
"image": "web",
"command": "python manage.py bot",
"volumes": [".:/app"],
"depends_on": ["db", "redis"],
},
"worker": {
"build": ".",
"env_file": [".env"],
"image": "web",
"command": "python manage.py rqworker default",
"volumes": [".:/app", "static_volume:/app/staticfiles"],
"depends_on": ["db", "redis"],
},
"nginx": {
"build": "./nginx",
"volumes": ["static_volume:/app/staticfiles"],
"ports": ["80:80"],
"depends_on": ["admin"],
},
},
"volumes": {"postgres": None, "static_volume": None},
}
def add_arguments(self, parser):
parser.add_argument("key", help="name of the key to get", metavar="KEY")
def handle(self, *args, **options):
from bot import bot
bot.bot.polling()
|
from dataclasses import dataclass, field, asdict
import yaml
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
@dataclass
class ComposeFile:
version: str = "3.9"
services: dict[str, dict] = field(default_factory=dict)
volumes: dict[str, None] = field(default_factory=dict)
_use_env = False
def add_service(self, name, **kwargs):
if self._use_env:
kwargs["env_file"] = [".env"]
for volume in kwargs.get("volumes", []):
volume = volume.split(":", 0)
if volume[0].isalpha():
self.volumes[volume[0]] = None
self.services[name] = kwargs
def add_django(self, name="web", **kwargs):
self._django = name
defaults = {
"build": ".",
"image": name,
# "command": "sh -c 'python manage.py migrate && gunicorn debt_admin.wsgi:application --bind 0.0.0.0:8000'",
"command": "sh -c 'python manage.py migrate && python manage.py runserver 0.0.0.0:8000'",
"volumes": [".:/app"],
# "expose": [8000],
"ports": ["8000:8000"],
"depends_on": getattr(self, "_databases", []),
}
defaults.update(kwargs)
return self.add_service(name, **defaults)
def add_database(self, name, **kwargs):
setattr(self, "_databases", getattr(self, "_databases", []) + [name])
if hasattr(self, "_django"):
self._django["depends_on"].append(name)
return self.add_service(name, **kwargs)
def save(self, filename):
with open(filename, "w") as f:
f.write(yaml.dump(asdict(self), Dumper=yaml.dumper.Dumper))
def from_settings(self, settings):
db = settings["DATABASES"]["default"]
if "postgres" in db["ENGINE"]:
db = {
"image": "postgres",
"volumes": ["postgres:/var/lib/postgresql/data"],
"environment": [
f'POSTGRES_DB={db["NAME"]}',
f'POSTGRES_USER={db["USER"]}',
f'POSTGRES_PASSWORD={db["PASSWORD"]}',
],
}
self.add_database(settings["DATABASES"]["default"]["NAME"], **db)
if "redis" in settings.CACHES["default"]["BACKEND"].lower() or hasattr(
settings, "RQ_QUEUES"
):
self.add_database("redis", image="redis:alpine")
class Command(BaseCommand):
help = "Generate docker-compose files for django project"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.compose = {
"version": "3.9",
"services": {
"db": {
"image": "postgres",
"volumes": ["postgres:/var/lib/postgresql/data"],
"environment": [
"POSTGRES_DB=postgres",
"POSTGRES_USER=postgres",
"POSTGRES_PASSWORD=postgres",
],
},
"redis": {"image": "redis:alpine"},
"admin": {
"build": ".",
"image": "web",
"command": "sh -c 'python manage.py migrate && gunicorn debt_admin.wsgi:application --bind 0.0.0.0:8000'",
"volumes": [".:/app"],
"expose": [8000],
"depends_on": ["db", "redis"],
"env_file": [".env"],
},
"bot": {
"build": ".",
"env_file": [".env"],
"image": "web",
"command": "python manage.py bot",
"volumes": [".:/app"],
"depends_on": ["db", "redis"],
},
"worker": {
"build": ".",
"env_file": [".env"],
"image": "web",
"command": "python manage.py rqworker default",
"volumes": [".:/app", "static_volume:/app/staticfiles"],
"depends_on": ["db", "redis"],
},
"nginx": {
"build": "./nginx",
"volumes": ["static_volume:/app/staticfiles"],
"ports": ["80:80"],
"depends_on": ["admin"],
},
},
"volumes": {"postgres": None, "static_volume": None},
}
def add_arguments(self, parser):
parser.add_argument("key", help="name of the key to get", metavar="KEY")
def handle(self, *args, **options):
from bot import bot
bot.bot.polling()
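# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the management command above). It
# drives the ComposeFile dataclass directly; the service layout and the output
# filename are placeholders, not project conventions.
if __name__ == "__main__":
    compose = ComposeFile()
    compose.add_database(
        "db",
        image="postgres",
        volumes=["postgres:/var/lib/postgresql/data"],
        environment=[
            "POSTGRES_DB=postgres",
            "POSTGRES_USER=postgres",
            "POSTGRES_PASSWORD=postgres",
        ],
    )
    compose.add_django("web")  # picks up "db" via the _databases list
    compose.save("docker-compose.generated.yml")  # placeholder output path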
|
import discord
import os
import sys
import random
import sqlite3
from requests import get
from discord.ext.commands import Cog, command
from time import sleep
class Fun(Cog):
def __init__(self, bot):
self.bot = bot
@command(aliases=['dankmeme'])
async def meme(self, ctx):
await ctx.send("Memed you HAHAHAHA!")
@command(aliases=["8ball","ball"])
async def _8ball(self,ctx,*args):
responses = ["It is certain.",
"It is decidedly so.",
"Without a doubt.",
"Yes - definitely.",
"You may rely on it.",
"As I see it, yes.",
"Most likely.",
"Outlook good.",
"Yes.",
"Signs point to yes.",
"Reply hazy, try again.",
"Ask again later.",
"Better not tell you now.",
"Cannot predict now.",
"Concentrate and ask again.",
"Don't count on it.",
"My reply is no.",
"My sources say no.",
"Outlook not so good.",
"Very doubtful."
]
await ctx.send(random.choice(responses))
@command(aliases=['jokes'])
async def joke(self,ctx):
data = get("https://official-joke-api.appspot.com/random_joke")
rand_joke = data.json()
str = rand_joke
embed=discord.Embed(title="Random joke",color=random.randint(0,0xffffff))
embed.add_field(name=f"Category: {str["type"]}", value="\u200b", inline=False)
embed.add_field(name=f"Joke: {str["setup"]}", value=f"{str["punchline"]}", inline=True)
await ctx.send(embed=embed)
@command()
async def choose(self,ctx,*,choices):
choices = choices.split(" ")
choice = random.choice(choices).strip()
embed=discord.Embed(title="Choose command", color=random.randint(0, 0xffffff))
embed.add_field(name="Choices:", value=f"`{choices}`", inline=False)
embed.add_field(name="Choice:", value=f"`{choice}`", inline=True)
await ctx.send(embed=embed)
@command()
async def twans(self,ctx,*,arg):
def replaceMultiple(mainString, toBeReplaces, newString):
for elem in toBeReplaces :
if elem in mainString :
# Replace the string
mainString = mainString.replace(elem, newString)
return mainString
trans = replaceMultiple(arg, ['l', 'r'] , "w")
await ctx.send(trans)
def setup(bot):
bot.add_cog(Fun(bot))
|
import discord
import os
import sys
import random
import sqlite3
from requests import get
from discord.ext.commands import Cog, command
from time import sleep
class Fun(Cog):
def __init__(self, bot):
self.bot = bot
@command(aliases=['dankmeme'])
async def meme(self, ctx):
await ctx.send("Memed you HAHAHAHA!")
@command(aliases=["8ball","ball"])
async def _8ball(self,ctx,*args):
responses = ["It is certain.",
"It is decidedly so.",
"Without a doubt.",
"Yes - definitely.",
"You may rely on it.",
"As I see it, yes.",
"Most likely.",
"Outlook good.",
"Yes.",
"Signs point to yes.",
"Reply hazy, try again.",
"Ask again later.",
"Better not tell you now.",
"Cannot predict now.",
"Concentrate and ask again.",
"Don't count on it.",
"My reply is no.",
"My sources say no.",
"Outlook not so good.",
"Very doubtful."
]
await ctx.send(random.choice(responses))
@command(aliases=['jokes'])
async def joke(self,ctx):
data = get("https://official-joke-api.appspot.com/random_joke")
rand_joke = data.json()
str = rand_joke
embed=discord.Embed(title="Random joke",color=random.randint(0,0xffffff))
embed.add_field(name=f"Category: {str['type']}", value="\u200b", inline=False)
embed.add_field(name=f"Joke: {str['setup']}", value=f"{str['punchline']}", inline=True)
await ctx.send(embed=embed)
@command()
async def choose(self,ctx,*,choices):
choices = choices.split(" ")
choice = random.choice(choices).strip()
embed=discord.Embed(title="Choose command", color=random.randint(0, 0xffffff))
embed.add_field(name="Choices:", value=f"`{choices}`", inline=False)
embed.add_field(name="Choice:", value=f"`{choice}`", inline=True)
await ctx.send(embed=embed)
@command()
async def twans(self,ctx,*,arg):
def replaceMultiple(mainString, toBeReplaces, newString):
for elem in toBeReplaces :
if elem in mainString :
# Replace the string
mainString = mainString.replace(elem, newString)
return mainString
trans = replaceMultiple(arg, ['l', 'r'] , "w")
await ctx.send(trans)
def setup(bot):
bot.add_cog(Fun(bot))
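# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumes discord.py 1.x, where Bot.add_cog is a
# plain synchronous call, matching the setup() style above); the command
# prefix and token are placeholders.
if __name__ == "__main__":
    from discord.ext import commands

    bot = commands.Bot(command_prefix="!")
    bot.add_cog(Fun(bot))
    bot.run("YOUR_DISCORD_TOKEN")  # placeholder token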
|
from fsspec import AbstractFileSystem
from fsspec.callbacks import _DEFAULT_CALLBACK
import io
import natsort
import flywheel
class FlywheelFileSystem(AbstractFileSystem):
cachable = True
_cached = False
protocol = "flywheel"
async_impl = False
root_marker = "/"
def __init__(self, hostname, apikey, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hostname = hostname
self._client = flywheel.Client(f"{self._hostname}:{apikey.split(':')[-1]}")
def _strip_hostname(self, x):
x = self._strip_protocol(x)
if x.startswith(self._hostname):
return x[len(self._hostname):].lstrip(self.sep)
return x
def mkdir(self, path, create_parents=True, **kwargs):
raise NotImplementedError
def makedirs(self, path, exist_ok=False):
raise NotImplementedError
def rmdir(self, path):
raise NotImplementedError
def ls(self, path, detail=False, **kwargs):
path = self._strip_hostname(path).rstrip(self.sep).lstrip(self.root_marker)
if len(path.strip()) == 0:
node = None
items = [i.id for i in self._client.groups()]
else:
try:
parent, file = path.rsplit(self.sep, 1)
except ValueError:
parent = path
file = None
if file in ("analyses", "files"):
# List analyses and files if path ends on "/analyses" or "/files"
node = self._client.lookup(parent)
items = getattr(node, file)
else:
node = self._client.lookup(path)
items = getattr(node, node.child_types[0])
try:
items = items()
except TypeError:
pass
# List analysis files only on ".../analyses/name/files"
if node is not None and self._type(node) == "analysis" and not file == "files":
items = list(filter(lambda x: not self.isfile(x), items))
try:
items.sort(key=lambda x: x.timestamp)
except (AttributeError, TypeError):
items = natsort.natsorted(items, key=self._ls_name, alg=natsort.IGNORECASE)
# Add "analyses" and "files" entries if needed, to top of list, after sorting.
for field in ("analyses", "files")[::-1]:
try:
if len(getattr(node, field)) > 0 and not (len(getattr(node, field)) == 1 and getattr(node, field)[0] == "files") and file != field:
items.insert(0, field)
except AttributeError:
continue
paths = [self.root_marker + path + self.sep + i for i in map(self._ls_name, items)]
if not detail:
return paths
else:
items = list(map(self.info, items))
for i, n in zip(items, paths):
i["name"] = n
return items
def _ls_name(self, x):
if not isinstance(x, str) and self._type(x) == "group":
return x.id
for field in ("label", "name"):
try:
return getattr(x, field)
except AttributeError:
continue
return x
def walk(self, path, maxdepth=None, **kwargs):
full_dirs = {}
dirs = {}
files = {}
try:
detail = kwargs.pop("detail") or False
except KeyError:
detail = False
for item in self.ls(path, detail=True, **kwargs):
pathname = item["name"]
itemname = pathname.rstrip(self.sep).rsplit(self.sep, 1)[-1]
if not self.isfile(item) and pathname != path:
if itemname in ("analyses", "files"):
item = {}
full_dirs[pathname] = item
dirs[itemname] = item
elif pathname == path:
files[""] = item
else:
files[itemname] = item
if detail:
yield path, dirs, files
else:
yield path, list(dirs), list(files)
if maxdepth is not None:
maxdepth -= 1
if maxdepth < 1:
return
for d in full_dirs:
yield from self.walk(d, maxdepth=maxdepth, detail=detail, **kwargs)
def info(self, path, **kwargs):
out = {}
if not isinstance(path, str):
node = path
out["name"] = [self._ls_name(node)]
parent = node
while hasattr(parent, "parent"):
if self._type(parent) == "analysis":
out["name"].insert(0, "analyses")
elif self._type(parent) == "file":
out["name"].insert(0, "files")
parent = self._client.get(getattr(parent, "parent")["id"])
out["name"].insert(0, self._ls_name(parent))
try:
parents = parent["parents"]
for field in ("acquisition", "session", "subject", "project", "group"):
id = parents.get(field) or None
if id is not None:
out["name"].insert(0, self._ls_name(self._client.get(id)))
except KeyError:
pass
out["name"] = self.sep.join(out["name"])
out["type"] = self._type(node)
else:
out["name"] = self._strip_hostname(path).rstrip(self.sep)
out["type"] = self._type(out["name"])
node = self._client.lookup(out["name"])
out["size"] = self.size(node)
out["created"] = self.created(node)
out["modified"] = self.modified(node)
out["data"] = node
return out
def _type(self, path):
if self.isfile(path):
return "file"
if isinstance(path, str):
path = self._strip_hostname(path).rstrip(self.sep).split(self.sep)
if path[-1] in ("analyses", "files"):
return "directory"
if len(path) > 1 and path[-2] == "analyses":
return "analysis"
if len(path) == 1:
return "group"
elif len(path) == 2:
return "project"
elif len(path) == 3:
return "subject"
elif len(path) == 4:
return "session"
elif len(path) == 5:
return "acquisition"
else:
raise ValueError(f'Unknown type at path "{self.sep.join(path)}"')
else:
kind = str(type(path)).lower()
for i in ("group", "project", "subject", "session", "acquisition", "analysis"):
if i in kind:
return i
raise ValueError(f'Unknown type "{type(path)}".')
def size(self, path):
if not isinstance(path, str):
return path.get("size") or None
if path.rstrip(self.sep).rsplit(self.sep, 1)[-1] in ("analyses", "files"):
return None
return self.size(self.info(path))
def isdir(self, path):
return not self.isfile(path)
def isfile(self, path):
if not isinstance(path, str):
return "file" in str(type(path)).lower()
try:
return path.rstrip(self.sep).rsplit(self.sep, 2)[-2] == "files"
except IndexError:
return False
def cat_file(self, path, start=None, end=None, **kwargs):
raise NotImplementedError
def pipe_file(self, path, value, **kwargs):
raise NotImplementedError
def get_file(self, rpath, lpath, **kwargs):
_rpath, fname = rpath.rsplit(self.sep, 1)
info = self.info(_rpath)
while "files" not in info["data"]:
_rpath = _rpath.rsplit(self.sep, 1)[0]  # climb up one level until a container with files is found
info = self.info(_rpath)
info["data"].download_file(fname, lpath)
def get(self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs):
self.get_file(rpath, lpath, **kwargs)
def put_file(self, lpath, rpath, **kwargs):
raise NotImplementedError
def head(self, path, size=1024):
raise NotImplementedError
def tail(self, path, size=1024):
raise NotImplementedError
def cp_file(self, path1, path2, **kwargs):
raise NotImplementedError
def expand_path(self, path, recursive=False, maxdepth=None):
raise NotImplementedError
def rm_file(self, path):
raise NotImplementedError
@classmethod
def _parent(cls, path):
path = cls._strip_protocol(path.rstrip("/"))
if "/" in path:
parent = path.rsplit("/", 1)[0].lstrip(cls.root_marker)
return cls.root_marker + parent
else:
return cls.root_marker
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
# return AbstractBufferedFile(
# self,
# path,
# mode,
# block_size,
# autocommit,
# cache_options=cache_options,
# **kwargs,
# )
container, file = path.split("/files/")
container = self.info(container)
file = io.BytesIO(container["data"].read_file(file))
return io.BufferedRandom(file)
def open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
path = self._strip_hostname(path)
if "b" not in mode:
mode = mode.replace("t", "") + "b"
text_kwargs = {
k: kwargs.pop(k)
for k in ["encoding", "errors", "newline"]
if k in kwargs
}
return io.TextIOWrapper(
self.open(path, mode, block_size, **kwargs), **text_kwargs
)
else:
ac = kwargs.pop("autocommit", not self._intrans)
f = self._open(
path,
mode=mode,
block_size=block_size,
autocommit=ac,
cache_options=cache_options,
**kwargs,
)
if not ac and "r" not in mode:
self.transaction.files.append(f)
return f
def touch(self, path, truncate=True, **kwargs):
raise NotImplementedError
def created(self, path):
if not isinstance(path, str):
return path.get("created") or None
return self.info(path).get("created") or None
def modified(self, path):
if not isinstance(path, str):
return path.get("modified") or None
return self.info(path).get("modified") or None
|
from fsspec import AbstractFileSystem
from fsspec.callbacks import _DEFAULT_CALLBACK
import io
import natsort
import flywheel
class FlywheelFileSystem(AbstractFileSystem):
cachable = True
_cached = False
protocol = "flywheel"
async_impl = False
root_marker = "/"
def __init__(self, hostname, apikey, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hostname = hostname
self._client = flywheel.Client(f"{self._hostname}:{apikey.split(':')[-1]}")
def _strip_hostname(self, x):
x = self._strip_protocol(x)
if x.startswith(self._hostname):
return x[len(self._hostname):].lstrip(self.sep)
return x
def mkdir(self, path, create_parents=True, **kwargs):
raise NotImplementedError
def makedirs(self, path, exist_ok=False):
raise NotImplementedError
def rmdir(self, path):
raise NotImplementedError
def ls(self, path, detail=False, **kwargs):
path = self._strip_hostname(path).rstrip(self.sep).lstrip(self.root_marker)
if len(path.strip()) == 0:
node = None
items = [i.id for i in self._client.groups()]
else:
try:
parent, file = path.rsplit(self.sep, 1)
except ValueError:
parent = path
file = None
if file in ("analyses", "files"):
# List analyses and files if path ends on "/analyses" or "/files"
node = self._client.lookup(parent)
items = getattr(node, file)
else:
node = self._client.lookup(path)
items = getattr(node, node.child_types[0])
try:
items = items()
except TypeError:
pass
# List analysis files only on ".../analyses/name/files"
if node is not None and self._type(node) == "analysis" and not file == "files":
items = list(filter(lambda x: not self.isfile(x), items))
try:
items.sort(key=lambda x: x.timestamp)
except (AttributeError, TypeError):
items = natsort.natsorted(items, key=self._ls_name, alg=natsort.IGNORECASE)
# Add "analyses" and "files" entries if needed, to top of list, after sorting.
for field in ("analyses", "files")[::-1]:
try:
if len(getattr(node, field)) > 0 and not (len(getattr(node, field)) == 1 and getattr(node, field)[0] == "files") and file != field:
items.insert(0, field)
except AttributeError:
continue
paths = [self.root_marker + path + self.sep + i for i in map(self._ls_name, items)]
if not detail:
return paths
else:
items = list(map(self.info, items))
for i, n in zip(items, paths):
i["name"] = n
return items
def _ls_name(self, x):
if not isinstance(x, str) and self._type(x) == "group":
return x.id
for field in ("label", "name"):
try:
return getattr(x, field)
except AttributeError:
continue
return x
def walk(self, path, maxdepth=None, **kwargs):
full_dirs = {}
dirs = {}
files = {}
try:
detail = kwargs.pop("detail") or False
except KeyError:
detail = False
for item in self.ls(path, detail=True, **kwargs):
pathname = item["name"]
itemname = pathname.rstrip(self.sep).rsplit(self.sep, 1)[-1]
if not self.isfile(item) and pathname != path:
if itemname in ("analyses", "files"):
item = {}
full_dirs[pathname] = item
dirs[itemname] = item
elif pathname == path:
files[""] = item
else:
files[itemname] = item
if detail:
yield path, dirs, files
else:
yield path, list(dirs), list(files)
if maxdepth is not None:
maxdepth -= 1
if maxdepth < 1:
return
for d in full_dirs:
yield from self.walk(d, maxdepth=maxdepth, detail=detail, **kwargs)
def info(self, path, **kwargs):
out = {}
if not isinstance(path, str):
node = path
out["name"] = [self._ls_name(node)]
parent = node
while hasattr(parent, "parent"):
if self._type(parent) == "analysis":
out["name"].insert(0, "analyses")
elif self._type(parent) == "file":
out["name"].insert(0, "files")
parent = self._client.get(getattr(parent, "parent")["id"])
out["name"].insert(0, self._ls_name(parent))
try:
parents = parent["parents"]
for field in ("acquisition", "session", "subject", "project", "group"):
id = parents.get(field) or None
if id is not None:
out["name"].insert(0, self._ls_name(self._client.get(id)))
except KeyError:
pass
out["name"] = self.sep.join(out["name"])
out["type"] = self._type(node)
else:
out["name"] = self._strip_hostname(path).rstrip(self.sep)
out["type"] = self._type(out["name"])
node = self._client.lookup(out["name"])
out["size"] = self.size(node)
out["created"] = self.created(node)
out["modified"] = self.modified(node)
out["data"] = node
return out
def _type(self, path):
if self.isfile(path):
return "file"
if isinstance(path, str):
path = self._strip_hostname(path).rstrip(self.sep).split(self.sep)
if path[-1] in ("analyses", "files"):
return "directory"
if len(path) > 1 and path[-2] == "analyses":
return "analysis"
if len(path) == 1:
return "group"
elif len(path) == 2:
return "project"
elif len(path) == 3:
return "subject"
elif len(path) == 4:
return "session"
elif len(path) == 5:
return "acquisition"
else:
raise ValueError(f'Unknown type at path "{self.sep.join(path)}"')
else:
kind = str(type(path)).lower()
for i in ("group", "project", "subject", "session", "acquisition", "analysis"):
if i in kind:
return i
raise ValueError(f'Unknown type "{type(path)}".')
def size(self, path):
if not isinstance(path, str):
return path.get("size") or None
if path.rstrip(self.sep).rsplit(self.sep, 1)[-1] in ("analyses", "files"):
return None
return self.size(self.info(path))
def isdir(self, path):
return not self.isfile(path)
def isfile(self, path):
if not isinstance(path, str):
return "file" in str(type(path)).lower()
try:
return path.rstrip(self.sep).rsplit(self.sep, 2)[-2] == "files"
except IndexError:
return False
def cat_file(self, path, start=None, end=None, **kwargs):
raise NotImplementedError
def pipe_file(self, path, value, **kwargs):
raise NotImplementedError
def get_file(self, rpath, lpath, **kwargs):
_rpath, fname = rpath.rsplit(self.sep, 1)
info = self.info(_rpath)
while "files" not in info["data"]:
_rpath = _rpath.rsplit(self.sep, 1)[0]  # climb up one level until a container with files is found
info = self.info(_rpath)
info["data"].download_file(fname, lpath)
def get(self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs):
self.get_file(rpath, lpath, **kwargs)
def put_file(self, lpath, rpath, **kwargs):
raise NotImplementedError
def head(self, path, size=1024):
raise NotImplementedError
def tail(self, path, size=1024):
raise NotImplementedError
def cp_file(self, path1, path2, **kwargs):
raise NotImplementedError
def expand_path(self, path, recursive=False, maxdepth=None):
raise NotImplementedError
def rm_file(self, path):
raise NotImplementedError
@classmethod
def _parent(cls, path):
path = cls._strip_protocol(path.rstrip("/"))
if "/" in path:
parent = path.rsplit("/", 1)[0].lstrip(cls.root_marker)
return cls.root_marker + parent
else:
return cls.root_marker
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
# return AbstractBufferedFile(
# self,
# path,
# mode,
# block_size,
# autocommit,
# cache_options=cache_options,
# **kwargs,
# )
container, file = path.split("/files/")
container = self.info(container)
file = io.BytesIO(container["data"].read_file(file))
return io.BufferedRandom(file)
def open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
path = self._strip_hostname(path)
if "b" not in mode:
mode = mode.replace("t", "") + "b"
text_kwargs = {
k: kwargs.pop(k)
for k in ["encoding", "errors", "newline"]
if k in kwargs
}
return io.TextIOWrapper(
self.open(path, mode, block_size, **kwargs), **text_kwargs
)
else:
ac = kwargs.pop("autocommit", not self._intrans)
f = self._open(
path,
mode=mode,
block_size=block_size,
autocommit=ac,
cache_options=cache_options,
**kwargs,
)
if not ac and "r" not in mode:
self.transaction.files.append(f)
return f
def touch(self, path, truncate=True, **kwargs):
raise NotImplementedError
def created(self, path):
if not isinstance(path, str):
return path.get("created") or None
return self.info(path).get("created") or None
def modified(self, path):
if not isinstance(path, str):
return path.get("modified") or None
return self.info(path).get("modified") or None
|
"""Alembic generated code to run database migrations."""
from logging.config import fileConfig
from os import environ
from alembic import context
from sqlalchemy import engine_from_config, pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option("sqlalchemy.url", f"postgresql://{environ.get("OEO_DB_USER")}:{environ.get("OEO_DB_PASSWORD")}"
f"@{environ.get("OEO_DB_HOST")}:{environ.get("OEO_DB_PORT")}"
f"/{environ.get("OEO_DB_NAME")}")
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = None
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
"""Alembic generated code to run database migrations."""
from logging.config import fileConfig
from os import environ
from alembic import context
from sqlalchemy import engine_from_config, pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option("sqlalchemy.url", f"postgresql://{environ.get('OEO_DB_USER')}:{environ.get('OEO_DB_PASSWORD')}"
f"@{environ.get('OEO_DB_HOST')}:{environ.get('OEO_DB_PORT')}"
f"/{environ.get('OEO_DB_NAME')}")
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = None
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
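# ---------------------------------------------------------------------------
# Usage note (an assumption about the surrounding project; all values are
# placeholders): Alembic imports this env.py itself, so the OEO_DB_* variables
# read above must already be set in the environment of the process that runs
# the migration, e.g. programmatically:
#
#     import os
#     from alembic import command
#     from alembic.config import Config
#
#     os.environ["OEO_DB_USER"] = "postgres"      # placeholder credentials
#     os.environ["OEO_DB_PASSWORD"] = "postgres"
#     os.environ["OEO_DB_HOST"] = "localhost"
#     os.environ["OEO_DB_PORT"] = "5432"
#     os.environ["OEO_DB_NAME"] = "openeo"
#     command.upgrade(Config("alembic.ini"), "head")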
|
from datetime import datetime, timedelta
from typing import Dict
from discord import Embed
from discord.ext import commands
from .debug import generate_debug_embed
from .hyperion import (
currency_details,
hyperion_base_url,
hyperion_session,
resolve_account_id,
)
for name, id_ in [("Reoccuring Payout", "reoccuring-payout"), ("Gambling", "gamble")]:
hyperion_session.post(
f"{hyperion_base_url}/accounts",
json={"id": id_, "display_name": name, "system_account": True},
)
class Currency(commands.Cog):
"""Exposes basic Hyperion functionality via Discord."""
def __init__(self, bot: commands.Bot):
self.bot = bot
self.payouts: Dict[int, datetime] = {}  # keyed by Discord author id
@commands.command()
async def openaccount(self, ctx: commands.Context):
"""Open a new account."""
resp = hyperion_session.post(
f"{hyperion_base_url}/accounts",
json={
"id": ctx.author.id,
"display_name": ctx.author.name,
},
)
if resp.status_code == 409:
await ctx.reply(
"You already have an account, so you cannot open a new one."
)
return
await ctx.reply("Opened new account.")
@commands.command()
async def payout(self, ctx: commands.Context):
"""Receive a reoccuring payout."""
if (
ctx.author.id in self.payouts
and datetime.now() < self.payouts[ctx.author.id]
):
await ctx.reply(
"You've already received your scheduled payout - check back later!"
)
return
transaction_create_resp = hyperion_session.post(
f"{hyperion_base_url}/transactions",
json={
"source_account_id": "reoccuring-payout",
"dest_account_id": ctx.author.id,
"amount": 10,
},
)
if transaction_create_resp.status_code == 404:
await ctx.reply(
"You don't have an account! Use `hyp!openaccount` to create one first."
)
return
transaction_id = transaction_create_resp.json()["id"]
exec_resp = hyperion_session.post(
f"{hyperion_base_url}/transactions/{transaction_id}/execute"
)
exec_resp.raise_for_status()
await ctx.reply(
f"You've received a payout of 10 {currency_details["plural_form"]}."
)
self.payouts[ctx.author.id] = datetime.now() + timedelta(days=1)
@commands.command()
async def ledger(self, ctx: commands.Context):
"""Get a list of all transactions for this currency."""
transactions_resp = hyperion_session.get(f"{hyperion_base_url}/transactions")
resp_lines = []
for transaction in transactions_resp.json()[-10:]:
source_account = resolve_account_id(transaction["source_account_id"])
source_name = source_account["display_name"] or source_account["id"]
dest_account = resolve_account_id(transaction["dest_account_id"])
dest_name = dest_account["display_name"] or dest_account["id"]
status_message = transaction["state"].capitalize()
if transaction["state_reason"] is not None:
status_message += f" ({transaction["state_reason"]})"
resp_lines.append(
f"{source_name} -> {dest_name}: {transaction["amount"]} {currency_details["shortcode"]} - {status_message}"
)
resp = "\n".join(resp_lines)
await ctx.reply(f"```\n{resp}\n```")
|
from datetime import datetime, timedelta
from typing import Dict
from discord import Embed
from discord.ext import commands
from .debug import generate_debug_embed
from .hyperion import (
currency_details,
hyperion_base_url,
hyperion_session,
resolve_account_id,
)
for name, id_ in [("Reoccuring Payout", "reoccuring-payout"), ("Gambling", "gamble")]:
hyperion_session.post(
f"{hyperion_base_url}/accounts",
json={"id": id_, "display_name": name, "system_account": True},
)
class Currency(commands.Cog):
"""Exposes basic Hyperion functionality via Discord."""
def __init__(self, bot: commands.Bot):
self.bot = bot
self.payouts: Dict[int, datetime] = {}  # keyed by Discord author id
@commands.command()
async def openaccount(self, ctx: commands.Context):
"""Open a new account."""
resp = hyperion_session.post(
f"{hyperion_base_url}/accounts",
json={
"id": ctx.author.id,
"display_name": ctx.author.name,
},
)
if resp.status_code == 409:
await ctx.reply(
"You already have an account, so you cannot open a new one."
)
return
await ctx.reply("Opened new account.")
@commands.command()
async def payout(self, ctx: commands.Context):
"""Receive a reoccuring payout."""
if (
ctx.author.id in self.payouts
and datetime.now() < self.payouts[ctx.author.id]
):
await ctx.reply(
"You've already received your scheduled payout - check back later!"
)
return
transaction_create_resp = hyperion_session.post(
f"{hyperion_base_url}/transactions",
json={
"source_account_id": "reoccuring-payout",
"dest_account_id": ctx.author.id,
"amount": 10,
},
)
if transaction_create_resp.status_code == 404:
await ctx.reply(
"You don't have an account! Use `hyp!openaccount` to create one first."
)
return
transaction_id = transaction_create_resp.json()["id"]
exec_resp = hyperion_session.post(
f"{hyperion_base_url}/transactions/{transaction_id}/execute"
)
exec_resp.raise_for_status()
await ctx.reply(
f"You've received a payout of 10 {currency_details['plural_form']}."
)
self.payouts[ctx.author.id] = datetime.now() + timedelta(days=1)
@commands.command()
async def ledger(self, ctx: commands.Context):
"""Get a list of all transactions for this currency."""
transactions_resp = hyperion_session.get(f"{hyperion_base_url}/transactions")
resp_lines = []
for transaction in transactions_resp.json()[-10:]:
source_account = resolve_account_id(transaction["source_account_id"])
source_name = source_account["display_name"] or source_account["id"]
dest_account = resolve_account_id(transaction["dest_account_id"])
dest_name = dest_account["display_name"] or dest_account["id"]
status_message = transaction["state"].capitalize()
if transaction["state_reason"] is not None:
status_message += f" ({transaction['state_reason']})"
resp_lines.append(
f"{source_name} -> {dest_name}: {transaction['amount']} {currency_details['shortcode']} - {status_message}"
)
resp = "\n".join(resp_lines)
await ctx.reply(f"```\n{resp}\n```")
|
from logging import error
from dotenv import dotenv_values
from main import father
import json
import discord
config = dotenv_values(".env")
# SECRET_KEY = os.getenv("TOKEN")
SECRET_KEY = config["TOKEN"]
params = None
countries = None
# with open("./../../params.json", "r") as read_file:
with open("params.json", "r") as read_file:
params = json.load(read_file)
async def make_child():
details = {
"os": params["OS"],
"name": params["name"],
"region": params["region"],
"provider": params["provider"]
}
if params["provider"] == "DigitalOcean":
if params["Package"] == 1:
details["memory"] = "1"
details["processor"] = "1"
elif params["Package"] == 2:
details["memory"] = "2"
details["processor"] = "1"
elif params["Package"] == 3:
details["memory"] = "2"
details["processor"] = "2"
elif params["Package"] == 4:
details["memory"] = "4"
details["processor"] = "2"
elif params["Package"] == 5:
details["memory"] = "8"
details["processor"] = "4"
elif params["provider"] == "AWS":
if params["Package"] == 1:
details["memory"] = "1"
details["processor"] = "1"
print(details)
if await father(details):
return True
else:
return False
class MyClient(discord.Client):
help_message = """```To get started, enter ‘~create <InstanceName>’.
Certain necessary questions pop up which will help set up the necessary VM.
To stop the process at any stage please enter ‘~cancel’.
Follow the instructions prompted by the bot to finish the set-up.```"""
# 0 is normal mode
# 1 is create mode
# in create mode, the bot starts interrogating you
mode = 0
regions_string = "\n1. USA\n2. UK\n3. IN"
current_prompt = -1
OS_string = "\n1. Fedora\n2. Ubuntu 16"
packages_list = {
"DigitalOcean": [
"1. 1 CPU, 1 GB RAM, 25 GB SSD",
"2. 1 CPU, 2GB RAM, 50GB SSD",
"3. 2 CPU, 2GB RAM, 60GB SSD",
"4. 2 CPU, 4GB RAM, 80GB SSD",
"5. 4 CPU, 8GB RAM, 160GB SSD"
],
"AWS": [
"1. 1 CPU, 1 GB RAM [nano]",
]
}
async def find(self, queries, string):
for q in queries:
if q in string:
return True
return False
async def on_ready(self):
print(f"Logged on as {self.user}!")
async def send_error(self, message):
await message.channel.send("Sorry couldn't get that, please try again")
async def handle_provider(self, message):
success = False
if (await self.find(["google", "gcp", "3"], message.content.lower())):
params["provider"] = "Google Cloud Platform"
success = True
elif (await self.find(["amazon", "web", "services", "aws", "2"], message.content.lower())):
params["provider"] = "AWS"
success = True
elif (await self.find(["digital", "ocean", "1"], message.content.lower())):
params["provider"] = "DigitalOcean"
success = True
if success:
await message.channel.send(f"You have selected {params["provider"]} as your provider")
self.current_prompt = 1
await message.channel.send("Where would your VM like to live?" + self.regions_string)
return True
return False
async def handle_region(self, message):
success = False
if (await self.find(["us", "states", "unitedstates", "united states", "america", "1"], message.content.lower())):
if (params["provider"] == "DigitalOcean"):
params["region"] = "nyc3"
elif (params["provider"] == "AWS"):
params["region"] = "us-west-2"
success = True
elif (await self.find(["uk", "kingdom", "unitedkingdom", "united kingdom", "england", "britian", "2"], message.content.lower())):
if (params["provider"] == "DigitalOcean"):
params["region"] = "eu-west-2"
elif (params["provider"] == "AWS"):
params["region"] = "us-west-2"
success = True
elif (await self.find(["india", "in", "bharat", "3"], message.content.lower())):
if (params["provider"] == "DigitalOcean"):
params["region"] = "blr1"
elif (params["provider"] == "AWS"):
params["region"] = "ap-south-1"
success = True
if success:
await message.channel.send(f"You have selected {params["region"]} as your region")
self.current_prompt = 2
await message.channel.send("What OS would you like to use" + self.OS_string)
return True
async def handle_os(self, message):
success = False
if (await self.find(["ubuntu", "2"], message.content.lower())):
params["OS"] = "ubuntu-16-04-x64"
success = True
elif (await self.find(["fedora", "1"], message.content.lower())):
params["OS"] = "fedora-34-x64"
success = True
if success:
await message.channel.send(f"You have selected {params["OS"]} as your operating system")
self.current_prompt = 3
await message.channel.send("What package would you like to use?\n" + "\n".join(self.packages_list[params['provider']]))
return True
return False
async def handle_package(self, message):
success = False
try:
number = int(message.content.lower()[1:])
if params["provider"] == "DigitalOcean" and 0 < number <= 5:
success = True
elif params["provider"] == "AWS" and number == 1:
success = True
elif params["provider"] == "AWS" and number != 1:
await message.channel.send("We only support the micro package cause its free and we don't wanna rack up bills")
return 69
else:
await message.channel.send("Invalid package selected")
return 69
except:
await message.channel.send("Couldn't parse the package number, are you sure you entered a number (eg: ~55)")
return 70
if success:
params["Package"] = number
if(params["provider"] == 'AWS'):
await message.channel.send(f"You have selected package {self.packages_list["AWS"][number-1]}, seems like you have a lot of money")
else:
await message.channel.send(f"You have selected package {self.packages_list["DigitalOcean"][number-1]}, seems like you have a lot of money")
self.current_prompt = 4
await message.channel.send(f"Looks like things are done! Have a cup of coffee, your VM, {params["name"]}, will be ready in about a minute!")
self.mode = 0
if await make_child():
return True
else:
await message.channel.send("Sorry an error occured")
return 60
return False
async def create_mode(self, message):
if message.content == "~cancel":
await message.channel.send("All settings have been discarded, returning to normal mode")
self.mode = 0
return
if message.content.startswith("~create "):
await message.channel.send("You are already in create mode")
return
if self.current_prompt == 0:
if not await self.handle_provider(message):
await self.send_error(message)
return
elif self.current_prompt == 1:
if not await self.handle_region(message):
await self.send_error(message)
return
elif self.current_prompt == 2:
if not await self.handle_os(message):
await self.send_error(message)
return
elif self.current_prompt == 3:
if not await self.handle_package(message):
await self.send_error(message)
return
async def on_message(self, message):
if (message.author == self.user or not message.content.startswith("~")):
return
contents = message.content.split(" ") # split the message by spaces
# the first thing is the command
command = contents[0][1:] # discard the first character
if self.mode == 1:
await self.create_mode(message)
return
if (command == 'cancel'):
await message.channel.send("Too late!")
return
if (command != 'create'):
await message.channel.send(self.help_message)
return
if (command == 'create'):
try:
params["name"] = contents[1]
except IndexError:
params["name"] = "myVM"
self.mode = 1
first_message = f"""You will now be prompted with questions to select the specs for {params['name']}
Send ~cancel to stop anytime and discard the changes
Remember to prefix your replies with ~
Please select one of the following providers:\n1. DigitalOcean\n2. AWS\n3. GoogleCloudPlatform"""
await message.channel.send(first_message)
self.current_prompt = 0
client = MyClient()
client.run(SECRET_KEY)
|
from logging import error
from dotenv import dotenv_values
from main import father
import json
import discord
config = dotenv_values(".env")
# SECRET_KEY = os.getenv("TOKEN")
SECRET_KEY = config["TOKEN"]
params = None
countries = None
# with open("./../../params.json", "r") as read_file:
with open("params.json", "r") as read_file:
params = json.load(read_file)
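# Note (editorial, not from the repo): params.json is expected to already contain the
# keys this bot fills in below -- "provider", "region", "OS", "name", "Package".
# An illustrative file (placeholder values only) might look like:
#   {"provider": "", "region": "", "OS": "", "name": "", "Package": 0}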
async def make_child():
details = {
"os": params["OS"],
"name": params["name"],
"region": params["region"],
"provider": params["provider"]
}
if params["provider"] == "DigitalOcean":
if params["Package"] == 1:
details["memory"] = "1"
details["processor"] = "1"
elif params["Package"] == 2:
details["memory"] = "2"
details["processor"] = "1"
elif params["Package"] == 3:
details["memory"] = "2"
details["processor"] = "2"
elif params["Package"] == 4:
details["memory"] = "4"
details["processor"] = "2"
elif params["Package"] == 5:
details["memory"] = "8"
details["processor"] = "4"
elif params["provider"] == "AWS":
if params["Package"] == 1:
details["memory"] = "1"
details["processor"] = "1"
print(details)
if await father(details):
return True
else:
return False
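# For illustration: if the user picked DigitalOcean, package 3 and Ubuntu for a VM named
# "myVM" in the US, make_child() would hand father() roughly
#   {"os": "ubuntu-16-04-x64", "name": "myVM", "region": "nyc3",
#    "provider": "DigitalOcean", "memory": "2", "processor": "2"}
# (values taken from the mappings above; father() itself lives in main.py and is not shown here).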
class MyClient(discord.Client):
help_message = """```To get started, enter ‘~create <InstanceName>’.
You will then be asked a few questions to set up the VM.
To stop the process at any stage please enter ‘~cancel’.
Follow the instructions prompted by the bot to finish the set-up.```"""
# 0 is normal mode
# 1 is create mode
# in create mode, the bot starts interrogating you
mode = 0
regions_string = "\n1. USA\n2. UK\n3. IN"
current_prompt = -1
OS_string = "\n1. Fedora\n2. Ubuntu 16"
packages_list = {
"DigitalOcean": [
"1. 1 CPU, 1 GB RAM, 25 GB SSD",
"2. 1 CPU, 2GB RAM, 50GB SSD",
"3. 2 CPU, 2GB RAM, 60GB SSD",
"4. 2 CPU, 4GB RAM, 80GB SSD",
"5. 4 CPU, 8GB RAM, 160GB SSD"
],
"AWS": [
"1. 1 CPU, 1 GB RAM [nano]",
]
}
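# Editorial note: the package numbers shown to the user here have to stay in sync with the
# memory/processor mapping in make_child() above (e.g. DigitalOcean option 3 is the
# 2 CPU / 2 GB plan both in this menu and in make_child()).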
async def find(self, queries, string):
for q in queries:
if q in string:
return True
return False
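# find() is a plain substring test: it returns True as soon as any keyword occurs anywhere
# in the lowercased message. For example,
#   await self.find(["digital", "ocean", "1"], "~digital ocean please")  -> True
#   await self.find(["google", "gcp", "3"], "~aws")                      -> False
# Being substring-based, very short keywords (like "in" or "us") can match inside longer
# words, so the order of the checks in the handlers below matters.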
async def on_ready(self):
print(f"Logged on as {self.user}!")
async def send_error(self, message):
await message.channel.send("Sorry couldn't get that, please try again")
async def handle_provider(self, message):
success = False
if (await self.find(["google", "gcp", "3"], message.content.lower())):
params["provider"] = "Google Cloud Platform"
success = True
elif (await self.find(["amazon", "web", "services", "aws", "2"], message.content.lower())):
params["provider"] = "AWS"
success = True
elif (await self.find(["digital", "ocean", "1"], message.content.lower())):
params["provider"] = "DigitalOcean"
success = True
if success:
await message.channel.send(f"You have selected {params['provider']} as your provider")
self.current_prompt = 1
await message.channel.send("Where would your VM like to live?" + self.regions_string)
return True
return False
async def handle_region(self, message):
success = False
if (await self.find(["us", "states", "unitedstates", "united states", "america", "1"], message.content.lower())):
if (params["provider"] == "DigitalOcean"):
params["region"] = "nyc3"
elif (params["provider"] == "AWS"):
params["region"] = "us-west-2"
success = True
elif (await self.find(["uk", "kingdom", "unitedkingdom", "united kingdom", "england", "britian", "2"], message.content.lower())):
if (params["provider"] == "DigitalOcean"):
params["region"] = "eu-west-2"
elif (params["provider"] == "AWS"):
params["region"] = "us-west-2"
success = True
elif (await self.find(["india", "in", "bharat", "3"], message.content.lower())):
if (params["provider"] == "DigitalOcean"):
params["region"] = "blr1"
elif (params["provider"] == "AWS"):
params["region"] = "ap-south-1"
success = True
if success:
await message.channel.send(f"You have selected {params['region']} as your region")
self.current_prompt = 2
await message.channel.send("What OS would you like to use" + self.OS_string)
return True
async def handle_os(self, message):
success = False
if (await self.find(["ubuntu", "2"], message.content.lower())):
params["OS"] = "ubuntu-16-04-x64"
success = True
elif (await self.find(["fedora", "1"], message.content.lower())):
params["OS"] = "fedora-34-x64"
success = True
if success:
await message.channel.send(f"You have selected {params['OS']} as your operating system")
self.current_prompt = 3
await message.channel.send("What package would you like to use?\n" + "\n".join(self.packages_list[params['provider']]))
return True
return False
async def handle_package(self, message):
success = False
try:
number = int(message.content.lower()[1:])
if params["provider"] == "DigitalOcean" and 0 < number <= 5:
success = True
elif params["provider"] == "AWS" and number == 1:
success = True
elif params["provider"] == "AWS" and number != 1:
await message.channel.send("We only support the micro package cause its free and we don't wanna rack up bills")
return 69
else:
await message.channel.send("Invalid package selected")
return 69
except ValueError:
await message.channel.send("Couldn't parse the package number, are you sure you entered a number (eg: ~55)")
return 70
if success:
params["Package"] = number
if(params["provider"] == 'AWS'):
await message.channel.send(f"You have selected package {self.packages_list['AWS'][number-1]}, seems like you have a lot of money")
else:
await message.channel.send(f"You have selected package {self.packages_list['DigitalOcean'][number-1]}, seems like you have a lot of money")
self.current_prompt = 4
await message.channel.send(f"Looks like things are done! Have a cup of coffee, your VM, {params['name']}, will be ready in about a minute!")
self.mode = 0
if await make_child():
return True
else:
await message.channel.send("Sorry an error occured")
return 60
return False
async def create_mode(self, message):
if message.content == "~cancel":
await message.channel.send("All settings have been discarded, returning to normal mode")
self.mode = 0
return
if message.content.startswith("~create "):
await message.channel.send("You are already in create mode")
return
if self.current_prompt == 0:
if not await self.handle_provider(message):
await self.send_error(message)
return
elif self.current_prompt == 1:
if not await self.handle_region(message):
await self.send_error(message)
return
elif self.current_prompt == 2:
if not await self.handle_os(message):
await self.send_error(message)
return
elif self.current_prompt == 3:
if not await self.handle_package(message):
await self.send_error(message)
return
async def on_message(self, message):
if (message.author == self.user or not message.content.startswith("~")):
return
contents = message.content.split(" ") # split the message by spaces
# the first thing is the command
command = contents[0][1:] # discard the first character
if self.mode == 1:
await self.create_mode(message)
return
if (command == 'cancel'):
await message.channel.send("Too late!")
return
if (command != 'create'):
await message.channel.send(self.help_message)
return
if (command == 'create'):
try:
params["name"] = contents[1]
except IndexError:
params["name"] = "myVM"
self.mode = 1
first_message = f"""You will now be prompted with questions to select the specs for {params['name']}
Send ~cancel to stop anytime and discard the changes
Remember to prefix your replies with ~
Please select one of the following providers:\n1. DigitalOcean\n2. AWS\n3. GoogleCloudPlatform"""
await message.channel.send(first_message)
self.current_prompt = 0
client = MyClient()
client.run(SECRET_KEY)
|
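# --- Editorial sketch (standalone, not part of the bot above) ----------------------
# A minimal, dependency-free illustration of the substring matching that MyClient.find()
# relies on, so the option-picking behaviour can be tried without a Discord token.
# The names here (pick_option, PROVIDER_KEYWORDS) are made up for this example.
import asyncio

PROVIDER_KEYWORDS = {
    "Google Cloud Platform": ["google", "gcp", "3"],
    "AWS": ["amazon", "web", "services", "aws", "2"],
    "DigitalOcean": ["digital", "ocean", "1"],
}

async def pick_option(options: dict, reply: str):
    # Same idea as MyClient.find(): the first option whose keyword is a substring
    # of the lowercased reply wins; None means "ask again".
    reply = reply.lower()
    for name, keywords in options.items():
        if any(keyword in reply for keyword in keywords):
            return name
    return None

if __name__ == "__main__":
    print(asyncio.run(pick_option(PROVIDER_KEYWORDS, "~digital ocean please")))  # DigitalOcean
    print(asyncio.run(pick_option(PROVIDER_KEYWORDS, "~something else")))        # None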
# Copyright 2021 Elshan Agaev
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Everything related to teachers"""
from bot.base import db
from bot.teachers_vocabulary import *
cur = db.cursor()
# TODO Filter for groupId. Or not...
def add_teacher(req: str, user_id: int):
"""
Add teacher to database
:param user_id: User id, who is adding
:param req: Request
:return: Result
"""
if req.lower() == "имя=предмет":
return "🤬 Чел, ты... [Имя=Предмет это лишь пример]"
# We create list, where 0 is Teacher, and 1 is Class
try:
req = req.split("=")
req = (req[0], req[1], req[1].lower(), user_id) # Teacher, Class, Searchable(we search in this column), User
except IndexError:
return r_teacher_add_help
sql = "INSERT INTO teachers (teacherName, teacherClass, teacherClassSearchable, userId) VALUES (?, ?, ?, ?)"
db.cursor().execute(sql, req)
db.commit()
return r_teacher_add_success.format(req[0], req[1])
def delete_teacher(req: int, user_id: int):
"""
Delete teacher from database
:param req: Request, class name
:param user_id: User id
:return: Result (success or fail)
"""
sql = db.cursor().execute("DELETE FROM teachers WHERE teacherId = ? AND userId = ?", (req, user_id)).rowcount
db.commit()
return r_teacher_delete_success if (sql != 0) else r_teacher_delete_fail # Not 0 means deleted
def find_teacher(req: str):
"""
Find teacher
:param req: Request
:return: Search result as string
"""
if len(req) < 3:
return r_teacher_find_symbols
req_f = f"%{req.lower()}%"
res = cur.execute("SELECT * FROM teachers WHERE teacherClassSearchable LIKE ? LIMIT 5", (req_f,)).fetchall()
out = ""
for i in res:
out += f"{i["teacherId"]}. {i["teacherName"]}\n[{i["teacherClass"]}]\n"
if not out:
return r_teacher_find_fail.format(req)
return r_teacher_find_success.format(out)
|
# Copyright 2021 Elshan Agaev
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Everything related to teachers"""
from bot.base import db
from bot.teachers_vocabulary import *
cur = db.cursor()
# TODO Filter for groupId. Or not...
def add_teacher(req: str, user_id: int):
"""
Add teacher to database
:param user_id: User id, who is adding
:param req: Request
:return: Result
"""
if req.lower() == "имя=предмет":
return "🤬 Чел, ты... [Имя=Предмет это лишь пример]"
# We create list, where 0 is Teacher, and 1 is Class
try:
req = req.split("=")
req = (req[0], req[1], req[1].lower(), user_id) # Teacher, Class, Searchable(we search in this column), User
except IndexError:
return r_teacher_add_help
sql = "INSERT INTO teachers (teacherName, teacherClass, teacherClassSearchable, userId) VALUES (?, ?, ?, ?)"
db.cursor().execute(sql, req)
db.commit()
return r_teacher_add_success.format(req[0], req[1])
def delete_teacher(req: int, user_id: int):
"""
Delete teacher from database
:param req: Request, class name
:param user_id: User id
:return: Result (success or fail)
"""
sql = db.cursor().execute("DELETE FROM teachers WHERE teacherId = ? AND userId = ?", (req, user_id)).rowcount
db.commit()
return r_teacher_delete_success if (sql != 0) else r_teacher_delete_fail # Not 0 means deleted
def find_teacher(req: str):
"""
Find teacher
:param req: Request
:return: Search result as string
"""
if len(req) < 3:
return r_teacher_find_symbols
req_f = f"%{req.lower()}%"
res = cur.execute("SELECT * FROM teachers WHERE teacherClassSearchable LIKE ? LIMIT 5", (req_f,)).fetchall()
out = ""
for i in res:
out += f"{i['teacherId']}. {i['teacherName']}\n[{i['teacherClass']}]\n"
if not out:
return r_teacher_find_fail.format(req)
return r_teacher_find_success.format(out)
|
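# --- Editorial sketch (standalone, not part of the bot package) ---------------------
# The module above leans on bot.base for its SQLite connection and (judging by the
# i['teacherId'] style row access in find_teacher) a Row/dict row factory. This
# self-contained sketch reproduces the same parameterized INSERT and LIKE search against
# an in-memory database; the table layout and sample data are illustrative only.
import sqlite3

db = sqlite3.connect(":memory:")
db.row_factory = sqlite3.Row  # lets rows be indexed by column name, as in find_teacher()
db.execute(
    "CREATE TABLE teachers ("
    "teacherId INTEGER PRIMARY KEY AUTOINCREMENT, "
    "teacherName TEXT, teacherClass TEXT, teacherClassSearchable TEXT, userId INTEGER)"
)

def add(name_eq_class: str, user_id: int) -> None:
    name, klass = name_eq_class.split("=")
    db.execute(
        "INSERT INTO teachers (teacherName, teacherClass, teacherClassSearchable, userId) "
        "VALUES (?, ?, ?, ?)",
        (name, klass, klass.lower(), user_id),
    )
    db.commit()

def find(query: str) -> str:
    like = f"%{query.lower()}%"
    rows = db.execute(
        "SELECT * FROM teachers WHERE teacherClassSearchable LIKE ? LIMIT 5", (like,)
    ).fetchall()
    return "".join(f"{r['teacherId']}. {r['teacherName']}\n[{r['teacherClass']}]\n" for r in rows)

add("Ivanov=Mathematics", user_id=1)
print(find("math"))  # -> "1. Ivanov" / "[Mathematics]"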
import argparse
import io
import logging
import sys
from collections import OrderedDict
from dataclasses import dataclass
from pathlib import Path
from typing import List, Dict, Set, Union, Optional, TextIO
import pandas as pd
from jinja2 import Template, Environment, FileSystemLoader
from kyoto_reader import KyotoReader, Document, Argument, SpecialArgument, BaseArgument, Predicate, Mention, BasePhrase
from pyknp import BList
from utils.constants import CASE2YOMI
from utils.util import is_pas_target, is_bridging_target, is_coreference_target
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
class Scorer:
"""A class to evaluate system output.
To evaluate system output with this class, you have to prepare gold data and system prediction data as instances of
:class:`kyoto_reader.Document`
Args:
documents_pred (List[Document]): システム予測文書集合
documents_gold (List[Document]): 正解文書集合
target_cases (List[str]): 評価の対象とする格 (kyoto_reader.ALL_CASES を参照)
target_exophors (List[str]): 評価の対象とする外界照応の照応先 (kyoto_reader.ALL_EXOPHORS を参照)
bridging (bool): 橋渡し照応の評価を行うかどうか (default: False)
coreference (bool): 共参照の評価を行うかどうか (default: False)
pas_target (str): 述語項構造解析において述語として扱う対象 ('pred': 用言, 'noun': 体言, 'all': 両方, '': 述語なし (default: pred))
Attributes:
cases (List[str]): 評価の対象となる格
doc_ids: (List[str]): 評価の対象となる文書の文書ID集合
did2document_pred (Dict[str, Document]): 文書IDからシステム予測文書を引くための辞書
did2document_gold (Dict[str, Document]): 文書IDから正解文書を引くための辞書
bridging (bool): 橋渡し照応の評価を行うかどうか
coreference (bool): 共参照の評価を行うかどうか
pas_target (str): 述語項構造解析において述語として扱う対象
comp_result (Dict[tuple, str]): 正解と予測を比較した結果を格納するための辞書
sub_scorers (List[SubScorer]): 文書ごとの評価を行うオブジェクトのリスト
relax_exophors (Dict[str, str]): 「不特定:人1」などを「不特定:人」として評価するためのマップ
"""
DEPTYPE2ANALYSIS = OrderedDict([('overt', 'overt'),
('dep', 'dep'),
('intra', 'zero_intra'),
('inter', 'zero_inter'),
('exo', 'zero_exophora')])
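# DEPTYPE2ANALYSIS maps the dep_type labels coming from kyoto_reader to the analysis buckets
# used in the score tables: 'overt' and 'dep' are overt/dependency arguments, while 'intra',
# 'inter' and 'exo' are intra-sentential, inter-sentential and exophoric zero anaphora
# respectively (hence the zero_* column names).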
def __init__(self,
documents_pred: List[Document],
documents_gold: List[Document],
target_cases: List[str],
target_exophors: List[str],
bridging: bool = False,
coreference: bool = False,
pas_target: str = 'pred'):
# long document may have been ignored
assert set(doc.doc_id for doc in documents_pred) <= set(doc.doc_id for doc in documents_gold)
self.cases: List[str] = target_cases if pas_target != '' else []
self.doc_ids: List[str] = [doc.doc_id for doc in documents_pred]
self.did2document_pred: Dict[str, Document] = {doc.doc_id: doc for doc in documents_pred}
self.did2document_gold: Dict[str, Document] = {doc.doc_id: doc for doc in documents_gold}
self.bridging: bool = bridging
self.coreference: bool = coreference
self.pas_target: str = pas_target
self.comp_result: Dict[tuple, str] = {}
self.sub_scorers: List[SubScorer] = []
self.relax_exophors: Dict[str, str] = {}
for exophor in target_exophors:
self.relax_exophors[exophor] = exophor
if exophor in ('不特定:人', '不特定:物', '不特定:状況'):
for n in ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'):
self.relax_exophors[exophor + n] = exophor
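# For example, with target_exophors=['著者', '読者', '不特定:人'], relax_exophors maps
# '著者' -> '著者', '読者' -> '読者', '不特定:人' -> '不特定:人', and additionally
# '不特定:人1' ... '不特定:人11' -> '不特定:人', which is how the numbered variants are
# collapsed during evaluation.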
def run(self) -> 'ScoreResult':
"""読み込んだ正解文書集合とシステム予測文書集合に対して評価を行う
Returns:
ScoreResult: 評価結果のスコア
"""
self.comp_result = {}
self.sub_scorers = []
all_result = None
for doc_id in self.doc_ids:
sub_scorer = SubScorer(self.did2document_pred[doc_id], self.did2document_gold[doc_id],
cases=self.cases,
bridging=self.bridging,
coreference=self.coreference,
relax_exophors=self.relax_exophors,
pas_target=self.pas_target)
if all_result is None:
all_result = sub_scorer.run()
else:
all_result += sub_scorer.run()
self.sub_scorers.append(sub_scorer)
self.comp_result.update({(doc_id, *key): val for key, val in sub_scorer.comp_result.items()})
return all_result
def write_html(self, output_file: Union[str, Path]) -> None:
"""正解データとシステム予測の比較をHTML形式で書き出し
Args:
output_file (Union[str, Path]): 出力先ファイル
"""
data: List[tuple] = []
for sub_scorer in self.sub_scorers:
gold_tree = ''
for sid in sub_scorer.document_gold.sid2sentence.keys():
with io.StringIO() as string:
self._draw_tree(sid,
sub_scorer.predicates_gold,
sub_scorer.mentions_gold,
sub_scorer.bridgings_gold,
sub_scorer.document_gold,
fh=string)
gold_tree += string.getvalue()
pred_tree = ''
for sid in sub_scorer.document_pred.sid2sentence.keys():
with io.StringIO() as string:
self._draw_tree(sid,
sub_scorer.predicates_pred,
sub_scorer.mentions_pred,
sub_scorer.bridgings_pred,
sub_scorer.document_pred,
fh=string)
pred_tree += string.getvalue()
data.append((sub_scorer.document_gold.sentences, gold_tree, pred_tree))
env = Environment(loader=FileSystemLoader(str(Path(__file__).parent)))
template: Template = env.get_template('template.html')
with Path(output_file).open('wt') as f:
f.write(template.render({'data': data}))
def _draw_tree(self,
sid: str,
predicates: List[BasePhrase],
mentions: List[BasePhrase],
anaphors: List[BasePhrase],
document: Document,
fh: Optional[TextIO] = None,
html: bool = True
) -> None:
"""Write the predicate-argument structures, coreference relations, and bridging anaphora relations of the
specified sentence in tree format.
Args:
sid (str): 出力対象の文ID
predicates (List[BasePhrase]): documentに含まれる全ての述語
mentions (List[BasePhrase]): documentに含まれる全てのメンション
anaphors (List[BasePhrase]): documentに含まれる全ての橋渡し照応詞
document (Document): 出力対象の文が含まれる文書
fh (Optional[TextIO]): 出力ストリーム
html (bool): HTML形式で出力するかどうか
"""
result2color = {anal: 'blue' for anal in Scorer.DEPTYPE2ANALYSIS.values()}
result2color.update({'overt': 'green', 'wrong': 'red', None: 'gray'})
result2color_coref = {'correct': 'blue', 'wrong': 'red', None: 'gray'}
blist: BList = document.sid2sentence[sid].blist
with io.StringIO() as string:
blist.draw_tag_tree(fh=string, show_pos=False)
tree_strings = string.getvalue().rstrip('\n').split('\n')
assert len(tree_strings) == len(blist.tag_list())
all_targets = [m.core for m in document.mentions.values()]
tid2predicate: Dict[int, BasePhrase] = {predicate.tid: predicate for predicate in predicates
if predicate.sid == sid}
tid2mention: Dict[int, BasePhrase] = {mention.tid: mention for mention in mentions if mention.sid == sid}
tid2bridging: Dict[int, BasePhrase] = {anaphor.tid: anaphor for anaphor in anaphors if anaphor.sid == sid}
for tid in range(len(tree_strings)):
tree_strings[tid] += ' '
if tid in tid2predicate:
predicate = tid2predicate[tid]
arguments = document.get_arguments(predicate)
for case in self.cases:
args = arguments[case]
if case == 'ガ':
args += arguments['判ガ']
targets = set()
for arg in args:
target = str(arg)
if all_targets.count(str(arg)) > 1 and isinstance(arg, Argument):
target += str(arg.dtid)
targets.add(target)
result = self.comp_result.get((document.doc_id, predicate.dtid, case), None)
if html:
tree_strings[tid] += f'<font color="{result2color[result]}'>{case}:{','.join(targets)}</font> '
else:
tree_strings[tid] += f'{case}:{",".join(targets)} '
if self.bridging and tid in tid2bridging:
anaphor = tid2bridging[tid]
arguments = document.get_arguments(anaphor)
args = arguments['ノ'] + arguments['ノ?']
targets = set()
for arg in args:
target = str(arg)
if all_targets.count(str(arg)) > 1 and isinstance(arg, Argument):
target += str(arg.dtid)
targets.add(target)
result = self.comp_result.get((document.doc_id, anaphor.dtid, 'ノ'), None)
if html:
tree_strings[tid] += f'<font color="{result2color[result]}'>ノ:{','.join(targets)}</font> '
else:
tree_strings[tid] += f'ノ:{",".join(targets)} '
if self.coreference and tid in tid2mention:
targets = set()
src_dtid = tid2mention[tid].dtid
if src_dtid in document.mentions:
src_mention = document.mentions[src_dtid]
tgt_mentions_relaxed = SubScorer.filter_mentions(
document.get_siblings(src_mention, relax=True), src_mention)
for tgt_mention in tgt_mentions_relaxed:
target: str = tgt_mention.core
if all_targets.count(target) > 1:
target += str(tgt_mention.dtid)
targets.add(target)
for eid in src_mention.eids:
entity = document.entities[eid]
if entity.exophor in self.relax_exophors:
targets.add(entity.exophor)
result = self.comp_result.get((document.doc_id, src_dtid, '='), None)
if html:
tree_strings[tid] += f'<font color="{result2color_coref[result]}'>=:{','.join(targets)}</font>'
else:
tree_strings[tid] += '=:' + ','.join(targets)
print('\n'.join(tree_strings), file=fh)
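# Colour legend for the trees written above: green marks overt arguments whose prediction
# matched the gold, blue marks the other matched analyses (dep and the zero_* types), red
# marks wrong predictions, and grey marks relations that were not evaluated (no comp_result
# entry); coreference uses the separate result2color_coref mapping (blue correct / red wrong
# / grey unevaluated).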
class SubScorer:
"""Scorer for single document pair.
Args:
document_pred (Document): システム予測文書
document_gold (Document): 正解文書
cases (List[str]): 評価の対象とする格
bridging (bool): 橋渡し照応の評価を行うかどうか (default: False)
coreference (bool): 共参照の評価を行うかどうか (default: False)
relax_exophors (Dict[str, str]): 「不特定:人1」などを「不特定:人」として評価するためのマップ
pas_target (str): 述語項構造解析において述語として扱う対象
Attributes:
doc_id (str): 対象の文書ID
document_pred (Document): システム予測文書
document_gold (Document): 正解文書
cases (List[str]): 評価の対象となる格
pas (bool): 述語項構造の評価を行うかどうか
bridging (bool): 橋渡し照応の評価を行うかどうか
coreference (bool): 共参照の評価を行うかどうか
comp_result (Dict[tuple, str]): 正解と予測を比較した結果を格納するための辞書
relax_exophors (Dict[str, str]): 「不特定:人1」などを「不特定:人」として評価するためのマップ
predicates_pred: (List[BasePhrase]): システム予測文書に含まれる述語
bridgings_pred: (List[BasePhrase]): システム予測文書に含まれる橋渡し照応詞
mentions_pred: (List[BasePhrase]): システム予測文書に含まれるメンション
predicates_gold: (List[BasePhrase]): 正解文書に含まれる述語
bridgings_gold: (List[BasePhrase]): 正解文書に含まれる橋渡し照応詞
mentions_gold: (List[BasePhrase]): 正解文書に含まれるメンション
"""
def __init__(self,
document_pred: Document,
document_gold: Document,
cases: List[str],
bridging: bool,
coreference: bool,
relax_exophors: Dict[str, str],
pas_target: str):
assert document_pred.doc_id == document_gold.doc_id
self.doc_id: str = document_gold.doc_id
self.document_pred: Document = document_pred
self.document_gold: Document = document_gold
self.cases: List[str] = cases
self.pas: bool = pas_target != ''
self.bridging: bool = bridging
self.coreference: bool = coreference
self.comp_result: Dict[tuple, str] = {}
self.relax_exophors: Dict[str, str] = relax_exophors
self.predicates_pred: List[BasePhrase] = []
self.bridgings_pred: List[BasePhrase] = []
self.mentions_pred: List[BasePhrase] = []
for bp in document_pred.bp_list():
if is_pas_target(bp, verbal=(pas_target in ('pred', 'all')), nominal=(pas_target in ('noun', 'all'))):
self.predicates_pred.append(bp)
if self.bridging and is_bridging_target(bp):
self.bridgings_pred.append(bp)
if self.coreference and is_coreference_target(bp):
self.mentions_pred.append(bp)
self.predicates_gold: List[BasePhrase] = []
self.bridgings_gold: List[BasePhrase] = []
self.mentions_gold: List[BasePhrase] = []
for bp in document_gold.bp_list():
if is_pas_target(bp, verbal=(pas_target in ('pred', 'all')), nominal=(pas_target in ('noun', 'all'))):
self.predicates_gold.append(bp)
if self.bridging and is_bridging_target(bp):
self.bridgings_gold.append(bp)
if self.coreference and is_coreference_target(bp):
self.mentions_gold.append(bp)
def run(self) -> 'ScoreResult':
"""Perform evaluation for the given gold document and system prediction document.
Returns:
ScoreResult: 評価結果のスコア
"""
self.comp_result = {}
measures_pas = self._evaluate_pas() if self.pas else None
measures_bridging = self._evaluate_bridging() if self.bridging else None
measure_coref = self._evaluate_coref() if self.coreference else None
return ScoreResult(measures_pas, measures_bridging, measure_coref)
def _evaluate_pas(self) -> pd.DataFrame:
"""calculate predicate-argument structure analysis scores"""
# measures: Dict[str, Dict[str, Measure]] = OrderedDict(
# (case, OrderedDict((anal, Measure()) for anal in Scorer.DEPTYPE2ANALYSIS.values()))
# for case in self.cases)
measures = pd.DataFrame([[Measure() for _ in Scorer.DEPTYPE2ANALYSIS.values()] for _ in self.cases],
index=self.cases, columns=Scorer.DEPTYPE2ANALYSIS.values())
dtid2predicate_pred: Dict[int, Predicate] = {pred.dtid: pred for pred in self.predicates_pred}
dtid2predicate_gold: Dict[int, Predicate] = {pred.dtid: pred for pred in self.predicates_gold}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2predicate_pred:
predicate_pred = dtid2predicate_pred[dtid]
arguments_pred = self.document_pred.get_arguments(predicate_pred, relax=False)
else:
arguments_pred = None
if dtid in dtid2predicate_gold:
predicate_gold = dtid2predicate_gold[dtid]
arguments_gold = self.document_gold.get_arguments(predicate_gold, relax=False)
arguments_gold_relaxed = self.document_gold.get_arguments(predicate_gold, relax=True)
else:
predicate_gold = arguments_gold = arguments_gold_relaxed = None
for case in self.cases:
args_pred: List[BaseArgument] = arguments_pred[case] if arguments_pred is not None else []
assert len(args_pred) in (0, 1) # Our analyzer predicts one argument for one predicate
if predicate_gold is not None:
args_gold = self._filter_args(arguments_gold[case], predicate_gold)
args_gold_relaxed = self._filter_args(
arguments_gold_relaxed[case] + (arguments_gold_relaxed['判ガ'] if case == 'ガ' else []),
predicate_gold)
else:
args_gold = args_gold_relaxed = []
key = (dtid, case)
# calculate precision
if args_pred:
arg = args_pred[0]
if arg in args_gold_relaxed:
# use dep_type of gold argument if possible
arg_gold = args_gold_relaxed[args_gold_relaxed.index(arg)]
analysis = Scorer.DEPTYPE2ANALYSIS[arg_gold.dep_type]
self.comp_result[key] = analysis
measures.at[case, analysis].correct += 1
else:
# system出力のdep_typeはgoldのものと違うので不整合が起きるかもしれない
analysis = Scorer.DEPTYPE2ANALYSIS[arg.dep_type]
self.comp_result[key] = 'wrong' # precision が下がる
measures.at[case, analysis].denom_pred += 1
# calculate recall
# 正解が複数ある場合、そのうち一つが当てられていればそれを正解に採用
# いずれも当てられていなければ、relax されていない項から一つを選び正解に採用
if args_gold or (self.comp_result.get(key, None) in Scorer.DEPTYPE2ANALYSIS.values()):
arg_gold = None
for arg in args_gold_relaxed:
if arg in args_pred:
arg_gold = arg # 予測されている項を優先して正解の項に採用
break
if arg_gold is not None:
analysis = Scorer.DEPTYPE2ANALYSIS[arg_gold.dep_type]
assert self.comp_result[key] == analysis
else:
analysis = Scorer.DEPTYPE2ANALYSIS[args_gold[0].dep_type]
if args_pred:
assert self.comp_result[key] == 'wrong'
else:
self.comp_result[key] = 'wrong' # recall が下がる
measures.at[case, analysis].denom_gold += 1
return measures
def _filter_args(self,
args: List[BaseArgument],
predicate: Predicate,
) -> List[BaseArgument]:
filtered_args = []
for arg in args:
if isinstance(arg, SpecialArgument):
if arg.exophor not in self.relax_exophors: # filter out non-target exophors
continue
arg.exophor = self.relax_exophors[arg.exophor] # 「不特定:人1」なども「不特定:人」として扱う
else:
assert isinstance(arg, Argument)
# filter out self-anaphora and cataphoras
if predicate.dtid == arg.dtid or (predicate.dtid < arg.dtid and arg.sid != predicate.sid):
continue
filtered_args.append(arg)
return filtered_args
def _evaluate_bridging(self) -> pd.Series:
"""calculate bridging anaphora resolution scores"""
measures: Dict[str, Measure] = OrderedDict((anal, Measure()) for anal in Scorer.DEPTYPE2ANALYSIS.values())
dtid2anaphor_pred: Dict[int, Predicate] = {pred.dtid: pred for pred in self.bridgings_pred}
dtid2anaphor_gold: Dict[int, Predicate] = {pred.dtid: pred for pred in self.bridgings_gold}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2anaphor_pred:
anaphor_pred = dtid2anaphor_pred[dtid]
antecedents_pred: List[BaseArgument] = \
self._filter_args(self.document_pred.get_arguments(anaphor_pred, relax=False)['ノ'], anaphor_pred)
else:
antecedents_pred = []
assert len(antecedents_pred) in (0, 1) # in bert_pas_analysis, predict one argument for one predicate
if dtid in dtid2anaphor_gold:
anaphor_gold: Predicate = dtid2anaphor_gold[dtid]
antecedents_gold: List[BaseArgument] = \
self._filter_args(self.document_gold.get_arguments(anaphor_gold, relax=False)['ノ'], anaphor_gold)
arguments: Dict[str, List[BaseArgument]] = self.document_gold.get_arguments(anaphor_gold, relax=True)
antecedents_gold_relaxed: List[BaseArgument] = \
self._filter_args(arguments['ノ'] + arguments['ノ?'], anaphor_gold)
else:
antecedents_gold = antecedents_gold_relaxed = []
key = (dtid, 'ノ')
# calculate precision
if antecedents_pred:
antecedent_pred = antecedents_pred[0]
if antecedent_pred in antecedents_gold_relaxed:
# use dep_type of gold antecedent if possible
antecedent_gold = antecedents_gold_relaxed[antecedents_gold_relaxed.index(antecedent_pred)]
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_gold.dep_type]
if analysis == 'overt':
analysis = 'dep'
self.comp_result[key] = analysis
measures[analysis].correct += 1
else:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_pred.dep_type]
if analysis == 'overt':
analysis = 'dep'
self.comp_result[key] = 'wrong'
measures[analysis].denom_pred += 1
# calculate recall
if antecedents_gold or (self.comp_result.get(key, None) in Scorer.DEPTYPE2ANALYSIS.values()):
antecedent_gold = None
for ant in antecedents_gold_relaxed:
if ant in antecedents_pred:
antecedent_gold = ant # 予測されている先行詞を優先して正解の先行詞に採用
break
if antecedent_gold is not None:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_gold.dep_type]
if analysis == 'overt':
analysis = 'dep'
assert self.comp_result[key] == analysis
else:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedents_gold[0].dep_type]
if analysis == 'overt':
analysis = 'dep'
if antecedents_pred:
assert self.comp_result[key] == 'wrong'
else:
self.comp_result[key] = 'wrong'
measures[analysis].denom_gold += 1
return pd.Series(measures)
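# Note: for bridging, gold antecedents annotated as 'overt' are folded into the 'dep' bucket
# above (see the analysis == 'overt' branches), which is why ScoreResult.to_dict() later
# asserts that the bridging 'overt' Measure stays empty.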
def _evaluate_coref(self) -> pd.Series:
"""calculate coreference resolution scores"""
measure = Measure()
dtid2mention_pred: Dict[int, Mention] = {bp.dtid: self.document_pred.mentions[bp.dtid]
for bp in self.mentions_pred
if bp.dtid in self.document_pred.mentions}
dtid2mention_gold: Dict[int, Mention] = {bp.dtid: self.document_gold.mentions[bp.dtid]
for bp in self.mentions_gold
if bp.dtid in self.document_gold.mentions}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2mention_pred:
src_mention_pred = dtid2mention_pred[dtid]
tgt_mentions_pred = \
self.filter_mentions(self.document_pred.get_siblings(src_mention_pred), src_mention_pred)
exophors_pred = {e.exophor for e in map(self.document_pred.entities.get, src_mention_pred.eids)
if e.is_special}
else:
tgt_mentions_pred = exophors_pred = set()
if dtid in dtid2mention_gold:
src_mention_gold = dtid2mention_gold[dtid]
tgt_mentions_gold = self.filter_mentions(self.document_gold.get_siblings(src_mention_gold, relax=False),
src_mention_gold)
tgt_mentions_gold_relaxed = self.filter_mentions(
self.document_gold.get_siblings(src_mention_gold, relax=True), src_mention_gold)
exophors_gold = {self.relax_exophors[e.exophor] for e
in map(self.document_gold.entities.get, src_mention_gold.eids)
if e.is_special and e.exophor in self.relax_exophors}
exophors_gold_relaxed = {self.relax_exophors[e.exophor] for e
in map(self.document_gold.entities.get, src_mention_gold.all_eids)
if e.is_special and e.exophor in self.relax_exophors}
else:
tgt_mentions_gold = tgt_mentions_gold_relaxed = exophors_gold = exophors_gold_relaxed = set()
key = (dtid, '=')
# calculate precision
if tgt_mentions_pred or exophors_pred:
if (tgt_mentions_pred & tgt_mentions_gold_relaxed) or (exophors_pred & exophors_gold_relaxed):
self.comp_result[key] = 'correct'
measure.correct += 1
else:
self.comp_result[key] = 'wrong'
measure.denom_pred += 1
# calculate recall
if tgt_mentions_gold or exophors_gold or (self.comp_result.get(key, None) == 'correct'):
if (tgt_mentions_pred & tgt_mentions_gold_relaxed) or (exophors_pred & exophors_gold_relaxed):
assert self.comp_result[key] == 'correct'
else:
self.comp_result[key] = 'wrong'
measure.denom_gold += 1
return pd.Series([measure], index=['all'])
@staticmethod
def filter_mentions(tgt_mentions: Set[Mention], src_mention: Mention) -> Set[Mention]:
"""filter out cataphors"""
return {tgt_mention for tgt_mention in tgt_mentions if tgt_mention.dtid < src_mention.dtid}
@dataclass(frozen=True)
class ScoreResult:
"""A data class for storing the numerical result of an evaluation"""
measures_pas: Optional[pd.DataFrame]
measures_bridging: Optional[pd.Series]
measure_coref: Optional[pd.Series]
def to_dict(self) -> Dict[str, Dict[str, 'Measure']]:
"""convert data to dictionary"""
df_all = pd.DataFrame(index=['all_case'])
if self.pas:
df_pas: pd.DataFrame = self.measures_pas.copy()
df_pas['zero'] = df_pas['zero_intra'] + df_pas['zero_inter'] + df_pas['zero_exophora']
df_pas['dep_zero'] = df_pas['zero'] + df_pas['dep']
df_pas['all'] = df_pas['dep_zero'] + df_pas['overt']
df_all = pd.concat([df_pas, df_all])
df_all.loc['all_case'] = df_pas.sum(axis=0)
if self.bridging:
df_bar = self.measures_bridging.copy()
df_bar['zero'] = df_bar['zero_intra'] + df_bar['zero_inter'] + df_bar['zero_exophora']
df_bar['dep_zero'] = df_bar['zero'] + df_bar['dep']
assert df_bar['overt'] == Measure() # No overt in BAR
df_bar['all'] = df_bar['dep_zero']
df_all.at['all_case', 'bridging'] = df_bar['all']
if self.coreference:
df_all.at['all_case', 'coreference'] = self.measure_coref['all']
return {k1: {k2: v2 for k2, v2 in v1.items() if pd.notnull(v2)}
for k1, v1 in df_all.to_dict(orient='index').items()}
def export_txt(self,
destination: Union[str, Path, TextIO]
) -> None:
"""Export the evaluation results in a text format.
Args:
destination (Union[str, Path, TextIO]): 書き出す先
"""
lines = []
for key, ms in self.to_dict().items():
lines.append(f'{key}格' if self.pas and key in self.measures_pas.index else key)
for analysis, measure in ms.items():
lines.append(f' {analysis}')
lines.append(f' precision: {measure.precision:.4f} ({measure.correct}/{measure.denom_pred})')
lines.append(f' recall : {measure.recall:.4f} ({measure.correct}/{measure.denom_gold})')
lines.append(f' F : {measure.f1:.4f}')
text = '\n'.join(lines) + '\n'
if isinstance(destination, str) or isinstance(destination, Path):
with Path(destination).open('wt') as writer:
writer.write(text)
elif isinstance(destination, io.TextIOBase):
destination.write(text)
def export_csv(self,
destination: Union[str, Path, TextIO],
sep: str = ','
) -> None:
"""Export the evaluation results in a csv format.
Args:
destination (Union[str, Path, TextIO]): 書き出す先
sep (str): 区切り文字 (default: ',')
"""
text = ''
result_dict = self.to_dict()
text += 'case' + sep
text += sep.join(result_dict['all_case'].keys()) + '\n'
for case, measures in result_dict.items():
text += CASE2YOMI.get(case, case) + sep
text += sep.join(f'{measure.f1:.6}' for measure in measures.values())
text += '\n'
if isinstance(destination, str) or isinstance(destination, Path):
with Path(destination).open('wt') as writer:
writer.write(text)
elif isinstance(destination, io.TextIOBase):
destination.write(text)
@property
def pas(self):
"""Whether self includes the score of predicate-argument structure analysis."""
return self.measures_pas is not None
@property
def bridging(self):
"""Whether self includes the score of bridging anaphora resolution."""
return self.measures_bridging is not None
@property
def coreference(self):
"""Whether self includes the score of coreference resolution."""
return self.measure_coref is not None
def __add__(self, other: 'ScoreResult') -> 'ScoreResult':
measures_pas = self.measures_pas + other.measures_pas if self.pas else None
measures_bridging = self.measures_bridging + other.measures_bridging if self.bridging else None
measure_coref = self.measure_coref + other.measure_coref if self.coreference else None
return ScoreResult(measures_pas, measures_bridging, measure_coref)
@dataclass
class Measure:
"""A data class to calculate and represent F-measure"""
denom_pred: int = 0
denom_gold: int = 0
correct: int = 0
def __add__(self, other: 'Measure'):
return Measure(self.denom_pred + other.denom_pred,
self.denom_gold + other.denom_gold,
self.correct + other.correct)
def __eq__(self, other: 'Measure'):
return self.denom_pred == other.denom_pred and \
self.denom_gold == other.denom_gold and \
self.correct == other.correct
@property
def precision(self) -> float:
if self.denom_pred == 0:
return .0
return self.correct / self.denom_pred
@property
def recall(self) -> float:
if self.denom_gold == 0:
return .0
return self.correct / self.denom_gold
@property
def f1(self) -> float:
if self.denom_pred + self.denom_gold == 0:
return .0
return 2 * self.correct / (self.denom_pred + self.denom_gold)
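# Worked example: Measure(denom_pred=4, denom_gold=5, correct=3) gives
# precision = 3/4 = 0.75, recall = 3/5 = 0.60 and F1 = 2*3/(4+5) ~= 0.667,
# which is the same value as the usual harmonic mean 2PR/(P+R).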
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--prediction-dir', default=None, type=str,
help='path to directory where system output KWDLC files exist (default: None)')
parser.add_argument('--gold-dir', default=None, type=str,
help='path to directory where gold KWDLC files exist (default: None)')
parser.add_argument('--coreference', '--coref', '--cr', action='store_true', default=False,
help='perform coreference resolution')
parser.add_argument('--bridging', '--brg', '--bar', action='store_true', default=False,
help='perform bridging anaphora resolution')
parser.add_argument('--case-string', type=str, default='ガ,ヲ,ニ,ガ2',
help='case strings separated by ","')
parser.add_argument('--exophors', '--exo', type=str, default='著者,読者,不特定:人,不特定:物',
help='exophor strings separated by ","')
parser.add_argument('--read-prediction-from-pas-tag', action='store_true', default=False,
help='use <述語項構造:> tag instead of <rel > tag in prediction files')
parser.add_argument('--pas-target', choices=['', 'pred', 'noun', 'all'], default='pred',
help='PAS analysis evaluation target (pred: verbal predicates, noun: nominal predicates)')
parser.add_argument('--result-html', default=None, type=str,
help='path to html file which prediction result is exported (default: None)')
parser.add_argument('--result-csv', default=None, type=str,
help='path to csv file which prediction result is exported (default: None)')
args = parser.parse_args()
reader_gold = KyotoReader(Path(args.gold_dir), extract_nes=False, use_pas_tag=False)
reader_pred = KyotoReader(
Path(args.prediction_dir),
extract_nes=False,
use_pas_tag=args.read_prediction_from_pas_tag,
)
documents_pred = reader_pred.process_all_documents()
documents_gold = reader_gold.process_all_documents()
assert set(args.case_string.split(',')) <= set(CASE2YOMI.keys())
msg = '"ノ" found in case string. If you want to perform bridging anaphora resolution, specify "--bridging" ' \
'option instead'
assert 'ノ' not in args.case_string.split(','), msg
scorer = Scorer(documents_pred, documents_gold,
target_cases=args.case_string.split(','),
target_exophors=args.exophors.split(','),
coreference=args.coreference,
bridging=args.bridging,
pas_target=args.pas_target)
result = scorer.run()
if args.result_html:
scorer.write_html(Path(args.result_html))
if args.result_csv:
result.export_csv(args.result_csv)
result.export_txt(sys.stdout)
if __name__ == '__main__':
main()
|
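# --- Editorial sketch (standalone, not part of scorer.py) ---------------------------
# A stripped-down illustration of how the scorer aggregates counts: Measure objects are kept
# in a pandas DataFrame and combined column-wise via Measure.__add__, the same pattern
# ScoreResult.to_dict() uses when it builds its 'zero'/'dep_zero'/'all' columns.
# The case/analysis labels and the counts below are made up for the example.
from dataclasses import dataclass

import pandas as pd


@dataclass
class Measure:
    denom_pred: int = 0
    denom_gold: int = 0
    correct: int = 0

    def __add__(self, other: 'Measure') -> 'Measure':
        return Measure(self.denom_pred + other.denom_pred,
                       self.denom_gold + other.denom_gold,
                       self.correct + other.correct)

    @property
    def f1(self) -> float:
        denom = self.denom_pred + self.denom_gold
        return 2 * self.correct / denom if denom else 0.0


measures = pd.DataFrame(
    [[Measure(4, 5, 3), Measure(10, 12, 8)],
     [Measure(2, 2, 2), Measure(7, 9, 5)]],
    index=['ガ', 'ヲ'], columns=['dep', 'zero_intra'])

# element-wise Measure addition, as in ScoreResult.to_dict()
measures['dep_zero'] = measures['dep'] + measures['zero_intra']
print(measures['dep_zero'].map(lambda m: round(m.f1, 3)))  # ガ: 0.71, ヲ: 0.7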
import argparse
import io
import logging
import sys
from collections import OrderedDict
from dataclasses import dataclass
from pathlib import Path
from typing import List, Dict, Set, Union, Optional, TextIO
import pandas as pd
from jinja2 import Template, Environment, FileSystemLoader
from kyoto_reader import KyotoReader, Document, Argument, SpecialArgument, BaseArgument, Predicate, Mention, BasePhrase
from pyknp import BList
from utils.constants import CASE2YOMI
from utils.util import is_pas_target, is_bridging_target, is_coreference_target
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
class Scorer:
"""A class to evaluate system output.
To evaluate system output with this class, you have to prepare gold data and system prediction data as instances of
:class:`kyoto_reader.Document`
Args:
documents_pred (List[Document]): システム予測文書集合
documents_gold (List[Document]): 正解文書集合
target_cases (List[str]): 評価の対象とする格 (kyoto_reader.ALL_CASES を参照)
target_exophors (List[str]): 評価の対象とする外界照応の照応先 (kyoto_reader.ALL_EXOPHORS を参照)
bridging (bool): 橋渡し照応の評価を行うかどうか (default: False)
coreference (bool): 共参照の評価を行うかどうか (default: False)
pas_target (str): 述語項構造解析において述語として扱う対象 ('pred': 用言, 'noun': 体言, 'all': 両方, '': 述語なし (default: pred))
Attributes:
cases (List[str]): 評価の対象となる格
doc_ids: (List[str]): 評価の対象となる文書の文書ID集合
did2document_pred (Dict[str, Document]): 文書IDからシステム予測文書を引くための辞書
did2document_gold (Dict[str, Document]): 文書IDから正解文書を引くための辞書
bridging (bool): 橋渡し照応の評価を行うかどうか
coreference (bool): 共参照の評価を行うかどうか
pas_target (str): 述語項構造解析において述語として扱う対象
comp_result (Dict[tuple, str]): 正解と予測を比較した結果を格納するための辞書
sub_scorers (List[SubScorer]): 文書ごとの評価を行うオブジェクトのリスト
relax_exophors (Dict[str, str]): 「不特定:人1」などを「不特定:人」として評価するためのマップ
"""
DEPTYPE2ANALYSIS = OrderedDict([('overt', 'overt'),
('dep', 'dep'),
('intra', 'zero_intra'),
('inter', 'zero_inter'),
('exo', 'zero_exophora')])
def __init__(self,
documents_pred: List[Document],
documents_gold: List[Document],
target_cases: List[str],
target_exophors: List[str],
bridging: bool = False,
coreference: bool = False,
pas_target: str = 'pred'):
# long document may have been ignored
assert set(doc.doc_id for doc in documents_pred) <= set(doc.doc_id for doc in documents_gold)
self.cases: List[str] = target_cases if pas_target != '' else []
self.doc_ids: List[str] = [doc.doc_id for doc in documents_pred]
self.did2document_pred: Dict[str, Document] = {doc.doc_id: doc for doc in documents_pred}
self.did2document_gold: Dict[str, Document] = {doc.doc_id: doc for doc in documents_gold}
self.bridging: bool = bridging
self.coreference: bool = coreference
self.pas_target: str = pas_target
self.comp_result: Dict[tuple, str] = {}
self.sub_scorers: List[SubScorer] = []
self.relax_exophors: Dict[str, str] = {}
for exophor in target_exophors:
self.relax_exophors[exophor] = exophor
if exophor in ('不特定:人', '不特定:物', '不特定:状況'):
for n in ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'):
self.relax_exophors[exophor + n] = exophor
def run(self) -> 'ScoreResult':
"""読み込んだ正解文書集合とシステム予測文書集合に対して評価を行う
Returns:
ScoreResult: 評価結果のスコア
"""
self.comp_result = {}
self.sub_scorers = []
all_result = None
for doc_id in self.doc_ids:
sub_scorer = SubScorer(self.did2document_pred[doc_id], self.did2document_gold[doc_id],
cases=self.cases,
bridging=self.bridging,
coreference=self.coreference,
relax_exophors=self.relax_exophors,
pas_target=self.pas_target)
if all_result is None:
all_result = sub_scorer.run()
else:
all_result += sub_scorer.run()
self.sub_scorers.append(sub_scorer)
self.comp_result.update({(doc_id, *key): val for key, val in sub_scorer.comp_result.items()})
return all_result
def write_html(self, output_file: Union[str, Path]) -> None:
"""正解データとシステム予測の比較をHTML形式で書き出し
Args:
output_file (Union[str, Path]): 出力先ファイル
"""
data: List[tuple] = []
for sub_scorer in self.sub_scorers:
gold_tree = ''
for sid in sub_scorer.document_gold.sid2sentence.keys():
with io.StringIO() as string:
self._draw_tree(sid,
sub_scorer.predicates_gold,
sub_scorer.mentions_gold,
sub_scorer.bridgings_gold,
sub_scorer.document_gold,
fh=string)
gold_tree += string.getvalue()
pred_tree = ''
for sid in sub_scorer.document_pred.sid2sentence.keys():
with io.StringIO() as string:
self._draw_tree(sid,
sub_scorer.predicates_pred,
sub_scorer.mentions_pred,
sub_scorer.bridgings_pred,
sub_scorer.document_pred,
fh=string)
pred_tree += string.getvalue()
data.append((sub_scorer.document_gold.sentences, gold_tree, pred_tree))
env = Environment(loader=FileSystemLoader(str(Path(__file__).parent)))
template: Template = env.get_template('template.html')
with Path(output_file).open('wt') as f:
f.write(template.render({'data': data}))
def _draw_tree(self,
sid: str,
predicates: List[BasePhrase],
mentions: List[BasePhrase],
anaphors: List[BasePhrase],
document: Document,
fh: Optional[TextIO] = None,
html: bool = True
) -> None:
"""Write the predicate-argument structures, coreference relations, and bridging anaphora relations of the
specified sentence in tree format.
Args:
sid (str): 出力対象の文ID
predicates (List[BasePhrase]): documentに含まれる全ての述語
mentions (List[BasePhrase]): documentに含まれる全てのメンション
anaphors (List[BasePhrase]): documentに含まれる全ての橋渡し照応詞
document (Document): 出力対象の文が含まれる文書
fh (Optional[TextIO]): 出力ストリーム
html (bool): HTML形式で出力するかどうか
"""
result2color = {anal: 'blue' for anal in Scorer.DEPTYPE2ANALYSIS.values()}
result2color.update({'overt': 'green', 'wrong': 'red', None: 'gray'})
result2color_coref = {'correct': 'blue', 'wrong': 'red', None: 'gray'}
blist: BList = document.sid2sentence[sid].blist
with io.StringIO() as string:
blist.draw_tag_tree(fh=string, show_pos=False)
tree_strings = string.getvalue().rstrip('\n').split('\n')
assert len(tree_strings) == len(blist.tag_list())
all_targets = [m.core for m in document.mentions.values()]
tid2predicate: Dict[int, BasePhrase] = {predicate.tid: predicate for predicate in predicates
if predicate.sid == sid}
tid2mention: Dict[int, BasePhrase] = {mention.tid: mention for mention in mentions if mention.sid == sid}
tid2bridging: Dict[int, BasePhrase] = {anaphor.tid: anaphor for anaphor in anaphors if anaphor.sid == sid}
for tid in range(len(tree_strings)):
tree_strings[tid] += ' '
if tid in tid2predicate:
predicate = tid2predicate[tid]
arguments = document.get_arguments(predicate)
for case in self.cases:
args = arguments[case]
if case == 'ガ':
args += arguments['判ガ']
targets = set()
for arg in args:
target = str(arg)
if all_targets.count(str(arg)) > 1 and isinstance(arg, Argument):
target += str(arg.dtid)
targets.add(target)
result = self.comp_result.get((document.doc_id, predicate.dtid, case), None)
if html:
tree_strings[tid] += f'<font color="{result2color[result]}">{case}:{",".join(targets)}</font> '
else:
tree_strings[tid] += f'{case}:{",".join(targets)} '
if self.bridging and tid in tid2bridging:
anaphor = tid2bridging[tid]
arguments = document.get_arguments(anaphor)
args = arguments['ノ'] + arguments['ノ?']
targets = set()
for arg in args:
target = str(arg)
if all_targets.count(str(arg)) > 1 and isinstance(arg, Argument):
target += str(arg.dtid)
targets.add(target)
result = self.comp_result.get((document.doc_id, anaphor.dtid, 'ノ'), None)
if html:
tree_strings[tid] += f'<font color="{result2color[result]}">ノ:{",".join(targets)}</font> '
else:
tree_strings[tid] += f'ノ:{",".join(targets)} '
if self.coreference and tid in tid2mention:
targets = set()
src_dtid = tid2mention[tid].dtid
if src_dtid in document.mentions:
src_mention = document.mentions[src_dtid]
tgt_mentions_relaxed = SubScorer.filter_mentions(
document.get_siblings(src_mention, relax=True), src_mention)
for tgt_mention in tgt_mentions_relaxed:
target: str = tgt_mention.core
if all_targets.count(target) > 1:
target += str(tgt_mention.dtid)
targets.add(target)
for eid in src_mention.eids:
entity = document.entities[eid]
if entity.exophor in self.relax_exophors:
targets.add(entity.exophor)
result = self.comp_result.get((document.doc_id, src_dtid, '='), None)
if html:
tree_strings[tid] += f'<font color="{result2color_coref[result]}">=:{",".join(targets)}</font>'
else:
tree_strings[tid] += '=:' + ','.join(targets)
print('\n'.join(tree_strings), file=fh)
class SubScorer:
"""Scorer for single document pair.
Args:
document_pred (Document): システム予測文書
document_gold (Document): 正解文書
cases (List[str]): 評価の対象とする格
bridging (bool): 橋渡し照応の評価を行うかどうか (default: False)
coreference (bool): 共参照の評価を行うかどうか (default: False)
relax_exophors (Dict[str, str]): 「不特定:人1」などを「不特定:人」として評価するためのマップ
pas_target (str): 述語項構造解析において述語として扱う対象
Attributes:
doc_id (str): 対象の文書ID
document_pred (Document): システム予測文書
document_gold (Document): 正解文書
cases (List[str]): 評価の対象となる格
pas (bool): 述語項構造の評価を行うかどうか
bridging (bool): 橋渡し照応の評価を行うかどうか
coreference (bool): 共参照の評価を行うかどうか
comp_result (Dict[tuple, str]): 正解と予測を比較した結果を格納するための辞書
relax_exophors (Dict[str, str]): 「不特定:人1」などを「不特定:人」として評価するためのマップ
predicates_pred: (List[BasePhrase]): システム予測文書に含まれる述語
bridgings_pred: (List[BasePhrase]): システム予測文書に含まれる橋渡し照応詞
mentions_pred: (List[BasePhrase]): システム予測文書に含まれるメンション
predicates_gold: (List[BasePhrase]): 正解文書に含まれる述語
bridgings_gold: (List[BasePhrase]): 正解文書に含まれる橋渡し照応詞
mentions_gold: (List[BasePhrase]): 正解文書に含まれるメンション
"""
def __init__(self,
document_pred: Document,
document_gold: Document,
cases: List[str],
bridging: bool,
coreference: bool,
relax_exophors: Dict[str, str],
pas_target: str):
assert document_pred.doc_id == document_gold.doc_id
self.doc_id: str = document_gold.doc_id
self.document_pred: Document = document_pred
self.document_gold: Document = document_gold
self.cases: List[str] = cases
self.pas: bool = pas_target != ''
self.bridging: bool = bridging
self.coreference: bool = coreference
self.comp_result: Dict[tuple, str] = {}
self.relax_exophors: Dict[str, str] = relax_exophors
self.predicates_pred: List[BasePhrase] = []
self.bridgings_pred: List[BasePhrase] = []
self.mentions_pred: List[BasePhrase] = []
for bp in document_pred.bp_list():
if is_pas_target(bp, verbal=(pas_target in ('pred', 'all')), nominal=(pas_target in ('noun', 'all'))):
self.predicates_pred.append(bp)
if self.bridging and is_bridging_target(bp):
self.bridgings_pred.append(bp)
if self.coreference and is_coreference_target(bp):
self.mentions_pred.append(bp)
self.predicates_gold: List[BasePhrase] = []
self.bridgings_gold: List[BasePhrase] = []
self.mentions_gold: List[BasePhrase] = []
for bp in document_gold.bp_list():
if is_pas_target(bp, verbal=(pas_target in ('pred', 'all')), nominal=(pas_target in ('noun', 'all'))):
self.predicates_gold.append(bp)
if self.bridging and is_bridging_target(bp):
self.bridgings_gold.append(bp)
if self.coreference and is_coreference_target(bp):
self.mentions_gold.append(bp)
def run(self) -> 'ScoreResult':
"""Perform evaluation for the given gold document and system prediction document.
Returns:
ScoreResult: 評価結果のスコア
"""
self.comp_result = {}
measures_pas = self._evaluate_pas() if self.pas else None
measures_bridging = self._evaluate_bridging() if self.bridging else None
measure_coref = self._evaluate_coref() if self.coreference else None
return ScoreResult(measures_pas, measures_bridging, measure_coref)
def _evaluate_pas(self) -> pd.DataFrame:
"""calculate predicate-argument structure analysis scores"""
# measures: Dict[str, Dict[str, Measure]] = OrderedDict(
# (case, OrderedDict((anal, Measure()) for anal in Scorer.DEPTYPE2ANALYSIS.values()))
# for case in self.cases)
measures = pd.DataFrame([[Measure() for _ in Scorer.DEPTYPE2ANALYSIS.values()] for _ in self.cases],
index=self.cases, columns=Scorer.DEPTYPE2ANALYSIS.values())
dtid2predicate_pred: Dict[int, Predicate] = {pred.dtid: pred for pred in self.predicates_pred}
dtid2predicate_gold: Dict[int, Predicate] = {pred.dtid: pred for pred in self.predicates_gold}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2predicate_pred:
predicate_pred = dtid2predicate_pred[dtid]
arguments_pred = self.document_pred.get_arguments(predicate_pred, relax=False)
else:
arguments_pred = None
if dtid in dtid2predicate_gold:
predicate_gold = dtid2predicate_gold[dtid]
arguments_gold = self.document_gold.get_arguments(predicate_gold, relax=False)
arguments_gold_relaxed = self.document_gold.get_arguments(predicate_gold, relax=True)
else:
predicate_gold = arguments_gold = arguments_gold_relaxed = None
for case in self.cases:
args_pred: List[BaseArgument] = arguments_pred[case] if arguments_pred is not None else []
assert len(args_pred) in (0, 1) # Our analyzer predicts one argument for one predicate
if predicate_gold is not None:
args_gold = self._filter_args(arguments_gold[case], predicate_gold)
args_gold_relaxed = self._filter_args(
arguments_gold_relaxed[case] + (arguments_gold_relaxed['判ガ'] if case == 'ガ' else []),
predicate_gold)
else:
args_gold = args_gold_relaxed = []
key = (dtid, case)
# calculate precision
if args_pred:
arg = args_pred[0]
if arg in args_gold_relaxed:
# use dep_type of gold argument if possible
arg_gold = args_gold_relaxed[args_gold_relaxed.index(arg)]
analysis = Scorer.DEPTYPE2ANALYSIS[arg_gold.dep_type]
self.comp_result[key] = analysis
measures.at[case, analysis].correct += 1
else:
# system出力のdep_typeはgoldのものと違うので不整合が起きるかもしれない
analysis = Scorer.DEPTYPE2ANALYSIS[arg.dep_type]
self.comp_result[key] = 'wrong' # precision が下がる
measures.at[case, analysis].denom_pred += 1
# calculate recall
# 正解が複数ある場合、そのうち一つが当てられていればそれを正解に採用
# いずれも当てられていなければ、relax されていない項から一つを選び正解に採用
if args_gold or (self.comp_result.get(key, None) in Scorer.DEPTYPE2ANALYSIS.values()):
arg_gold = None
for arg in args_gold_relaxed:
if arg in args_pred:
arg_gold = arg # 予測されている項を優先して正解の項に採用
break
if arg_gold is not None:
analysis = Scorer.DEPTYPE2ANALYSIS[arg_gold.dep_type]
assert self.comp_result[key] == analysis
else:
analysis = Scorer.DEPTYPE2ANALYSIS[args_gold[0].dep_type]
if args_pred:
assert self.comp_result[key] == 'wrong'
else:
self.comp_result[key] = 'wrong' # recall が下がる
measures.at[case, analysis].denom_gold += 1
return measures
def _filter_args(self,
args: List[BaseArgument],
predicate: Predicate,
) -> List[BaseArgument]:
filtered_args = []
for arg in args:
if isinstance(arg, SpecialArgument):
if arg.exophor not in self.relax_exophors: # filter out non-target exophors
continue
arg.exophor = self.relax_exophors[arg.exophor] # 「不特定:人1」なども「不特定:人」として扱う
else:
assert isinstance(arg, Argument)
# filter out self-anaphora and cataphoras
if predicate.dtid == arg.dtid or (predicate.dtid < arg.dtid and arg.sid != predicate.sid):
continue
filtered_args.append(arg)
return filtered_args
def _evaluate_bridging(self) -> pd.Series:
"""calculate bridging anaphora resolution scores"""
measures: Dict[str, Measure] = OrderedDict((anal, Measure()) for anal in Scorer.DEPTYPE2ANALYSIS.values())
dtid2anaphor_pred: Dict[int, Predicate] = {pred.dtid: pred for pred in self.bridgings_pred}
dtid2anaphor_gold: Dict[int, Predicate] = {pred.dtid: pred for pred in self.bridgings_gold}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2anaphor_pred:
anaphor_pred = dtid2anaphor_pred[dtid]
antecedents_pred: List[BaseArgument] = \
self._filter_args(self.document_pred.get_arguments(anaphor_pred, relax=False)['ノ'], anaphor_pred)
else:
antecedents_pred = []
assert len(antecedents_pred) in (0, 1) # in bert_pas_analysis, predict one argument for one predicate
if dtid in dtid2anaphor_gold:
anaphor_gold: Predicate = dtid2anaphor_gold[dtid]
antecedents_gold: List[BaseArgument] = \
self._filter_args(self.document_gold.get_arguments(anaphor_gold, relax=False)['ノ'], anaphor_gold)
arguments: Dict[str, List[BaseArgument]] = self.document_gold.get_arguments(anaphor_gold, relax=True)
antecedents_gold_relaxed: List[BaseArgument] = \
self._filter_args(arguments['ノ'] + arguments['ノ?'], anaphor_gold)
else:
antecedents_gold = antecedents_gold_relaxed = []
key = (dtid, 'ノ')
# calculate precision
if antecedents_pred:
antecedent_pred = antecedents_pred[0]
if antecedent_pred in antecedents_gold_relaxed:
# use dep_type of gold antecedent if possible
antecedent_gold = antecedents_gold_relaxed[antecedents_gold_relaxed.index(antecedent_pred)]
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_gold.dep_type]
if analysis == 'overt':
analysis = 'dep'
self.comp_result[key] = analysis
measures[analysis].correct += 1
else:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_pred.dep_type]
if analysis == 'overt':
analysis = 'dep'
self.comp_result[key] = 'wrong'
measures[analysis].denom_pred += 1
# calculate recall
if antecedents_gold or (self.comp_result.get(key, None) in Scorer.DEPTYPE2ANALYSIS.values()):
antecedent_gold = None
for ant in antecedents_gold_relaxed:
if ant in antecedents_pred:
antecedent_gold = ant # 予測されている先行詞を優先して正解の先行詞に採用
break
if antecedent_gold is not None:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_gold.dep_type]
if analysis == 'overt':
analysis = 'dep'
assert self.comp_result[key] == analysis
else:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedents_gold[0].dep_type]
if analysis == 'overt':
analysis = 'dep'
if antecedents_pred:
assert self.comp_result[key] == 'wrong'
else:
self.comp_result[key] = 'wrong'
measures[analysis].denom_gold += 1
return pd.Series(measures)
def _evaluate_coref(self) -> pd.Series:
"""calculate coreference resolution scores"""
measure = Measure()
dtid2mention_pred: Dict[int, Mention] = {bp.dtid: self.document_pred.mentions[bp.dtid]
for bp in self.mentions_pred
if bp.dtid in self.document_pred.mentions}
dtid2mention_gold: Dict[int, Mention] = {bp.dtid: self.document_gold.mentions[bp.dtid]
for bp in self.mentions_gold
if bp.dtid in self.document_gold.mentions}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2mention_pred:
src_mention_pred = dtid2mention_pred[dtid]
tgt_mentions_pred = \
self.filter_mentions(self.document_pred.get_siblings(src_mention_pred), src_mention_pred)
exophors_pred = {e.exophor for e in map(self.document_pred.entities.get, src_mention_pred.eids)
if e.is_special}
else:
tgt_mentions_pred = exophors_pred = set()
if dtid in dtid2mention_gold:
src_mention_gold = dtid2mention_gold[dtid]
tgt_mentions_gold = self.filter_mentions(self.document_gold.get_siblings(src_mention_gold, relax=False),
src_mention_gold)
tgt_mentions_gold_relaxed = self.filter_mentions(
self.document_gold.get_siblings(src_mention_gold, relax=True), src_mention_gold)
exophors_gold = {self.relax_exophors[e.exophor] for e
in map(self.document_gold.entities.get, src_mention_gold.eids)
if e.is_special and e.exophor in self.relax_exophors}
exophors_gold_relaxed = {self.relax_exophors[e.exophor] for e
in map(self.document_gold.entities.get, src_mention_gold.all_eids)
if e.is_special and e.exophor in self.relax_exophors}
else:
tgt_mentions_gold = tgt_mentions_gold_relaxed = exophors_gold = exophors_gold_relaxed = set()
key = (dtid, '=')
# calculate precision
if tgt_mentions_pred or exophors_pred:
if (tgt_mentions_pred & tgt_mentions_gold_relaxed) or (exophors_pred & exophors_gold_relaxed):
self.comp_result[key] = 'correct'
measure.correct += 1
else:
self.comp_result[key] = 'wrong'
measure.denom_pred += 1
# calculate recall
if tgt_mentions_gold or exophors_gold or (self.comp_result.get(key, None) == 'correct'):
if (tgt_mentions_pred & tgt_mentions_gold_relaxed) or (exophors_pred & exophors_gold_relaxed):
assert self.comp_result[key] == 'correct'
else:
self.comp_result[key] = 'wrong'
measure.denom_gold += 1
return pd.Series([measure], index=['all'])
@staticmethod
def filter_mentions(tgt_mentions: Set[Mention], src_mention: Mention) -> Set[Mention]:
"""filter out cataphors"""
return {tgt_mention for tgt_mention in tgt_mentions if tgt_mention.dtid < src_mention.dtid}
@dataclass(frozen=True)
class ScoreResult:
"""A data class for storing the numerical result of an evaluation"""
measures_pas: Optional[pd.DataFrame]
measures_bridging: Optional[pd.Series]
measure_coref: Optional[pd.Series]
def to_dict(self) -> Dict[str, Dict[str, 'Measure']]:
"""convert data to dictionary"""
df_all = pd.DataFrame(index=['all_case'])
if self.pas:
df_pas: pd.DataFrame = self.measures_pas.copy()
df_pas['zero'] = df_pas['zero_intra'] + df_pas['zero_inter'] + df_pas['zero_exophora']
df_pas['dep_zero'] = df_pas['zero'] + df_pas['dep']
df_pas['all'] = df_pas['dep_zero'] + df_pas['overt']
df_all = pd.concat([df_pas, df_all])
df_all.loc['all_case'] = df_pas.sum(axis=0)
if self.bridging:
df_bar = self.measures_bridging.copy()
df_bar['zero'] = df_bar['zero_intra'] + df_bar['zero_inter'] + df_bar['zero_exophora']
df_bar['dep_zero'] = df_bar['zero'] + df_bar['dep']
assert df_bar['overt'] == Measure() # No overt in BAR
df_bar['all'] = df_bar['dep_zero']
df_all.at['all_case', 'bridging'] = df_bar['all']
if self.coreference:
df_all.at['all_case', 'coreference'] = self.measure_coref['all']
return {k1: {k2: v2 for k2, v2 in v1.items() if pd.notnull(v2)}
for k1, v1 in df_all.to_dict(orient='index').items()}
def export_txt(self,
destination: Union[str, Path, TextIO]
) -> None:
"""Export the evaluation results in a text format.
Args:
            destination (Union[str, Path, TextIO]): where to write the results
"""
lines = []
for key, ms in self.to_dict().items():
lines.append(f'{key}格' if self.pas and key in self.measures_pas.index else key)
for analysis, measure in ms.items():
lines.append(f' {analysis}')
lines.append(f' precision: {measure.precision:.4f} ({measure.correct}/{measure.denom_pred})')
lines.append(f' recall : {measure.recall:.4f} ({measure.correct}/{measure.denom_gold})')
lines.append(f' F : {measure.f1:.4f}')
text = '\n'.join(lines) + '\n'
if isinstance(destination, str) or isinstance(destination, Path):
with Path(destination).open('wt') as writer:
writer.write(text)
elif isinstance(destination, io.TextIOBase):
destination.write(text)
def export_csv(self,
destination: Union[str, Path, TextIO],
sep: str = ','
) -> None:
"""Export the evaluation results in a csv format.
Args:
            destination (Union[str, Path, TextIO]): where to write the results
            sep (str): delimiter (default: ',')
"""
text = ''
result_dict = self.to_dict()
text += 'case' + sep
text += sep.join(result_dict['all_case'].keys()) + '\n'
for case, measures in result_dict.items():
text += CASE2YOMI.get(case, case) + sep
text += sep.join(f'{measure.f1:.6}' for measure in measures.values())
text += '\n'
if isinstance(destination, str) or isinstance(destination, Path):
with Path(destination).open('wt') as writer:
writer.write(text)
elif isinstance(destination, io.TextIOBase):
destination.write(text)
@property
def pas(self):
"""Whether self includes the score of predicate-argument structure analysis."""
return self.measures_pas is not None
@property
def bridging(self):
"""Whether self includes the score of bridging anaphora resolution."""
return self.measures_bridging is not None
@property
def coreference(self):
"""Whether self includes the score of coreference resolution."""
return self.measure_coref is not None
def __add__(self, other: 'ScoreResult') -> 'ScoreResult':
measures_pas = self.measures_pas + other.measures_pas if self.pas else None
measures_bridging = self.measures_bridging + other.measures_bridging if self.bridging else None
measure_coref = self.measure_coref + other.measure_coref if self.coreference else None
return ScoreResult(measures_pas, measures_bridging, measure_coref)
@dataclass
class Measure:
"""A data class to calculate and represent F-measure"""
denom_pred: int = 0
denom_gold: int = 0
correct: int = 0
def __add__(self, other: 'Measure'):
return Measure(self.denom_pred + other.denom_pred,
self.denom_gold + other.denom_gold,
self.correct + other.correct)
def __eq__(self, other: 'Measure'):
return self.denom_pred == other.denom_pred and \
self.denom_gold == other.denom_gold and \
self.correct == other.correct
@property
def precision(self) -> float:
if self.denom_pred == 0:
return .0
return self.correct / self.denom_pred
@property
def recall(self) -> float:
if self.denom_gold == 0:
return .0
return self.correct / self.denom_gold
@property
def f1(self) -> float:
if self.denom_pred + self.denom_gold == 0:
return .0
return 2 * self.correct / (self.denom_pred + self.denom_gold)
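# Illustrative sketch (not part of the original evaluation code): how a Measure accumulates
# counts and derives precision/recall/F1. The numbers below are made up.
#   m = Measure(denom_pred=4, denom_gold=5, correct=3)
#   m.precision  # 3 / 4 = 0.75
#   m.recall     # 3 / 5 = 0.6
#   m.f1         # 2 * 3 / (4 + 5) = 0.666...
#   m + Measure(denom_pred=1, denom_gold=1, correct=1)  # -> Measure(denom_pred=5, denom_gold=6, correct=4)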
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--prediction-dir', default=None, type=str,
help='path to directory where system output KWDLC files exist (default: None)')
parser.add_argument('--gold-dir', default=None, type=str,
help='path to directory where gold KWDLC files exist (default: None)')
parser.add_argument('--coreference', '--coref', '--cr', action='store_true', default=False,
help='perform coreference resolution')
parser.add_argument('--bridging', '--brg', '--bar', action='store_true', default=False,
help='perform bridging anaphora resolution')
parser.add_argument('--case-string', type=str, default='ガ,ヲ,ニ,ガ2',
help='case strings separated by ","')
parser.add_argument('--exophors', '--exo', type=str, default='著者,読者,不特定:人,不特定:物',
help='exophor strings separated by ","')
parser.add_argument('--read-prediction-from-pas-tag', action='store_true', default=False,
help='use <述語項構造:> tag instead of <rel > tag in prediction files')
parser.add_argument('--pas-target', choices=['', 'pred', 'noun', 'all'], default='pred',
help='PAS analysis evaluation target (pred: verbal predicates, noun: nominal predicates)')
parser.add_argument('--result-html', default=None, type=str,
help='path to html file which prediction result is exported (default: None)')
parser.add_argument('--result-csv', default=None, type=str,
help='path to csv file which prediction result is exported (default: None)')
args = parser.parse_args()
reader_gold = KyotoReader(Path(args.gold_dir), extract_nes=False, use_pas_tag=False)
reader_pred = KyotoReader(
Path(args.prediction_dir),
extract_nes=False,
use_pas_tag=args.read_prediction_from_pas_tag,
)
documents_pred = reader_pred.process_all_documents()
documents_gold = reader_gold.process_all_documents()
assert set(args.case_string.split(',')) <= set(CASE2YOMI.keys())
msg = '"ノ" found in case string. If you want to perform bridging anaphora resolution, specify "--bridging" ' \
'option instead'
assert 'ノ' not in args.case_string.split(','), msg
scorer = Scorer(documents_pred, documents_gold,
target_cases=args.case_string.split(','),
target_exophors=args.exophors.split(','),
coreference=args.coreference,
bridging=args.bridging,
pas_target=args.pas_target)
result = scorer.run()
if args.result_html:
scorer.write_html(Path(args.result_html))
if args.result_csv:
result.export_csv(args.result_csv)
result.export_txt(sys.stdout)
if __name__ == '__main__':
main()
|
import logging
from typing import Dict, List, Iterable
import asyncio
import discord
from discord.ext import commands
import emoji
from .game import RideTheBus, GameState
from .result import Result
from utils import playingcards
logger = logging.getLogger(__name__)
COLOR = 0xFFFF00
ROUND_RULES = {
GameState.RED_OR_BLACK: "Answer the questions to build your hand, drinking along the way.",
GameState.PYRAMID: "Play your matching cards to force others to drink.",
GameState.RIDE_THE_BUS: "The loser has to ride the bus, and drink. A lot.",
}
ROUND_MESSAGES = {
GameState.RED_OR_BLACK: {
"prompt": emoji.emojize(":red_square: Red or :black_large_square: Black?"),
"reactions": {
emoji.emojize(":red_square:"): "red",
emoji.emojize(":black_large_square:"): "black",
},
},
GameState.HIGHER_OR_LOWER: {
"prompt": emoji.emojize(":arrow_up: Higher or :arrow_down: Lower?"),
"reactions": {
"⬆️": "higher",
"⬇️": "lower",
},
},
GameState.INSIDE_OR_OUTSIDE: {
"prompt": emoji.emojize(":thumbsup: Inside or :thumbsdown: Outside?"),
"reactions": {
"👍": "inside",
"👎": "outside",
},
},
GameState.SUIT: {
"prompt": emoji.emojize(
":clubs: Club, :diamonds: Diamond, :hearts: Heart, or :spades: Spade?"
),
"reactions": {
"♣️": "clubs",
"♦️": "diamonds",
"♥️": "hearts",
"♠️": "spades",
},
},
}
class RideTheBusCog(commands.Cog):
def __init__(self, bot: commands.Bot):
super().__init__()
self.bot = bot
self.keys: Dict[str, RideTheBus] = {}
self.channels: Dict[discord.abc.Messageable, RideTheBus] = {}
self.players: Dict[discord.User, RideTheBus] = {}
        self.context: Dict[str, commands.Context] = {}
self.msg_refs: Dict[discord.Message, RideTheBus] = {}
@commands.Cog.listener()
async def on_reaction_add(self, reaction: discord.Reaction, user: discord.User):
# Ignore reactions from the bot
if user.bot:
return
# Try to get the game this reaction is for
try:
game = self.msg_refs[reaction.message]
        except KeyError:
return
# Validate the reaction
if not self._validate_reaction(game, reaction, user):
# Remove the invalid reaction
await reaction.message.remove_reaction(reaction, user)
return
logger.info(f"Processing {reaction} by {user} in {game.key}")
result = game.guess(
user.id, ROUND_MESSAGES[game.state]["reactions"][reaction.emoji]
)
embed = self._build_result(game, result)
ctx = self.context[game.key]
await ctx.send(embed=embed)
self.msg_refs.pop(reaction.message)
await asyncio.sleep(3)
await self._handle_state(game)
def _build_result(self, game: RideTheBus, result: Result):
user = self.bot.get_user(result.player.id)
card = result.player.cards[-1]
result = "WON" if result.successful else "LOST"
embed = discord.Embed(
color=COLOR, title="Ride The Bus", description=f"Drew {card} and {result}"
)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
embed.set_image(url=playingcards.get_card_image_url(card))
return embed
def _validate_reaction(
self, game: RideTheBus, reaction: discord.Reaction, user: discord.User
):
        if user.id != game.current_player.id:
return False
try:
if reaction.emoji not in ROUND_MESSAGES[game.state]["reactions"]:
return False
        except KeyError:
return False
return True
async def _handle_state(self, game: RideTheBus):
if game.state in [GameState.INIT, GameState.COMPLETE]:
return
await self._send_prompt(game)
@commands.group(brief="Overview of Ride The Bus")
async def bus(self, ctx: commands.Context):
if ctx.invoked_subcommand is not None:
return
embed = discord.Embed(
color=COLOR,
title="Ride The Bus",
description="A card based drinking game.",
)
await ctx.send(embed=embed)
@bus.command(brief="Create a game of Ride The Bus")
async def create(self, ctx: commands.Context):
if ctx.channel in self.channels:
await ctx.reply(
f"There is already a Ride The Bus game in progress here!\nUse `{self.bot.command_prefix}bus join {self.channels[ctx.channel].key}` to join."
)
return
game = RideTheBus()
self.channels[ctx.channel] = game
self.keys[game.key] = game
self.context[game.key] = ctx
embed = discord.Embed(
color=COLOR,
title="Ride The Bus",
description=f"Ride The Bus game created.\nUse `{self.bot.command_prefix}bus join {self.channels[ctx.channel].key}` to join.",
)
await ctx.send(embed=embed)
@bus.command(brief="Join an existing game of Ride The Bus")
async def join(self, ctx: commands.Context, key: str):
if key not in self.keys:
await ctx.reply(f"Sorry, there is no game with the key {key}...")
return
        if ctx.author in self.players:
game = self.players[ctx.author]
await ctx.reply(f"Sorry, you're already in a game {game.key}.")
return
user = ctx.author
game = self.keys[key]
game.add_player(user.id, user.name)
self.players[user] = game
await ctx.reply(f"You have joined the game {game.key}!")
@bus.command(brief="Leave your game of Ride The Bus")
async def leave(self, ctx: commands.Context):
if ctx.author not in self.players:
await ctx.reply(f"Sorry, you're not in any games you can leave...")
return
# Get a reference to the game
game = self.players[ctx.author]
# Remove the player from the game
game.remove_player(ctx.author.id)
# Remove the player from the player list
self.players.pop(ctx.author)
await ctx.reply(f"You have left the game {game.key}.")
@bus.command(brief="Start a game of Ride The Bus")
async def start(self, ctx: commands.Context):
if ctx.author not in self.players:
await ctx.reply(f"Sorry, you're not in any games you can start...")
game = self.players[ctx.author]
game.start()
embed = self._build_round_start(game)
await ctx.send(embed=embed)
await asyncio.sleep(5)
await self._send_prompt(game)
def _build_round_start(self, game: RideTheBus):
player_list = self._build_player_list(game)
embed = discord.Embed(
color=COLOR,
title="Ride The Bus",
description=f"{ROUND_RULES.get(game.state, "")}\n\nPlayers:\n{player_list}",
)
return embed
async def _send_prompt(self, game: RideTheBus):
ctx = self.context[game.key]
user = self.bot.get_user(game.current_player.id)
embed = self._build_prompt(game, user)
msg = await ctx.send(embed=embed)
await self._add_reactions(msg, ROUND_MESSAGES[game.state]["reactions"].keys())
self.msg_refs[msg] = game
def _build_prompt(self, game: RideTheBus, user: discord.User):
prompt = ROUND_MESSAGES[game.state]["prompt"]
embed = discord.Embed(color=COLOR, title="Ride The Bus", description=prompt)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
return embed
def _build_player_list(self, game: RideTheBus):
s = ""
for i, player in enumerate(game.player_list):
s += f"{i+1}: {player.name}\n"
return s
async def _add_reactions(self, msg: discord.Message, reactions: Iterable[str]):
for reaction in reactions:
await msg.add_reaction(reaction)
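# Illustrative sketch (assumed setup code, not part of the original cog): registering this cog
# with a discord.py 1.x style bot. The prefix and token below are placeholders.
#   bot = commands.Bot(command_prefix="!")
#   bot.add_cog(RideTheBusCog(bot))
#   bot.run("YOUR_DISCORD_BOT_TOKEN")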
|
import logging
from typing import Dict, List, Iterable
import asyncio
import discord
from discord.ext import commands
import emoji
from .game import RideTheBus, GameState
from .result import Result
from utils import playingcards
logger = logging.getLogger(__name__)
COLOR = 0xFFFF00
ROUND_RULES = {
GameState.RED_OR_BLACK: "Answer the questions to build your hand, drinking along the way.",
GameState.PYRAMID: "Play your matching cards to force others to drink.",
GameState.RIDE_THE_BUS: "The loser has to ride the bus, and drink. A lot.",
}
ROUND_MESSAGES = {
GameState.RED_OR_BLACK: {
"prompt": emoji.emojize(":red_square: Red or :black_large_square: Black?"),
"reactions": {
emoji.emojize(":red_square:"): "red",
emoji.emojize(":black_large_square:"): "black",
},
},
GameState.HIGHER_OR_LOWER: {
"prompt": emoji.emojize(":arrow_up: Higher or :arrow_down: Lower?"),
"reactions": {
"⬆️": "higher",
"⬇️": "lower",
},
},
GameState.INSIDE_OR_OUTSIDE: {
"prompt": emoji.emojize(":thumbsup: Inside or :thumbsdown: Outside?"),
"reactions": {
"👍": "inside",
"👎": "outside",
},
},
GameState.SUIT: {
"prompt": emoji.emojize(
":clubs: Club, :diamonds: Diamond, :hearts: Heart, or :spades: Spade?"
),
"reactions": {
"♣️": "clubs",
"♦️": "diamonds",
"♥️": "hearts",
"♠️": "spades",
},
},
}
class RideTheBusCog(commands.Cog):
def __init__(self, bot: commands.Bot):
super().__init__()
self.bot = bot
self.keys: Dict[str, RideTheBus] = {}
self.channels: Dict[discord.abc.Messageable, RideTheBus] = {}
self.players: Dict[discord.User, RideTheBus] = {}
self.context: Dict[str, discord.Context] = {}
self.msg_refs: Dict[discord.Message, RideTheBus] = {}
@commands.Cog.listener()
async def on_reaction_add(self, reaction: discord.Reaction, user: discord.User):
# Ignore reactions from the bot
if user.bot:
return
# Try to get the game this reaction is for
try:
game = self.msg_refs[reaction.message]
except:
return
# Validate the reaction
if not self._validate_reaction(game, reaction, user):
# Remove the invalid reaction
await reaction.message.remove_reaction(reaction, user)
return
logger.info(f"Processing {reaction} by {user} in {game.key}")
result = game.guess(
user.id, ROUND_MESSAGES[game.state]["reactions"][reaction.emoji]
)
embed = self._build_result(game, result)
ctx = self.context[game.key]
await ctx.send(embed=embed)
self.msg_refs.pop(reaction.message)
await asyncio.sleep(3)
await self._handle_state(game)
def _build_result(self, game: RideTheBus, result: Result):
user = self.bot.get_user(result.player.id)
card = result.player.cards[-1]
result = "WON" if result.successful else "LOST"
embed = discord.Embed(
color=COLOR, title="Ride The Bus", description=f"Drew {card} and {result}"
)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
embed.set_image(url=playingcards.get_card_image_url(card))
return embed
def _validate_reaction(
self, game: RideTheBus, reaction: discord.Reaction, user: discord.User
):
if user.id is not game.current_player.id:
return False
try:
if reaction.emoji not in ROUND_MESSAGES[game.state]["reactions"]:
return False
except:
return False
return True
async def _handle_state(self, game: RideTheBus):
if game.state in [GameState.INIT, GameState.COMPLETE]:
return
await self._send_prompt(game)
@commands.group(brief="Overview of Ride The Bus")
async def bus(self, ctx: commands.Context):
if ctx.invoked_subcommand is not None:
return
embed = discord.Embed(
color=COLOR,
title="Ride The Bus",
description="A card based drinking game.",
)
await ctx.send(embed=embed)
@bus.command(brief="Create a game of Ride The Bus")
async def create(self, ctx: commands.Context):
if ctx.channel in self.channels:
await ctx.reply(
f"There is already a Ride The Bus game in progress here!\nUse `{self.bot.command_prefix}bus join {self.channels[ctx.channel].key}` to join."
)
return
game = RideTheBus()
self.channels[ctx.channel] = game
self.keys[game.key] = game
self.context[game.key] = ctx
embed = discord.Embed(
color=COLOR,
title="Ride The Bus",
description=f"Ride The Bus game created.\nUse `{self.bot.command_prefix}bus join {self.channels[ctx.channel].key}` to join.",
)
await ctx.send(embed=embed)
@bus.command(brief="Join an existing game of Ride The Bus")
async def join(self, ctx: commands.Context, key: str):
if key not in self.keys:
await ctx.reply(f"Sorry, there is no game with the key {key}...")
return
if key in self.players:
game = self.players[ctx.author]
await ctx.reply(f"Sorry, you're already in a game {game.key}.")
return
user = ctx.author
game = self.keys[key]
game.add_player(user.id, user.name)
self.players[user] = game
await ctx.reply(f"You have joined the game {game.key}!")
@bus.command(brief="Leave your game of Ride The Bus")
async def leave(self, ctx: commands.Context):
if ctx.author not in self.players:
await ctx.reply(f"Sorry, you're not in any games you can leave...")
return
# Get a reference to the game
game = self.players[ctx.author]
# Remove the player from the game
game.remove_player(ctx.author.id)
# Remove the player from the player list
self.players.pop(ctx.author)
await ctx.reply(f"You have left the game {game.key}.")
@bus.command(brief="Start a game of Ride The Bus")
async def start(self, ctx: commands.Context):
if ctx.author not in self.players:
await ctx.reply(f"Sorry, you're not in any games you can start...")
game = self.players[ctx.author]
game.start()
embed = self._build_round_start(game)
await ctx.send(embed=embed)
await asyncio.sleep(5)
await self._send_prompt(game)
def _build_round_start(self, game: RideTheBus):
player_list = self._build_player_list(game)
embed = discord.Embed(
color=COLOR,
title="Ride The Bus",
description=f"{ROUND_RULES.get(game.state, '')}\n\nPlayers:\n{player_list}",
)
return embed
async def _send_prompt(self, game: RideTheBus):
ctx = self.context[game.key]
user = self.bot.get_user(game.current_player.id)
embed = self._build_prompt(game, user)
msg = await ctx.send(embed=embed)
await self._add_reactions(msg, ROUND_MESSAGES[game.state]["reactions"].keys())
self.msg_refs[msg] = game
def _build_prompt(self, game: RideTheBus, user: discord.User):
prompt = ROUND_MESSAGES[game.state]["prompt"]
embed = discord.Embed(color=COLOR, title="Ride The Bus", description=prompt)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
return embed
def _build_player_list(self, game: RideTheBus):
s = ""
for i, player in enumerate(game.player_list):
s += f"{i+1}: {player.name}\n"
return s
async def _add_reactions(self, msg: discord.Message, reactions: Iterable[str]):
for reaction in reactions:
await msg.add_reaction(reaction)
|
# Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.
import requests
class TweepyException(Exception):
"""Base exception for Tweepy
.. versionadded:: 4.0
"""
pass
class HTTPException(TweepyException):
"""HTTPException()
Exception raised when an HTTP request fails
.. versionadded:: 4.0
Attributes
----------
response : requests.Response
Requests Response from the Twitter API
api_errors : List[dict[str, Union[int, str]]]
The errors the Twitter API responded with, if any
api_codes : List[int]
The error codes the Twitter API responded with, if any
api_messages : List[str]
The error messages the Twitter API responded with, if any
"""
def __init__(self, response):
self.response = response
self.api_errors = []
self.api_codes = []
self.api_messages = []
try:
response_json = response.json()
except requests.JSONDecodeError:
super().__init__(f"{response.status_code} {response.reason}")
else:
errors = response_json.get("errors", [])
# Use := when support for Python 3.7 is dropped
if "error" in response_json:
errors.append(response_json["error"])
error_text = ""
for error in errors:
self.api_errors.append(error)
if "code" in error:
self.api_codes.append(error["code"])
if "message" in error:
self.api_messages.append(error["message"])
if "code" in error and "message" in error:
error_text += f"\n{error["code"]} - {error["message"]}"
elif "message" in error:
error_text += '\n' + error["message"]
super().__init__(
f"{response.status_code} {response.reason}{error_text}"
)
class BadRequest(HTTPException):
"""BadRequest()
Exception raised for a 400 HTTP status code
.. versionadded:: 4.0
"""
pass
class Unauthorized(HTTPException):
"""Unauthorized()
Exception raised for a 401 HTTP status code
.. versionadded:: 4.0
"""
pass
class Forbidden(HTTPException):
"""Forbidden()
Exception raised for a 403 HTTP status code
.. versionadded:: 4.0
"""
pass
class NotFound(HTTPException):
"""NotFound()
Exception raised for a 404 HTTP status code
.. versionadded:: 4.0
"""
pass
class TooManyRequests(HTTPException):
"""TooManyRequests()
Exception raised for a 429 HTTP status code
.. versionadded:: 4.0
"""
pass
class TwitterServerError(HTTPException):
"""TwitterServerError()
Exception raised for a 5xx HTTP status code
.. versionadded:: 4.0
"""
pass
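# Illustrative usage sketch (the `client` below is assumed to be a tweepy.Client created
# elsewhere; it is not defined in this module):
#   try:
#       client.get_user(username="example")
#   except TooManyRequests as error:
#       print(error.api_codes, error.api_messages)  # e.g. [88], ['Rate limit exceeded']
#   except HTTPException as error:
#       print(error.response.status_code, error.api_errors)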
|
# Tweepy
# Copyright 2009-2022 Joshua Roesslein
# See LICENSE for details.
import requests
class TweepyException(Exception):
"""Base exception for Tweepy
.. versionadded:: 4.0
"""
pass
class HTTPException(TweepyException):
"""HTTPException()
Exception raised when an HTTP request fails
.. versionadded:: 4.0
Attributes
----------
response : requests.Response
Requests Response from the Twitter API
api_errors : List[dict[str, Union[int, str]]]
The errors the Twitter API responded with, if any
api_codes : List[int]
The error codes the Twitter API responded with, if any
api_messages : List[str]
The error messages the Twitter API responded with, if any
"""
def __init__(self, response):
self.response = response
self.api_errors = []
self.api_codes = []
self.api_messages = []
try:
response_json = response.json()
except requests.JSONDecodeError:
super().__init__(f"{response.status_code} {response.reason}")
else:
errors = response_json.get("errors", [])
# Use := when support for Python 3.7 is dropped
if "error" in response_json:
errors.append(response_json["error"])
error_text = ""
for error in errors:
self.api_errors.append(error)
if "code" in error:
self.api_codes.append(error["code"])
if "message" in error:
self.api_messages.append(error["message"])
if "code" in error and "message" in error:
error_text += f"\n{error['code']} - {error['message']}"
elif "message" in error:
error_text += '\n' + error["message"]
super().__init__(
f"{response.status_code} {response.reason}{error_text}"
)
class BadRequest(HTTPException):
"""BadRequest()
Exception raised for a 400 HTTP status code
.. versionadded:: 4.0
"""
pass
class Unauthorized(HTTPException):
"""Unauthorized()
Exception raised for a 401 HTTP status code
.. versionadded:: 4.0
"""
pass
class Forbidden(HTTPException):
"""Forbidden()
Exception raised for a 403 HTTP status code
.. versionadded:: 4.0
"""
pass
class NotFound(HTTPException):
"""NotFound()
Exception raised for a 404 HTTP status code
.. versionadded:: 4.0
"""
pass
class TooManyRequests(HTTPException):
"""TooManyRequests()
Exception raised for a 429 HTTP status code
.. versionadded:: 4.0
"""
pass
class TwitterServerError(HTTPException):
"""TwitterServerError()
Exception raised for a 5xx HTTP status code
.. versionadded:: 4.0
"""
pass
|
from collections import defaultdict
import csv
from logging import Logger
import logging
import os
import sys
from typing import Callable, Dict, List, Tuple
import numpy as np
import pandas as pd
from .run_training import run_training
from chemprop.args import TrainArgs
from chemprop.constants import TEST_SCORES_FILE_NAME, TRAIN_LOGGER_NAME
from chemprop.data import get_data, get_task_names, MoleculeDataset, validate_dataset_type
from chemprop.utils import create_logger, makedirs, timeit
from chemprop.features import set_extra_atom_fdim, set_extra_bond_fdim
from chemprop.models import MoleculeModel
@timeit(logger_name=TRAIN_LOGGER_NAME)
def cross_validate(args: TrainArgs,
train_func: Callable[[TrainArgs, MoleculeDataset, Logger], Dict[str, List[float]]],
model_list: List[MoleculeModel] = None) -> Tuple[float, float]:
"""
Runs k-fold cross-validation.
For each of k splits (folds) of the data, trains and tests a model on that split
and aggregates the performance across folds.
:param args: A :class:`~chemprop.args.TrainArgs` object containing arguments for
loading data and training the Chemprop model.
:param train_func: Function which runs training.
:param model_list: A list of :class:`~chemprop.models.model.MoleculeModel`.
:return: A tuple containing the mean and standard deviation performance across folds.
"""
logging.root.manager.loggerDict.pop(TRAIN_LOGGER_NAME,None)
logger = create_logger(name=TRAIN_LOGGER_NAME, save_dir=args.save_dir, quiet=args.quiet)
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# Initialize relevant variables
init_seed = args.seed
save_dir = args.save_dir
args.task_names = get_task_names(path=args.data_path, smiles_columns=args.smiles_columns,
target_columns=args.target_columns, ignore_columns=args.ignore_columns)
# Print command line
debug('Command line')
    debug(f'python {" ".join(sys.argv)}')
# Print args
debug('Args')
debug(args)
# Save args
makedirs(args.save_dir)
args.save(os.path.join(args.save_dir, 'args.json'))
# Get data
debug('Loading data')
data = get_data(
path=args.data_path,
args=args,
smiles_columns=args.smiles_columns,
logger=logger,
skip_none_targets=True
)
validate_dataset_type(data, dataset_type=args.dataset_type)
args.features_size = data.features_size()
if args.atom_descriptors == 'descriptor':
args.atom_descriptors_size = data.atom_descriptors_size()
args.ffn_hidden_size += args.atom_descriptors_size
elif args.atom_descriptors == 'feature':
args.atom_features_size = data.atom_features_size()
set_extra_atom_fdim(args.atom_features_size)
if args.bond_features_path is not None:
args.bond_features_size = data.bond_features_size()
set_extra_bond_fdim(args.bond_features_size)
debug(f'Number of tasks = {args.num_tasks}')
# Run training on different random seeds for each fold
all_scores = defaultdict(list)
for fold_num in range(args.num_folds):
info(f'Fold {fold_num}')
args.seed = init_seed + fold_num
args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
makedirs(args.save_dir)
data.reset_features_and_targets()
model_scores = train_func(args, data, logger, model_list)
for metric, scores in model_scores.items():
all_scores[metric].append(scores)
all_scores = dict(all_scores)
# Convert scores to numpy arrays
for metric, scores in all_scores.items():
all_scores[metric] = np.array(scores)
# Report results
info(f'{args.num_folds}-fold cross validation')
# Report scores for each fold
for fold_num in range(args.num_folds):
for metric, scores in all_scores.items():
info(f'\tSeed {init_seed + fold_num} ==> test {metric} = {np.nanmean(scores[fold_num]):.6f}')
if args.show_individual_scores:
for task_name, score in zip(args.task_names, scores[fold_num]):
info(f'\t\tSeed {init_seed + fold_num} ==> test {task_name} {metric} = {score:.6f}')
# Report scores across folds
for metric, scores in all_scores.items():
avg_scores = np.nanmean(scores, axis=1) # average score for each model across tasks
mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
info(f'Overall test {metric} = {mean_score:.6f} +/- {std_score:.6f}')
if args.show_individual_scores:
for task_num, task_name in enumerate(args.task_names):
info(f'\tOverall test {task_name} {metric} = '
f'{np.nanmean(scores[:, task_num]):.6f} +/- {np.nanstd(scores[:, task_num]):.6f}')
# Save scores
with open(os.path.join(save_dir, TEST_SCORES_FILE_NAME), 'w') as f:
writer = csv.writer(f)
header = ['Task']
for metric in args.metrics:
header += [f'Mean {metric}', f'Standard deviation {metric}'] + \
[f'Fold {i} {metric}' for i in range(args.num_folds)]
writer.writerow(header)
for task_num, task_name in enumerate(args.task_names):
row = [task_name]
for metric, scores in all_scores.items():
task_scores = scores[:, task_num]
mean, std = np.nanmean(task_scores), np.nanstd(task_scores)
row += [mean, std] + task_scores.tolist()
writer.writerow(row)
# Determine mean and std score of main metric
avg_scores = np.nanmean(all_scores[args.metric], axis=1)
mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
# Optionally merge and save test preds
if args.save_preds:
all_preds = pd.concat([pd.read_csv(os.path.join(save_dir, f'fold_{fold_num}', 'test_preds.csv'))
for fold_num in range(args.num_folds)])
all_preds.to_csv(os.path.join(save_dir, 'test_preds.csv'), index=False)
for handler in logger.handlers[:]:
handler.close()
logger.removeHandler(handler)
del logger
return mean_score, std_score
def chemprop_train() -> None:
"""Parses Chemprop training arguments and trains (cross-validates) a Chemprop model.
This is the entry point for the command line command :code:`chemprop_train`.
"""
cross_validate(args=TrainArgs().parse_args(), train_func=run_training)
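# Illustrative sketch (hypothetical file paths and settings) of calling cross_validate
# programmatically instead of via the `chemprop_train` command line entry point:
#   args = TrainArgs().parse_args([
#       '--data_path', 'data.csv',
#       '--dataset_type', 'regression',
#       '--save_dir', 'checkpoints',
#       '--num_folds', '3',
#   ])
#   mean_score, std_score = cross_validate(args=args, train_func=run_training)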
|
from collections import defaultdict
import csv
from logging import Logger
import logging
import os
import sys
from typing import Callable, Dict, List, Tuple
import numpy as np
import pandas as pd
from .run_training import run_training
from chemprop.args import TrainArgs
from chemprop.constants import TEST_SCORES_FILE_NAME, TRAIN_LOGGER_NAME
from chemprop.data import get_data, get_task_names, MoleculeDataset, validate_dataset_type
from chemprop.utils import create_logger, makedirs, timeit
from chemprop.features import set_extra_atom_fdim, set_extra_bond_fdim
from chemprop.models import MoleculeModel
@timeit(logger_name=TRAIN_LOGGER_NAME)
def cross_validate(args: TrainArgs,
train_func: Callable[[TrainArgs, MoleculeDataset, Logger], Dict[str, List[float]]],
model_list: List[MoleculeModel] = None) -> Tuple[float, float]:
"""
Runs k-fold cross-validation.
For each of k splits (folds) of the data, trains and tests a model on that split
and aggregates the performance across folds.
:param args: A :class:`~chemprop.args.TrainArgs` object containing arguments for
loading data and training the Chemprop model.
:param train_func: Function which runs training.
:param model_list: A list of :class:`~chemprop.models.model.MoleculeModel`.
:return: A tuple containing the mean and standard deviation performance across folds.
"""
logging.root.manager.loggerDict.pop(TRAIN_LOGGER_NAME,None)
logger = create_logger(name=TRAIN_LOGGER_NAME, save_dir=args.save_dir, quiet=args.quiet)
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# Initialize relevant variables
init_seed = args.seed
save_dir = args.save_dir
args.task_names = get_task_names(path=args.data_path, smiles_columns=args.smiles_columns,
target_columns=args.target_columns, ignore_columns=args.ignore_columns)
# Print command line
debug('Command line')
debug(f'python {" ".join(sys.argv)}')
# Print args
debug('Args')
debug(args)
# Save args
makedirs(args.save_dir)
args.save(os.path.join(args.save_dir, 'args.json'))
# Get data
debug('Loading data')
data = get_data(
path=args.data_path,
args=args,
smiles_columns=args.smiles_columns,
logger=logger,
skip_none_targets=True
)
validate_dataset_type(data, dataset_type=args.dataset_type)
args.features_size = data.features_size()
if args.atom_descriptors == 'descriptor':
args.atom_descriptors_size = data.atom_descriptors_size()
args.ffn_hidden_size += args.atom_descriptors_size
elif args.atom_descriptors == 'feature':
args.atom_features_size = data.atom_features_size()
set_extra_atom_fdim(args.atom_features_size)
if args.bond_features_path is not None:
args.bond_features_size = data.bond_features_size()
set_extra_bond_fdim(args.bond_features_size)
debug(f'Number of tasks = {args.num_tasks}')
# Run training on different random seeds for each fold
all_scores = defaultdict(list)
for fold_num in range(args.num_folds):
info(f'Fold {fold_num}')
args.seed = init_seed + fold_num
args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
makedirs(args.save_dir)
data.reset_features_and_targets()
model_scores = train_func(args, data, logger, model_list)
for metric, scores in model_scores.items():
all_scores[metric].append(scores)
all_scores = dict(all_scores)
# Convert scores to numpy arrays
for metric, scores in all_scores.items():
all_scores[metric] = np.array(scores)
# Report results
info(f'{args.num_folds}-fold cross validation')
# Report scores for each fold
for fold_num in range(args.num_folds):
for metric, scores in all_scores.items():
info(f'\tSeed {init_seed + fold_num} ==> test {metric} = {np.nanmean(scores[fold_num]):.6f}')
if args.show_individual_scores:
for task_name, score in zip(args.task_names, scores[fold_num]):
info(f'\t\tSeed {init_seed + fold_num} ==> test {task_name} {metric} = {score:.6f}')
# Report scores across folds
for metric, scores in all_scores.items():
avg_scores = np.nanmean(scores, axis=1) # average score for each model across tasks
mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
info(f'Overall test {metric} = {mean_score:.6f} +/- {std_score:.6f}')
if args.show_individual_scores:
for task_num, task_name in enumerate(args.task_names):
info(f'\tOverall test {task_name} {metric} = '
f'{np.nanmean(scores[:, task_num]):.6f} +/- {np.nanstd(scores[:, task_num]):.6f}')
# Save scores
with open(os.path.join(save_dir, TEST_SCORES_FILE_NAME), 'w') as f:
writer = csv.writer(f)
header = ['Task']
for metric in args.metrics:
header += [f'Mean {metric}', f'Standard deviation {metric}'] + \
[f'Fold {i} {metric}' for i in range(args.num_folds)]
writer.writerow(header)
for task_num, task_name in enumerate(args.task_names):
row = [task_name]
for metric, scores in all_scores.items():
task_scores = scores[:, task_num]
mean, std = np.nanmean(task_scores), np.nanstd(task_scores)
row += [mean, std] + task_scores.tolist()
writer.writerow(row)
# Determine mean and std score of main metric
avg_scores = np.nanmean(all_scores[args.metric], axis=1)
mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
# Optionally merge and save test preds
if args.save_preds:
all_preds = pd.concat([pd.read_csv(os.path.join(save_dir, f'fold_{fold_num}', 'test_preds.csv'))
for fold_num in range(args.num_folds)])
all_preds.to_csv(os.path.join(save_dir, 'test_preds.csv'), index=False)
for handler in logger.handlers[:]:
handler.close()
logger.removeHandler(handler)
del logger
return mean_score, std_score
def chemprop_train() -> None:
"""Parses Chemprop training arguments and trains (cross-validates) a Chemprop model.
This is the entry point for the command line command :code:`chemprop_train`.
"""
cross_validate(args=TrainArgs().parse_args(), train_func=run_training)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetCodeSigningConfigResult',
'AwaitableGetCodeSigningConfigResult',
'get_code_signing_config',
'get_code_signing_config_output',
]
@pulumi.output_type
class GetCodeSigningConfigResult:
"""
A collection of values returned by getCodeSigningConfig.
"""
def __init__(__self__, allowed_publishers=None, arn=None, config_id=None, description=None, id=None, last_modified=None, policies=None):
if allowed_publishers and not isinstance(allowed_publishers, list):
raise TypeError("Expected argument 'allowed_publishers' to be a list")
pulumi.set(__self__, "allowed_publishers", allowed_publishers)
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if config_id and not isinstance(config_id, str):
raise TypeError("Expected argument 'config_id' to be a str")
pulumi.set(__self__, "config_id", config_id)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_modified and not isinstance(last_modified, str):
raise TypeError("Expected argument 'last_modified' to be a str")
pulumi.set(__self__, "last_modified", last_modified)
if policies and not isinstance(policies, list):
raise TypeError("Expected argument 'policies' to be a list")
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="allowedPublishers")
def allowed_publishers(self) -> Sequence['outputs.GetCodeSigningConfigAllowedPublisherResult']:
"""
List of allowed publishers as signing profiles for this code signing configuration.
"""
return pulumi.get(self, "allowed_publishers")
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="configId")
def config_id(self) -> str:
"""
Unique identifier for the code signing configuration.
"""
return pulumi.get(self, "config_id")
@property
@pulumi.getter
def description(self) -> str:
"""
Code signing configuration description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> str:
"""
The date and time that the code signing configuration was last modified.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def policies(self) -> Sequence['outputs.GetCodeSigningConfigPolicyResult']:
"""
List of code signing policies that control the validation failure action for signature mismatch or expiry.
"""
return pulumi.get(self, "policies")
class AwaitableGetCodeSigningConfigResult(GetCodeSigningConfigResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCodeSigningConfigResult(
allowed_publishers=self.allowed_publishers,
arn=self.arn,
config_id=self.config_id,
description=self.description,
id=self.id,
last_modified=self.last_modified,
policies=self.policies)
def get_code_signing_config(arn: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCodeSigningConfigResult:
"""
Provides information about a Lambda Code Signing Config. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).
For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html)
## Example Usage
```python
import pulumi
import pulumi_aws as aws
    existing_csc = aws.lambda_.get_code_signing_config(arn=f"arn:aws:lambda:{var['aws_region']}:{var['aws_account']}:code-signing-config:csc-0f6c334abcdea4d8b")
```
:param str arn: The Amazon Resource Name (ARN) of the code signing configuration.
"""
__args__ = dict()
__args__['arn'] = arn
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:lambda/getCodeSigningConfig:getCodeSigningConfig', __args__, opts=opts, typ=GetCodeSigningConfigResult).value
return AwaitableGetCodeSigningConfigResult(
allowed_publishers=__ret__.allowed_publishers,
arn=__ret__.arn,
config_id=__ret__.config_id,
description=__ret__.description,
id=__ret__.id,
last_modified=__ret__.last_modified,
policies=__ret__.policies)
@_utilities.lift_output_func(get_code_signing_config)
def get_code_signing_config_output(arn: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCodeSigningConfigResult]:
"""
Provides information about a Lambda Code Signing Config. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).
For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html)
## Example Usage
```python
import pulumi
import pulumi_aws as aws
    existing_csc = aws.lambda_.get_code_signing_config(arn=f"arn:aws:lambda:{var['aws_region']}:{var['aws_account']}:code-signing-config:csc-0f6c334abcdea4d8b")
```
:param str arn: The Amazon Resource Name (ARN) of the code signing configuration.
"""
...
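# Illustrative sketch (hypothetical resource name): using the Output-returning variant when the
# ARN is itself a pulumi.Output, e.g. taken from an aws.lambda_.Function resource:
#   csc = get_code_signing_config_output(arn=some_function.code_signing_config_arn)
#   pulumi.export("csc_config_id", csc.apply(lambda c: c.config_id))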
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetCodeSigningConfigResult',
'AwaitableGetCodeSigningConfigResult',
'get_code_signing_config',
'get_code_signing_config_output',
]
@pulumi.output_type
class GetCodeSigningConfigResult:
"""
A collection of values returned by getCodeSigningConfig.
"""
def __init__(__self__, allowed_publishers=None, arn=None, config_id=None, description=None, id=None, last_modified=None, policies=None):
if allowed_publishers and not isinstance(allowed_publishers, list):
raise TypeError("Expected argument 'allowed_publishers' to be a list")
pulumi.set(__self__, "allowed_publishers", allowed_publishers)
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if config_id and not isinstance(config_id, str):
raise TypeError("Expected argument 'config_id' to be a str")
pulumi.set(__self__, "config_id", config_id)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_modified and not isinstance(last_modified, str):
raise TypeError("Expected argument 'last_modified' to be a str")
pulumi.set(__self__, "last_modified", last_modified)
if policies and not isinstance(policies, list):
raise TypeError("Expected argument 'policies' to be a list")
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="allowedPublishers")
def allowed_publishers(self) -> Sequence['outputs.GetCodeSigningConfigAllowedPublisherResult']:
"""
List of allowed publishers as signing profiles for this code signing configuration.
"""
return pulumi.get(self, "allowed_publishers")
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="configId")
def config_id(self) -> str:
"""
Unique identifier for the code signing configuration.
"""
return pulumi.get(self, "config_id")
@property
@pulumi.getter
def description(self) -> str:
"""
Code signing configuration description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> str:
"""
The date and time that the code signing configuration was last modified.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def policies(self) -> Sequence['outputs.GetCodeSigningConfigPolicyResult']:
"""
List of code signing policies that control the validation failure action for signature mismatch or expiry.
"""
return pulumi.get(self, "policies")
class AwaitableGetCodeSigningConfigResult(GetCodeSigningConfigResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCodeSigningConfigResult(
allowed_publishers=self.allowed_publishers,
arn=self.arn,
config_id=self.config_id,
description=self.description,
id=self.id,
last_modified=self.last_modified,
policies=self.policies)
def get_code_signing_config(arn: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCodeSigningConfigResult:
"""
Provides information about a Lambda Code Signing Config. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).
For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html)
## Example Usage
```python
import pulumi
import pulumi_aws as aws
existing_csc = aws.lambda.get_code_signing_config(arn=f"arn:aws:lambda:{var['aws_region']}:{var['aws_account']}:code-signing-config:csc-0f6c334abcdea4d8b")
```
:param str arn: The Amazon Resource Name (ARN) of the code signing configuration.
"""
__args__ = dict()
__args__['arn'] = arn
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:lambda/getCodeSigningConfig:getCodeSigningConfig', __args__, opts=opts, typ=GetCodeSigningConfigResult).value
return AwaitableGetCodeSigningConfigResult(
allowed_publishers=__ret__.allowed_publishers,
arn=__ret__.arn,
config_id=__ret__.config_id,
description=__ret__.description,
id=__ret__.id,
last_modified=__ret__.last_modified,
policies=__ret__.policies)
@_utilities.lift_output_func(get_code_signing_config)
def get_code_signing_config_output(arn: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCodeSigningConfigResult]:
"""
Provides information about a Lambda Code Signing Config. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).
For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html)
## Example Usage
```python
import pulumi
import pulumi_aws as aws
existing_csc = aws.lambda.get_code_signing_config(arn=f"arn:aws:lambda:{var['aws_region']}:{var['aws_account']}:code-signing-config:csc-0f6c334abcdea4d8b")
```
:param str arn: The Amazon Resource Name (ARN) of the code signing configuration.
"""
...
|
import timeit
import pickle
import os
import errno
import datetime
import shutil
import warnings
import traceback
import pstats
import io
import sys
import gc
import inspect
import importlib
import re
import pathlib
import types
import operator
import subprocess
import shlex
import json
import contextlib
import stat
import itertools
import ast
import builtins
import signal
from collections import OrderedDict
from string import Formatter
import numpy as np
from matplotlib import pyplot
from IPython.utils.capture import capture_output
from swutil import sys_info, np_tools, plots, misc
from swutil.validation import Positive, Integer, String, List, Tuple,Iterable
from swutil.logs import Log
from swutil.hpc import Locker
from swutil.misc import string_dialog, no_context, random_word,\
string_from_seconds,input_with_prefill,is_identifier,smart_range
from swutil.files import append_text, delete_empty_files,\
delete_empty_directories, find_directories, path_from_keywords
from swutil.decorators import print_peak_memory, add_runtime
from swutil.collections import unique
class GitError(Exception):
def __init__(self, message, git_log):
super(GitError, self).__init__(message)
self.git_log = git_log
GRP_WARN = 'Warning'
GRP_ERROR = 'Error'
FILE_DEBUG = '.debug'
FILE_OUTPUT = 'output.pkl'
FILE_INPUT = 'input.pkl'
FILE_INFO = 'summary.txt'
FILE_AUX = 'aux_data.pkl'
FILE_RUNTIME = 'runtime.txt'
FILE_MEMORY = 'memory.txt'
FILE_LOAD = 'load.sh'
FILE_EXP_ERR = 'stderr.txt'
FILE_EXP_OUT = 'stdout.txt'
FILE_LOG = 'log.txt'
FILE_GITLOG = 'git.txt'
FILE_ERR = 'err.txt'
FILE_SOURCE = 'source.txt'
FILE_WD = 'working_directory'
FILE_ANALYSIS = 'analysis'
FILE_EXP = lambda i: f'experiment{i}'
FILE_RANDOMSTATE = 'randomstate.pkl'
STR_GIT_TAG = lambda ID: f'scilog_{ID}'
STR_GIT_LOG = lambda sha1, log: f'#Created git commit {sha1} as snapshot of current state of git repository using the following commands:\n{log}'
STR_GIT_COMMIT_TITLE = lambda branch: f'Snapshot of working directory of branch {branch}'
STR_GIT_COMMIT_BODY = lambda name, ID, directory: f'Created for scilog entry {ID} in {directory}'
STR_LOADSCRIPT = ('#!/bin/sh \n '
+ f' xterm -e {sys.executable} -i -c '
+ r'''"
print('>>> import scilog');
import scilog;
print('>>> entry = scilog.load()');
entry = scilog.load();
try:
import pandas as pd;
print('>>> import pandas as pd');
print('>>> experiments = pd.DataFrame(entry[\'experiments\'])');
experiments = pd.DataFrame(entry['experiments']);
print(experiments);
except:
pass;"''')
STR_MEMFILE = lambda value,memory_profile: value + (
'' if memory_profile == 'detail'
else 'MB (Use `memory_profile==\'detail\'` for a more detailed breakdown)'
)
STR_SOURCE = lambda n, func, module,source: (('#Experiments were conducted with' if n != 1 else '#Experiment was conducted with ')
+ ('class' if inspect.isclass(func) else
(f'{func.__class__.__name__}' if isinstance(func,(types.MethodType,types.FunctionType)) else
f'instance of {func.__class__.__name__}'))
+ (f' called {func.__name__}' if hasattr(func, '__name__') else '')
+ f' from the module {module} whose source code is given below:\n{source}')
STR_TIME = '%y-%m-%d %H:%M:%S'
def STR_PARAMETERS_PROMPT(func,external,current_parameters,known_parameters,allow_variables,class_instance,allow_all_keys):
if class_instance:
why= f'to pass to instance of {func.__class__.__name__}'
else:
if external:
why = f'to fill in `{external}`'
else:
name = _get_name(func)
if inspect.isclass(func):
why= f'to initialize class {name}'
else:
why = f'to pass to {name}'
parameters_string = ', '.join(f'{key}={value!r}' for key,value in current_parameters.items())
require_parameters=[key for key in known_parameters if key not in current_parameters]
if require_parameters:
parameters_string += (', ' if parameters_string else '') + ', '.join(f'{key}=' for key in require_parameters)
if allow_all_keys:
parameters_string += '[, <kwarg>=<value>]*'
    return f">> Specify {'variables or ' if allow_variables else ''}parameters {why} ({parameters_string}):\n\t"
def STR_PARAMETERS_ALLOWED(passed_keys,known_parameters):
forbidden = [key for key in passed_keys if key not in known_parameters]
if len(forbidden)>1:
out = '!! Cannot specify parameters'+', '.join(f'`{key}`' for key in forbidden[:-1]) + f', and `{forbidden[-1]}`'
else:
out = '!! Cannot specify parameter '+f'`{forbidden[0]}`'
return out
STR_PARAMETERS_FORMAT = '!! Input must have form `<key>=<value>[,<key>=<value>]*`\n!! Enter `help` for more information'
STR_PARAMETERS_HELP = lambda allow_variables: (
'?? Parameters are specified by `<key>=<value>` with <key> a Python identifier and <value> a Python expression.'
+(
(
'\n?? Variables have the same syntax, except <value> has the form var(<iterable>).\n'
'?? Variables are used to specify arguments that are varied in a specified range.\n'
'?? Note the difference between <key>=[0,1] and <key>=var([0,1]):\n'
'?? In the first case, `[0,1]` is passed at once; in the second case it is iterated over.'
) if allow_variables else ''
)
)
MSG_DEBUG = 'Debug mode. Entry is not stored permanently, stdout and stderr are not captured, no git commit is created'
MSG_NOGIT = 'Could not find git repository. No snapshot commit will be created'
MSG_START_ANALYSIS = 'Updating analysis'
MSG_START_EXPERIMENT = lambda i,n_experiments,inp: (f'Running experiment {i}' +
(' with variable values {}{}'.format('\n\t' if '\n' in repr(inp) else '',repr(inp))
if inp != {} else ''))
MSG_START_GIT = lambda repo:'Creating snapshot of current working tree of repository \'{}\'. Check {}'.format(repo,FILE_GITLOG)
def MSG_START_EXPERIMENTS(name,variables,parameters):
msg = f'Will call `{name}`'
extend=''
new_line=False
if parameters:
new_line='\n' in str(parameters)
extend= ' with parameters {}{}'.format("\n\t" if new_line else "",parameters)
if variables:
s_var = 'variables' if len(variables)>1 else 'variable'
variable_strings = [(variable[0],str(variable[1])) for variable in variables]
newline_in_vs = any('\n' in vs[1] for vs in variable_strings)
sep = '\n\t' if (len(variables)>1 or newline_in_vs) else ', '
strings = [('' if sep == ', ' else '-') +f'`{vs[0]}`'+ (f' varying in `{vs[1]}`' if not newline_in_vs else '') for vs in variable_strings]
extend += (" \n" if new_line else " ") +(f'and {s_var}' if extend else f' with {s_var}')+(' ' if sep==', ' else sep)+sep.join(strings)
if not extend:
extend =' once'
return msg + extend
MSG_START_ENTRY = lambda directory: f'Created scilog entry {directory}'
MSG_FINISH_EXPERIMENT = lambda i,n_experiments,runtime,result,external: 'Finished experiment {} in {}{}'.format(i,string_from_seconds(runtime),
'' if ('\n' in f'{result}') else (f'. Check {os.path.join(FILE_EXP(i),FILE_EXP_OUT)}' if external else f'. Output: {result}'))
MSG_FINISH_ENTRY=lambda directory: f'Completed scilog entry {directory}'
MSG_SUCCESS = 'All experiments finished successfully'
MSG_FAIL = 'Some experiments failed'
MSG_FINISH_GIT = lambda sha1: f'Successfully created git commit {sha1}'
MSG_ERROR_NOMATCH = 'Could not find matching scilog entry'
MSG_ERROR_MULTIMATCH = lambda entries:'Multiple matching scilog entries (to iterate through all use need_unique=False):\n{}'.format('\n'.join(entries))
MSG_ERROR_LOAD = lambda name: f'Error loading {name}. Are all required modules in the Python path?'
MSG_ERROR_INSTANTIATION = lambda name:f'Could not instantiate class {name} with given parameters'
MSG_ERROR_PARALLEL = 'Error during parallel execution. Try running with `parallel=False`'
MSG_ERROR_BASH_ANALYSIS = 'Cannot analyze output in bash mode'
MSG_ERROR_GIT = lambda file:f'Error during git snapshot creation. Check {file}'
MSG_ERROR_EXPERIMENT = lambda i:f'Experiment {i} failed. Check {os.path.join(FILE_EXP(i),FILE_EXP_ERR)}'
MSG_ERROR_ANALYSIS = lambda file: f'Analysis could not be completed. Check {file}'
MSG_ERROR_DIR = 'Could not create scilog entry directory'
MSG_EXCEPTION_STORE = lambda file: f'Could not store {file}'
MSG_EXCEPTION_ANALYSIS = 'Exception during online analysis'
MSG_EXCEPTION_EXPERIMENT = lambda i: f'Exception during handling of experiment {i}. Check {FILE_ERR}'
MSG_WARN_SOURCE = 'Could not find source code'
MSG_WARN_LOADSCRIPT = 'Error during load script creation'
MSG_WARN_PARALLEL = ('Could not find pathos. This might cause problems with parallel execution. '
                     + 'Install pathos via `pip install pathos`.')
MSG_WARN_MEMPROF = 'Could not find memory_profiler. Install memory_profiler via `pip install memory_profiler`.'
MSG_WARN_DILL = ('Could not find dill. Some items might not be storable. '
                 + 'Storage of numpy arrays will be slow. '
                 + 'Install dill via `pip install dill`.')
MSG_INTERRUPT = f'Kill signal received. Stored {FILE_INFO}, closing now.'
LEN_ID = 8
#TODO(low,high) think about using inspect.formatargspec(inspect.getargspec(func)) to directly parse args and kwargs of user input even without named argument
#TODO(med,high) understand ellipses in variable input: Do this at the string input level, so 2**3,...,2**6 can be understood.
#TODO(med,low) make scilog --show work for all scilog entries in current git repo even outside of cwd
#TODO(high,low) make scilog --show smarter: if FUNC doesn't match any scilog entry path, try if it matches a scilog entry ID
#TODO(high,high) remove analysis functionality from scilog.py and add scilog --analyze working as follows: provide a list of entry identifiers (ID or paths) as well as a function that accepts scilog entries (i.e. the summary dicts). the source code file (foo.py) of that function (foo.func) is copied in the analysis subsubdirectories `foo_x` of each scilog entry
#TODO technically: `scilog --analyze X3DH,UH1X --parameters [...] --variables [...] foo.func` starts a scilog run with arguments func=foo.func, analysis=True [effects: git=False, base_directory=tempfile.mktemp(), func is called with parameters={**parameters, entries=[scilog.load(identifier) for identifier in analysis]}; the log does not say 'created scilog entry' but instead says which entries will be analyzed with what, and finishes with "added analysis <classification>_x to X3DH and <classification>_y to UH1X"; the entry is copied into subdirectory analyze/<classification>_x of X3DH and UH1X, with x possibly being adapted to what is already in the analysis of X3DH and UH1X]
#TODO make load ignore subdirectories of scilog entries (to avoid listing analysis entries)
#TODO(?,?) communicate to plots.save
#TODO(low,med) understand <param>=<string> without quotes around <string> (simple and stupid: fail, add unrecognized variable to locals, repeat...)
#TODO(low,med) understand scilog foo(a=1)(b=var()) by defining foo in locals() and have it return another function that takes yet more arguments
#TODO(med,low) if copy_output is a path, try to copy that path and only terminate when succeeded (e.g. when it starts existing) also, add argument check_done and if it is provided only try copying as soon as it returns True
#TODO(low,low) store completion flag
#TODO(high,low) make scilog --show show [log, current-stdout,current-stderr] if entry not completed, (so you can avoid screen -r and navigation to the filesystem directory)
#TODO(low,low) make scilog --show show scilog-stderr if it exists and, if all experiments failed, also show current-stderr of last experiment in that case (if at least one succeeded leave it to user to navigate to the failed experiment)
#TODO(med,low) make scilog --show default to no-git (add --git)
#TODO(?,?) make scilog --show first look through screen sessions
#TODO(low,med) Store final note in notes.txt, add --update <REASON> to scilog which then flags all previous runs with same config as outdated in their notes.txt
#TODO(low,low) add --note flag
#TODO(med,low) extend scilog ls output by scilog status (running, terminated, error,n_experiments) (store log files)
#TODO(med,low) include no-shutdown script
def record(func, variables=None, name=None, base_directory=None, aux_data=None,
analysis=None, runtime_profile=False, memory_profile=False,
git=True, no_date=False, parallel=False,
copy_output = None, parameters=None,debug = None,classification= None,dry_run=False):
'''
Call :code:`func` once or multiple times and store results along with auxiliary information
about runtime and memory usage, installed modules, source code, hardware, etc.
code:`func` is called once for each combination of variable values as
specified by the variable ranges in :code:`variables`.
For example, :code:`func` can be a numerical algorithm and :code:`variables`
    can be used to specify different mesh resolutions as follows:
    `variables = {'h': [2**(-l) for l in range(10)]}`
with the goal to assess the rate of convergence.
Another example would be to specify a list of subroutines with the goal to find
the best subroutine in terms of runtime/memory consumption/....
In the following, each call of :code:`func` is called an 'experiment'.
    Scilog creates a directory -- specified by :code:`base_directory`, :code:`name`,
and optional parameters or a randomly generated ID -- with the following content:
*summary.txt:
*name: Name of scilog entry
*ID: Alphanumeric string identifying the entry
*modules: Module versions
*time: Time of execution
*experiments: For each experiment
*string representation of input,
*string representation of output,
*runtime
*status
*(optional)memory usage
*(optional)parameters: Parameters that are equal for all experiments
*(optional)git_commit: SHA1 of git commit
*(optional)aux_data: Argument :code:`aux_data`
*log.txt
*(optional)err.txt
*(optional)git.txt: stdout of git snapshot creation
*source.txt: Source code of the module containing :code:`func`
*For each experiment a subdirectory 'experiment<i>' with:
*output.pkl: Output of :code:`func`
*(optional)input.pkl: Argument passed to :code:`func`
*(optional)working_directory/: Working directory for call of :code:`func`,
unless parameter :code:`copy_output` is specified
*(optional)stderr.txt:
*(optional)stdout.txt:
*(optional)runtime_profile.txt: Extensive runtime information for each experiment
*(optional)memory_profile.txt: Memory usage information for each experiment
*(optional) analysis/: output of function :code:`analysis`
*(optional)stderr.txt
*(optional)stdout.txt
*(optional)working_directory/: Working directory for call of :code:`analysis`
To load a scilog entry, use the function :code:`scilog.load`.
This function loads summary.txt and replaces the string representations of outputs and inputs
by the actual Python objects.
:param func: Function to be called with different experiment configurations
:type func: function
:param variables: Arguments for call of :code:`func` that are varied.
If not specified, then :code:`func` is called once, without arguments
:type variables: List(-like) if single variable or dictionary of lists
:param name: Name of scilog entry.
If not specified, :code:`func.__name__` is used
:type name: String
:param base_directory: Root directory for storage
:type base_directory: String
:param aux_data: Auxiliary data that should be stored along with the results
:type aux_data: Any
:param analysis: Function that is called after each experiment
Can be used, e.g., for plotting
:param runtime_profile: Store extensive runtime information
May slow down execution
:type runtime_profile: Boolean
:param memory_profile: Track memory usage
May slow down execution
:type memory_profile: Boolean
:param git: Create git snapshot commit
The resulting commit is tagged with the entry ID and resides outside the branch history
The repository path may be specified, else it will be automatically detected
Add 'scilog' to your .gitignore to avoid storing the scilog entries in each snapshot.
    (Should you ever want to get rid of the snapshots,
use `git tag --list 'scilog_*'|xargs -I % git tag -d %` to remove all scilog commits or
use function `clean_git_repository` to remove all scilog commits whose scilog entry does not reside in repository anymore)
:type git: Boolean or String
:param no_date: Do not store outputs in sub-directories grouped by calendar week
:type no_date: Boolean
:param parameters: Parameters that are equal for all experiments
If :code:`func` is a class, these are used to instantiate this class
:type parameters: Dictionary
    :param debug: Force debug mode (otherwise detected automatically)
    :type debug: Boolean
:param copy_output: The contents of this directory will be copied into the scilog entry directory
:type copy_output: String
:param classification: Short, human readable description of entry
:type classification: String
:param dry_run: Only setup directory and experiments, don't execute anything
:type dry_run: Boolean
:return: Path of scilog entry
:rtype: String
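
    Example (an illustrative sketch; ``my_algorithm`` and ``expensive_computation``
    are hypothetical, and ``import scilog`` is assumed):

    >>> def my_algorithm(h, method='cg'):
    ...     return expensive_computation(h, method)  # hypothetical computation
    >>> entry_path = scilog.record(
    ...     my_algorithm,
    ...     variables={'h': [2**(-l) for l in range(4)]},
    ...     parameters={'method': 'cg'},
    ...     git=False)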
'''
########################### FIX ARGUMENTS ###########################
variables,parameters,func_initialized,classification_t = _setup_experiments(variables,parameters,func)
classification = classification or classification_t
name = name or _get_name(func)
if dry_run:
return variables,parameters,classification,name
external = _external(func)
debug = debug if debug is not None else aux.isdebugging()
if debug:
git = False
log_nogit = False
if git is not False:
if git is True:
git = _get_func_directory(func)
if not _has_git(git):
log_nogit = True
git = False
########################### SETUP INPUTS ##############################
if len(variables)!=1:#Will result in infinite loop if one variable is infinite.
t = itertools.product(*[variable[1] for variable in variables])
else:
t = ([x] for x in variables[0][1])
inputs = ({variable[0]:tt[i] for i,variable in enumerate(variables)} for tt in t)
try:
n_experiments = int(np.prod([len(variable[1]) for variable in variables]))
except TypeError:
n_experiments = None
########################### CREATE SCILOG ENTRY ########################
entry_directory,ID = _get_directory(base_directory,func,name,no_date,debug,git,classification)
log_file = os.path.join(entry_directory, FILE_LOG)
err_file = os.path.join(entry_directory, FILE_ERR)
info_file = os.path.join(entry_directory, FILE_INFO)
load_file = os.path.join(entry_directory, FILE_LOAD)
aux_data_file = os.path.join(entry_directory, FILE_AUX)
source_file_name = os.path.join(entry_directory, FILE_SOURCE)
git_file = os.path.join(entry_directory, FILE_GITLOG)
locker = Locker()
_log = Log(write_filter=True, print_filter=True, file_name=log_file,lock = locker.get_lock()) # Logging strategy: 1) Redirect out and err of user functions (analysis and experiment) to their own files
_err = Log(write_filter=True, print_filter=False, file_name=err_file,lock = locker.get_lock()) # 2) Log errors outside user functions in _err 3) Log everything (user-err and _err, as well as other info) in _log
_log.log(MSG_START_ENTRY(entry_directory))
if log_nogit:
_log.log(group = GRP_WARN,message = MSG_NOGIT)
if debug:
_log.log(group =GRP_WARN,message = MSG_DEBUG)
info = {
'parameters' : {key:repr(parameters[key]) for key in parameters},
'variables' : [repr(variable) for variable in variables],
'name' : name,
'ID' : ID,
'time' : datetime.datetime.now().strftime(STR_TIME),
'func' : external or repr(func),
'parallel' : parallel,
'hardware' : sys_info.hardware(),
'gitcommit' : None,
'modules' : None,
'note': None,
'experiments' : {
'runtime':[],
'memory':[],
'status':[],
'input':[],
'output':[]
}
}
if not external:
info['modules'] = sys_info.modules()
try:
source = STR_SOURCE(n_experiments,func,sys.modules[func.__module__].__file__, ''.join(inspect.getsourcelines(sys.modules[func.__module__])[0]))
append_text(source_file_name, source)
except Exception: # TypeError only?
_err.log(traceback.format_exc())
_log.log(group=GRP_WARN, message=MSG_WARN_SOURCE)
if memory_profile is not False:
if memory_profile == 'detail':
try:
import memory_profiler # @UnusedImport, just to check if this will be possible in _run_single_experiment
except ImportError:
_log.log(group=GRP_WARN, message=MSG_WARN_MEMPROF)
memory_profile = True
else:
memory_profile = True
try:
with open(load_file, 'w') as fp:
fp.write(STR_LOADSCRIPT)
st = os.stat(load_file)
os.chmod(load_file, st.st_mode | stat.S_IEXEC)
except Exception:
_err.log(message=traceback.format_exc())
_log.log(group=GRP_WARN, message=MSG_WARN_LOADSCRIPT)
if git:
try:
_log.log(message=MSG_START_GIT(os.path.basename(os.path.normpath(git))))
with (capture_output() if not debug else no_context()) as c:
snapshot_id, git_log, _ = _git_snapshot(path=git,commit_body=STR_GIT_COMMIT_BODY(name, ID, entry_directory), ID=ID)
append_text(git_file, STR_GIT_LOG(snapshot_id, git_log))
_log.log(message=MSG_FINISH_GIT(snapshot_id))
info['gitcommit'] = snapshot_id
except GitError as e:
_log.log(group=GRP_ERROR, message=MSG_ERROR_GIT(git_file))
_err.log(message=str(e)+'\n'+c.stderr)
append_text(git_file, e.git_log)
raise
try:
import dill
serializer = dill
except ImportError:
serializer = pickle
_log.log(group=GRP_WARN, message=MSG_WARN_DILL)
_try_store(aux_data,serializer,aux_data_file,_log,_err)
def _update_info(i, runtime, status, memory, input_str, output_str):
for (key,val) in [('runtime',runtime),
('memory',memory if memory_profile is not False else None),
('status',status),
('input',input_str),
('output',output_str),
]:
info['experiments'][key].append(val)
store_info()
def store_info():
with open(info_file,'w') as fp:
json.dump(info,fp,indent = 1,separators = (',\n', ': '))
store_info()
old_wd = os.getcwd()
########################### RUN EXPERIMENTS ###############################
args = (
(
i, input, entry_directory, func_initialized, memory_profile,
runtime_profile, _log,_err,
'pickle' if serializer == pickle else 'dill',
external, debug, copy_output,n_experiments
)
for i, input in enumerate(inputs)
)
_log.log(message=MSG_START_EXPERIMENTS(name,variables,parameters))
def close_entry():
try:
os.chdir(old_wd)
except Exception:
pass
success = all(s=='finished' for s in info['experiments']['status'])
try:
_log.log(MSG_FINISH_ENTRY(entry_directory))
if not success:
_log.log(MSG_FAIL)
except Exception:
pass
if not debug:
note = input('You may add a short note to this entry or simply press Enter to exit:')
if note:
info['note'] = note
store_info()
return entry_directory
if parallel and not debug:
try:
from pathos.multiprocessing import ProcessingPool as Pool
pool = Pool(nodes=n_experiments)
except ImportError:
_err.log(message=traceback.format_exc())
_log.log(group=GRP_WARN, message=MSG_WARN_PARALLEL)
from multiprocessing import Pool
pool = Pool(processes=n_experiments)
try:
outputs = pool.map(_run_single_experiment, args)
except pickle.PicklingError: # @UndefinedVariable
_err.log(message=traceback.format_exc())
_log.log(group=GRP_ERROR, message=MSG_ERROR_PARALLEL)
raise
for output in outputs:
_update_info(*output)
pool.close()
pool.join()
else:
for arg in args:
try:
output = _run_single_experiment(arg)
            except Exception:#These come from errors in the code of _run_single_experiment. The user function errors are caught within that function
_err.log(message=traceback.format_exc())
_log.log(group=GRP_ERROR, message=MSG_EXCEPTION_EXPERIMENT(arg[0]))
else:
_update_info(*output)
if analysis:
try:
_log.log(message=MSG_START_ANALYSIS)
except BrokenPipeError:#locks raise BrokenPipeError when experiments are terminated using <C-c>
sys.exit(1)
try:
with capture_output():
entry = load(path=entry_directory, need_unique=True, no_objects=False)
analyze(func=analysis, entry=entry, _log=_log, _err=_err, debug=debug)
except Exception:
_err.log(message=traceback.format_exc())
_log.log(group=GRP_ERROR, message=MSG_EXCEPTION_ANALYSIS)
return close_entry()
def _has_git(git):
cwd = os.getcwd()
try:
os.chdir(git)
#subprocess.check_call(['git','status'],stdout = subprocess.PIPE,stderr=subprocess.PIPE)
subprocess.check_call(['git','rev-parse','HEAD',],stdout = subprocess.PIPE,stderr=subprocess.PIPE)#Sometimes git status works but rev-parse, which is used later, fails; e.g. on repos without initial commit
return True
except subprocess.CalledProcessError:
return False
finally:
os.chdir(cwd)
def _external(func):
return func if isinstance(func,str) else False
def _get_func_directory(func):
return os.getcwd() if _external(func) else os.path.dirname(sys.modules[func.__module__].__file__)
def _get_base_directory(directory,func,name,no_date):
directory = directory or _get_func_directory(func)
directory = os.path.join(directory,'scilog')
if no_date:
basepath = os.path.join(directory, name)
else:
date = datetime.date.today()
basepath = os.path.join(directory, date.strftime('w%Wy%y'), name)
return os.path.abspath(basepath)
def _get_name(func):
if _external(func):
        nowhite = re.compile(r'\S*')
path = nowhite.match(func).group(0)
name = os.path.basename(path)
else:
try:#func is a function or a class
name = func.__name__
except AttributeError:#func is an object with __call__ method
name = func.__class__.__name__
return name
def _evaluator(what,locals_dict = None):
locals_dict = locals_dict or {}
return eval(f'(lambda **kwargs: kwargs)({what})',{'range':range,'count':itertools.count,'np':np,'__builtins__':{}},locals_dict)
class _var:
def __init__(self,*obj):
if len(obj)>1:#e.g. var('ab','cd','ef')
self.obj = obj
elif len(obj)==1:#e.g. var(range(3))
if Iterable.valid(obj[0]):
                self.obj = list(obj[0])#turn into list so that numpy arrays go through later on (if you leave them as arrays, they will cause problems in == comparisons, for example)
else:#Allows for --variables p=3 instead of --variables p=[3]
self.obj = [obj[0]]
elif len(obj)==0:
raise ValueError()
def __repr__(self):
return 'var('+repr(self.obj)+')'
def _setup_experiments(variables,parameters,func):
'''
Note: input and output `variables` have iterator type, not _var.
_var only occurs in the processing
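    Illustration (hypothetical example): for a function func(h, p) called with
    variables={'h': [1, 2]} and parameters={'p': 3}, the returned values are
    variables=[('h', [1, 2])], parameters={'p': 3}, and a wrapper
    func_initialized that calls func(h=..., p=3).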
'''
external = _external(func)
def _get_kwargs(func,external,variables,parameters,class_instance=False):
if inspect.isclass(func):
allow_variables =False
variables = None
else:
allow_variables=True
parameters_passed = parameters is not None
variables_passed = variables is not None
if parameters is None:
parameters = {}
if variables is None:
variables = {}
if external:
field_names = [fname for _, fname, _, _ in Formatter().parse(external) if fname is not None]
new_var_n = 0
new_names = []
for i,fname in enumerate(field_names):
if fname == '':
while True:
if f'arg{new_var_n}' not in field_names:
new_names.append(f'arg{new_var_n}')
break
else:
new_var_n+=1
external = external.format(
*[f'{{{new_name}}}' for new_name in new_names],
**{fname:f'{{{fname}}}' for fname in field_names if fname !='' }
)
known_parameters = OrderedDict((fname,inspect._empty) for _, fname, _, _ in Formatter().parse(external) if fname is not None)
            if len(known_parameters)==1 and list(known_parameters.keys())[0] is None:
known_parameters = []
allow_all_keys = False
default_parameters = {}
else:
func_parameters = inspect.signature(func).parameters
default_parameters = {
key:value.default for key,value in func_parameters.items()
if (value.default != inspect._empty)
}
            allow_all_keys = any(value.kind == inspect.Parameter.VAR_KEYWORD for key,value in func_parameters.items())
known_parameters = OrderedDict(
(key,value.default) for key,value in func_parameters.items()
                if (value.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD))
)
kwargs=default_parameters.copy()
free_keys=lambda : allow_all_keys or any(key not in variables and key not in parameters for key in known_parameters)
required_keys=lambda : [key for key in known_parameters if key not in kwargs]
is_allowed_key=lambda key: allow_all_keys or key in known_parameters
if variables:
if not isinstance(variables,dict):#allow for single range instead of var dictionary
non_default_parameters = [key for key in known_parameters if key not in default_parameters]
if len(non_default_parameters) == 1:
variables={non_default_parameters[0]:variables}
elif len(known_parameters) ==1:
variables = {list(known_parameters.keys())[0]:variables}
else:
raise ValueError(f'Must specify name for variable {variables}')
if any(not is_allowed_key(key) or not is_identifier(key) for key in variables):
raise ValueError('Invalid variable names for function {}: {}'.format(external or func,{key for key in variables if not is_allowed_key(key)}))
variables_update = {key:_var(value) for key,value in variables.items()}
else:
variables_update = {}
if parameters:
if any(key in variables_update for key in parameters):
raise ValueError('Parameter names already defined as variables: {}'.format({key for key in parameters if key in kwargs}))
if any(not is_allowed_key(key) or not is_identifier(key) for key in parameters):
raise ValueError('Invalid parameter names for function {}: {}'.format(external or func,{key for key in parameters if not is_allowed_key(key)}))
parameters_update = parameters
else:
parameters_update = parameters
kwargs.update(**variables_update,**parameters_update)
if (((not parameters_passed and not class_instance) or (not variables_passed and allow_variables)) and free_keys()) or required_keys():
while True:
prefill=', '.join([key+'=' for key in required_keys()])
parameters_string = input_with_prefill(STR_PARAMETERS_PROMPT(func,external,kwargs,known_parameters,allow_variables,class_instance,allow_all_keys),prefill)
if parameters_string in ['?','help','--help','??']:
print(STR_PARAMETERS_HELP(allow_variables))
continue
try:
update_kwargs = _evaluator(parameters_string,{'var':_var} if allow_variables else {})
except Exception:#(ValueError,SyntaxError):
if '=help' in parameters_string:
print(STR_PARAMETERS_HELP(allow_variables))
else:
print(STR_PARAMETERS_FORMAT)
else:
kwargs.update({key: value for key,value in update_kwargs.items() if is_allowed_key(key)})
done = True
if not all(key in kwargs for key in known_parameters):
if parameters_string =='':
print(STR_PARAMETERS_FORMAT)
done = False
if any(not is_allowed_key(key) for key in update_kwargs):
print(STR_PARAMETERS_ALLOWED(update_kwargs,known_parameters))
done = False
if done:
break
return kwargs,external,default_parameters,known_parameters
if external:
def func(**kwargs):
subprocess.check_call(external.format(**kwargs), stdout=sys.stdout, stderr=sys.stderr, shell=True)
classification_variables = {}#variables.copy() if isinstance(variables,dict) else {}#can be None or a single unnamed iterable whose name will be found out only later
classification_parameters = {}#parameters.copy() if isinstance(parameters,dict) else {}# can be None
if inspect.isclass(func):# in this case, parameters are for initialization and variables for function call
parameters,_,default_parameters,_ = _get_kwargs(func,False,None,parameters)
func_initialized=func(**parameters)
variables,_,default_parameters_2,known_parameters_2 = _get_kwargs(func_initialized,False,variables,None,class_instance=True)
real_variables = {key:value for key,value in variables.items() if isinstance(value,_var)}
classification_parameters.update({key:value for key,value in parameters.items() if key not in default_parameters or (key in default_parameters and value !=default_parameters[key])})
if len(variables)<=1:#nothing possibly interesting can be said if there is only one variable except if variable was not known (i.e. keyword argument)
if not classification_parameters:#If not any classification yet take what you have
classification_variables.update({key:value for key,value in variables.items()})
else:
classification_variables.update({key:value for key,value in variables.items() if key not in known_parameters_2})
else:
classification_variables.update({key:value for key,value in variables.items() if key not in known_parameters_2 or (key in default_parameters_2 and value!=default_parameters_2[key])})
if any(key not in real_variables for key in variables if not key in default_parameters_2):#Not all nondefault parameters actually vary, so list those that do
classification_variables.update({key:value for key,value in real_variables.items() if key not in default_parameters_2})
        variables = {key:(value.obj if isinstance(value,_var) else [value]) for key,value in variables.items()}#Users are prompted for variables or parameters, but if they enter parameters, i.e. a single value, they will still be handled as variables taking only one value
else:
kwargs,external,default_parameters,_ =_get_kwargs(func,external,variables,parameters)#use all as name, first params as usual (all hand selected, fill to 5), then __var_l_h
variables = {key:value.obj for key,value in kwargs.items() if isinstance(value,_var)}
parameters ={key:value for key,value in kwargs.items() if not isinstance(value,_var)}
#use classification even if only one known parameter, this helps if the braces in a bash command string are changed and suddenly control something very different
classification_parameters.update({key:value for key,value in parameters.items() if key not in default_parameters or (key in default_parameters and value!=default_parameters[key])})
classification_variables.update({key:value for key,value in variables.items() if key not in default_parameters or (key in default_parameters and value!=default_parameters[key])})
def func_initialized(**experiment):
return func(**experiment,**parameters)
variables = list(variables.items())
classification_p = path_from_keywords(classification_parameters,into='file')
classification_v = '_'.join(s.replace('_','') for s in classification_variables.keys())
classification = classification_p+('+' if classification_v else '') +classification_v
for j,variable in enumerate(variables):
if (List|Tuple).valid(variable[1]) and Ellipsis in variable[1]:
variables[j] = (variable[0],smart_range(*[e for e in variable[1] if e != Ellipsis]))
return variables,parameters,func_initialized,classification
def _run_single_experiment(arg):
(i, input, directory, func, memory_profile,
runtime_profile, _log,_err, serializer,
external, debug, copy_output,n_experiments) = arg
experiment_directory = os.path.join(directory, FILE_EXP(i))
stderr_file = os.path.join(experiment_directory, FILE_EXP_ERR)
stdout_file = os.path.join(experiment_directory, FILE_EXP_OUT)
input_file = os.path.join(experiment_directory, FILE_INPUT)
output_file = os.path.join(experiment_directory, FILE_OUTPUT)
randomstate_file = os.path.join(experiment_directory, FILE_RANDOMSTATE)
runtime_profile_file = os.path.join(experiment_directory, FILE_RUNTIME)
memory_profile_file = os.path.join(experiment_directory, FILE_MEMORY)
experiment_working_directory = os.path.join(experiment_directory, FILE_WD)
if serializer == 'pickle':
serializer = pickle
else:
import dill
serializer = dill
_log.log(MSG_START_EXPERIMENT(i,n_experiments,input))
runtime = None
output = None
input_str = repr(input)
memory = None
status = 'failed'
randomstate = None
if not external:
randomstate = np.random.get_state()
if hasattr(func, '__name__'):#func is function
temp_func = func
else:#func is object
temp_func = func.__call__
if copy_output is None:
os.makedirs(experiment_working_directory)
os.chdir(experiment_working_directory)
else:
os.makedirs(experiment_directory)
try:
_try_store(input,serializer,input_file,_log,_err)
_try_store(randomstate,serializer,randomstate_file,_log,_err)
if memory_profile == 'detail':#Needs to be before runtime decorator so it gets the full view (otherwise it will profile the runtime decorator)
m = io.StringIO()
import memory_profiler
temp_func = memory_profiler.profile(func = temp_func,stream =m ,precision = 4)
if runtime_profile:
temp_func = add_runtime(temp_func)
if memory_profile is True:#Needs to be after runtime decorator so runtime gets the full view (since peak_memory is threaded)
m = io.StringIO()
temp_func = print_peak_memory(func=temp_func, stream=m)
stderr_append = ''
with open(stderr_file, 'a', 1) as err:
with open(stdout_file, 'a', 1) as out:
with contextlib.redirect_stdout(out) if not debug else no_context():
with contextlib.redirect_stderr(err) if not debug else no_context():
tic = timeit.default_timer()
try:
output = temp_func(**input)
except Exception:
status = 'failed'
if debug:
traceback.print_exc()
try:
import ipdb as debugger
except ModuleNotFoundError:
import pdb as debugger
debugger.post_mortem(sys.exc_info()[2])
stderr_append = traceback.format_exc()
else:
status = 'finished'
runtime = timeit.default_timer() - tic
delete_empty_files([stderr_file, stdout_file])
if status == 'failed':
append_text(stderr_file, stderr_append)
_log.log(group=GRP_ERROR, message=MSG_ERROR_EXPERIMENT(i),use_lock = False)#locks are often broken already which leads to ugly printouts, also errors don't matter at this point anyway
else:
if runtime_profile:
profile, output = output
s = io.StringIO()
ps = pstats.Stats(profile, stream=s)
ps.sort_stats('cumulative')
ps.print_stats()
append_text(runtime_profile_file, s.getvalue())
s.close()
if memory_profile:
append_text(memory_profile_file,STR_MEMFILE(m.getvalue(),memory_profile))
memory = _max_mem(m.getvalue(), type=memory_profile)
except Exception:
_err.log(message=traceback.format_exc())
_log.log(group=GRP_ERROR, message=MSG_EXCEPTION_EXPERIMENT(i))
if copy_output is None:
os.chdir(directory)
else:
shutil.copytree(copy_output, experiment_working_directory, symlinks=False, ignore_dangling_symlinks=True)
delete_empty_directories([experiment_working_directory])
output_str = str(output)
_try_store(output,serializer,output_file,_log,_err)
del output
if status == 'finished':
_log.log(MSG_FINISH_EXPERIMENT(i, n_experiments, runtime, output_str,external))
gc.collect()
return (i, runtime, status, memory, input_str, output_str)
class ConvergencePlotter():
def __init__(self, *qois, cumulative=False, work=None, extrapolate=0,reference = 'self'):
'''
Create convergence plots (of given quantities of interest (qois))
This is an auxiliary class that may be used as argument for parameter
        `analysis` of scilog.record or parameter `func` of scilog.analyze
:param qois: List of functions that can be applied to the outputs of an experiment
:param cumulative: Specify whether work is cumulative across the experiments
:param work: If a measure other than runtime should be used, this must be a function taking integers and returning reals
:param extrapolate: Degree of Richardson extrapolation
extrapolate = -1 uses exponential extrapolation, whereas
positive values merely improve algebraic convergence orders
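
        Example (illustrative sketch; ``my_algorithm`` is a hypothetical function and
        ``import scilog`` is assumed):

        >>> plotter = ConvergencePlotter(cumulative=False)
        >>> scilog.record(my_algorithm,
        ...               variables={'h': [2**(-l) for l in range(6)]},
        ...               analysis=plotter, git=False)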
'''
self.qois = qois
self.cumulative = cumulative
self.work = work
self.extrapolate = extrapolate
self.reference = reference
def __call__(self, entry):
experiments = entry['experiments']
single_reference = (self.reference == 'self')
ind_finished = [j for (j, s) in enumerate(experiments['status']) if s == 'finished']
if len(ind_finished) > 2 + (self.extrapolate if self.extrapolate >= 0 else 0):
if self.work is None:
times = experiments['runtime'][ind_finished]
else:
times = [self.work(i) for i in ind_finished]
            results = experiments['output'][ind_finished]
if self.cumulative:
times = np.cumsum(times)
if not self.qois:
if hasattr(results[0], '__len__') and not isinstance(results[0], np.ndarray):
self.qois = [lambda x,k = k: x[k] for k in range(len(results[0]))]
else:
self.qois = [lambda x:x]
single_reference = True
if single_reference:
self.reference = [self.reference]*len(self.qois)
for (k, qoi) in enumerate(self.qois):
try:
pyplot.figure(k).clf()
qoi_values = np.array([qoi(result) for result in results])
qoi_times = np.array(times)
if self.extrapolate:
qoi_values, qoi_times = np_tools.extrapolate(qoi_values, qoi_times, self.extrapolate)
plots.plot_convergence(qoi_times, qoi_values,reference = self.reference[k])
plots.save('convergence')
except Exception:
traceback.print_exc()
def _try_store(what,serializer,file,_log,_err):
if what is not None:
try:
with open(file, 'wb') as fp:
serializer.dump(what, fp)
except (TypeError, pickle.PicklingError):
_err.log(message=traceback.format_exc())
_log.log(group=GRP_WARN, message=MSG_EXCEPTION_STORE(os.path.split(file)[-1]))
def clean_git_repository(directory=None,dry_run = True):
'''
Delete all commits in repository specified by :code:`directory` which do not have matching
scilog entry in repository directory.
:param directory: Path that is under version control
:type directory: String
    :param dry_run: If True, only list the unmatched commits; if False, actually delete them
:type dry_run: Bool
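
    Example (illustrative; the repository path is hypothetical):

    >>> clean_git_repository('/path/to/repository', dry_run=True)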
'''
directory = directory or os.getcwd()
os.chdir(directory)
scilog_tag = re.compile('scilog_.*')
tags = [tag for tag in _git_command('tag --list',add_input = False).splitlines() if scilog_tag.match(tag)]
git_directory = _git_command('rev-parse --show-toplevel', add_input=False).rstrip()
os.chdir(git_directory)
entries = load(need_unique=False,no_objects=True)
IDs = [entry['ID'] for entry in entries]
unmatched = [tag for tag in tags if tag[7:] not in IDs]
if unmatched:
print(f'The following scilog git commits have no matching scilog entry in {directory}:')
[print(tag) for tag in unmatched]
if dry_run:
print('Specify `dry_run=False` to remove unmatched commits')
else:
print('Removing unmatched commits...',end='')
[_git_command(f'tag -d {tag}') for tag in unmatched]
print('done')
else:
print(f'All scilog git commits have matching scilog entries in {directory}')
def analyze(entry,func, _log=None, _err=None, debug=False):
'''
Add analysis to scilog entry or entries
:param func: Function that performs analysis
:param entry: scilog entry or entries (as returned by scilog.load)
:param _log: Log object to be used instead of writing to standard stdout
:param _err: Log object to be used instead of writing to standard stderr
:param debug: If True, output is printed instead of being redirected into files
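
    Example (illustrative sketch; ``my_plot_func`` is a hypothetical function that
    accepts a scilog entry):

    >>> entry = load('my_algorithm*')
    >>> analyze(entry, func=my_plot_func)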
'''
if not _log:
_log = Log(print_filter=True)
if not _err:
_err = Log(print_filter=True)
try:
import dill
serializer = dill
except ImportError:
serializer = pickle
_log.log(group=GRP_WARN, message=MSG_WARN_DILL)
cwd = os.getcwd()
try:
if not inspect.isgenerator(entry):
entries = [entry]
else:
entries = entry
for entry in entries:
analysis_directory_tmp = os.path.join(entry['path'], 'tmp',FILE_ANALYSIS)
working_directory = os.path.join(analysis_directory_tmp, FILE_WD)
stderr_file = os.path.join(analysis_directory_tmp, FILE_EXP_ERR)
stdout_file = os.path.join(analysis_directory_tmp, FILE_EXP_OUT)
output_file = os.path.join(analysis_directory_tmp, FILE_OUTPUT)
os.makedirs(analysis_directory_tmp)
os.mkdir(working_directory)
os.chdir(working_directory)
output = None
stderr_append = ''
with open(stderr_file, 'a', 1) as err:
with open(stdout_file, 'a', 1) as out:
with contextlib.redirect_stdout(out) if not debug else no_context():
with contextlib.redirect_stderr(err) if not debug else no_context():
try:
output = func(entry)
except Exception:
stderr_append = traceback.format_exc()
delete_empty_files([stderr_file, stdout_file])
delete_empty_directories([working_directory])
if stderr_append:
append_text(stderr_file, stderr_append)
_log.log(group=GRP_ERROR, message=MSG_ERROR_ANALYSIS(stderr_file))
_try_store(output,serializer,output_file,_log,_err)
os.chdir(cwd)
analysis_directory = os.path.join(entry['path'], FILE_ANALYSIS)
shutil.rmtree(analysis_directory, ignore_errors=True)
shutil.move(analysis_directory_tmp, entry['path'])
shutil.rmtree(os.path.split(analysis_directory_tmp)[0], ignore_errors=True)
except Exception:
os.chdir(cwd)
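# The RE wrapper below marks a string as a regular expression pattern:
# `load(parameters=...)` compares plain values by their repr, but matches
# RE-wrapped values with re.match (see matches_parameters inside load).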
class RE:
def __init__(self,s):
self.s=s
def load(search_pattern='*', path='', ID=None, no_objects=False, need_unique=True,include_modules=False,parameters=None,fix_broken_summary_txt=False):#TODO remove fix_broken_summary_txt
'''
Load scilog entry/entries.
:param search_pattern: Shell-style glob/search pattern using wildcards
If there are multiple entries of the same name (those are stored as
<name>/v0 <name>/v1 ... in the filesystem) and they should all be returned,
use `search_pattern=<name>/v*` and `need_unique=False`
:type search_pattern: String, e.g. search_pattern='foo*' matches `foobar`
:param path: Path of exact location if known (possibly only partially), relative or absolute
:type path: String, e.g. '/home/username/<project>' or '<project>'
:param no_objects: To save time, only load information about scilog entry, not results
:type no_objects: Boolean
:param need_unique: Require unique identification of scilog entry.
:type need_unique: Boolean
:param parameters: Search pattern that is applied to the scilog parameters
:type parameters: Dictionary of regular expression strings or objects (which will be converted to strings)
:return: Scilog entry
    :rtype: If need_unique=True, a single scilog entry (a dictionary)
If need_unique=False, a generator of such objects
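
    Example (illustrative; assumes an entry created from a hypothetical function ``my_algorithm``):

    >>> entry = scilog.load(search_pattern='my_algorithm*')
    >>> outputs = entry['experiments']['output']
    >>> entries = scilog.load('my_algorithm*', need_unique=False, no_objects=True)  # generator of all matches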
'''
deserializer = pickle
try:
import dill
deserializer = dill
except ImportError:
warnings.warn(MSG_WARN_DILL)
if os.sep in search_pattern and path == '':#Use absolute path part of search pattern as path, if existent
temp_path, temp_search_pattern = search_pattern.rsplit(os.sep, 1)
if os.path.isabs(temp_path):
path, search_pattern = temp_path, temp_search_pattern
if search_pattern[-1]!='*':
search_pattern = search_pattern+'*'
entries = []
entries.extend(find_directories(search_pattern, path=path))
entries.extend(find_directories('*/' + search_pattern, path=path))
entries = [entry for entry in entries if _is_experiment_directory(entry)]
def get_output(entry, no_objects):
file_name = os.path.join(entry, FILE_INFO)
with open(file_name, 'r') as fp:
try:
info = json.load(fp)
except Exception:
raise ValueError(f'Problem with {file_name}')
info['path'] = entry
if not include_modules:
del info['modules']
if not no_objects:
#if isinstance(info['experiments'],dict):#Old version of scilog:
# DL = info['experiments']
# info['experiments'] = [dict(zip(DL,t)) for t in zip(*DL.values())]
for j,s in enumerate(info['experiments']['status'] if not fix_broken_summary_txt else ('finished' for i in itertools.count())):
if s == 'finished':
try:
output_file_name = os.path.join(entry, FILE_EXP(j), FILE_OUTPUT)
with open(output_file_name, 'rb') as fp:
output = deserializer.load(fp)
if fix_broken_summary_txt:
info['experiments']['output'].append(output)
else:
info['experiments']['output'][j] = output
except Exception:
warnings.warn(MSG_ERROR_LOAD('file ' + output_file_name))
if fix_broken_summary_txt:
break
traceback.print_exc()
if not fix_broken_summary_txt:
try:
input_file_name = os.path.join(entry,FILE_EXP(j),FILE_INPUT)
with open(input_file_name,'rb') as fp:
input = deserializer.load(fp)
info['experiments']['input'][j] = input
except Exception:
warnings.warn(MSG_ERROR_LOAD('file ' + input_file_name))
traceback.print_exc()
for key in info['experiments']:
try:
info['experiments'][key] = np.array(info['experiments'][key])
except Exception:
pass
return info
if ID:
partial_id = re.compile(ID)
entries = [entry for entry in entries if partial_id.match(get_output(entry, no_objects = True)['ID'])]
if parameters:
parameters = {key:(repr(value) if not isinstance(value,RE) else value) for (key,value) in parameters.items()}
def matches_parameters(entry):
out = get_output(entry,no_objects=True)
if not 'parameters' in out:
return False
else:
test = out['parameters']
for key,value in parameters.items():
if key not in test:
return False
if isinstance(value,RE):
if not re.match(value.s,test[key]):
return False
else:
if not value == test[key]:
return False
return True
entries = [entry for entry in entries if matches_parameters(entry)]
if len(entries)>1 and need_unique:
basenames = [os.path.basename(get_output(entry,no_objects=True)['path']).rsplit('_',1) for entry in entries]
if len(set(bn[0] for bn in basenames))==1:
entries = [max(entries,key = lambda entry: get_output(entry,no_objects=True)['time'])]
entries = unique(entries)
if not need_unique:
return (get_output(entry, no_objects=no_objects) for entry in entries)
else:
if len(entries) == 0:
raise ValueError(MSG_ERROR_NOMATCH)
if len(entries) > 1:
raise ValueError(MSG_ERROR_MULTIMATCH(entries))
return get_output(entries[0], no_objects=no_objects)
def _is_experiment_directory(directory):
return os.path.isfile(os.path.join(directory, FILE_INFO))
def _max_mem(m, type): # @ReservedAssignment
if m == '':
return -1
if type == 'detail': # Output of memory_profiler package
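        # Illustrative detail-mode input line (format of memory_profiler output, values hypothetical):
        #     '    12     38.8164 MiB      0.0234 MiB           1       x = foo()'
        # The MiB figures are collected and the reported usage is max - min over all matched lines.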
        find = re.compile(r'.*?(\d{1,}\.\d{4}) MiB.*')
matches = [find.match(line) for line in m.splitlines()]
values = [float(match.groups()[0]) for match in matches if match is not None]
return max(values) - min(values)
else: # Output of print_peak_memory
return float(m)
def _get_directory(directory,func,name,no_date, debug, git, classification):
basepath = _get_base_directory(directory,func,name,no_date)
if debug:
directory = os.path.join(basepath, FILE_DEBUG)
try:
shutil.rmtree(directory)
except FileNotFoundError:
pass
os.makedirs(directory)
return directory, FILE_DEBUG
try_classification_based_directory = 1 if classification else 0
for attempt in range(20): # Try keyword format, then random words, fail if cannot find unused
ID = random_word(length = LEN_ID,dictionary = (attempt<10))
if try_classification_based_directory:
directory = os.path.join(basepath,classification+f'_{try_classification_based_directory-1}')
else:
directory = os.path.join(basepath,ID)
try:
os.makedirs(directory)
if git and _git_has_tag(git,STR_GIT_TAG(ID)):
delete_empty_directories([directory])
else:
return directory,ID
except OSError as exc:
if exc.errno == errno.EEXIST:
if try_classification_based_directory:
try_classification_based_directory += 1
else:
if try_classification_based_directory:#There was a problem creating a directory with the keyword format
try_classification_based_directory = 0#Maybe illegal characters in parameters, try it with random words
else:#Already tried random words, something else is wrong
raise
raise ValueError(MSG_ERROR_DIR)
def _git_command(string, add_input=True):
string = 'git ' + string
output = '$ ' + string + '\n' if add_input else ''
args = shlex.split(string)
output += subprocess.check_output(args, stderr=subprocess.STDOUT).decode('UTF8')
return output
def _git_id():
return _git_command('log --format="%H" -n 1', add_input=False).rstrip()
def _git_has_tag(path,tag):
initial_directory = os.getcwd()
os.chdir(path)
    try:
        out = _git_command('tag --list',add_input = False)
        return tag in out.splitlines()
    finally:
        os.chdir(initial_directory)
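# Snapshot strategy used by _git_snapshot below: stage the complete working tree,
# create a commit, tag it with the scilog ID so it stays reachable, and immediately
# `git reset HEAD~1` so the snapshot commit does not appear in the branch history.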
def _git_snapshot(path, commit_body, ID):
initial_directory = os.getcwd()
os.chdir(path)
git_directory = _git_command('rev-parse --show-toplevel', add_input=False).rstrip()
os.chdir(git_directory)
active_branch = _git_command('rev-parse --abbrev-ref HEAD', add_input=False)
try:
out = ''
out += _git_command('add --all')
out += _git_command('rm -r --cached .')
out += _git_command('add --all')
out += _git_command('commit --allow-empty -m "{0} \n {1}"'.format(STR_GIT_COMMIT_TITLE(active_branch), commit_body))
out += _git_command('tag {}'.format(STR_GIT_TAG(ID)))
snap_id = _git_id()
out += _git_command('reset HEAD~1')
except subprocess.CalledProcessError as e:
raise GitError(traceback.format_exc(), out + '\n' + str(e.output))
except Exception:
raise GitError(traceback.format_exc(), out)
os.chdir(initial_directory)
return snap_id, out, git_directory
|
import timeit
import pickle
import os
import errno
import datetime
import shutil
import warnings
import traceback
import pstats
import io
import sys
import gc
import inspect
import importlib
import re
import pathlib
import types
import operator
import subprocess
import shlex
import json
import contextlib
import stat
import itertools
import ast
import builtins
import signal
from collections import OrderedDict
from string import Formatter
import numpy as np
from matplotlib import pyplot
from IPython.utils.capture import capture_output
from swutil import sys_info, np_tools, plots, misc
from swutil.validation import Positive, Integer, String, List, Tuple,Iterable
from swutil.logs import Log
from swutil.hpc import Locker
from swutil.misc import string_dialog, no_context, random_word,\
string_from_seconds,input_with_prefill,is_identifier,smart_range
from swutil.files import append_text, delete_empty_files,\
delete_empty_directories, find_directories, path_from_keywords
from swutil.decorators import print_peak_memory, add_runtime
from swutil.collections import unique
class GitError(Exception):
def __init__(self, message, git_log):
super(GitError, self).__init__(message)
self.git_log = git_log
GRP_WARN = 'Warning'
GRP_ERROR = 'Error'
FILE_DEBUG = '.debug'
FILE_OUTPUT = 'output.pkl'
FILE_INPUT = 'input.pkl'
FILE_INFO = 'summary.txt'
FILE_AUX = 'aux_data.pkl'
FILE_RUNTIME = 'runtime.txt'
FILE_MEMORY = 'memory.txt'
FILE_LOAD = 'load.sh'
FILE_EXP_ERR = 'stderr.txt'
FILE_EXP_OUT = 'stdout.txt'
FILE_LOG = 'log.txt'
FILE_GITLOG = 'git.txt'
FILE_ERR = 'err.txt'
FILE_SOURCE = 'source.txt'
FILE_WD = 'working_directory'
FILE_ANALYSIS = 'analysis'
FILE_EXP = lambda i: f'experiment{i}'
FILE_RANDOMSTATE = 'randomstate.pkl'
STR_GIT_TAG = lambda ID: f'scilog_{ID}'
STR_GIT_LOG = lambda sha1, log: f'#Created git commit {sha1} as snapshot of current state of git repository using the following commands:\n{log}'
STR_GIT_COMMIT_TITLE = lambda branch: f'Snapshot of working directory of branch {branch}'
STR_GIT_COMMIT_BODY = lambda name, ID, directory: f'Created for scilog entry {ID} in {directory}'
STR_LOADSCRIPT = ('#!/bin/sh \n '
+ f' xterm -e {sys.executable} -i -c '
+ r'''"
print('>>> import scilog');
import scilog;
print('>>> entry = scilog.load()');
entry = scilog.load();
try:
import pandas as pd;
print('>>> import pandas as pd');
print('>>> experiments = pd.DataFrame(entry[\'experiments\'])');
experiments = pd.DataFrame(entry['experiments']);
print(experiments);
except:
pass;"''')
STR_MEMFILE = lambda value,memory_profile: value + (
'' if memory_profile == 'detail'
else 'MB (Use `memory_profile==\'detail\'` for a more detailed breakdown)'
)
STR_SOURCE = lambda n, func, module,source: (('#Experiments were conducted with ' if n != 1 else '#Experiment was conducted with ')
+ ('class' if inspect.isclass(func) else
(f'{func.__class__.__name__}' if isinstance(func,(types.MethodType,types.FunctionType)) else
f'instance of {func.__class__.__name__}'))
+ (f' called {func.__name__}' if hasattr(func, '__name__') else '')
+ f' from the module {module} whose source code is given below:\n{source}')
STR_TIME = '%y-%m-%d %H:%M:%S'
def STR_PARAMETERS_PROMPT(func,external,current_parameters,known_parameters,allow_variables,class_instance,allow_all_keys):
if class_instance:
why= f'to pass to instance of {func.__class__.__name__}'
else:
if external:
why = f'to fill in `{external}`'
else:
name = _get_name(func)
if inspect.isclass(func):
why= f'to initialize class {name}'
else:
why = f'to pass to {name}'
parameters_string = ', '.join(f'{key}={value!r}' for key,value in current_parameters.items())
require_parameters=[key for key in known_parameters if key not in current_parameters]
if require_parameters:
parameters_string += (', ' if parameters_string else '') + ', '.join(f'{key}=' for key in require_parameters)
if allow_all_keys:
parameters_string += '[, <kwarg>=<value>]*'
return f'>> Specify {"variables or " if allow_variables else ""}parameters {why} ({parameters_string}):\n\t'
def STR_PARAMETERS_ALLOWED(passed_keys,known_parameters):
forbidden = [key for key in passed_keys if key not in known_parameters]
if len(forbidden)>1:
        out = '!! Cannot specify parameters '+', '.join(f'`{key}`' for key in forbidden[:-1]) + f', and `{forbidden[-1]}`'
else:
out = '!! Cannot specify parameter '+f'`{forbidden[0]}`'
return out
STR_PARAMETERS_FORMAT = '!! Input must have form `<key>=<value>[,<key>=<value>]*`\n!! Enter `help` for more information'
STR_PARAMETERS_HELP = lambda allow_variables: (
'?? Parameters are specified by `<key>=<value>` with <key> a Python identifier and <value> a Python expression.'
+(
(
'\n?? Variables have the same syntax, except <value> has the form var(<iterable>).\n'
'?? Variables are used to specify arguments that are varied in a specified range.\n'
'?? Note the difference between <key>=[0,1] and <key>=var([0,1]):\n'
'?? In the first case, `[0,1]` is passed at once; in the second case it is iterated over.'
) if allow_variables else ''
)
)
MSG_DEBUG = 'Debug mode. Entry is not stored permanently, stdout and stderr are not captured, no git commit is created'
MSG_NOGIT = 'Could not find git repository. No snapshot commit will be created'
MSG_START_ANALYSIS = 'Updating analysis'
MSG_START_EXPERIMENT = lambda i,n_experiments,inp: (f'Running experiment {i}' +
(' with variable values {}{}'.format('\n\t' if '\n' in repr(inp) else '',repr(inp))
if inp != {} else ''))
MSG_START_GIT = lambda repo:'Creating snapshot of current working tree of repository \'{}\'. Check {}'.format(repo,FILE_GITLOG)
def MSG_START_EXPERIMENTS(name,variables,parameters):
msg = f'Will call `{name}`'
extend=''
new_line=False
if parameters:
new_line='\n' in str(parameters)
extend= ' with parameters {}{}'.format("\n\t" if new_line else "",parameters)
if variables:
s_var = 'variables' if len(variables)>1 else 'variable'
variable_strings = [(variable[0],str(variable[1])) for variable in variables]
newline_in_vs = any('\n' in vs[1] for vs in variable_strings)
sep = '\n\t' if (len(variables)>1 or newline_in_vs) else ', '
strings = [('' if sep == ', ' else '-') +f'`{vs[0]}`'+ (f' varying in `{vs[1]}`' if not newline_in_vs else '') for vs in variable_strings]
extend += (" \n" if new_line else " ") +(f'and {s_var}' if extend else f' with {s_var}')+(' ' if sep==', ' else sep)+sep.join(strings)
if not extend:
extend =' once'
return msg + extend
MSG_START_ENTRY = lambda directory: f'Created scilog entry {directory}'
MSG_FINISH_EXPERIMENT = lambda i,n_experiments,runtime,result,external: 'Finished experiment {} in {}{}'.format(i,string_from_seconds(runtime),
'' if ('\n' in f'{result}') else (f'. Check {os.path.join(FILE_EXP(i),FILE_EXP_OUT)}' if external else f'. Output: {result}'))
MSG_FINISH_ENTRY=lambda directory: f'Completed scilog entry {directory}'
MSG_SUCCESS = 'All experiments finished successfully'
MSG_FAIL = 'Some experiments failed'
MSG_FINISH_GIT = lambda sha1: f'Successfully created git commit {sha1}'
MSG_ERROR_NOMATCH = 'Could not find matching scilog entry'
MSG_ERROR_MULTIMATCH = lambda entries:'Multiple matching scilog entries (to iterate through all use need_unique=False):\n{}'.format('\n'.join(entries))
MSG_ERROR_LOAD = lambda name: f'Error loading {name}. Are all required modules in the Python path?'
MSG_ERROR_INSTANTIATION = lambda name:f'Could not instantiate class {name} with given parameters'
MSG_ERROR_PARALLEL = 'Error during parallel execution. Try running with `parallel=False`'
MSG_ERROR_BASH_ANALYSIS = 'Cannot analyze output in bash mode'
MSG_ERROR_GIT = lambda file:f'Error during git snapshot creation. Check {file}'
MSG_ERROR_EXPERIMENT = lambda i:f'Experiment {i} failed. Check {os.path.join(FILE_EXP(i),FILE_EXP_ERR)}'
MSG_ERROR_ANALYSIS = lambda file: f'Analysis could not be completed. Check {file}'
MSG_ERROR_DIR = 'Could not create scilog entry directory'
MSG_EXCEPTION_STORE = lambda file: f'Could not store {file}'
MSG_EXCEPTION_ANALYSIS = 'Exception during online analysis'
MSG_EXCEPTION_EXPERIMENT = lambda i: f'Exception during handling of experiment {i}. Check {FILE_ERR}'
MSG_WARN_SOURCE = 'Could not find source code'
MSG_WARN_LOADSCRIPT = 'Error during load script creation'
MSG_WARN_PARALLEL = ('Could not find pathos. This might cause problems with parallel execution. '
                     + 'Install pathos via `pip install pathos`.')
MSG_WARN_MEMPROF = 'Could not find memory_profiler. Install memory_profiler via `pip install memory_profiler`.'
MSG_WARN_DILL = ('Could not find dill. Some items might not be storable. '
                 + 'Storage of numpy arrays will be slow. '
                 + 'Install dill via `pip install dill`.')
MSG_INTERRUPT = f'Kill signal received. Stored {FILE_INFO}, closing now.'
LEN_ID = 8
#TODO(low,high) think about using inspect.formatargspec(inspect.getargspec(func)) to directly parse args and kwargs of user input even without named argument
#TODO(med,high) understand ellipses in variable input: Do this at the string input level, so 2**3,...,2**6 can be understood.
#TODO(med,low) make scilog --show work for all scilog entries in current git repo even outside of cwd
#TODO(high,low) make scilog --show smarter: if FUNC doesn't match any scilog entry path, try if it matches a scilog entry ID
#TODO(high,high) remove analysis functionality from scilog.py and add scilog --analyze working as follows: provide a list of entry identifiers (ID or paths) as well as a function that accepts scilog entries (i.e. the summary dicts). the source code file (foo.py) of that function (foo.func) is copied in the analysis subsubdirectories `foo_x` of each scilog entry
#TODO technically: `scilog --analyze X3DH,UH1X --parameters [...] --variables [...] foo.func` starts a scilog run with arguments func=foo.func, analysis=True [effects: git=False, base_directory=tempfile.mktemp(), func is called with parameters={**parameters, entries=[scilog.load(identifier) for identifier in analysis]}; the log does not say 'created scilog entry' but instead says which entries will be analyzed with what, and finishes with "added analysis <classification>_x to X3DH and <classification>_y to UH1X"; the entry is copied into subdirectory analyze/<classification>_x of X3DH and UH1X, with x possibly being adapted to what is already in the analysis of X3DH and UH1X]
#TODO make load ignore subdirectories of scilog entries (to avoid listing analysis entries)
#TODO(?,?) communicate to plots.save
#TODO(low,med) understand <param>=<string> without quotes around <string> (simple and stupid: fail, add unrecognized variable to locals, repeat...)
#TODO(low,med) understand scilog foo(a=1)(b=var()) by defining foo in locals() and have it return another function that takes yet more arguments
#TODO(med,low) if copy_output is a path, try to copy that path and only terminate when succeeded (e.g. when it starts existing) also, add argument check_done and if it is provided only try copying as soon as it returns True
#TODO(low,low) store completion flag
#TODO(high,low) make scilog --show show [log, current-stdout,current-stderr] if entry not completed, (so you can avoid screen -r and navigation to the filesystem directory)
#TODO(low,low) make scilog --show show scilog-stderr if it exists and, if all experiments failed, also show current-stderr of last experiment in that case (if at least one succeeded leave it to user to navigate to the failed experiment)
#TODO(med,low) make scilog --show default to no-git (add --git)
#TODO(?,?) make scilog --show first look through screen sessions
#TODO(low,med) Store final note in notes.txt, add --update <REASON> to scilog which then flags all previous runs with same config as outdated in their notes.txt
#TODO(low,low) add --note flag
#TODO(med,low) extend scilog ls output by scilog status (running, terminated, error,n_experiments) (store log files)
#TODO(med,low) include no-shutdown script
def record(func, variables=None, name=None, base_directory=None, aux_data=None,
analysis=None, runtime_profile=False, memory_profile=False,
git=True, no_date=False, parallel=False,
copy_output = None, parameters=None,debug = None,classification= None,dry_run=False):
'''
Call :code:`func` once or multiple times and store results along with auxiliary information
about runtime and memory usage, installed modules, source code, hardware, etc.
code:`func` is called once for each combination of variable values as
specified by the variable ranges in :code:`variables`.
For example, :code:`func` can be a numerical algorithm and :code:`variables`
    can be used to specify different mesh resolutions as follows:
    `variables = {'h': [2**(-l) for l in range(10)]}`
with the goal to assess the rate of convergence.
Another example would be to specify a list of subroutines with the goal to find
the best subroutine in terms of runtime/memory consumption/....
In the following, each call of :code:`func` is called an 'experiment'.
    Scilog creates a directory -- specified by :code:`base_directory`, :code:`name`,
and optional parameters or a randomly generated ID -- with the following content:
*summary.txt:
*name: Name of scilog entry
*ID: Alphanumeric string identifying the entry
*modules: Module versions
*time: Time of execution
*experiments: For each experiment
*string representation of input,
*string representation of output,
*runtime
*status
*(optional)memory usage
*(optional)parameters: Parameters that are equal for all experiments
*(optional)git_commit: SHA1 of git commit
*(optional)aux_data: Argument :code:`aux_data`
*log.txt
*(optional)err.txt
*(optional)git.txt: stdout of git snapshot creation
*source.txt: Source code of the module containing :code:`func`
*For each experiment a subdirectory 'experiment<i>' with:
*output.pkl: Output of :code:`func`
*(optional)input.pkl: Argument passed to :code:`func`
*(optional)working_directory/: Working directory for call of :code:`func`,
unless parameter :code:`copy_output` is specified
*(optional)stderr.txt:
*(optional)stdout.txt:
*(optional)runtime_profile.txt: Extensive runtime information for each experiment
*(optional)memory_profile.txt: Memory usage information for each experiment
*(optional) analysis/: output of function :code:`analysis`
*(optional)stderr.txt
*(optional)stdout.txt
*(optional)working_directory/: Working directory for call of :code:`analysis`
To load a scilog entry, use the function :code:`scilog.load`.
This function loads summary.txt and replaces the string representations of outputs and inputs
by the actual Python objects.
:param func: Function to be called with different experiment configurations
:type func: function
:param variables: Arguments for call of :code:`func` that are varied.
If not specified, then :code:`func` is called once, without arguments
:type variables: List(-like) if single variable or dictionary of lists
:param name: Name of scilog entry.
If not specified, :code:`func.__name__` is used
:type name: String
:param base_directory: Root directory for storage
:type base_directory: String
:param aux_data: Auxiliary data that should be stored along with the results
:type aux_data: Any
:param analysis: Function that is called on the scilog entry once all experiments have finished
Can be used, e.g., for plotting
:param runtime_profile: Store extensive runtime information
May slow down execution
:type runtime_profile: Boolean
:param memory_profile: Track memory usage
May slow down execution
:type memory_profile: Boolean
:param git: Create git snapshot commit
The resulting commit is tagged with the entry ID and resides outside the branch history
The repository path may be specified, else it will be automatically detected
Add 'scilog' to your .gitignore to avoid storing the scilog entries in each snapshot.
(Should you ever want to get rid of the snapshots,
use `git tag --list 'scilog_*'|xargs -I % git tag -d %` to remove all scilog commits or
use function `clean_git_repository` to remove all scilog commits whose scilog entry does not reside in repository anymore)
:type git: Boolean or String
:param no_date: Do not store outputs in sub-directories grouped by calendar week
:type no_date: Boolean
:param parameters: Parameters that are equal for all experiments
If :code:`func` is a class, these are used to instantiate this class
:type parameters: Dictionary
:param debug: Force debug mode (otherwise detected automatically)
:type debug: Boolean
:param copy_output: The contents of this directory will be copied into the scilog entry directory
:type copy_output: String
:param classification: Short, human readable description of entry
:type classification: String
:param dry_run: Only setup directory and experiments, don't execute anything
:type dry_run: Boolean
:return: Path of scilog entry
:rtype: String
'''
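# A minimal usage sketch (the function name `solve` and its arguments are hypothetical, not part of scilog):
#   def solve(h, tol=1e-6): ...
#   scilog.record(solve, variables={'h': [2**(-l) for l in range(5)]}, parameters={'tol': 1e-8})
# This would call `solve` once per mesh width and store each call as one experiment of a single entry.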
########################### FIX ARGUMENTS ###########################
variables,parameters,func_initialized,classification_t = _setup_experiments(variables,parameters,func)
classification = classification or classification_t
name = name or _get_name(func)
if dry_run:
return variables,parameters,classification,name
external = _external(func)
debug = debug if debug is not None else aux.isdebugging()
if debug:
git = False
log_nogit = False
if git is not False:
if git is True:
git = _get_func_directory(func)
if not _has_git(git):
log_nogit = True
git = False
########################### SETUP INPUTS ##############################
if len(variables)!=1:#Will result in infinite loop if one variable is infinite.
t = itertools.product(*[variable[1] for variable in variables])
else:
t = ([x] for x in variables[0][1])
inputs = ({variable[0]:tt[i] for i,variable in enumerate(variables)} for tt in t)
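# e.g. variables = [('h', [1, 2]), ('p', [3, 4])] yields the inputs {'h': 1, 'p': 3}, {'h': 1, 'p': 4}, {'h': 2, 'p': 3}, {'h': 2, 'p': 4}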
try:
n_experiments = int(np.prod([len(variable[1]) for variable in variables]))
except TypeError:
n_experiments = None
########################### CREATE SCILOG ENTRY ########################
entry_directory,ID = _get_directory(base_directory,func,name,no_date,debug,git,classification)
log_file = os.path.join(entry_directory, FILE_LOG)
err_file = os.path.join(entry_directory, FILE_ERR)
info_file = os.path.join(entry_directory, FILE_INFO)
load_file = os.path.join(entry_directory, FILE_LOAD)
aux_data_file = os.path.join(entry_directory, FILE_AUX)
source_file_name = os.path.join(entry_directory, FILE_SOURCE)
git_file = os.path.join(entry_directory, FILE_GITLOG)
locker = Locker()
_log = Log(write_filter=True, print_filter=True, file_name=log_file,lock = locker.get_lock()) # Logging strategy: 1) Redirect out and err of user functions (analysis and experiment) to their own files
_err = Log(write_filter=True, print_filter=False, file_name=err_file,lock = locker.get_lock()) # 2) Log errors outside user functions in _err 3) Log everything (user-err and _err, as well as other info) in _log
_log.log(MSG_START_ENTRY(entry_directory))
if log_nogit:
_log.log(group = GRP_WARN,message = MSG_NOGIT)
if debug:
_log.log(group =GRP_WARN,message = MSG_DEBUG)
info = {
'parameters' : {key:repr(parameters[key]) for key in parameters},
'variables' : [repr(variable) for variable in variables],
'name' : name,
'ID' : ID,
'time' : datetime.datetime.now().strftime(STR_TIME),
'func' : external or repr(func),
'parallel' : parallel,
'hardware' : sys_info.hardware(),
'gitcommit' : None,
'modules' : None,
'note': None,
'experiments' : {
'runtime':[],
'memory':[],
'status':[],
'input':[],
'output':[]
}
}
if not external:
info['modules'] = sys_info.modules()
try:
source = STR_SOURCE(n_experiments,func,sys.modules[func.__module__].__file__, ''.join(inspect.getsourcelines(sys.modules[func.__module__])[0]))
append_text(source_file_name, source)
except Exception: # TypeError only?
_err.log(traceback.format_exc())
_log.log(group=GRP_WARN, message=MSG_WARN_SOURCE)
if memory_profile is not False:
if memory_profile == 'detail':
try:
import memory_profiler # @UnusedImport, just to check if this will be possible in _run_single_experiment
except ImportError:
_log.log(group=GRP_WARN, message=MSG_WARN_MEMPROF)
memory_profile = True
else:
memory_profile = True
try:
with open(load_file, 'w') as fp:
fp.write(STR_LOADSCRIPT)
st = os.stat(load_file)
os.chmod(load_file, st.st_mode | stat.S_IEXEC)
except Exception:
_err.log(message=traceback.format_exc())
_log.log(group=GRP_WARN, message=MSG_WARN_LOADSCRIPT)
if git:
try:
_log.log(message=MSG_START_GIT(os.path.basename(os.path.normpath(git))))
with (capture_output() if not debug else no_context()) as c:
snapshot_id, git_log, _ = _git_snapshot(path=git,commit_body=STR_GIT_COMMIT_BODY(name, ID, entry_directory), ID=ID)
append_text(git_file, STR_GIT_LOG(snapshot_id, git_log))
_log.log(message=MSG_FINISH_GIT(snapshot_id))
info['gitcommit'] = snapshot_id
except GitError as e:
_log.log(group=GRP_ERROR, message=MSG_ERROR_GIT(git_file))
_err.log(message=str(e)+'\n'+c.stderr)
append_text(git_file, e.git_log)
raise
try:
import dill
serializer = dill
except ImportError:
serializer = pickle
_log.log(group=GRP_WARN, message=MSG_WARN_DILL)
_try_store(aux_data,serializer,aux_data_file,_log,_err)
def _update_info(i, runtime, status, memory, input_str, output_str):
for (key,val) in [('runtime',runtime),
('memory',memory if memory_profile is not False else None),
('status',status),
('input',input_str),
('output',output_str),
]:
info['experiments'][key].append(val)
store_info()
def store_info():
with open(info_file,'w') as fp:
json.dump(info,fp,indent = 1,separators = (',\n', ': '))
store_info()
old_wd = os.getcwd()
########################### RUN EXPERIMENTS ###############################
args = (
(
i, input, entry_directory, func_initialized, memory_profile,
runtime_profile, _log,_err,
'pickle' if serializer == pickle else 'dill',
external, debug, copy_output,n_experiments
)
for i, input in enumerate(inputs)
)
_log.log(message=MSG_START_EXPERIMENTS(name,variables,parameters))
def close_entry():
try:
os.chdir(old_wd)
except Exception:
pass
success = all(s=='finished' for s in info['experiments']['status'])
try:
_log.log(MSG_FINISH_ENTRY(entry_directory))
if not success:
_log.log(MSG_FAIL)
except Exception:
pass
if not debug:
note = input('You may add a short note to this entry or simply press Enter to exit:')
if note:
info['note'] = note
store_info()
return entry_directory
if parallel and not debug:
try:
from pathos.multiprocessing import ProcessingPool as Pool
pool = Pool(nodes=n_experiments)
except ImportError:
_err.log(message=traceback.format_exc())
_log.log(group=GRP_WARN, message=MSG_WARN_PARALLEL)
from multiprocessing import Pool
pool = Pool(processes=n_experiments)
try:
outputs = pool.map(_run_single_experiment, args)
except pickle.PicklingError: # @UndefinedVariable
_err.log(message=traceback.format_exc())
_log.log(group=GRP_ERROR, message=MSG_ERROR_PARALLEL)
raise
for output in outputs:
_update_info(*output)
pool.close()
pool.join()
else:
for arg in args:
try:
output = _run_single_experiment(arg)
except Exception:#These come from errors in the code of _run_single_experiments. The user function errors are caught within there
_err.log(message=traceback.format_exc())
_log.log(group=GRP_ERROR, message=MSG_EXCEPTION_EXPERIMENT(arg[0]))
else:
_update_info(*output)
if analysis:
try:
_log.log(message=MSG_START_ANALYSIS)
except BrokenPipeError:#locks raise BrokenPipeError when experiments are terminated using <C-c>
sys.exit(1)
try:
with capture_output():
entry = load(path=entry_directory, need_unique=True, no_objects=False)
analyze(func=analysis, entry=entry, _log=_log, _err=_err, debug=debug)
except Exception:
_err.log(message=traceback.format_exc())
_log.log(group=GRP_ERROR, message=MSG_EXCEPTION_ANALYSIS)
return close_entry()
def _has_git(git):
cwd = os.getcwd()
try:
os.chdir(git)
#subprocess.check_call(['git','status'],stdout = subprocess.PIPE,stderr=subprocess.PIPE)
subprocess.check_call(['git','rev-parse','HEAD',],stdout = subprocess.PIPE,stderr=subprocess.PIPE)#Sometimes git status works but rev-parse, which is used later, fails; e.g. on repos without initial commit
return True
except subprocess.CalledProcessError:
return False
finally:
os.chdir(cwd)
def _external(func):
return func if isinstance(func,str) else False
def _get_func_directory(func):
return os.getcwd() if _external(func) else os.path.dirname(sys.modules[func.__module__].__file__)
def _get_base_directory(directory,func,name,no_date):
directory = directory or _get_func_directory(func)
directory = os.path.join(directory,'scilog')
if no_date:
basepath = os.path.join(directory, name)
else:
date = datetime.date.today()
basepath = os.path.join(directory, date.strftime('w%Wy%y'), name)
return os.path.abspath(basepath)
def _get_name(func):
if _external(func):
nowhite = re.compile(r'\S*')
path = nowhite.match(func).group(0)
name = os.path.basename(path)
else:
try:#func is a function or a class
name = func.__name__
except AttributeError:#func is an object with __call__ method
name = func.__class__.__name__
return name
def _evaluator(what,locals_dict = None):
locals_dict = locals_dict or {}
return eval(f'(lambda **kwargs: kwargs)({what})',{'range':range,'count':itertools.count,'np':np,'__builtins__':{}},locals_dict)
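# For illustration (hypothetical input string): _evaluator("p=3, h=var(range(3))", {'var': _var})
# returns {'p': 3, 'h': <_var wrapping [0, 1, 2]>}; builtins are disabled except for the names whitelisted above.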
class _var:
def __init__(self,*obj):
if len(obj)>1:#e.g. var('ab','cd','ef')
self.obj = obj
elif len(obj)==1:#e.g. var(range(3))
if Iterable.valid(obj[0]):
self.obj = list(obj[0])#turn into list so that numpy arrays go through later on (if you leave them as arrays, they will make problems in == comparisons, for example)
else:#Allows for --variables p=3 instead of --variables p=[3]
self.obj = [obj[0]]
elif len(obj)==0:
raise ValueError()
def __repr__(self):
return 'var('+repr(self.obj)+')'
def _setup_experiments(variables,parameters,func):
'''
Note: input and output `variables` have iterator type, not _var.
_var only occurs in the processing
'''
external = _external(func)
def _get_kwargs(func,external,variables,parameters,class_instance=False):
if inspect.isclass(func):
allow_variables =False
variables = None
else:
allow_variables=True
parameters_passed = parameters is not None
variables_passed = variables is not None
if parameters is None:
parameters = {}
if variables is None:
variables = {}
if external:
field_names = [fname for _, fname, _, _ in Formatter().parse(external) if fname is not None]
new_var_n = 0
new_names = []
for i,fname in enumerate(field_names):
if fname == '':
while True:
if f'arg{new_var_n}' not in field_names:
new_names.append(f'arg{new_var_n}')
break
else:
new_var_n+=1
external = external.format(
*[f'{{{new_name}}}' for new_name in new_names],
**{fname:f'{{{fname}}}' for fname in field_names if fname !='' }
)
known_parameters = OrderedDict((fname,inspect._empty) for _, fname, _, _ in Formatter().parse(external) if fname is not None)
if len(known_parameters)==1 and list(known_parameters.keys())[0] == None:
known_parameters = []
allow_all_keys = False
default_parameters = {}
else:
func_parameters = inspect.signature(func).parameters
default_parameters = {
key:value.default for key,value in func_parameters.items()
if (value.default != inspect._empty)
}
allow_all_keys = any(value.kind ==4 for key,value in func_parameters.items())
known_parameters = OrderedDict(
(key,value.default) for key,value in func_parameters.items()
if (value.kind not in [2,4])
)
kwargs=default_parameters.copy()
free_keys=lambda : allow_all_keys or any(key not in variables and key not in parameters for key in known_parameters)
required_keys=lambda : [key for key in known_parameters if key not in kwargs]
is_allowed_key=lambda key: allow_all_keys or key in known_parameters
if variables:
if not isinstance(variables,dict):#allow for single range instead of var dictionary
non_default_parameters = [key for key in known_parameters if key not in default_parameters]
if len(non_default_parameters) == 1:
variables={non_default_parameters[0]:variables}
elif len(known_parameters) ==1:
variables = {list(known_parameters.keys())[0]:variables}
else:
raise ValueError(f'Must specify name for variable {variables}')
if any(not is_allowed_key(key) or not is_identifier(key) for key in variables):
raise ValueError('Invalid variable names for function {}: {}'.format(external or func,{key for key in variables if not is_allowed_key(key)}))
variables_update = {key:_var(value) for key,value in variables.items()}
else:
variables_update = {}
if parameters:
if any(key in variables_update for key in parameters):
raise ValueError('Parameter names already defined as variables: {}'.format({key for key in parameters if key in kwargs}))
if any(not is_allowed_key(key) or not is_identifier(key) for key in parameters):
raise ValueError('Invalid parameter names for function {}: {}'.format(external or func,{key for key in parameters if not is_allowed_key(key)}))
parameters_update = parameters
else:
parameters_update = parameters
kwargs.update(**variables_update,**parameters_update)
if (((not parameters_passed and not class_instance) or (not variables_passed and allow_variables)) and free_keys()) or required_keys():
while True:
prefill=', '.join([key+'=' for key in required_keys()])
parameters_string = input_with_prefill(STR_PARAMETERS_PROMPT(func,external,kwargs,known_parameters,allow_variables,class_instance,allow_all_keys),prefill)
if parameters_string in ['?','help','--help','??']:
print(STR_PARAMETERS_HELP(allow_variables))
continue
try:
update_kwargs = _evaluator(parameters_string,{'var':_var} if allow_variables else {})
except Exception:#(ValueError,SyntaxError):
if '=help' in parameters_string:
print(STR_PARAMETERS_HELP(allow_variables))
else:
print(STR_PARAMETERS_FORMAT)
else:
kwargs.update({key: value for key,value in update_kwargs.items() if is_allowed_key(key)})
done = True
if not all(key in kwargs for key in known_parameters):
if parameters_string =='':
print(STR_PARAMETERS_FORMAT)
done = False
if any(not is_allowed_key(key) for key in update_kwargs):
print(STR_PARAMETERS_ALLOWED(update_kwargs,known_parameters))
done = False
if done:
break
return kwargs,external,default_parameters,known_parameters
if external:
def func(**kwargs):
subprocess.check_call(external.format(**kwargs), stdout=sys.stdout, stderr=sys.stderr, shell=True)
classification_variables = {}#variables.copy() if isinstance(variables,dict) else {}#can be None or a single unnamed iterable whose name will be found out only later
classification_parameters = {}#parameters.copy() if isinstance(parameters,dict) else {}# can be None
if inspect.isclass(func):# in this case, parameters are for initialization and variables for function call
parameters,_,default_parameters,_ = _get_kwargs(func,False,None,parameters)
func_initialized=func(**parameters)
variables,_,default_parameters_2,known_parameters_2 = _get_kwargs(func_initialized,False,variables,None,class_instance=True)
real_variables = {key:value for key,value in variables.items() if isinstance(value,_var)}
classification_parameters.update({key:value for key,value in parameters.items() if key not in default_parameters or (key in default_parameters and value !=default_parameters[key])})
if len(variables)<=1:#nothing possibly interesting can be said if there is only one variable except if variable was not known (i.e. keyword argument)
if not classification_parameters:#If not any classification yet take what you have
classification_variables.update({key:value for key,value in variables.items()})
else:
classification_variables.update({key:value for key,value in variables.items() if key not in known_parameters_2})
else:
classification_variables.update({key:value for key,value in variables.items() if key not in known_parameters_2 or (key in default_parameters_2 and value!=default_parameters_2[key])})
if any(key not in real_variables for key in variables if not key in default_parameters_2):#Not all nondefault parameters actually vary, so list those that do
classification_variables.update({key:value for key,value in real_variables.items() if key not in default_parameters_2})
variables = {key:(value.obj if isinstance(value,_var) else [value]) for key,value in variables.items()}#Users are prompted for variables or parameters, but if they enter parameters, i.e. a single value, they will still be handled as variables taking only one value
else:
kwargs,external,default_parameters,_ =_get_kwargs(func,external,variables,parameters)#use all as name, first params as usual (all hand selected, fill to 5), then __var_l_h
variables = {key:value.obj for key,value in kwargs.items() if isinstance(value,_var)}
parameters ={key:value for key,value in kwargs.items() if not isinstance(value,_var)}
#use classification even if only one known parameter, this helps if the braces in a bash command string are changed and suddenly control something very different
classification_parameters.update({key:value for key,value in parameters.items() if key not in default_parameters or (key in default_parameters and value!=default_parameters[key])})
classification_variables.update({key:value for key,value in variables.items() if key not in default_parameters or (key in default_parameters and value!=default_parameters[key])})
def func_initialized(**experiment):
return func(**experiment,**parameters)
variables = list(variables.items())
classification_p = path_from_keywords(classification_parameters,into='file')
classification_v = '_'.join(s.replace('_','') for s in classification_variables.keys())
classification = classification_p+('+' if classification_v else '') +classification_v
for j,variable in enumerate(variables):
if (List|Tuple).valid(variable[1]) and Ellipsis in variable[1]:
variables[j] = (variable[0],smart_range(*[e for e in variable[1] if e != Ellipsis]))
return variables,parameters,func_initialized,classification
def _run_single_experiment(arg):
(i, input, directory, func, memory_profile,
runtime_profile, _log,_err, serializer,
external, debug, copy_output,n_experiments) = arg
experiment_directory = os.path.join(directory, FILE_EXP(i))
stderr_file = os.path.join(experiment_directory, FILE_EXP_ERR)
stdout_file = os.path.join(experiment_directory, FILE_EXP_OUT)
input_file = os.path.join(experiment_directory, FILE_INPUT)
output_file = os.path.join(experiment_directory, FILE_OUTPUT)
randomstate_file = os.path.join(experiment_directory, FILE_RANDOMSTATE)
runtime_profile_file = os.path.join(experiment_directory, FILE_RUNTIME)
memory_profile_file = os.path.join(experiment_directory, FILE_MEMORY)
experiment_working_directory = os.path.join(experiment_directory, FILE_WD)
if serializer == 'pickle':
serializer = pickle
else:
import dill
serializer = dill
_log.log(MSG_START_EXPERIMENT(i,n_experiments,input))
runtime = None
output = None
input_str = repr(input)
memory = None
status = 'failed'
randomstate = None
if not external:
randomstate = np.random.get_state()
if hasattr(func, '__name__'):#func is function
temp_func = func
else:#func is object
temp_func = func.__call__
if copy_output is None:
os.makedirs(experiment_working_directory)
os.chdir(experiment_working_directory)
else:
os.makedirs(experiment_directory)
try:
_try_store(input,serializer,input_file,_log,_err)
_try_store(randomstate,serializer,randomstate_file,_log,_err)
if memory_profile == 'detail':#Needs to be before runtime decorator so it gets the full view (otherwise it will profile the runtime decorator)
m = io.StringIO()
import memory_profiler
temp_func = memory_profiler.profile(func = temp_func,stream =m ,precision = 4)
if runtime_profile:
temp_func = add_runtime(temp_func)
if memory_profile is True:#Needs to be after runtime decorator so runtime gets the full view (since peak_memory is threaded)
m = io.StringIO()
temp_func = print_peak_memory(func=temp_func, stream=m)
stderr_append = ''
with open(stderr_file, 'a', 1) as err:
with open(stdout_file, 'a', 1) as out:
with contextlib.redirect_stdout(out) if not debug else no_context():
with contextlib.redirect_stderr(err) if not debug else no_context():
tic = timeit.default_timer()
try:
output = temp_func(**input)
except Exception:
status = 'failed'
if debug:
traceback.print_exc()
try:
import ipdb as debugger
except ModuleNotFoundError:
import pdb as debugger
debugger.post_mortem(sys.exc_info()[2])
stderr_append = traceback.format_exc()
else:
status = 'finished'
runtime = timeit.default_timer() - tic
delete_empty_files([stderr_file, stdout_file])
if status == 'failed':
append_text(stderr_file, stderr_append)
_log.log(group=GRP_ERROR, message=MSG_ERROR_EXPERIMENT(i),use_lock = False)#locks are often broken already which leads to ugly printouts, also errors don't matter at this point anyway
else:
if runtime_profile:
profile, output = output
s = io.StringIO()
ps = pstats.Stats(profile, stream=s)
ps.sort_stats('cumulative')
ps.print_stats()
append_text(runtime_profile_file, s.getvalue())
s.close()
if memory_profile:
append_text(memory_profile_file,STR_MEMFILE(m.getvalue(),memory_profile))
memory = _max_mem(m.getvalue(), type=memory_profile)
except Exception:
_err.log(message=traceback.format_exc())
_log.log(group=GRP_ERROR, message=MSG_EXCEPTION_EXPERIMENT(i))
if copy_output is None:
os.chdir(directory)
else:
shutil.copytree(copy_output, experiment_working_directory, symlinks=False, ignore_dangling_symlinks=True)
delete_empty_directories([experiment_working_directory])
output_str = str(output)
_try_store(output,serializer,output_file,_log,_err)
del output
if status == 'finished':
_log.log(MSG_FINISH_EXPERIMENT(i, n_experiments, runtime, output_str,external))
gc.collect()
return (i, runtime, status, memory, input_str, output_str)
class ConvergencePlotter():
def __init__(self, *qois, cumulative=False, work=None, extrapolate=0,reference = 'self'):
'''
Create convergence plots (of given quantities of interest (qois))
This is an auxiliary class that may be used as argument for parameter
`analysis` of scilog.record or parameter `func` of scilog.analyze
:param qois: List of functions that can be applied to the outputs of an experiment
:param cumulative: Specify whether work is cumulative across the experiments
:param work: If a measure other than runtime should be used, this must be a function taking integers and returning reals
:param extrapolate: Degree of Richardson extrapolation
extrapolate = -1 uses exponential extrapolation, whereas
positive values merely improve algebraic convergence orders
'''
self.qois = qois
self.cumulative = cumulative
self.work = work
self.extrapolate = extrapolate
self.reference = reference
def __call__(self, entry):
experiments = entry['experiments']
single_reference = (self.reference == 'self')
ind_finished = [j for (j, s) in enumerate(experiments['status']) if s == 'finished']
if len(ind_finished) > 2 + (self.extrapolate if self.extrapolate >= 0 else 0):
if self.work is None:
times = experiments['runtime'][ind_finished]
else:
times = [self.work(i) for i in ind_finished]
results = experiments['output'][ind_finished]
if self.cumulative:
times = np.cumsum(times)
if not self.qois:
if hasattr(results[0], '__len__') and not isinstance(results[0], np.ndarray):
self.qois = [lambda x,k = k: x[k] for k in range(len(results[0]))]
else:
self.qois = [lambda x:x]
single_reference = True
if single_reference:
self.reference = [self.reference]*len(self.qois)
for (k, qoi) in enumerate(self.qois):
try:
pyplot.figure(k).clf()
qoi_values = np.array([qoi(result) for result in results])
qoi_times = np.array(times)
if self.extrapolate:
qoi_values, qoi_times = np_tools.extrapolate(qoi_values, qoi_times, self.extrapolate)
plots.plot_convergence(qoi_times, qoi_values,reference = self.reference[k])
plots.save('convergence')
except Exception:
traceback.print_exc()
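# A minimal usage sketch (the function `my_solver` and its variable `h` are hypothetical):
#   scilog.record(my_solver, variables={'h': [2**(-l) for l in range(8)]},
#                 analysis=ConvergencePlotter(cumulative=True))
# Once the experiments have finished, record() passes the loaded entry to the plotter,
# which saves one convergence plot per quantity of interest via plots.save.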
def _try_store(what,serializer,file,_log,_err):
if what is not None:
try:
with open(file, 'wb') as fp:
serializer.dump(what, fp)
except (TypeError, pickle.PicklingError):
_err.log(message=traceback.format_exc())
_log.log(group=GRP_WARN, message=MSG_EXCEPTION_STORE(os.path.split(file)[-1]))
def clean_git_repository(directory=None,dry_run = True):
'''
Delete all commits in repository specified by :code:`directory` which do not have matching
scilog entry in repository directory.
:param directory: Path that is under version control
:type directory: String
:param dry_run: If True, only list the unmatched commits; if False, actually delete them
:type dry_run: Bool
'''
directory = directory or os.getcwd()
os.chdir(directory)
scilog_tag = re.compile('scilog_.*')
tags = [tag for tag in _git_command('tag --list',add_input = False).splitlines() if scilog_tag.match(tag)]
git_directory = _git_command('rev-parse --show-toplevel', add_input=False).rstrip()
os.chdir(git_directory)
entries = load(need_unique=False,no_objects=True)
IDs = [entry['ID'] for entry in entries]
unmatched = [tag for tag in tags if tag[7:] not in IDs]
if unmatched:
print(f'The following scilog git commits have no matching scilog entry in {directory}:')
[print(tag) for tag in unmatched]
if dry_run:
print('Specify `dry_run=False` to remove unmatched commits')
else:
print('Removing unmatched commits...',end='')
[_git_command(f'tag -d {tag}') for tag in unmatched]
print('done')
else:
print(f'All scilog git commits have matching scilog entries in {directory}')
def analyze(entry,func, _log=None, _err=None, debug=False):
'''
Add analysis to scilog entry or entries
:param func: Function that performs analysis
:param entry: scilog entry or entries (as returned by scilog.load)
:param _log: Log object to be used instead of writing to standard stdout
:param _err: Log object to be used instead of writing to standard stderr
:param debug: If True, output is printed instead of being redirected into files
'''
if not _log:
_log = Log(print_filter=True)
if not _err:
_err = Log(print_filter=True)
try:
import dill
serializer = dill
except ImportError:
serializer = pickle
_log.log(group=GRP_WARN, message=MSG_WARN_DILL)
cwd = os.getcwd()
try:
if not inspect.isgenerator(entry):
entries = [entry]
else:
entries = entry
for entry in entries:
analysis_directory_tmp = os.path.join(entry['path'], 'tmp',FILE_ANALYSIS)
working_directory = os.path.join(analysis_directory_tmp, FILE_WD)
stderr_file = os.path.join(analysis_directory_tmp, FILE_EXP_ERR)
stdout_file = os.path.join(analysis_directory_tmp, FILE_EXP_OUT)
output_file = os.path.join(analysis_directory_tmp, FILE_OUTPUT)
os.makedirs(analysis_directory_tmp)
os.mkdir(working_directory)
os.chdir(working_directory)
output = None
stderr_append = ''
with open(stderr_file, 'a', 1) as err:
with open(stdout_file, 'a', 1) as out:
with contextlib.redirect_stdout(out) if not debug else no_context():
with contextlib.redirect_stderr(err) if not debug else no_context():
try:
output = func(entry)
except Exception:
stderr_append = traceback.format_exc()
delete_empty_files([stderr_file, stdout_file])
delete_empty_directories([working_directory])
if stderr_append:
append_text(stderr_file, stderr_append)
_log.log(group=GRP_ERROR, message=MSG_ERROR_ANALYSIS(stderr_file))
_try_store(output,serializer,output_file,_log,_err)
os.chdir(cwd)
analysis_directory = os.path.join(entry['path'], FILE_ANALYSIS)
shutil.rmtree(analysis_directory, ignore_errors=True)
shutil.move(analysis_directory_tmp, entry['path'])
shutil.rmtree(os.path.split(analysis_directory_tmp)[0], ignore_errors=True)
except Exception:
os.chdir(cwd)
class RE:
def __init__(self,s):
self.s=s
def load(search_pattern='*', path='', ID=None, no_objects=False, need_unique=True,include_modules=False,parameters=None,fix_broken_summary_txt=False):#TODO remove fix_broken_summary_txt
'''
Load scilog entry/entries.
:param search_pattern: Shell-style glob/search pattern using wildcards
If there are multiple entries of the same name (those are stored as
<name>/v0 <name>/v1 ... in the filesystem) and they should all be returned,
use `search_pattern=<name>/v*` and `need_unique=False`
:type search_pattern: String, e.g. search_pattern='foo*' matches `foobar`
:param path: Path of exact location if known (possibly only partially), relative or absolute
:type path: String, e.g. '/home/username/<project>' or '<project>'
:param no_objects: To save time, only load information about scilog entry, not results
:type no_objects: Boolean
:param need_unique: Require unique identification of scilog entry.
:type need_unique: Boolean
:param parameters: Search pattern that is applied to the scilog parameters
:type parameters: Dictionary of regular expression strings or objects (which will be converted to strings)
:return: Scilog entry
:rtype: If need_unique=True, a single Namespace object
If need_unique=False, a generator of such objects
'''
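# For illustration (entry names and parameter values are hypothetical):
#   load('foo*') returns the unique entry whose name starts with 'foo',
#   load('foo/v*', need_unique=False) returns a generator over all stored versions, and
#   load('foo*', parameters={'tol': RE('1e-0[68]')}) additionally filters on the stored parameter strings.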
deserializer = pickle
try:
import dill
deserializer = dill
except ImportError:
warnings.warn(MSG_WARN_DILL)
if os.sep in search_pattern and path == '':#Use absolute path part of search pattern as path, if existent
temp_path, temp_search_pattern = search_pattern.rsplit(os.sep, 1)
if os.path.isabs(temp_path):
path, search_pattern = temp_path, temp_search_pattern
if search_pattern[-1]!='*':
search_pattern = search_pattern+'*'
entries = []
entries.extend(find_directories(search_pattern, path=path))
entries.extend(find_directories('*/' + search_pattern, path=path))
entries = [entry for entry in entries if _is_experiment_directory(entry)]
def get_output(entry, no_objects):
file_name = os.path.join(entry, FILE_INFO)
with open(file_name, 'r') as fp:
try:
info = json.load(fp)
except Exception:
raise ValueError(f'Problem with {file_name}')
info['path'] = entry
if not include_modules:
del info['modules']
if not no_objects:
#if isinstance(info['experiments'],dict):#Old version of scilog:
# DL = info['experiments']
# info['experiments'] = [dict(zip(DL,t)) for t in zip(*DL.values())]
for j,s in enumerate(info['experiments']['status'] if not fix_broken_summary_txt else ('finished' for i in itertools.count())):
if s == 'finished':
try:
output_file_name = os.path.join(entry, FILE_EXP(j), FILE_OUTPUT)
with open(output_file_name, 'rb') as fp:
output = deserializer.load(fp)
if fix_broken_summary_txt:
info['experiments']['output'].append(output)
else:
info['experiments']['output'][j] = output
except Exception:
warnings.warn(MSG_ERROR_LOAD('file ' + output_file_name))
if fix_broken_summary_txt:
break
traceback.print_exc()
if not fix_broken_summary_txt:
try:
input_file_name = os.path.join(entry,FILE_EXP(j),FILE_INPUT)
with open(input_file_name,'rb') as fp:
input = deserializer.load(fp)
info['experiments']['input'][j] = input
except Exception:
warnings.warn(MSG_ERROR_LOAD('file ' + input_file_name))
traceback.print_exc()
for key in info['experiments']:
try:
info['experiments'][key] = np.array(info['experiments'][key])
except Exception:
pass
return info
if ID:
partial_id = re.compile(ID)
entries = [entry for entry in entries if partial_id.match(get_output(entry, no_objects = True)['ID'])]
if parameters:
parameters = {key:(repr(value) if not isinstance(value,RE) else value) for (key,value) in parameters.items()}
def matches_parameters(entry):
out = get_output(entry,no_objects=True)
if not 'parameters' in out:
return False
else:
test = out['parameters']
for key,value in parameters.items():
if key not in test:
return False
if isinstance(value,RE):
if not re.match(value.s,test[key]):
return False
else:
if not value == test[key]:
return False
return True
entries = [entry for entry in entries if matches_parameters(entry)]
if len(entries)>1 and need_unique:
basenames = [os.path.basename(get_output(entry,no_objects=True)['path']).rsplit('_',1) for entry in entries]
if len(set(bn[0] for bn in basenames))==1:
entries = [max(entries,key = lambda entry: get_output(entry,no_objects=True)['time'])]
entries = unique(entries)
if not need_unique:
return (get_output(entry, no_objects=no_objects) for entry in entries)
else:
if len(entries) == 0:
raise ValueError(MSG_ERROR_NOMATCH)
if len(entries) > 1:
raise ValueError(MSG_ERROR_MULTIMATCH(entries))
return get_output(entries[0], no_objects=no_objects)
def _is_experiment_directory(directory):
return os.path.isfile(os.path.join(directory, FILE_INFO))
def _max_mem(m, type): # @ReservedAssignment
if m == '':
return -1
if type == 'detail': # Output of memory_profiler package
find = re.compile(r'.*?(\d{1,}\.\d{4}) MiB.*')
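# memory_profiler emits lines such as "   12   38.1680 MiB    0.0000 MiB   x = np.ones(10**6)" (hypothetical example);
# the regex captures the first "MiB" column, i.e. the cumulative memory usage reported for each line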
matches = [find.match(line) for line in m.splitlines()]
values = [float(match.groups()[0]) for match in matches if match is not None]
return max(values) - min(values)
else: # Output of print_peak_memory
return float(m)
def _get_directory(directory,func,name,no_date, debug, git, classification):
basepath = _get_base_directory(directory,func,name,no_date)
if debug:
directory = os.path.join(basepath, FILE_DEBUG)
try:
shutil.rmtree(directory)
except FileNotFoundError:
pass
os.makedirs(directory)
return directory, FILE_DEBUG
try_classification_based_directory = 1 if classification else 0
for attempt in range(20): # Try keyword format, then random words, fail if cannot find unused
ID = random_word(length = LEN_ID,dictionary = (attempt<10))
if try_classification_based_directory:
directory = os.path.join(basepath,classification+f'_{try_classification_based_directory-1}')
else:
directory = os.path.join(basepath,ID)
try:
os.makedirs(directory)
if git and _git_has_tag(git,STR_GIT_TAG(ID)):
delete_empty_directories([directory])
else:
return directory,ID
except OSError as exc:
if exc.errno == errno.EEXIST:
if try_classification_based_directory:
try_classification_based_directory += 1
else:
if try_classification_based_directory:#There was a problem creating a directory with the keyword format
try_classification_based_directory = 0#Maybe illegal characters in parameters, try it with random words
else:#Already tried random words, something else is wrong
raise
raise ValueError(MSG_ERROR_DIR)
def _git_command(string, add_input=True):
string = 'git ' + string
output = '$ ' + string + '\n' if add_input else ''
args = shlex.split(string)
output += subprocess.check_output(args, stderr=subprocess.STDOUT).decode('UTF8')
return output
def _git_id():
return _git_command('log --format="%H" -n 1', add_input=False).rstrip()
def _git_has_tag(path,tag):
initial_directory = os.getcwd()
os.chdir(path)
try:
out = _git_command('tag --list',add_input = False)
return tag in out.splitlines()
except subprocess.CalledProcessError:
os.chdir(initial_directory)
raise
def _git_snapshot(path, commit_body, ID):
initial_directory = os.getcwd()
os.chdir(path)
git_directory = _git_command('rev-parse --show-toplevel', add_input=False).rstrip()
os.chdir(git_directory)
active_branch = _git_command('rev-parse --abbrev-ref HEAD', add_input=False)
try:
out = ''
out += _git_command('add --all')
out += _git_command('rm -r --cached .')
out += _git_command('add --all')
out += _git_command('commit --allow-empty -m "{0} \n {1}"'.format(STR_GIT_COMMIT_TITLE(active_branch), commit_body))
out += _git_command('tag {}'.format(STR_GIT_TAG(ID)))
snap_id = _git_id()
out += _git_command('reset HEAD~1')
except subprocess.CalledProcessError as e:
raise GitError(traceback.format_exc(), out + '\n' + str(e.output))
except Exception:
raise GitError(traceback.format_exc(), out)
os.chdir(initial_directory)
return snap_id, out, git_directory
|
#!/usr/bin/env python3
import sys
from functools import reduce
tree_encounter_check = lambda pos: 1 if pos == "#" else 0
def main(forest):
slope_mode = [
(1, 1),
(3, 1),
(5, 1),
(7, 1),
(1, 2),
]
mode_to_result = [(mode, resolve_encounters(forest, *mode)) for mode in slope_mode]
for mode, result in mode_to_result:
print(f"For {mode}, encounters={result}")
results = [r for _, r in mode_to_result]
result = reduce(lambda x, y: x * y, results)
print(f"product({','.join(str(r) for r in results)}) = {result}")
def resolve_encounters(forest, shift_right, shift_bottom, verbose = "-v" in sys.argv) -> int:
map_length = len(forest[0])
# Resolve the initial position as the first encounter
n_tree_encounter = tree_encounter_check(forest[0][0])
column = shift_right # Start from initial position + shift_right
if verbose:
print(f">> For Right {shift_right}, Down {shift_bottom}")
print(("X" if forest[0][0] == "#" else "O") + forest[0][1:])
for row, line in enumerate(forest[1:]):
if (row + 1) % shift_bottom != 0:
if verbose: print(line)
continue
# Consider this new tile
column_rel = column % map_length
n_tree_encounter += tree_encounter_check(line[column_rel])
if verbose:
marked_line = line[0:column_rel] + ("X" if line[column_rel] == "#" else "O") + line[column_rel+1:]
print(marked_line)
# And continue to the next slope
column += shift_right
return n_tree_encounter
if __name__ == '__main__':
main([i.strip() for i in open(sys.argv[1]).readlines()])
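# Usage note (the input file name is just an example): `python3 day03.py input.txt -v` prints each slope's
# traversal with X/O markers; without -v only the per-slope encounter counts and their product are printed.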
|
from collections import defaultdict
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from typing import Callable
from itertools import product
from functools import lru_cache
from time import time
import platform
import multiprocessing
#empyrical risk metrics module
from empyrical import (sortino_ratio,omega_ratio,annual_volatility,cagr,conditional_value_at_risk,downside_risk,stability_of_timeseries,tail_ratio,value_at_risk)
import random
import os
import traceback
import numpy as np
np.seterr(divide='ignore',invalid='ignore')
import matplotlib.pyplot as plt
#matplotlib styles: bmh, ggplot
plt.style.use("ggplot")
import scipy.stats as scs
import seaborn as sns
from pandas import DataFrame
from deap import creator, base, tools, algorithms
import redis
import zlib
import pickle
REDIS_CLIENT =redis.Redis("localhost",12580)
from vnpy.trader.constant import (Direction, Offset, Exchange, Interval, Status,OrderType)
from vnpy.trader.database import database_manager
from vnpy.trader.object import OrderData, TradeData, BarData, TickData
from vnpy.trader.utility import (extract_vt_symbol,round_to)
from vnpy.app.cta_strategy.base import (BacktestingMode, EngineType, STOPORDER_PREFIX, StopOrder, StopOrderStatus)
from vnpy.app.cta_strategy.template import CtaTemplate
sns.set_style('whitegrid')
creator.create('FitnessMax', base.Fitness, weights=(1.0,)) #optimization direction: 1 maximizes, -1 minimizes
creator.create('Individual', list, fitness=creator.FitnessMax)
#trading days per year
TRADING_DAY = 365 #365 or 252
class OptimizationSetting:
'''
Backtesting optimization settings
'''
def __init__(self):
''''''
self.params = {}
self.target_name = ''
def add_parameter(self, name: str, start: float, end: float = None, step: float = None ):
"""
Set an optimization parameter range
"""
if not end and not step:
self.params[name] = [start]
return
if start >= end:
print('参数优化起始点必须小于终止点')
return
if step <= 0:
print('参数优化步进必须大于0')
return
value = start
value_list = []
while value <= end:
value_list.append(value)
value += step
self.params[name] = value_list
def set_target(self, target_name: str):
"""设置优化目标"""
self.target_name = target_name
def generate_setting(self):
keys = self.params.keys()
values = self.params.values()
products = list(product(*values))
settings = []
for p in products:
setting = dict(zip(keys, p))
settings.append(setting)
return settings
def generate_setting_ga(self):
''''''
settings_ga = []
settings = self.generate_setting()
for d in settings:
param = [tuple(i) for i in d.items()]
settings_ga.append(param)
return settings_ga
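# A minimal usage sketch (the parameter name 'atr_window' and target 'sharpe_ratio' are hypothetical):
#   setting = OptimizationSetting()
#   setting.add_parameter('atr_window', 10, 30, 5)   # candidate values 10, 15, 20, 25, 30
#   setting.set_target('sharpe_ratio')
#   settings = setting.generate_setting()            # one dict per parameter combination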
class BacktestingEngine:
"""
Backtesting engine
"""
engine_type = EngineType.BACKTESTING
gateway_name = 'BACKTESTING'
def __init__(self):
self.vt_symbol = ''
self.symbol = ''
self.exchange = None
self.start = None
self.end = None
self.rate = 0
self.slippage = 0
self.size = 1
self.price_tick = 0
self.capital = 100000
self.strategy_class = None
self.strategy = None
self.tick: TickData
self.bar: BarData
self.datetime = None
self.interval = None
self.days = 0
self.callback = None
self.history_data = []
self.stop_order_count = 0
self.stop_orders = {}
self.active_stop_orders = {}
self.limit_order_count = 0
self.limit_orders = {}
self.active_limit_orders = {}
self.trade_count = 0
self.trades = {}
self.logs = []
self.daily_results = {}
self.daily_df = None
#path for saving backtesting and optimization results
self.result_path = None
# position P&L initialization
self.long_avg_cost = 0 #average cost of long position
self.short_avg_cost = 0 #average cost of short position
self.long_pos = 0 #long position
self.short_pos = 0 #short position
self.long_profit_total = 0 #total long P&L
self.short_profit_total = 0 #total short P&L
#net value metrics
self.net_value = 0
self.net_value_list = []
#monthly P&L state
self.last_month_date = None
self.month_pnl = 0
self.month_dict = {}
def set_capital(self,capital):
"""设置初始资金"""
self.capital = capital
def clear_data(self):
'''
Clear all data of last backtesting.
'''
self.strategy = None
self.tick = None
self.bar = None
self.datetime = None
self.stop_order_count = 0
self.stop_orders.clear()
self.active_stop_orders.clear()
self.limit_order_count = 0
self.limit_orders.clear()
self.active_limit_orders.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
def set_parameters(self, vt_symbol: str, start: datetime, rate: float, slippage: float, size: float, price_tick: float, capital: int = 0, end: datetime = None, mode: BacktestingMode = BacktestingMode.BAR, ):
''''''
self.mode = mode
self.vt_symbol = vt_symbol
if self.mode == BacktestingMode.BAR:
self.interval = Interval.MINUTE
self.rate = rate
self.slippage = slippage
self.size = size
self.price_tick = price_tick
self.start = start
self.symbol, exchange,gateway_name = extract_vt_symbol(vt_symbol)
self.exchange = Exchange(exchange)
if capital:
self.capital = capital
if end:
self.end = end
if mode:
self.mode = mode
def add_strategy(self, strategy_class: type, setting: dict):
''''''
self.strategy_class = strategy_class
self.strategy = strategy_class(self, strategy_class.__name__, self.vt_symbol, setting )
#initialize strategy P&L attributes
self.strategy.capital = 0 #initial capital
self.strategy.balance = self.capital #total balance
self.strategy.long_pos = 0 #long position
self.strategy.short_pos = 0 #short position
self.strategy.long_profit = 0 #long profit
self.strategy.short_profit = 0 #short profit
self.strategy.size = self.size #contract multiplier
self.strategy.price_tick = self.price_tick #minimum price increment
self.strategy.active_limit_orders = self.active_limit_orders #unfilled limit orders
self.strategy.active_stop_orders = self.active_stop_orders #unfilled stop orders
if setting:
unactive_param = [loss_param for loss_param in list(setting.keys()) if loss_param not in self.strategy.parameters]
assert not unactive_param,f"不在策略参数列表内的回测参数:{unactive_param}"
def load_data(self):
"""加载历史数据"""
self.output("开始加载历史数据")
if not self.end:
self.end = datetime.now()
self.history_data.clear() #clear old history before loading new data
assert self.start < self.end,"回测开始时间必须小于结束时间,请核实!"
if self.mode == BacktestingMode.BAR:
self.history_data = load_bar_data(self.symbol, self.exchange, self.interval, self.start, self.end)
else:
self.history_data = load_tick_data(self.symbol, self.exchange, self.start, self.end)
self.output(f"历史数据加载完成,数据量:{len(self.history_data)}")
def run_backtesting(self):
"""回放历史数据"""
if self.mode == BacktestingMode.BAR:
func = self.new_bar
else:
func = self.new_tick
self.strategy.on_init()
# Use the first [days] of history data for initializing strategy
day_count = 0
ix = 0
for ix, data in enumerate(self.history_data):
if self.datetime and data.datetime.day != self.datetime.day:
day_count += 1
if day_count >= self.days:
break
self.datetime = data.datetime
try:
self.callback(data)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.strategy.inited = True
self.output('策略初始化完成')
self.strategy.on_start()
self.strategy.trading = True
self.output('开始回放历史数据')
#replay history_data through on_tick/on_bar
for data in self.history_data[ix:]:
try:
func(data)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.output('历史数据回放结束')
def calculate_result(self):
"""
Return daily_df: DataFrame
"""
self.output('开始计算逐日盯市盈亏')
if not self.trades:
self.output('成交记录为空,无法计算')
return
# Add trade data into daily result.
for trade in self.trades.values():
trade_date = trade.datetime.date()
daily_result = self.daily_results[trade_date]
daily_result.add_trade(trade)
# Calculate daily result by iteration.
pre_close = 0
start_pos = 0
for daily_result in self.daily_results.values():
daily_result.calculate_pnl(pre_close, start_pos, self.size, self.rate, self.slippage )
pre_close = daily_result.close_price
start_pos = daily_result.end_pos
# Generate dataframe
results = defaultdict(list)
for daily_result in self.daily_results.values():
for key, value in daily_result.__dict__.items():
results[key].append(value)
self.daily_df = DataFrame.from_dict(results).set_index('date')
self.output('逐日盯市盈亏计算完成')
return self.daily_df
#----------------------------------------------------------------------
def statistics_status(self,array):
"""返回array均值,标准差,偏度,峰度"""
stats = scs.describe(array)
return stats[2],np.sqrt(stats[3]),stats[4],stats[5]
#----------------------------------------------------------------------
def calculate_statistics(self, df: DataFrame = None,strategy_name=None,write_result=True):
"""计算回测结果"""
from pyecharts.charts import (Bar,Line,Graph,Gauge,Page)#柱状图,折线图,关系图,仪表盘,多图同表
from pyecharts import options as opts
self.output('开始计算策略统计指标')
if df is None:
#initialize statistics variables
start_date = ''
end_date = ''
total_days = 0
profit_days = 0
loss_days = 0
end_balance = 0
max_drawdown = 0
max_drawdown_percent = 0
max_drawdown_duration = 0
total_net_pnl = 0
daily_net_pnl = 0
total_commission = 0
daily_commission = 0
total_slippage = 0
daily_slippage = 0
total_turnover = 0
daily_turnover = 0
total_trade_count = 0
daily_trade_count = 0
total_return = 0
annual_return = 0
return_mean = 0
return_std = 0
return_skew = 0
return_kurt = 0
sharpe_ratio = 0
calmar_ratio = 0
return_drawdown = 0
return_drawdown_ratio = 0
sortino_info = 0
omega_info = 0
annual_volatility_info = 0
cagr_info = 0
annual_downside_risk = 0
c_var = 0
var_info = 0
calmar_ratio = 0
stability_return = 0
tail_ratio_info = 0
else:
# Calculate balance related time series data
trades_list =[] #list of trade details
df['balance'] = df['net_pnl'].cumsum() + self.capital #total balance
df['return'] = (np.log(df['balance']) - np.log(df['balance'].shift(1))).fillna(0) #daily log return
df['highlevel'] = (df['balance'].rolling( min_periods=1, window=len(df), center=False).max()) #running balance high
df['drawdown'] = df['balance'] - df['highlevel']
df['ddpercent'] = df['drawdown'] / df['highlevel'] * 100 #drawdown percentage
# Calculate statistics value
start_date = df.index[0]
end_date = df.index[-1]
total_days = len(df)
profit_days = len(df[df['net_pnl'] > 0])
loss_days = len(df[df['net_pnl'] < 0])
end_balance = df['balance'].iloc[-1] #final balance
max_drawdown = df['drawdown'].min() #maximum drawdown
max_drawdown_percent = df['ddpercent'].min() #maximum drawdown percentage
#maximum drawdown period; during optimization max_drawdown_end may be NaN and needs special handling
max_drawdown_end = df["drawdown"].idxmin()
if isinstance(max_drawdown_end,date):
max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days
else:
max_drawdown_start = ""
max_drawdown_end = ""
max_drawdown_duration = 0
total_net_pnl = df['net_pnl'].sum() #total net P&L
daily_net_pnl = total_net_pnl / total_days #daily net P&L
total_commission = df['commission'].sum() #total commission
daily_commission = total_commission / total_days
total_slippage = df['slippage'].sum() #total slippage
daily_slippage = total_slippage / total_days
total_turnover = df['turnover'].sum()
daily_turnover = total_turnover / total_days
total_trade_count = df['trade_count'].sum() #total number of trades
daily_trade_count = total_trade_count / total_days
total_return = (end_balance / self.capital - 1) * 100 #total return
annual_return = total_return / total_days * TRADING_DAY #annualized return
#mean, std, skewness and kurtosis of returns
return_mean,return_std,return_skew, return_kurt = self.statistics_status(df['return'].values)
#sortino_info
sortino_info = sortino_ratio(df['return'])
omega_info = omega_ratio(df['return'])
#annualized volatility
annual_volatility_info = annual_volatility(df['return'])
#compound annual growth rate
cagr_info = cagr(df['return'])
#annualized downside risk
annual_downside_risk = downside_risk(df['return'])
"""CVaR (conditional value at risk): the average loss of the portfolio, conditional on the loss exceeding a given VaR threshold."""
c_var = conditional_value_at_risk(df['return'])
"""Value at risk (VaR) is a measure of the risk of investment loss. It estimates how much a set of investments
might lose (with a given probability) over a set period (e.g. one day) under normal market conditions. Financial firms and regulators commonly use VaR to gauge the amount of assets needed to cover possible losses."""
var_info = value_at_risk(df['return'])
#calmar_ratio: annualized return divided by the historical maximum drawdown percentage
calmar_ratio = annual_return / abs(max_drawdown_percent)
#stability of returns
stability_return = stability_of_timeseries(df['return'])
#tail ratio: 0.25 == 1/4, i.e. gain of 1 against risk of 4
tail_ratio_info = tail_ratio(df['return'])
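#annualized Sharpe ratio with the risk-free rate taken as zero: mean daily log return over its standard deviation, scaled by sqrt(TRADING_DAY)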
if return_std:
sharpe_ratio = return_mean / return_std * np.sqrt(TRADING_DAY)
else:
sharpe_ratio = 0
#return-to-drawdown ratio
return_drawdown = -total_net_pnl/max_drawdown
#return-percentage-to-drawdown-percentage ratio
return_drawdown_ratio = -total_return / max_drawdown_percent
for index in range(len(df['balance'])):
if index == 0:
nets_pnl = 1
else:
nets_pnl = df['balance'][index]/df['balance'][index-1]-1
self.net_value += nets_pnl
self.net_value_list.append(round(float(self.net_value),3))
#----------------------------------------------------------------------
if write_result:
self.output('-' * 70)
if hasattr(self.strategy,'strategy_name'):
self.output(f"策略名称:{self.strategy.strategy_name},交易标的:{self.vt_symbol}")
else:
self.output(f"策略名称:{strategy_name},交易标的:{self.vt_symbol}")
self.output(f"首个交易日:\t{start_date},最后交易日:\t{end_date},总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days},亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.3f},结束资金:\t{end_balance:,.3f}")
self.output(f"总盈亏:\t{total_net_pnl:,.3f}")
self.output(f"总收益率:\t{total_return:,.3f}%,复利净值:\t{self.net_value_list[-1]:,.3f}")
self.output(f"收益回撤比:\t{return_drawdown:,.3f}")
self.output(f"收益率回撤率比:\t{return_drawdown_ratio:,.3f}")
self.output(f"最大回撤资金: \t{max_drawdown:,.3f},最大回撤日期:\t{max_drawdown_start}至{max_drawdown_end},最大回撤天数: \t{max_drawdown_duration}")
self.output(f"最大回撤率: {max_drawdown_percent:,.3f}%")
self.output(f"总手续费:\t{total_commission:,.3f}")
self.output(f"总滑点:\t{total_slippage:,.3f}")
self.output(f"总成交金额:\t{total_turnover:,.3f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.3f}")
self.output(f"日均手续费:\t{daily_commission:,.3f}")
self.output(f"日均滑点:\t{daily_slippage:,.3f}")
self.output(f"日均成交金额:\t{daily_turnover:,.3f}")
self.output(f"日均成交笔数:\t{daily_trade_count:,.3f}")
self.output(f"年化收益率:\t{annual_return:,.3f}%")
self.output(f"日均收益率:\t{return_mean*100:,.3f}%,收益率标准差:\t{return_std*100:,.3f}%,收益率偏度:\t{return_skew:,.3f},收益率峰度:\t{return_kurt:,.3f}")
self.output(f"sharpe_ratio:\t{sharpe_ratio:,.3f}")
self.output(f"calmar_ratio:\t{calmar_ratio:,.3f}")
self.output(f"sortino_info:\t{sortino_info:,.3f}")
self.output(f"omega_info:\t{omega_info:,.3f}")
self.output(f"年化波动率:\t{annual_volatility_info:,.3f}")
self.output(f"年化复合增长率:\t{cagr_info:,.3f}")
self.output(f"年化下行风险率:\t{annual_downside_risk:,.3f}")
self.output(f"c_var:\t{c_var:,.3f}")
self.output(f"var_info:\t{var_info:,.3f}")
self.output(f"收益稳定率:\t{stability_return:,.3f}")
self.output(f"尾部比率:\t{tail_ratio_info:,.3f}")
#save backtesting statistics and trade details into the backtesting_result folder
if hasattr(self.strategy,'strategy_name'):
symbol,exchange,gateway_name = extract_vt_symbol(self.vt_symbol)
path_symbol = f"{symbol}_{exchange.value}"
if platform.uname().system == "Windows":
self.result_path = f"C:\\ProgramData\\Anaconda3\\Lib\\site-packages\\vnpy-2.1.0-py3.7.egg\\vnpy\\app\\cta_strategy\\backtesting_result\\{datetime.now().date()}_bcaktesting_{path_symbol}_{self.strategy.strategy_name}.csv"
elif platform.uname().system == "Linux":
self.result_path = f"/home/xldistance/anaconda3/lib/python3.7/site-packages/vnpy/app/cta_strategy/backtesting_result/{datetime.now().date()}_bcaktesting_{path_symbol}_{self.strategy.strategy_name}.csv"
else:
if platform.uname().system == "Windows":
self.result_path = f"C:\\ProgramData\\Anaconda3\\Lib\\site-packages\\vnpy-2.1.0-py3.7.egg\\vnpy\\app\\cta_strategy\\backtesting_result\\{datetime.now().date()}_bcaktesting_{strategy_name}.csv"
elif platform.uname().system == "Linux":
self.result_path = f"/home/xldistance/anaconda3/lib/python3.7/site-packages/vnpy/app/cta_strategy/backtesting_result/{datetime.now().date()}_bcaktesting_{strategy_name}.csv"
df.to_csv(self.result_path,encoding='utf_8_sig') #save backtesting statistics to CSV
#convert trade objects into readable dicts and save them to a local CSV
for trade_class in df['trades']:
if trade_class:
for trade in trade_class:
trades_list.append(trade.__dict__)
DataFrame(trades_list).to_csv(self.result_path.replace('_bcaktesting_','_trade_dict_'),encoding='utf_8_sig')
#----------------------------------------------------------------------
#pyecharts charts are written to html; mark_point marks points; mark_point_symbol shapes: 'circle', 'diamond', 'rounddiamond', 'triangle', 'pin', 'arrow'
bar_1 = Bar()
bar_1.add_xaxis(df['balance'].index.tolist())
if hasattr(self.strategy,'strategy_name'):
bar_1.add_yaxis(f"策略:{self.vt_symbol}_{self.strategy.strategy_name}\n\n总资金\n\n起止时间:{df["balance"].index[0]}至{df["balance"].index[-1]}",df['balance'].tolist()) #主标题
else:
bar_1.add_yaxis(f"策略:{self.vt_symbol}_{strategy_name}\n\n总资金\n\n起止时间:{df["balance"].index[0]}至{df["balance"].index[-1]}",df['balance'].tolist()) #主标题
bar_1.set_global_opts(opts.TitleOpts(title=f"资金\n\n总收益率:{total_return:,.3f}%"),toolbox_opts=opts.ToolboxOpts()) #副标题,ToolboxOpts设置工具箱配置项
bar_1.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
#plot trade records
trade_datetime = []
trade_price = []
for trade in trades_list:
trade_datetime.append(trade["datetime"])
trade_price.append(trade["price"])
trades_opts_data = [opts.MarkPointItem(
name = f"orderid:{trade["orderid"]},标的:{trade["vt_symbol"]},方向:{trade["direction"].value},{trade["offset"].value},价格:{trade["price"]},成交量:{trade["volume"]}", #成交详细信息添加到name
itemstyle_opts = opts.ItemStyleOpts(color= "#ec0000" if trade["direction"].value == "多" else "#00da3c"),
coord = [trade["datetime"],trade["price"] * random.randrange(1000,1010) / 1000], #标注的坐标
value = trade["direction"].value + trade["offset"].value
) for trade in trades_list]
bar_2 = Line()
bar_2.add_xaxis(trade_datetime)
bar_2.add_yaxis(f"交易价格:交易时间:{trade_datetime[0]}至{trade_datetime[-1]}\n\n成交笔数:{len(trades_list)}",trade_price) #主标题
bar_2.set_global_opts(opts.TitleOpts(title="交易记录"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_2.set_series_opts(label_opts=opts.LabelOpts(is_show=False), #标签配置项
markpoint_opts = opts.MarkPointOpts(data = trades_opts_data,
#标记的图形圆形:"circle'",方形:"rect'", 圆角方形:"roundRect'",三角形:"triangle'",菱形:"diamond'",水滴:"pin'",箭头:'arrow'
symbol = "pin"
),
itemstyle_opts = opts.ItemStyleOpts(color = "#ec0000",color0 = "#00da3c"),
) #系列配置项
bar_3 = Bar()
bar_3.add_xaxis(df['balance'].index.tolist())
bar_3.add_yaxis(f"复利净值最高点:{max(self.net_value_list)}\t复利净值最低点:{min(self.net_value_list)}",self.net_value_list)
bar_3.set_global_opts(opts.TitleOpts(title="复利净值"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_3.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
bar_4 = Bar()
bar_4.add_xaxis(df['drawdown'].index.tolist())
bar_4.add_yaxis(f"回撤资金\n\n最大回撤资金:{max_drawdown:,.3f}\n最大回撤日期: \t{max_drawdown_start}至{max_drawdown_end},最大回撤天数: \t{max_drawdown_duration}",df['drawdown'].tolist())
bar_4.set_global_opts(opts.TitleOpts(title="资金"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_4.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
bar_5 = Bar()
bar_5.add_xaxis(df['ddpercent'].index.tolist())
bar_5.add_yaxis(f"回撤百分比\n\n最大回撤率:{max_drawdown_percent:,.3f}%",df['ddpercent'].tolist())
bar_5.set_global_opts(opts.TitleOpts(title="回撤百分比"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_5.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
bar_6 = Bar()
bar_6.add_xaxis(df['net_pnl'].index.tolist())
bar_6.add_yaxis(f"日盈亏\n\n最大日盈利:{df["net_pnl"].max():,.3f}\n\n最大日亏损:{df["net_pnl"].min():,.3f}",df['net_pnl'].tolist())
bar_6.set_global_opts(opts.TitleOpts(title="日盈亏"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_6.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
for pnl_index in df['net_pnl'].index:
month_date = f"{pnl_index.year}-{pnl_index.month}"
if month_date == self.last_month_date:
self.month_pnl += df['net_pnl'][pnl_index]
else:
#月份减一保存实际月份收益
self.month_dict.update({month_date:self.month_pnl})
for key,value in list(self.month_dict.items()):
if isinstance(key,datetime):
continue
key = datetime.strptime(key,"%Y-%m") - relativedelta(months = 1)
self.month_dict.update({key:value})
#month_dict删除原始的str键值对
for key,value in list(self.month_dict.items()):
if isinstance(key,str):
self.month_dict.pop(key)
self.month_pnl = df['net_pnl'][pnl_index]
self.last_month_date = month_date
self.month_dict.pop(list(self.month_dict.keys())[0])
max_month_pnl = max(self.month_dict.values())
min_month_pnl = min(self.month_dict.values())
bar_7 = Bar()
bar_7.add_xaxis(list(self.month_dict.keys()))
bar_7.add_yaxis(f"月盈亏\n\n最大月盈利:{max_month_pnl:,.3f}\n\n最大月亏损:{min_month_pnl:,.3f}",list(self.month_dict.values()))
bar_7.set_global_opts(opts.TitleOpts(title="月盈亏"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_7.set_series_opts(label_opts=opts.LabelOpts(is_show=False)
) #系列配置项
hist,bin_edges= np.histogram(df['net_pnl'], bins=50)
bar_8 = Bar()
bar_8.add_xaxis(bin_edges[1:].tolist())
bar_8.add_yaxis("盈亏分布直方图",hist.tolist())
bar_8.set_global_opts(opts.TitleOpts(title="频数"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_8.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
bar_9 = Bar()
bar_9.add_xaxis(df['commission'].index.tolist())
bar_9.add_yaxis(f"每日手续费\n\n日最高手续费:{df["commission"].max():,.3f}",df['commission'].tolist())
bar_9.set_global_opts(opts.TitleOpts(title="手续费"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_9.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
page = Page()
page.add(bar_1)
page.add(bar_2)
page.add(bar_3)
page.add(bar_4)
page.add(bar_5)
page.add(bar_6)
page.add(bar_7)
page.add(bar_8)
page.add(bar_9)
#图表结果保存为html
page.render(self.result_path.replace('.csv','.html'))
#----------------------------------------------------------------------
statistics = {
'start_date': start_date,
'end_date': end_date,
'total_days': total_days,
'profit_days': profit_days,
'loss_days': loss_days,
'capital': self.capital,
'end_balance': end_balance,
'max_drawdown': max_drawdown,
'max_drawdown_percent': max_drawdown_percent,
"max_drawdown_duration": max_drawdown_duration,
'total_net_pnl': total_net_pnl,
'daily_net_pnl': daily_net_pnl,
'total_commission': total_commission,
'daily_commission': daily_commission,
'total_slippage': total_slippage,
'daily_slippage': daily_slippage,
'total_turnover': total_turnover,
'daily_turnover': daily_turnover,
'total_trade_count': total_trade_count,
'daily_trade_count': daily_trade_count,
'total_return': total_return,
'annual_return': annual_return,
'return_mean': return_mean,
'return_std': return_std,
'return_skew': return_skew,
'return_kurt': return_kurt,
'sharpe_ratio': sharpe_ratio,
'calmar_ratio': calmar_ratio,
'sortino_info': sortino_info,
'omega_info': omega_info,
'annual_volatility_info': annual_volatility_info,
'cagr_info': cagr_info,
'annual_downside_risk': annual_downside_risk,
'c_var': c_var,
'var_info': var_info,
'stability_return': stability_return,
'tail_ratio_info': tail_ratio_info,
'return_drawdown': return_drawdown,
'return_drawdown_ratio': return_drawdown_ratio,
}
for key,value in statistics.items():
if value in (np.inf,-np.inf):
value = 0
statistics[key] = np.nan_to_num(value)
self.output("策略统计指标计算完成")
return statistics
#----------------------------------------------------------------------
def get_information_ratio(self,returns,benchmark=0.00008):
#benchmark基准收益率
diff = returns - benchmark
return np.mean(diff) / np.std(diff) * np.sqrt(TRADING_DAY)
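# Illustrative note (not part of the original engine): get_information_ratio annualises the mean
# excess return over its standard deviation. A minimal worked sketch, assuming a small numpy array
# of daily returns and the module-level TRADING_DAY constant:
#
#     import numpy as np
#     returns = np.array([0.001, -0.002, 0.003, 0.0005])
#     benchmark = 0.00008
#     diff = returns - benchmark
#     info_ratio = np.mean(diff) / np.std(diff) * np.sqrt(TRADING_DAY)
#
# np.std here is the population standard deviation (ddof=0), matching the method above.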
#----------------------------------------------------------------------
def show_chart(self, df: DataFrame = None):
"""matplotlib画图"""
if df is None:
return
plt.figure(figsize=(10, 16))
balance_plot = plt.subplot(5, 1, 1)
balance_plot.set_title('Balance')
df['balance'].plot(legend=True)
drawdown_plot = plt.subplot(5, 1, 2)
drawdown_plot.set_title('Drawdown')
drawdown_plot.fill_between(range(len(df)), df['drawdown'].values)
drawdown_percent = plt.subplot(5, 1, 3)
drawdown_percent.set_title('DrawdownPercent')
drawdown_percent.fill_between(range(len(df)), df['ddpercent'].values)
pnl_plot = plt.subplot(5, 1, 4)
pnl_plot.set_title('Daily Pnl')
df['net_pnl'].plot(kind='bar', legend=False, grid=False, xticks=[])
distribution_plot = plt.subplot(5, 1, 5)
distribution_plot.set_title('Daily Pnl Distribution')
df['net_pnl'].hist(bins=50)
plt.show()
def run_optimization(self, optimization_setting: OptimizationSetting,target_reverse =True):
"""多进程优化"""
# Get optimization setting and target
settings = optimization_setting.generate_setting()
target_name = optimization_setting.target_name
if not settings:
self.output('优化参数组合为空,请检查')
return
if not target_name:
self.output('优化目标未设置,请检查')
return
# Use multiprocessing pool for running backtesting with different setting
pool = multiprocessing.Pool(multiprocessing.cpu_count(), maxtasksperchild=1)
results = []
for setting in settings:
result = (pool.apply_async(optimize, (
target_name,
self.strategy_class,
setting,
self.vt_symbol,
self.start,
self.rate,
self.slippage,
self.size,
self.price_tick,
self.capital,
self.end,
self.mode
)))
results.append(result)
pool.close()
pool.join()
# Sort results and output
result_values = [result.get() for result in results]
result_values.sort(reverse=target_reverse, key=lambda result: result[1])
for value in result_values:
msg = f'参数:{value[0]}, 目标:{value[1]}'
self.output(msg)
return result_values
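# Hypothetical usage sketch for the multi-process grid search above (parameter names and the
# target key are placeholders; it assumes the engine was already configured via set_parameters
# and add_strategy). The settings grid is the cartesian product of the added parameters, and each
# combination is backtested in a worker process via the module-level optimize() function:
#
#     setting = OptimizationSetting()
#     setting.add_parameter("atr_length", 10, 30, 5)      # 10, 15, 20, 25, 30
#     setting.add_parameter("stop_multiplier", 2, 4, 1)   # 2, 3, 4
#     setting.set_target("sharpe_ratio")                  # key into the statistics dict
#     results = engine.run_optimization(setting)          # sorted by target, best first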
def run_ga_optimization(self, optimization_setting: OptimizationSetting, population_size=200, ngen_size=30):
"""遗传算法优化"""
# Get optimization setting and target
settings = optimization_setting.generate_setting_ga()
target_name = optimization_setting.target_name
if not settings:
self.output('优化参数组合为空,请检查')
return
if not target_name:
self.output('优化目标未设置,请检查')
return
# Define parameter generation function
def generate_parameter():
''''''
return random.choice(settings)
def mutate_individual(individual, indpb):
''''''
size = len(individual)
paramlist = generate_parameter()
for i in range(size):
if random.random() < indpb:
individual[i] = paramlist[i]
return individual,
# Create ga object function
global ga_target_name
global ga_strategy_class
global ga_setting
global ga_vt_symbol
global ga_interval
global ga_start
global ga_rate
global ga_slippage
global ga_size
global ga_price_tick
global ga_capital
global ga_end
global ga_mode
ga_target_name = target_name
ga_strategy_class = self.strategy_class
ga_setting = settings[0]
ga_vt_symbol = self.vt_symbol
ga_interval = self.interval
ga_start = self.start
ga_rate = self.rate
ga_slippage = self.slippage
ga_size = self.size
ga_price_tick = self.price_tick
ga_capital = self.capital
ga_end = self.end
ga_mode = self.mode
# Set up genetic algorithm
toolbox = base.Toolbox()
toolbox.register('individual', tools.initIterate, creator.Individual, generate_parameter)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('mate', tools.cxTwoPoint)
toolbox.register('mutate', mutate_individual, indpb=1)
toolbox.register('evaluate', ga_optimize)
toolbox.register('select', tools.selNSGA2)
total_size = len(settings)
pop_size = population_size #族群里面的个体数量
lambda_ = int(pop_size * 0.5) #每一代产生的子女数
mu = int(pop_size * 0.25) #每一代选择的个体数
cxpb = 0.5 #种群内部个体的交叉概率
mutpb = 1 - cxpb #种群内部个体的变异概率
ngen = ngen_size #产生种群代数,NGEN = 10要跑10个轮回
pop = toolbox.population(pop_size)
hof = tools.ParetoFront() # end result of pareto front
stats = tools.Statistics(lambda ind: ind.fitness.values)
np.set_printoptions(suppress=True)
stats.register('mean', np.mean, axis=0)
stats.register('std', np.std, axis=0)
stats.register('min', np.min, axis=0)
stats.register('max', np.max, axis=0)
# Multiprocessing is not supported yet.
# pool = multiprocessing.Pool(multiprocessing.cpu_count())
# toolbox.register('map', pool.map)
# Run ga optimization
self.output(f'参数优化空间:{total_size}')
self.output(f'每代族群总数:{pop_size}')
self.output(f'优良筛选个数:{mu}')
self.output(f'迭代次数:{ngen}')
self.output(f'交叉概率:{cxpb:.0%}')
self.output(f'突变概率:{mutpb:.0%}')
start = time()
algorithms.eaMuPlusLambda(
pop,
toolbox,
mu,
lambda_,
cxpb,
mutpb,
ngen,
stats,
halloffame=hof
)
end = time()
cost = int((end - start))
self.output(f'遗传算法优化完成,耗时{cost}秒')
# Return result list
results = []
for parameter_values in hof:
setting = dict(parameter_values)
target_value = ga_optimize(parameter_values)[0]
results.append((setting, target_value, {}))
self.output(results)
return results
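# Sketch of how the genetic optimiser above is typically driven (hedged example; parameter name
# and target key are placeholders). eaMuPlusLambda keeps mu parents, breeds lambda_ children each
# generation, and the ParetoFront hall-of-fame collects the best parameter tuples found:
#
#     setting = OptimizationSetting()
#     setting.add_parameter("atr_length", 10, 30, 5)
#     setting.set_target("sharpe_ratio")
#     ga_results = engine.run_ga_optimization(setting, population_size=100, ngen_size=20)
#     # ga_results is a list of (setting_dict, target_value, {}) tuples from the Pareto front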
def update_daily_close(self, price: float):
''''''
d = self.datetime.date()
daily_result = self.daily_results.get(d, None)
if daily_result:
daily_result.close_price = price
else:
self.daily_results[d] = DailyResult(d, price)
def new_bar(self, bar: BarData):
''''''
self.bar = bar
self.datetime = bar.datetime
self.cross_limit_order() #先撮合限价单
self.cross_stop_order() #再撮合停止单
self.strategy.on_bar(bar) #推送K线到策略中
self.update_postion() #更新持仓数据
self.update_daily_close(bar.close_price)
def new_tick(self, tick: TickData):
''''''
self.tick = tick
self.datetime = tick.datetime
self.cross_limit_order()
self.cross_stop_order()
self.strategy.on_tick(tick)
self.update_postion() #更新持仓数据
self.update_daily_close(tick.last_price)
def cross_limit_order(self):
'''
Cross limit order with last bar/tick data.
'''
if self.mode == BacktestingMode.BAR:
long_cross_price = self.bar.low_price
short_cross_price = self.bar.high_price
long_best_price = self.bar.open_price
short_best_price = self.bar.open_price
else:
long_cross_price = self.tick.ask_price_1
short_cross_price = self.tick.bid_price_1
long_best_price = long_cross_price
short_best_price = short_cross_price
for order in list(self.active_limit_orders.values()):
is_submitting = False
# Push order update with status 'not traded' (pending).
if order.status == Status.SUBMITTING:
is_submitting = True
order.status = Status.NOTTRADED
self.strategy.on_order(order)
# Check whether limit orders can be filled.
long_cross = (
order.direction == Direction.LONG
and order.price >= long_cross_price
and 0 < long_cross_price < 9999999
)
short_cross = (
order.direction == Direction.SHORT
and order.price <= short_cross_price
and 0 < short_cross_price < 9999999
)
if not long_cross and not short_cross:
continue
# Push order update with status 'all traded' (filled).
order.traded = order.volume
order.status = Status.ALLTRADED
self.active_limit_orders.pop(order.vt_orderid)
self.strategy.on_order(order)
# Push trade update
self.trade_count += 1
#直接成交使用order.price作为交易价
trade_price = order.price
#计算挂单成交价
if long_cross:
if is_submitting:
trade_price = min(order.price, long_best_price)
pos_change = order.volume
elif short_cross:
if is_submitting:
trade_price = max(order.price, short_best_price)
pos_change = -order.volume
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
date=self.datetime.strftime('%Y%m%d'),
time=self.datetime.strftime('%H:%M:%S'),
gateway_name=self.gateway_name,
)
trade.datetime = self.datetime
self.strategy.pos += pos_change
self.strategy.on_trade(trade)
self.trades[trade.vt_tradeid] = trade
# 更新持仓数据
self.update_postion(trade=trade)
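# Matching rule illustration for cross_limit_order above (comment only): in BAR mode a resting buy
# limit fills when its price is at or above the bar's low, a resting sell limit when its price is
# at or below the bar's high. For an order that was still in SUBMITTING status, the fill price is
# improved to the bar open when possible, e.g. buy limit 105, bar open 102, bar low 100
# -> trade_price = min(105, 102) = 102.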
def cross_stop_order(self):
'''
Cross stop order with last bar/tick data.
'''
if self.mode == BacktestingMode.BAR:
long_cross_price = self.bar.high_price
short_cross_price = self.bar.low_price
long_best_price = self.bar.open_price
short_best_price = self.bar.open_price
else:
long_cross_price = self.tick.last_price
short_cross_price = self.tick.last_price
long_best_price = long_cross_price
short_best_price = short_cross_price
for stop_order in list(self.active_stop_orders.values()):
# Check whether stop order can be triggered.
long_cross = (
stop_order.direction == Direction.LONG
and stop_order.price <= long_cross_price
)
short_cross = (
stop_order.direction == Direction.SHORT
and stop_order.price >= short_cross_price
)
if not long_cross and not short_cross:
continue
# Create order data.
self.limit_order_count += 1
order = OrderData(
symbol=self.symbol,
exchange=self.exchange,
orderid=str(self.limit_order_count),
direction=stop_order.direction,
offset=stop_order.offset,
price=stop_order.price,
volume=stop_order.volume,
traded=stop_order.volume,
status=Status.ALLTRADED,
gateway_name=self.gateway_name,
)
order.datetime = self.datetime
self.limit_orders[order.vt_orderid] = order
# Create trade data.
if long_cross:
trade_price = max(stop_order.price, long_best_price)
pos_change = order.volume
else:
trade_price = min(stop_order.price, short_best_price)
pos_change = -order.volume
self.trade_count += 1
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
date=self.datetime.strftime('%Y%m%d'),
time=self.datetime.strftime('%H:%M:%S'),
gateway_name=self.gateway_name,
)
trade.datetime = self.datetime
self.trades[trade.vt_tradeid] = trade
# Update stop order.
stop_order.vt_orderids.append(order.vt_orderid)
stop_order.status = StopOrderStatus.TRIGGERED
if stop_order.stop_orderid in self.active_stop_orders:
self.active_stop_orders.pop(stop_order.stop_orderid)
# Push update to strategy.
self.strategy.on_stop_order(stop_order)
self.strategy.on_order(order)
self.strategy.pos += pos_change
self.strategy.on_trade(trade)
# 更新持仓数据
self.update_postion(trade=trade)
#----------------------------------------------------------------------
def update_postion(self, trade =None):
"""持仓监控"""
if trade:
if trade.direction == Direction.LONG:
# 做多单
if trade.offset == Offset.OPEN:
long_cost = self.long_avg_cost * self.long_pos
long_cost += trade.price * trade.volume
# 平均成本
self.long_pos += trade.volume
if self.long_pos > 0:
self.long_avg_cost = round(long_cost / float(self.long_pos), 3)
else:
self.short_pos -= trade.volume
else:
# 做空单
if trade.offset == Offset.OPEN:
short_cost = self.short_avg_cost * self.short_pos
short_cost += trade.price * trade.volume
# 平均成本
self.short_pos += trade.volume
if self.short_pos > 0:
self.short_avg_cost = round(short_cost / float(self.short_pos), 3)
else:
self.long_pos -= trade.volume
# 多/空仓收益
if self.mode == BacktestingMode.BAR:
last_price = self.bar.close_price
else:
last_price = self.tick.last_price
long_profit = (last_price - self.long_avg_cost) * self.long_pos * self.size
short_profit = (self.short_avg_cost - last_price) * self.short_pos * self.size
if trade:
if trade.direction == Direction.LONG:
self.long_profit_total += long_profit
if trade.direction == Direction.SHORT:
self.short_profit_total += short_profit
self.strategy.long_pos = self.long_pos
self.strategy.short_pos = self.short_pos
self.strategy.long_profit = long_profit
self.strategy.short_profit = short_profit
self.strategy.balance = self.capital + self.long_profit_total + self.short_profit_total
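# Worked example of the average-cost bookkeeping above (illustrative numbers only): opening long
# 1 @ 100 then long 1 @ 110 gives long_pos = 2 and long_avg_cost = (100 + 110) / 2 = 105; with a
# last price of 112 and size = 1, long_profit = (112 - 105) * 2 * 1 = 14, which is pushed onto the
# strategy as strategy.long_profit and accumulated into strategy.balance via long_profit_total.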
def load_bar(
self, vt_symbol: str, days: int, interval: Interval, callback: Callable
):
''''''
self.days = days
self.callback = callback
def load_tick(self, vt_symbol: str, days: int, callback: Callable):
''''''
self.days = days
self.callback = callback
def send_order(self,vt_symbol, direction: Direction, offset: Offset, price: float, volume: float, stop: bool,line:bool, lock: bool,strategy:CtaTemplate,order_type:OrderType):
"""
发送委托单
"""
#价格,发单量取整到最小变动
price = round_to(price, self.price_tick)
volume = round_to(volume, 1)
#过滤非正常下单价格与委托量
if not price or not volume:
return []
#平仓时仓位为0直接返回
if offset == Offset.CLOSE:
if self.strategy.pos == 0:
return []
if stop:
vt_orderid = self.send_stop_order(vt_symbol,direction, offset, price, volume,self,OrderType.STOP)
else:
vt_orderid = self.send_limit_order(vt_symbol,direction, offset, price, volume,self,OrderType.LIMIT)
return [vt_orderid]
def send_stop_order(self, vt_symbol, direction: Direction, offset: Offset, price: float, volume: float, strategy: CtaTemplate, order_type: OrderType, ):
"""
发送本地停止单
"""
self.stop_order_count += 1
stop_order = StopOrder(
vt_symbol=self.vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop_orderid=f'{STOPORDER_PREFIX}.{self.stop_order_count}',
strategy_name=self.strategy.strategy_name,
)
self.strategy.on_stop_order(stop_order)
self.active_stop_orders[stop_order.stop_orderid] = stop_order
self.stop_orders[stop_order.stop_orderid] = stop_order
return stop_order.stop_orderid
def send_limit_order(self, vt_symbol, direction: Direction, offset: Offset, price: float, volume: float, strategy: CtaTemplate, order_type: OrderType, ):
''''''
self.limit_order_count += 1
order = OrderData(
symbol=self.symbol,
exchange=self.exchange,
orderid=str(self.limit_order_count),
direction=direction,
offset=offset,
price=price,
volume=volume,
traded=volume,
status=Status.NOTTRADED,
gateway_name=self.gateway_name,
)
order.datetime = self.datetime
self.active_limit_orders[order.vt_orderid] = order
self.limit_orders[order.vt_orderid] = order
return order.vt_orderid
def cancel_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
用vt_orderid撤销委托单
"""
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_stop_order(strategy, vt_orderid)
else:
self.cancel_limit_order(strategy, vt_orderid)
def cancel_stop_order(self, strategy: CtaTemplate, vt_orderid: str):
''''''
if vt_orderid not in self.active_stop_orders:
return
stop_order = self.active_stop_orders.pop(vt_orderid)
stop_order.status = StopOrderStatus.CANCELLED
self.strategy.on_stop_order(stop_order)
def cancel_limit_order(self, strategy: CtaTemplate, vt_orderid: str):
''''''
if vt_orderid not in self.active_limit_orders:
return
order = self.active_limit_orders.pop(vt_orderid)
order.status = Status.CANCELLED
self.strategy.on_order(order)
def cancel_all(self, strategy: CtaTemplate):
'''
Cancel all orders, both limit and stop.
'''
vt_orderids = list(self.active_limit_orders.keys())
for vt_orderid in vt_orderids:
self.cancel_limit_order(strategy, vt_orderid)
stop_orderids = list(self.active_stop_orders.keys())
for vt_orderid in stop_orderids:
self.cancel_stop_order(strategy, vt_orderid)
def write_log(self, msg: str, strategy: CtaTemplate = None):
"""
Write log message.
"""
msg = '{0}\t{1}'.format(self.datetime,msg)
self.logs.append(msg)
def send_email(self, msg: str, strategy: CtaTemplate = None):
'''
Send email to default receiver.
'''
pass
def sync_strategy_data(self, strategy: CtaTemplate = None):
pass
def get_engine_type(self):
'''
Return engine type.
'''
return self.engine_type
def put_strategy_event(self, strategy: CtaTemplate):
'''
Put an event to update strategy status.
'''
pass
def output(self, msg):
'''
Output message of backtesting engine.
'''
print(f'{datetime.now()}\t{msg}')
def get_all_trades(self):
"""
Return all trade data of current backtesting result.
"""
return list(self.trades.values())
def get_all_orders(self):
"""
Return all limit order data of current backtesting result.
"""
return list(self.limit_orders.values())
def get_all_daily_results(self):
"""
Return all daily result data.
"""
return list(self.daily_results.values())
class DailyResult:
''''''
def __init__(self, date: date, close_price: float):
''''''
self.date = date
self.close_price = close_price
self.pre_close = 0
self.trades = []
self.trade_count = 0
self.start_pos = 0
self.end_pos = 0
self.turnover = 0
self.commission = 0
self.slippage = 0
self.trading_pnl = 0
self.holding_pnl = 0
self.total_pnl = 0
self.net_pnl = 0
def add_trade(self, trade: TradeData):
''''''
self.trades.append(trade)
def calculate_pnl(
self,
pre_close: float,
start_pos: float,
size: int,
rate: float,
slippage: float,
):
''''''
self.pre_close = pre_close
# Holding pnl is the pnl from holding position at day start
self.start_pos = start_pos
self.end_pos = start_pos
self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size
# Trading pnl is the pnl from new trade during the day
self.trade_count = len(self.trades)
for trade in self.trades:
if trade.direction == Direction.LONG:
pos_change = trade.volume
else:
pos_change = -trade.volume
turnover = trade.price * trade.volume * size
self.trading_pnl += pos_change * (self.close_price - trade.price) * size
self.end_pos += pos_change
self.turnover += turnover
self.commission += turnover * rate
self.slippage += trade.volume * size * slippage
# Net pnl takes account of commission and slippage cost
self.total_pnl = self.trading_pnl + self.holding_pnl
self.net_pnl = self.total_pnl - self.commission - self.slippage
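# Worked example of the daily mark-to-market decomposition above (illustrative numbers): with
# pre_close = 100, close_price = 102, start_pos = 2 and size = 1, holding_pnl = 2 * (102 - 100) = 4.
# A single long trade of 1 @ 101 adds trading_pnl = 1 * (102 - 101) = 1, turnover = 101,
# commission = 101 * rate and slippage = 1 * size * slippage, so
# net_pnl = (4 + 1) - commission - slippage.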
def optimize(
target_name: str,
strategy_class: CtaTemplate,
setting: dict,
vt_symbol: str,
start: datetime,
rate: float,
slippage: float,
size: float,
price_tick: float,
capital: int,
end: datetime,
mode: BacktestingMode
):
'''
Function for running in multiprocessing.pool
'''
engine = BacktestingEngine()
engine.clear_data()
engine.set_parameters(
vt_symbol=vt_symbol,
start=start,
rate=rate,
slippage=slippage,
size=size,
price_tick=price_tick,
capital=capital,
end=end,
mode=mode
)
engine.add_strategy(strategy_class, setting)
engine.load_data()
engine.run_backtesting()
daily_df = engine.calculate_result()
statistics = engine.calculate_statistics(daily_df,write_result=False)
target_value = statistics[target_name]
return (str(setting), target_value, statistics)
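# optimize() above is the picklable worker used by run_optimization's process pool. A hedged
# standalone sketch (MyStrategy, the vt_symbol and the dates are placeholders, not part of this
# module):
#
#     result = optimize(
#         "sharpe_ratio", MyStrategy, {"atr_length": 20},
#         "btcusdt.BINANCE.binance", datetime(2020, 1, 1),
#         rate=0.0005, slippage=0.5, size=1, price_tick=0.01,
#         capital=100000, end=datetime(2020, 6, 1), mode=BacktestingMode.BAR,
#     )
#     # result == (str(setting), target_value, statistics_dict)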
@lru_cache(maxsize=1000000)
def _ga_optimize(parameter_values: tuple):
''''''
setting = dict(parameter_values)
result = optimize(
ga_target_name,
ga_strategy_class,
setting,
ga_vt_symbol,
ga_start,
ga_rate,
ga_slippage,
ga_size,
ga_price_tick,
ga_capital,
ga_end,
ga_mode
)
return (result[1],)
def ga_optimize(parameter_values: list):
''''''
return _ga_optimize(tuple(parameter_values))
@lru_cache(maxsize=10)
def load_bar_data(
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
):
"""bar数据redis序列化存取"""
file_name = f"{symbol}_{exchange.value}_{start.date()}_{end.date()}_bar"
redis_data = REDIS_CLIENT.hget(file_name, file_name)
if not redis_data:
bar_data = database_manager.load_bar_data( symbol, exchange, interval, start, end )
REDIS_CLIENT.hset(file_name, file_name, zlib.compress(pickle.dumps(bar_data), 5))
else:
bar_data = pickle.loads(zlib.decompress(redis_data))
return bar_data
"""数据缓存为pkl格式到本地硬盘"""
""" dir_path = f"H:\\pickle_data\\"
file_name = f"{symbol}_{exchange.value}_{start.date()}_{end.date()}_bar"
pickle_path = dir_path + file_name + ".pkl"
data_size =0
if not os.path.exists(pickle_path):
bar_data = database_manager.load_bar_data( symbol, exchange, interval, start, end )
pickle_file = open(pickle_path,'wb')
pickle.dump(bar_data,pickle_file)
pickle_file.close()
else:
pickle_file = open(pickle_path,'rb')
bar_data =pickle.load(pickle_file)
pickle_file.close()
#pickle_data文件夹大于50G删除缓存数据
for dirpath, dirnames, filenames in os.walk(dir_path):
for file_name in filenames: #当前目录所有文件名
data_size += os.path.getsize(dirpath + file_name)
if data_size / (1024 ** 3) > 50:
for dirpath, dirnames, filenames in os.walk(dir_path):
for file_name in filenames:
os.remove(dirpath + file_name)
return bar_data"""
@lru_cache(maxsize=10)
def load_tick_data(
symbol: str,
exchange: Exchange,
start: datetime,
end: datetime
):
"""tick数据redis序列化存取"""
file_name = f"{symbol}_{exchange.value}_{start.date()}_{end.date()}_tick"
redis_data = REDIS_CLIENT.hget(file_name, file_name)
if not redis_data:
tick_data = database_manager.load_tick_data( symbol, exchange, start, end )
REDIS_CLIENT.hset(file_name, file_name, zlib.compress(pickle.dumps(tick_data), 5))
else:
tick_data = pickle.loads(zlib.decompress(redis_data))
return tick_data
"""数据缓存为pkl格式到本地硬盘"""
""" dir_path = f"H:\\pickle_data\\"
file_name = f"{symbol}_{exchange.value}_{start.date()}_{end.date()}_tick"
pickle_path = dir_path + file_name + ".pkl"
data_size =0
if not os.path.exists(pickle_path):
tick_data = database_manager.load_tick_data( symbol, exchange, start, end )
pickle_file = open(pickle_path,'wb')
pickle.dump(tick_data,pickle_file)
pickle_file.close()
else:
pickle_file = open(pickle_path,'rb')
tick_data =pickle.load(pickle_file)
pickle_file.close()
#pickle_data文件夹大于50G删除缓存数据
for dirpath, dirnames, filenames in os.walk(dir_path):
for file_name in filenames: #当前目录所有文件名
data_size += os.path.getsize(dirpath + file_name)
if data_size / (1024 ** 3) > 50:
for dirpath, dirnames, filenames in os.walk(dir_path):
for file_name in filenames:
os.remove(dirpath + file_name)
return tick_data"""
# GA related global value
ga_end = None
ga_mode = None
ga_target_name = None
ga_strategy_class = None
ga_setting = None
ga_vt_symbol = None
ga_interval = None
ga_start = None
ga_rate = None
ga_slippage = None
ga_size = None
ga_price_tick = None
ga_capital = None
from collections import defaultdict
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from typing import Callable
from itertools import product
from functools import lru_cache
from time import time
import platform
import multiprocessing
#empyrical风险指标计算模块
from empyrical import (sortino_ratio,omega_ratio,annual_volatility,cagr,conditional_value_at_risk,downside_risk,stability_of_timeseries,tail_ratio,value_at_risk)
import random
import os
import traceback
import numpy as np
np.seterr(divide='ignore',invalid='ignore')
import matplotlib.pyplot as plt
#matplotlib 美化样式:bmh,ggplot
plt.style.use("ggplot")
import scipy.stats as scs
import seaborn as sns
from pandas import DataFrame
from deap import creator, base, tools, algorithms
import redis
import zlib
import pickle
REDIS_CLIENT = redis.Redis("localhost", 12580)
from vnpy.trader.constant import (Direction, Offset, Exchange, Interval, Status,OrderType)
from vnpy.trader.database import database_manager
from vnpy.trader.object import OrderData, TradeData, BarData, TickData
from vnpy.trader.utility import (extract_vt_symbol,round_to)
from vnpy.app.cta_strategy.base import (BacktestingMode, EngineType, STOPORDER_PREFIX, StopOrder, StopOrderStatus)
from vnpy.app.cta_strategy.template import CtaTemplate
sns.set_style('whitegrid')
creator.create('FitnessMax', base.Fitness, weights=(1.0,)) #优化方向1求最大值,-1求最小值
creator.create('Individual', list, fitness=creator.FitnessMax)
#年总交易日
TRADING_DAY = 365  #365 for round-the-clock markets, 252 for traditional exchanges
class OptimizationSetting:
'''
回测优化设置
'''
def __init__(self):
''''''
self.params = {}
self.target_name = ''
def add_parameter(self, name: str, start: float, end: float = None, step: float = None ):
"""
设置优化参数
"""
if not end and not step:
self.params[name] = [start]
return
if start >= end:
print('参数优化起始点必须小于终止点')
return
if step <= 0:
print('参数优化步进必须大于0')
return
value = start
value_list = []
while value <= end:
value_list.append(value)
value += step
self.params[name] = value_list
def set_target(self, target_name: str):
"""设置优化目标"""
self.target_name = target_name
def generate_setting(self):
keys = self.params.keys()
values = self.params.values()
products = list(product(*values))
settings = []
for p in products:
setting = dict(zip(keys, p))
settings.append(setting)
return settings
def generate_setting_ga(self):
''''''
settings_ga = []
settings = self.generate_setting()
for d in settings:
param = [tuple(i) for i in d.items()]
settings_ga.append(param)
return settings_ga
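# Illustrative example of the grid produced by OptimizationSetting (comment only; parameter names
# are placeholders):
#
#     setting = OptimizationSetting()
#     setting.add_parameter("fast_window", 5, 15, 5)   # [5, 10, 15]
#     setting.add_parameter("slow_window", 20)         # fixed value [20]
#     setting.set_target("total_return")
#     setting.generate_setting()
#     # -> [{'fast_window': 5, 'slow_window': 20},
#     #     {'fast_window': 10, 'slow_window': 20},
#     #     {'fast_window': 15, 'slow_window': 20}]
#     # generate_setting_ga() returns the same combinations as lists of (name, value) tuples.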
class BacktestingEngine:
"""
回测引擎
"""
engine_type = EngineType.BACKTESTING
gateway_name = 'BACKTESTING'
def __init__(self):
self.vt_symbol = ''
self.symbol = ''
self.exchange = None
self.start = None
self.end = None
self.rate = 0
self.slippage = 0
self.size = 1
self.price_tick = 0
self.capital = 100000
self.strategy_class = None
self.strategy = None
self.tick: TickData
self.bar: BarData
self.datetime = None
self.interval = None
self.days = 0
self.callback = None
self.history_data = []
self.stop_order_count = 0
self.stop_orders = {}
self.active_stop_orders = {}
self.limit_order_count = 0
self.limit_orders = {}
self.active_limit_orders = {}
self.trade_count = 0
self.trades = {}
self.logs = []
self.daily_results = {}
self.daily_df = None
#保存回测结果,优化结果路径
self.result_path = None
# 持仓盈亏初始化
self.long_avg_cost = 0 #多头持仓均价
self.short_avg_cost = 0 #空头持仓均价
self.long_pos = 0 #多头仓位
self.short_pos = 0 #空头仓位
self.long_profit_total = 0 #多头总盈亏
self.short_profit_total = 0 #空头总盈亏
#净值指标
self.net_value = 0
self.net_value_list = []
#月度盈亏参数
self.last_month_date = None
self.month_pnl = 0
self.month_dict = {}
def set_capital(self,capital):
"""设置初始资金"""
self.capital = capital
def clear_data(self):
'''
Clear all data of last backtesting.
'''
self.strategy = None
self.tick = None
self.bar = None
self.datetime = None
self.stop_order_count = 0
self.stop_orders.clear()
self.active_stop_orders.clear()
self.limit_order_count = 0
self.limit_orders.clear()
self.active_limit_orders.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
def set_parameters(self, vt_symbol: str, start: datetime, rate: float, slippage: float, size: float, price_tick: float, capital: int = 0, end: datetime = None, mode: BacktestingMode = BacktestingMode.BAR, ):
''''''
self.mode = mode
self.vt_symbol = vt_symbol
if self.mode == BacktestingMode.BAR:
self.interval = Interval.MINUTE
self.rate = rate
self.slippage = slippage
self.size = size
self.price_tick = price_tick
self.start = start
self.symbol, exchange,gateway_name = extract_vt_symbol(vt_symbol)
self.exchange = Exchange(exchange)
if capital:
self.capital = capital
if end:
self.end = end
if mode:
self.mode = mode
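# Hypothetical end-to-end usage of the engine (MyStrategy, the vt_symbol, dates and parameter
# values are placeholders; this mirrors the public call sequence of this class rather than adding
# new behaviour):
#
#     engine = BacktestingEngine()
#     engine.set_parameters(
#         vt_symbol="btcusdt.BINANCE.binance", start=datetime(2020, 1, 1),
#         rate=0.0005, slippage=0.5, size=1, price_tick=0.01,
#         capital=100000, end=datetime(2020, 6, 1), mode=BacktestingMode.BAR,
#     )
#     engine.add_strategy(MyStrategy, {"atr_length": 20})
#     engine.load_data()
#     engine.run_backtesting()
#     daily_df = engine.calculate_result()
#     stats = engine.calculate_statistics(daily_df)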
def add_strategy(self, strategy_class: type, setting: dict):
''''''
self.strategy_class = strategy_class
self.strategy = strategy_class(self, strategy_class.__name__, self.vt_symbol, setting )
#初始化策略盈亏参数
self.strategy.capital = 0 #初始资金
self.strategy.balance = self.capital #总资金
self.strategy.long_pos = 0 #多头仓位
self.strategy.short_pos = 0 #空头仓位
self.strategy.long_profit = 0 #多头收益
self.strategy.short_profit = 0 #空头收益
self.strategy.size = self.size #每手乘数
self.strategy.price_tick = self.price_tick #最小价格变动
self.strategy.active_limit_orders = self.active_limit_orders #未成交限价单
self.strategy.active_stop_orders = self.active_stop_orders #未成交停止单
if setting:
unactive_param = [loss_param for loss_param in list(setting.keys()) if loss_param not in self.strategy.parameters]
assert not unactive_param,f"不在策略参数列表内的回测参数:{unactive_param}"
def load_data(self):
"""加载历史数据"""
self.output("开始加载历史数据")
if not self.end:
self.end = datetime.now()
self.history_data.clear() #载入数据前清除历史数据
assert self.start < self.end,"回测开始时间必须小于结束时间,请核实!"
if self.mode == BacktestingMode.BAR:
self.history_data = load_bar_data(self.symbol, self.exchange, self.interval, self.start, self.end)
else:
self.history_data = load_tick_data(self.symbol, self.exchange, self.start, self.end)
self.output(f"历史数据加载完成,数据量:{len(self.history_data)}")
def run_backtesting(self):
"""回放历史数据"""
if self.mode == BacktestingMode.BAR:
func = self.new_bar
else:
func = self.new_tick
self.strategy.on_init()
# Use the first [days] of history data for initializing strategy
day_count = 0
ix = 0
for ix, data in enumerate(self.history_data):
if self.datetime and data.datetime.day != self.datetime.day:
day_count += 1
if day_count >= self.days:
break
self.datetime = data.datetime
try:
self.callback(data)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.strategy.inited = True
self.output('策略初始化完成')
self.strategy.on_start()
self.strategy.trading = True
self.output('开始回放历史数据')
#回放history_data数据到on_tick/on_bar
for data in self.history_data[ix:]:
try:
func(data)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.output('历史数据回放结束')
def calculate_result(self):
"""
返回daily_df:DataFrame
"""
self.output('开始计算逐日盯市盈亏')
if not self.trades:
self.output('成交记录为空,无法计算')
return
# Add trade data into daily result.
for trade in self.trades.values():
trade_date = trade.datetime.date()
daily_result = self.daily_results[trade_date]
daily_result.add_trade(trade)
# Calculate daily result by iteration.
pre_close = 0
start_pos = 0
for daily_result in self.daily_results.values():
daily_result.calculate_pnl(pre_close, start_pos, self.size, self.rate, self.slippage )
pre_close = daily_result.close_price
start_pos = daily_result.end_pos
# Generate dataframe
results = defaultdict(list)
for daily_result in self.daily_results.values():
for key, value in daily_result.__dict__.items():
results[key].append(value)
self.daily_df = DataFrame.from_dict(results).set_index('date')
self.output('逐日盯市盈亏计算完成')
return self.daily_df
#----------------------------------------------------------------------
def statistics_status(self,array):
"""返回array均值,标准差,偏度,峰度"""
stats = scs.describe(array)
return stats[2],np.sqrt(stats[3]),stats[4],stats[5]
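# Note on statistics_status above: scipy.stats.describe returns
# (nobs, minmax, mean, variance, skewness, kurtosis), so the method yields the mean, the standard
# deviation (square root of the variance), the skewness and the kurtosis of the series, e.g.
#
#     mean, std, skew, kurt = self.statistics_status(df['return'].values)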
#----------------------------------------------------------------------
def calculate_statistics(self, df: DataFrame = None,strategy_name=None,write_result=True):
"""计算回测结果"""
from pyecharts.charts import (Bar,Line,Graph,Gauge,Page)#柱状图,折线图,关系图,仪表盘,多图同表
from pyecharts import options as opts
self.output('开始计算策略统计指标')
if df is None:
#初始化统计变量
start_date = ''
end_date = ''
total_days = 0
profit_days = 0
loss_days = 0
end_balance = 0
max_drawdown = 0
max_drawdown_percent = 0
max_drawdown_duration = 0
total_net_pnl = 0
daily_net_pnl = 0
total_commission = 0
daily_commission = 0
total_slippage = 0
daily_slippage = 0
total_turnover = 0
daily_turnover = 0
total_trade_count = 0
daily_trade_count = 0
total_return = 0
annual_return = 0
return_mean = 0
return_std = 0
return_skew = 0
return_kurt = 0
sharpe_ratio = 0
calmar_ratio = 0
return_drawdown = 0
return_drawdown_ratio = 0
sortino_info = 0
omega_info = 0
annual_volatility_info = 0
cagr_info = 0
annual_downside_risk = 0
c_var = 0
var_info = 0
calmar_ratio = 0
stability_return = 0
tail_ratio_info = 0
else:
# Calculate balance related time series data
trades_list =[] #成交明细列表
df['balance'] = df['net_pnl'].cumsum() + self.capital #总资金
df['return'] = (np.log(df['balance']) - np.log(df['balance'].shift(1))).fillna(0) #净收益率
df['highlevel'] = (df['balance'].rolling( min_periods=1, window=len(df), center=False).max()) #净值高点
df['drawdown'] = df['balance'] - df['highlevel']
df['ddpercent'] = df['drawdown'] / df['highlevel'] * 100 #回撤百分比
# Calculate statistics value
start_date = df.index[0]
end_date = df.index[-1]
total_days = len(df)
profit_days = len(df[df['net_pnl'] > 0])
loss_days = len(df[df['net_pnl'] < 0])
end_balance = df['balance'].iloc[-1] #最终收益
max_drawdown = df['drawdown'].min() #最大回撤
max_drawdown_percent = df['ddpercent'].min() #最大回撤率
#最大回撤期,优化时max_drawdown_end可能为NAN需要做异常处理
max_drawdown_end = df["drawdown"].idxmin()
if isinstance(max_drawdown_end,date):
max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days
else:
max_drawdown_start = ""
max_drawdown_end = ""
max_drawdown_duration = 0
total_net_pnl = df['net_pnl'].sum() #总净值
daily_net_pnl = total_net_pnl / total_days #日净值
total_commission = df['commission'].sum() #总手续费
daily_commission = total_commission / total_days
total_slippage = df['slippage'].sum() #总滑点
daily_slippage = total_slippage / total_days
total_turnover = df['turnover'].sum()
daily_turnover = total_turnover / total_days
total_trade_count = df['trade_count'].sum() #总交易次数
daily_trade_count = total_trade_count / total_days
total_return = (end_balance / self.capital - 1) * 100 #总收益率
annual_return = total_return / total_days * TRADING_DAY #年化收益率
#收益率均值,标准差,偏度,峰度
return_mean,return_std,return_skew, return_kurt = self.statistics_status(df['return'].values)
#sortino_info
sortino_info = sortino_ratio(df['return'])
omega_info = omega_ratio(df['return'])
#年化波动率
annual_volatility_info = annual_volatility(df['return'])
#年化复合增长率
cagr_info = cagr(df['return'])
#年化下行风险率
annual_downside_risk = downside_risk(df['return'])
"""CVaR即条件风险价值,其含义为在投资组合的损失超过某个给定VaR值的条件下,该投资组合的平均损失值。"""
c_var = conditional_value_at_risk(df['return'])
"""风险价值(VaR)是对投资损失风险的一种度量。它估计在正常的市场条件下,在设定的时间段(例如一天)中,
一组投资可能(以给定的概率)损失多少。金融业中的公司和监管机构通常使用VaR来衡量弥补可能损失所需的资产数量"""
var_info = value_at_risk(df['return'])
#calmar_ratio:年化收益率与历史最大回撤率之间的比率
calmar_ratio = annual_return / abs(max_drawdown_percent) if max_drawdown_percent else 0
#收益稳定率
stability_return = stability_of_timeseries(df['return'])
#尾部比率0.25 == 1/4,收益1,风险4
tail_ratio_info = tail_ratio(df['return'])
if return_std:
sharpe_ratio = return_mean / return_std * np.sqrt(TRADING_DAY)
else:
sharpe_ratio = 0
#收益回撤比
#收益回撤比(最大回撤为0时置0,避免除零异常)
return_drawdown = -total_net_pnl / max_drawdown if max_drawdown else 0
#收益率回撤率比
return_drawdown_ratio = -total_return / max_drawdown_percent if max_drawdown_percent else 0
for index in range(len(df['balance'])):
if index == 0:
nets_pnl = 1
else:
nets_pnl = df['balance'].iloc[index] / df['balance'].iloc[index - 1] - 1
self.net_value += nets_pnl
self.net_value_list.append(round(float(self.net_value),3))
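# Worked example for the ratio block above (illustrative numbers only): with a daily log-return
# mean of 0.1% and a standard deviation of 1%, the annualised Sharpe ratio is
# 0.001 / 0.01 * sqrt(TRADING_DAY) = 0.1 * sqrt(365) ≈ 1.91; calmar_ratio likewise divides the
# annualised return by the absolute maximum drawdown percentage.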
#----------------------------------------------------------------------
if write_result:
self.output('-' * 70)
if hasattr(self.strategy,'strategy_name'):
self.output(f"策略名称:{self.strategy.strategy_name},交易标的:{self.vt_symbol}")
else:
self.output(f"策略名称:{strategy_name},交易标的:{self.vt_symbol}")
self.output(f"首个交易日:\t{start_date},最后交易日:\t{end_date},总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days},亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.3f},结束资金:\t{end_balance:,.3f}")
self.output(f"总盈亏:\t{total_net_pnl:,.3f}")
self.output(f"总收益率:\t{total_return:,.3f}%,复利净值:\t{self.net_value_list[-1]:,.3f}")
self.output(f"收益回撤比:\t{return_drawdown:,.3f}")
self.output(f"收益率回撤率比:\t{return_drawdown_ratio:,.3f}")
self.output(f"最大回撤资金: \t{max_drawdown:,.3f},最大回撤日期:\t{max_drawdown_start}至{max_drawdown_end},最大回撤天数: \t{max_drawdown_duration}")
self.output(f"最大回撤率: {max_drawdown_percent:,.3f}%")
self.output(f"总手续费:\t{total_commission:,.3f}")
self.output(f"总滑点:\t{total_slippage:,.3f}")
self.output(f"总成交金额:\t{total_turnover:,.3f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.3f}")
self.output(f"日均手续费:\t{daily_commission:,.3f}")
self.output(f"日均滑点:\t{daily_slippage:,.3f}")
self.output(f"日均成交金额:\t{daily_turnover:,.3f}")
self.output(f"日均成交笔数:\t{daily_trade_count:,.3f}")
self.output(f"年化收益率:\t{annual_return:,.3f}%")
self.output(f"日均收益率:\t{return_mean*100:,.3f}%,收益率标准差:\t{return_std*100:,.3f}%,收益率偏度:\t{return_skew:,.3f},收益率峰度:\t{return_kurt:,.3f}")
self.output(f"sharpe_ratio:\t{sharpe_ratio:,.3f}")
self.output(f"calmar_ratio:\t{calmar_ratio:,.3f}")
self.output(f"sortino_info:\t{sortino_info:,.3f}")
self.output(f"omega_info:\t{omega_info:,.3f}")
self.output(f"年化波动率:\t{annual_volatility_info:,.3f}")
self.output(f"年化复合增长率:\t{cagr_info:,.3f}")
self.output(f"年化下行风险率:\t{annual_downside_risk:,.3f}")
self.output(f"c_var:\t{c_var:,.3f}")
self.output(f"var_info:\t{var_info:,.3f}")
self.output(f"收益稳定率:\t{stability_return:,.3f}")
self.output(f"尾部比率:\t{tail_ratio_info:,.3f}")
#回测统计结果和交易明细保存到backtesting_result文件夹
if hasattr(self.strategy,'strategy_name'):
symbol,exchange,gateway_name = extract_vt_symbol(self.vt_symbol)
path_symbol = f"{symbol}_{exchange.value}"
if platform.uname().system == "Windows":
self.result_path = f"C:\\ProgramData\\Anaconda3\\Lib\\site-packages\\vnpy-2.1.0-py3.7.egg\\vnpy\\app\\cta_strategy\\backtesting_result\\{datetime.now().date()}_bcaktesting_{path_symbol}_{self.strategy.strategy_name}.csv"
elif platform.uname().system == "Linux":
self.result_path = f"/home/xldistance/anaconda3/lib/python3.7/site-packages/vnpy/app/cta_strategy/backtesting_result/{datetime.now().date()}_bcaktesting_{path_symbol}_{self.strategy.strategy_name}.csv"
else:
if platform.uname().system == "Windows":
self.result_path = f"C:\\ProgramData\\Anaconda3\\Lib\\site-packages\\vnpy-2.1.0-py3.7.egg\\vnpy\\app\\cta_strategy\\backtesting_result\\{datetime.now().date()}_bcaktesting_{strategy_name}.csv"
elif platform.uname().system == "Linux":
self.result_path = f"/home/xldistance/anaconda3/lib/python3.7/site-packages/vnpy/app/cta_strategy/backtesting_result/{datetime.now().date()}_bcaktesting_{strategy_name}.csv"
df.to_csv(self.result_path,encoding='utf_8_sig') #保存回测统计数据到CSV
#交易类转化为可读字典存到本地csv
for trade_class in df['trades']:
if trade_class:
for trade in trade_class:
trades_list.append(trade.__dict__)
DataFrame(trades_list).to_csv(self.result_path.replace('_bcaktesting_','_trade_dict_'),encoding='utf_8_sig')
#----------------------------------------------------------------------
#pyecharts绘图写入html,mark_point标记点,mark_point_symbol标记点图形'circle', 'diamond', 'rounddiamond', 'triangle','pin', 'arrow'可选
bar_1 = Bar()
bar_1.add_xaxis(df['balance'].index.tolist())
if hasattr(self.strategy,'strategy_name'):
bar_1.add_yaxis(f"策略:{self.vt_symbol}_{self.strategy.strategy_name}\n\n总资金\n\n起止时间:{df['balance'].index[0]}至{df['balance'].index[-1]}",df['balance'].tolist()) #主标题
else:
bar_1.add_yaxis(f"策略:{self.vt_symbol}_{strategy_name}\n\n总资金\n\n起止时间:{df['balance'].index[0]}至{df['balance'].index[-1]}",df['balance'].tolist()) #主标题
bar_1.set_global_opts(opts.TitleOpts(title=f"资金\n\n总收益率:{total_return:,.3f}%"),toolbox_opts=opts.ToolboxOpts()) #副标题,ToolboxOpts设置工具箱配置项
bar_1.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
#成交记录画图
trade_datetime = []
trade_price = []
for trade in trades_list:
trade_datetime.append(trade["datetime"])
trade_price.append(trade["price"])
trades_opts_data = [opts.MarkPointItem(
name = f"orderid:{trade['orderid']},标的:{trade['vt_symbol']},方向:{trade['direction'].value},{trade['offset'].value},价格:{trade['price']},成交量:{trade['volume']}", #成交详细信息添加到name
itemstyle_opts = opts.ItemStyleOpts(color= "#ec0000" if trade["direction"].value == "多" else "#00da3c"),
coord = [trade["datetime"],trade["price"] * random.randrange(1000,1010) / 1000], #标注的坐标
value = trade["direction"].value + trade["offset"].value
) for trade in trades_list]
bar_2 = Line()
bar_2.add_xaxis(trade_datetime)
bar_2.add_yaxis(f"交易价格:交易时间:{trade_datetime[0]}至{trade_datetime[-1]}\n\n成交笔数:{len(trades_list)}",trade_price) #主标题
bar_2.set_global_opts(opts.TitleOpts(title="交易记录"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_2.set_series_opts(label_opts=opts.LabelOpts(is_show=False), #标签配置项
markpoint_opts = opts.MarkPointOpts(data = trades_opts_data,
#标记的图形圆形:"circle'",方形:"rect'", 圆角方形:"roundRect'",三角形:"triangle'",菱形:"diamond'",水滴:"pin'",箭头:'arrow'
symbol = "pin"
),
itemstyle_opts = opts.ItemStyleOpts(color = "#ec0000",color0 = "#00da3c"),
) #系列配置项
bar_3 = Bar()
bar_3.add_xaxis(df['balance'].index.tolist())
bar_3.add_yaxis(f"复利净值最高点:{max(self.net_value_list)}\t复利净值最低点:{min(self.net_value_list)}",self.net_value_list)
bar_3.set_global_opts(opts.TitleOpts(title="复利净值"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_3.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
bar_4 = Bar()
bar_4.add_xaxis(df['drawdown'].index.tolist())
bar_4.add_yaxis(f"回撤资金\n\n最大回撤资金:{max_drawdown:,.3f}\n最大回撤日期: \t{max_drawdown_start}至{max_drawdown_end},最大回撤天数: \t{max_drawdown_duration}",df['drawdown'].tolist())
bar_4.set_global_opts(opts.TitleOpts(title="资金"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_4.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
bar_5 = Bar()
bar_5.add_xaxis(df['ddpercent'].index.tolist())
bar_5.add_yaxis(f"回撤百分比\n\n最大回撤率:{max_drawdown_percent:,.3f}%",df['ddpercent'].tolist())
bar_5.set_global_opts(opts.TitleOpts(title="回撤百分比"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_5.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
bar_6 = Bar()
bar_6.add_xaxis(df['net_pnl'].index.tolist())
bar_6.add_yaxis(f"日盈亏\n\n最大日盈利:{df['net_pnl'].max():,.3f}\n\n最大日亏损:{df['net_pnl'].min():,.3f}",df['net_pnl'].tolist())
bar_6.set_global_opts(opts.TitleOpts(title="日盈亏"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_6.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
for pnl_index in df['net_pnl'].index:
month_date = f"{pnl_index.year}-{pnl_index.month}"
if month_date == self.last_month_date:
self.month_pnl += df['net_pnl'][pnl_index]
else:
#月份减一保存实际月份收益
self.month_dict.update({month_date:self.month_pnl})
for key,value in list(self.month_dict.items()):
if isinstance(key,datetime):
continue
key = datetime.strptime(key,"%Y-%m") - relativedelta(months = 1)
self.month_dict.update({key:value})
#month_dict删除原始的str键值对
for key,value in list(self.month_dict.items()):
if isinstance(key,str):
self.month_dict.pop(key)
self.month_pnl = df['net_pnl'][pnl_index]
self.last_month_date = month_date
self.month_dict.pop(list(self.month_dict.keys())[0])
max_month_pnl = max(self.month_dict.values())
min_month_pnl = min(self.month_dict.values())
bar_7 = Bar()
bar_7.add_xaxis(list(self.month_dict.keys()))
bar_7.add_yaxis(f"月盈亏\n\n最大月盈利:{max_month_pnl:,.3f}\n\n最大月亏损:{min_month_pnl:,.3f}",list(self.month_dict.values()))
bar_7.set_global_opts(opts.TitleOpts(title="月盈亏"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_7.set_series_opts(label_opts=opts.LabelOpts(is_show=False)
) #系列配置项
hist,bin_edges= np.histogram(df['net_pnl'], bins=50)
bar_8 = Bar()
bar_8.add_xaxis(bin_edges[1:].tolist())
bar_8.add_yaxis("盈亏分布直方图",hist.tolist())
bar_8.set_global_opts(opts.TitleOpts(title="频数"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_8.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
bar_9 = Bar()
bar_9.add_xaxis(df['commission'].index.tolist())
bar_9.add_yaxis(f"每日手续费\n\n日最高手续费:{df['commission'].max():,.3f}",df['commission'].tolist())
bar_9.set_global_opts(opts.TitleOpts(title="手续费"),toolbox_opts=opts.ToolboxOpts()) #设置工具箱配置项
bar_9.set_series_opts(label_opts=opts.LabelOpts(is_show=False)) #系列配置项
page = Page()
page.add(bar_1)
page.add(bar_2)
page.add(bar_3)
page.add(bar_4)
page.add(bar_5)
page.add(bar_6)
page.add(bar_7)
page.add(bar_8)
page.add(bar_9)
#图表结果保存为html
page.render(self.result_path.replace('.csv','.html'))
#----------------------------------------------------------------------
statistics = {
'start_date': start_date,
'end_date': end_date,
'total_days': total_days,
'profit_days': profit_days,
'loss_days': loss_days,
'capital': self.capital,
'end_balance': end_balance,
'max_drawdown': max_drawdown,
'max_drawdown_percent': max_drawdown_percent,
"max_drawdown_duration": max_drawdown_duration,
'total_net_pnl': total_net_pnl,
'daily_net_pnl': daily_net_pnl,
'total_commission': total_commission,
'daily_commission': daily_commission,
'total_slippage': total_slippage,
'daily_slippage': daily_slippage,
'total_turnover': total_turnover,
'daily_turnover': daily_turnover,
'total_trade_count': total_trade_count,
'daily_trade_count': daily_trade_count,
'total_return': total_return,
'annual_return': annual_return,
'return_mean': return_mean,
'return_std': return_std,
'return_skew': return_skew,
'return_kurt': return_kurt,
'sharpe_ratio': sharpe_ratio,
'calmar_ratio': calmar_ratio,
'sortino_info': sortino_info,
'omega_info': omega_info,
'annual_volatility_info': annual_volatility_info,
'cagr_info': cagr_info,
'annual_downside_risk': annual_downside_risk,
'c_var': c_var,
'var_info': var_info,
'stability_return': stability_return,
'tail_ratio_info': tail_ratio_info,
'return_drawdown': return_drawdown,
'return_drawdown_ratio': return_drawdown_ratio,
}
for key,value in statistics.items():
if value in (np.inf,-np.inf):
value = 0
statistics[key] = np.nan_to_num(value)
self.output("策略统计指标计算完成")
return statistics
#----------------------------------------------------------------------
def get_information_ratio(self,returns,benchmark=0.00008):
#benchmark基准收益率
diff = returns - benchmark
return np.mean(diff) / np.std(diff) * np.sqrt(TRADING_DAY)
#----------------------------------------------------------------------
def show_chart(self, df: DataFrame = None):
"""matplotlib画图"""
if df is None:
return
plt.figure(figsize=(10, 16))
balance_plot = plt.subplot(5, 1, 1)
balance_plot.set_title('Balance')
df['balance'].plot(legend=True)
drawdown_plot = plt.subplot(5, 1, 2)
drawdown_plot.set_title('Drawdown')
drawdown_plot.fill_between(range(len(df)), df['drawdown'].values)
drawdown_percent = plt.subplot(5, 1, 3)
drawdown_percent.set_title('DrawdownPercent')
drawdown_percent.fill_between(range(len(df)), df['ddpercent'].values)
pnl_plot = plt.subplot(5, 1, 4)
pnl_plot.set_title('Daily Pnl')
df['net_pnl'].plot(kind='bar', legend=False, grid=False, xticks=[])
distribution_plot = plt.subplot(5, 1, 5)
distribution_plot.set_title('Daily Pnl Distribution')
df['net_pnl'].hist(bins=50)
plt.show()
def run_optimization(self, optimization_setting: OptimizationSetting,target_reverse =True):
"""多进程优化"""
# Get optimization setting and target
settings = optimization_setting.generate_setting()
target_name = optimization_setting.target_name
if not settings:
self.output('优化参数组合为空,请检查')
return
if not target_name:
self.output('优化目标未设置,请检查')
return
# Use multiprocessing pool for running backtesting with different setting
pool = multiprocessing.Pool(multiprocessing.cpu_count(), maxtasksperchild=1)
results = []
for setting in settings:
result = (pool.apply_async(optimize, (
target_name,
self.strategy_class,
setting,
self.vt_symbol,
self.start,
self.rate,
self.slippage,
self.size,
self.price_tick,
self.capital,
self.end,
self.mode
)))
results.append(result)
pool.close()
pool.join()
# Sort results and output
result_values = [result.get() for result in results]
result_values.sort(reverse=target_reverse, key=lambda result: result[1])
for value in result_values:
msg = f'参数:{value[0]}, 目标:{value[1]}'
self.output(msg)
return result_values
def run_ga_optimization(self, optimization_setting: OptimizationSetting, population_size=200, ngen_size=30):
"""遗传算法优化"""
# Get optimization setting and target
settings = optimization_setting.generate_setting_ga()
target_name = optimization_setting.target_name
if not settings:
self.output('优化参数组合为空,请检查')
return
if not target_name:
self.output('优化目标未设置,请检查')
return
# Define parameter generation function
def generate_parameter():
''''''
return random.choice(settings)
def mutate_individual(individual, indpb):
''''''
size = len(individual)
paramlist = generate_parameter()
for i in range(size):
if random.random() < indpb:
individual[i] = paramlist[i]
return individual,
# Create ga object function
global ga_target_name
global ga_strategy_class
global ga_setting
global ga_vt_symbol
global ga_interval
global ga_start
global ga_rate
global ga_slippage
global ga_size
global ga_price_tick
global ga_capital
global ga_end
global ga_mode
ga_target_name = target_name
ga_strategy_class = self.strategy_class
ga_setting = settings[0]
ga_vt_symbol = self.vt_symbol
ga_interval = self.interval
ga_start = self.start
ga_rate = self.rate
ga_slippage = self.slippage
ga_size = self.size
ga_price_tick = self.price_tick
ga_capital = self.capital
ga_end = self.end
ga_mode = self.mode
# Set up genetic algorithm
toolbox = base.Toolbox()
toolbox.register('individual', tools.initIterate, creator.Individual, generate_parameter)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('mate', tools.cxTwoPoint)
toolbox.register('mutate', mutate_individual, indpb=1)
toolbox.register('evaluate', ga_optimize)
toolbox.register('select', tools.selNSGA2)
total_size = len(settings)
pop_size = population_size #族群里面的个体数量
lambda_ = int(pop_size * 0.5) #每一代产生的子女数
mu = int(pop_size * 0.25) #每一代选择的个体数
cxpb = 0.5 #种群内部个体的交叉概率
mutpb = 1 - cxpb #种群内部个体的变异概率
ngen = ngen_size #产生种群代数,NGEN = 10要跑10个轮回
pop = toolbox.population(pop_size)
hof = tools.ParetoFront() # end result of pareto front
stats = tools.Statistics(lambda ind: ind.fitness.values)
np.set_printoptions(suppress=True)
stats.register('mean', np.mean, axis=0)
stats.register('std', np.std, axis=0)
stats.register('min', np.min, axis=0)
stats.register('max', np.max, axis=0)
# Multiprocessing is not supported yet.
# pool = multiprocessing.Pool(multiprocessing.cpu_count())
# toolbox.register('map', pool.map)
# Run ga optimization
self.output(f'参数优化空间:{total_size}')
self.output(f'每代族群总数:{pop_size}')
self.output(f'优良筛选个数:{mu}')
self.output(f'迭代次数:{ngen}')
self.output(f'交叉概率:{cxpb:.0%}')
self.output(f'突变概率:{mutpb:.0%}')
start = time()
algorithms.eaMuPlusLambda(
pop,
toolbox,
mu,
lambda_,
cxpb,
mutpb,
ngen,
stats,
halloffame=hof
)
end = time()
cost = int((end - start))
self.output(f'遗传算法优化完成,耗时{cost}秒')
# Return result list
results = []
for parameter_values in hof:
setting = dict(parameter_values)
target_value = ga_optimize(parameter_values)[0]
results.append((setting, target_value, {}))
self.output(results)
return results
def update_daily_close(self, price: float):
''''''
d = self.datetime.date()
daily_result = self.daily_results.get(d, None)
if daily_result:
daily_result.close_price = price
else:
self.daily_results[d] = DailyResult(d, price)
def new_bar(self, bar: BarData):
''''''
self.bar = bar
self.datetime = bar.datetime
self.cross_limit_order() #先撮合限价单
self.cross_stop_order() #再撮合停止单
self.strategy.on_bar(bar) #推送K线到策略中
self.update_postion() #更新持仓数据
self.update_daily_close(bar.close_price)
def new_tick(self, tick: TickData):
''''''
self.tick = tick
self.datetime = tick.datetime
self.cross_limit_order()
self.cross_stop_order()
self.strategy.on_tick(tick)
self.update_postion() #更新持仓数据
self.update_daily_close(tick.last_price)
def cross_limit_order(self):
'''
Cross limit order with last bar/tick data.
'''
if self.mode == BacktestingMode.BAR:
long_cross_price = self.bar.low_price
short_cross_price = self.bar.high_price
long_best_price = self.bar.open_price
short_best_price = self.bar.open_price
else:
long_cross_price = self.tick.ask_price_1
short_cross_price = self.tick.bid_price_1
long_best_price = long_cross_price
short_best_price = short_cross_price
for order in list(self.active_limit_orders.values()):
is_submitting = False
# Push order update with status 'not traded' (pending).
if order.status == Status.SUBMITTING:
is_submitting = True
order.status = Status.NOTTRADED
self.strategy.on_order(order)
# Check whether limit orders can be filled.
long_cross = (
order.direction == Direction.LONG
and order.price >= long_cross_price
and 0 < long_cross_price < 9999999
)
short_cross = (
order.direction == Direction.SHORT
and order.price <= short_cross_price
and 0 < short_cross_price < 9999999
)
if not long_cross and not short_cross:
continue
# Push order update with status 'all traded' (filled).
order.traded = order.volume
order.status = Status.ALLTRADED
self.active_limit_orders.pop(order.vt_orderid)
self.strategy.on_order(order)
# Push trade update
self.trade_count += 1
#直接成交使用order.price作为交易价
trade_price = order.price
#计算挂单成交价
if long_cross:
if is_submitting:
trade_price = min(order.price, long_best_price)
pos_change = order.volume
elif short_cross:
if is_submitting:
trade_price = max(order.price, short_best_price)
pos_change = -order.volume
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
date=self.datetime.strftime('%Y%m%d'),
time=self.datetime.strftime('%H:%M:%S'),
gateway_name=self.gateway_name,
)
trade.datetime = self.datetime
self.strategy.pos += pos_change
self.strategy.on_trade(trade)
self.trades[trade.vt_tradeid] = trade
# Update position data
self.update_postion(trade=trade)
def cross_stop_order(self):
'''
Cross stop order with last bar/tick data.
'''
if self.mode == BacktestingMode.BAR:
long_cross_price = self.bar.high_price
short_cross_price = self.bar.low_price
long_best_price = self.bar.open_price
short_best_price = self.bar.open_price
else:
long_cross_price = self.tick.last_price
short_cross_price = self.tick.last_price
long_best_price = long_cross_price
short_best_price = short_cross_price
for stop_order in list(self.active_stop_orders.values()):
# Check whether stop order can be triggered.
long_cross = (
stop_order.direction == Direction.LONG
and stop_order.price <= long_cross_price
)
short_cross = (
stop_order.direction == Direction.SHORT
and stop_order.price >= short_cross_price
)
if not long_cross and not short_cross:
continue
# Create order data.
self.limit_order_count += 1
order = OrderData(
symbol=self.symbol,
exchange=self.exchange,
orderid=str(self.limit_order_count),
direction=stop_order.direction,
offset=stop_order.offset,
price=stop_order.price,
volume=stop_order.volume,
traded=stop_order.volume,
status=Status.ALLTRADED,
gateway_name=self.gateway_name,
)
order.datetime = self.datetime
self.limit_orders[order.vt_orderid] = order
# Create trade data.
if long_cross:
trade_price = max(stop_order.price, long_best_price)
pos_change = order.volume
else:
trade_price = min(stop_order.price, short_best_price)
pos_change = -order.volume
self.trade_count += 1
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
date=self.datetime.strftime('%Y%m%d'),
time=self.datetime.strftime('%H:%M:%S'),
gateway_name=self.gateway_name,
)
trade.datetime = self.datetime
self.trades[trade.vt_tradeid] = trade
# Update stop order.
stop_order.vt_orderids.append(order.vt_orderid)
stop_order.status = StopOrderStatus.TRIGGERED
if stop_order.stop_orderid in self.active_stop_orders:
self.active_stop_orders.pop(stop_order.stop_orderid)
# Push update to strategy.
self.strategy.on_stop_order(stop_order)
self.strategy.on_order(order)
self.strategy.pos += pos_change
self.strategy.on_trade(trade)
# Update position data
self.update_postion(trade=trade)
#----------------------------------------------------------------------
def update_postion(self, trade=None):
"""Position monitoring"""
if trade:
if trade.direction == Direction.LONG:
# Long trade
if trade.offset == Offset.OPEN:
long_cost = self.long_avg_cost * self.long_pos
long_cost += trade.price * trade.volume
# Average cost
self.long_pos += trade.volume
if self.long_pos > 0:
self.long_avg_cost = round(long_cost / float(self.long_pos), 3)
else:
self.short_pos -= trade.volume
else:
# Short trade
if trade.offset == Offset.OPEN:
short_cost = self.short_avg_cost * self.short_pos
short_cost += trade.price * trade.volume
# Average cost
self.short_pos += trade.volume
if self.short_pos > 0:
self.short_avg_cost = round(short_cost / float(self.short_pos), 3)
else:
self.long_pos -= trade.volume
# Long/short position profit
if self.mode == BacktestingMode.BAR:
last_price = self.bar.close_price
else:
last_price = self.tick.last_price
long_profit = (last_price - self.long_avg_cost) * self.long_pos * self.size
short_profit = (self.short_avg_cost - last_price) * self.short_pos * self.size
if trade:
if trade.direction == Direction.LONG:
self.long_profit_total += long_profit
if trade.direction == Direction.SHORT:
self.short_profit_total += short_profit
self.strategy.long_pos = self.long_pos
self.strategy.short_pos = self.short_pos
self.strategy.long_profit = long_profit
self.strategy.short_profit = short_profit
self.strategy.balance = self.capital + self.long_profit_total + self.short_profit_total
def load_bar(
self, vt_symbol: str, days: int, interval: Interval, callback: Callable
):
''''''
self.days = days
self.callback = callback
def load_tick(self, vt_symbol: str, days: int, callback: Callable):
''''''
self.days = days
self.callback = callback
def send_order(self, vt_symbol, direction: Direction, offset: Offset, price: float, volume: float, stop: bool, line: bool, lock: bool, strategy: CtaTemplate, order_type: OrderType):
"""
Send an order
"""
# Round price and volume to the minimum price tick / volume step
price = round_to(price, self.price_tick)
volume = round_to(volume, 1)
# Filter out invalid order price or volume
if not price or not volume:
return []
# When closing, return directly if the position is zero
if offset == Offset.CLOSE:
if self.strategy.pos == 0:
return []
if stop:
vt_orderid = self.send_stop_order(vt_symbol,direction, offset, price, volume,self,OrderType.STOP)
else:
vt_orderid = self.send_limit_order(vt_symbol,direction, offset, price, volume,self,OrderType.LIMIT)
return [vt_orderid]
def send_stop_order(self, vt_symbol, direction: Direction, offset: Offset, price: float, volume: float, strategy: CtaTemplate, order_type: OrderType):
"""
Send a local stop order
"""
self.stop_order_count += 1
stop_order = StopOrder(
vt_symbol=self.vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop_orderid=f'{STOPORDER_PREFIX}.{self.stop_order_count}',
strategy_name=self.strategy.strategy_name,
)
self.strategy.on_stop_order(stop_order)
self.active_stop_orders[stop_order.stop_orderid] = stop_order
self.stop_orders[stop_order.stop_orderid] = stop_order
return stop_order.stop_orderid
def send_limit_order(self, vt_symbol, direction: Direction, offset: Offset, price: float, volume: float, strategy: CtaTemplate, order_type: OrderType):
''''''
self.limit_order_count += 1
order = OrderData(
symbol=self.symbol,
exchange=self.exchange,
orderid=str(self.limit_order_count),
direction=direction,
offset=offset,
price=price,
volume=volume,
traded=volume,
status=Status.NOTTRADED,
gateway_name=self.gateway_name,
)
order.datetime = self.datetime
self.active_limit_orders[order.vt_orderid] = order
self.limit_orders[order.vt_orderid] = order
return order.vt_orderid
def cancel_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
Cancel an order by vt_orderid
"""
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_stop_order(strategy, vt_orderid)
else:
self.cancel_limit_order(strategy, vt_orderid)
def cancel_stop_order(self, strategy: CtaTemplate, vt_orderid: str):
''''''
if vt_orderid not in self.active_stop_orders:
return
stop_order = self.active_stop_orders.pop(vt_orderid)
stop_order.status = StopOrderStatus.CANCELLED
self.strategy.on_stop_order(stop_order)
def cancel_limit_order(self, strategy: CtaTemplate, vt_orderid: str):
''''''
if vt_orderid not in self.active_limit_orders:
return
order = self.active_limit_orders.pop(vt_orderid)
order.status = Status.CANCELLED
self.strategy.on_order(order)
def cancel_all(self, strategy: CtaTemplate):
'''
Cancel all orders, both limit and stop.
'''
vt_orderids = list(self.active_limit_orders.keys())
for vt_orderid in vt_orderids:
self.cancel_limit_order(strategy, vt_orderid)
stop_orderids = list(self.active_stop_orders.keys())
for vt_orderid in stop_orderids:
self.cancel_stop_order(strategy, vt_orderid)
def write_log(self, msg: str, strategy: CtaTemplate = None):
"""
Write log message.
"""
msg = '{0}\t{1}'.format(self.datetime,msg)
self.logs.append(msg)
def send_email(self, msg: str, strategy: CtaTemplate = None):
'''
Send email to default receiver.
'''
pass
def sync_strategy_data(self, strategy: CtaTemplate = None):
pass
def get_engine_type(self):
'''
Return engine type.
'''
return self.engine_type
def put_strategy_event(self, strategy: CtaTemplate):
'''
Put an event to update strategy status.
'''
pass
def output(self, msg):
'''
Output message of backtesting engine.
'''
print(f'{datetime.now()}\t{msg}')
def get_all_trades(self):
"""
Return all trade data of current backtesting result.
"""
return list(self.trades.values())
def get_all_orders(self):
"""
Return all limit order data of current backtesting result.
"""
return list(self.limit_orders.values())
def get_all_daily_results(self):
"""
Return all daily result data.
"""
return list(self.daily_results.values())
class DailyResult:
''''''
def __init__(self, date: date, close_price: float):
''''''
self.date = date
self.close_price = close_price
self.pre_close = 0
self.trades = []
self.trade_count = 0
self.start_pos = 0
self.end_pos = 0
self.turnover = 0
self.commission = 0
self.slippage = 0
self.trading_pnl = 0
self.holding_pnl = 0
self.total_pnl = 0
self.net_pnl = 0
def add_trade(self, trade: TradeData):
''''''
self.trades.append(trade)
def calculate_pnl(
self,
pre_close: float,
start_pos: float,
size: int,
rate: float,
slippage: float,
):
''''''
self.pre_close = pre_close
# Holding pnl is the pnl from holding position at day start
self.start_pos = start_pos
self.end_pos = start_pos
self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size
# Trading pnl is the pnl from new trade during the day
self.trade_count = len(self.trades)
for trade in self.trades:
if trade.direction == Direction.LONG:
pos_change = trade.volume
else:
pos_change = -trade.volume
turnover = trade.price * trade.volume * size
self.trading_pnl += pos_change * (self.close_price - trade.price) * size
self.end_pos += pos_change
self.turnover += turnover
self.commission += turnover * rate
self.slippage += trade.volume * size * slippage
# Net pnl takes account of commission and slippage cost
self.total_pnl = self.trading_pnl + self.holding_pnl
self.net_pnl = self.total_pnl - self.commission - self.slippage
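# Illustrative worked example of the PnL bookkeeping above (made-up numbers,
# not part of the engine, included only to show the arithmetic):
#   start_pos = 2, pre_close = 100, close_price = 103, size = 10
#     holding_pnl = 2 * (103 - 100) * 10 = 60
#   one long trade of volume 1 at price 101:
#     pos_change  = +1
#     trading_pnl = 1 * (103 - 101) * 10 = 20
#     turnover    = 101 * 1 * 10 = 1010
#     commission  = 1010 * rate              (rate = 0.0001 -> 0.101)
#     slippage    = 1 * 10 * slippage_value  (slippage_value = 0.2 -> 2.0)
#   total_pnl = 60 + 20 = 80
#   net_pnl   = 80 - 0.101 - 2.0 = 77.899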
def optimize(
target_name: str,
strategy_class: CtaTemplate,
setting: dict,
vt_symbol: str,
start: datetime,
rate: float,
slippage: float,
size: float,
price_tick: float,
capital: int,
end: datetime,
mode: BacktestingMode
):
'''
Function for running in multiprocessing.pool
'''
engine = BacktestingEngine()
engine.clear_data()
engine.set_parameters(
vt_symbol=vt_symbol,
start=start,
rate=rate,
slippage=slippage,
size=size,
price_tick=price_tick,
capital=capital,
end=end,
mode=mode
)
engine.add_strategy(strategy_class, setting)
engine.load_data()
engine.run_backtesting()
daily_df = engine.calculate_result()
statistics = engine.calculate_statistics(daily_df,write_result=False)
target_value = statistics[target_name]
return (str(setting), target_value, statistics)
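# The docstring above notes that `optimize` is meant to be driven from a
# multiprocessing pool. The helper below is a minimal, hypothetical sketch of
# such a driver; it is not called anywhere in this module, and the helper name
# and argument handling are assumptions rather than part of the original code.
def _run_optimize_in_pool_sketch(
    target_name, strategy_class, settings, vt_symbol, start, rate,
    slippage, size, price_tick, capital, end, mode
):
    """Evaluate a list of parameter dicts in parallel with `optimize` and
    return the results sorted by the target statistic (best first)."""
    from multiprocessing import Pool, cpu_count
    args = [
        (target_name, strategy_class, setting, vt_symbol, start, rate,
         slippage, size, price_tick, capital, end, mode)
        for setting in settings
    ]
    with Pool(cpu_count()) as pool:
        results = pool.starmap(optimize, args)
    return sorted(results, key=lambda r: r[1], reverse=True)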
@lru_cache(maxsize=1000000)
def _ga_optimize(parameter_values: tuple):
''''''
setting = dict(parameter_values)
result = optimize(
ga_target_name,
ga_strategy_class,
setting,
ga_vt_symbol,
ga_start,
ga_rate,
ga_slippage,
ga_size,
ga_price_tick,
ga_capital,
ga_end,
ga_mode
)
return (result[1],)
def ga_optimize(parameter_values: list):
''''''
return _ga_optimize(tuple(parameter_values))
@lru_cache(maxsize=10)
def load_bar_data(
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
):
"""bar数据redis序列化存取"""
file_name = f"{symbol}_{exchange.value}_{start.date()}_{end.date()}_bar"
redis_data = REDIS_CLIENT.hget(file_name, file_name)
if not redis_data:
bar_data = database_manager.load_bar_data( symbol, exchange, interval, start, end )
REDIS_CLIENT.hset(file_name, file_name, zlib.compress(pickle.dumps(bar_data), 5))
else:
bar_data = pickle.loads(zlib.decompress(redis_data))
return bar_data
"""数据缓存为pkl格式到本地硬盘"""
""" dir_path = f"H:\\pickle_data\\"
file_name = f"{symbol}_{exchange.value}_{start.date()}_{end.date()}_bar"
pickle_path = dir_path + file_name + ".pkl"
data_size =0
if not os.path.exists(pickle_path):
bar_data = database_manager.load_bar_data( symbol, exchange, interval, start, end )
pickle_file = open(pickle_path,'wb')
pickle.dump(bar_data,pickle_file)
pickle_file.close()
else:
pickle_file = open(pickle_path,'rb')
bar_data =pickle.load(pickle_file)
pickle_file.close()
# Delete cached data when the pickle_data folder exceeds 50GB
for dirpath, dirnames, filenames in os.walk(dir_path):
for file_name in filenames: # all file names in the current directory
data_size += os.path.getsize(dirpath + file_name)
if data_size / (1024 ** 3) > 50:
for dirpath, dirnames, filenames in os.walk(dir_path):
for file_name in filenames:
os.remove(dirpath + file_name)
return bar_data"""
@lru_cache(maxsize=10)
def load_tick_data(
symbol: str,
exchange: Exchange,
start: datetime,
end: datetime
):
"""tick数据redis序列化存取"""
file_name = f"{symbol}_{exchange.value}_{start.date()}_{end.date()}_tick"
redis_data = REDIS_CLIENT.hget(file_name, file_name)
if not redis_data:
tick_data = database_manager.load_tick_data( symbol, exchange, start, end )
REDIS_CLIENT.hset(file_name, file_name, zlib.compress(pickle.dumps(tick_data), 5))
else:
tick_data = pickle.loads(zlib.decompress(redis_data))
return tick_data
"""数据缓存为pkl格式到本地硬盘"""
""" dir_path = f"H:\\pickle_data\\"
file_name = f"{symbol}_{exchange.value}_{start.date()}_{end.date()}_tick"
pickle_path = dir_path + file_name + ".pkl"
data_size =0
if not os.path.exists(pickle_path):
tick_data = database_manager.load_tick_data( symbol, exchange, start, end )
pickle_file = open(pickle_path,'wb')
pickle.dump(tick_data,pickle_file)
pickle_file.close()
else:
pickle_file = open(pickle_path,'rb')
tick_data =pickle.load(pickle_file)
pickle_file.close()
# Delete cached data when the pickle_data folder exceeds 50GB
for dirpath, dirnames, filenames in os.walk(dir_path):
for file_name in filenames: # all file names in the current directory
data_size += os.path.getsize(dirpath + file_name)
if data_size / (1024 ** 3) > 50:
for dirpath, dirnames, filenames in os.walk(dir_path):
for file_name in filenames:
os.remove(dirpath + file_name)
return tick_data"""
# GA related global value
ga_end = None
ga_mode = None
ga_target_name = None
ga_strategy_class = None
ga_setting = None
ga_vt_symbol = None
ga_interval = None
ga_start = None
ga_rate = None
ga_slippage = None
ga_size = None
ga_price_tick = None
ga_capital = None
|
# %% Imports
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import r2_score, roc_curve, roc_auc_score
from itertools import product
# Import to add project folder to sys path
import sys
utils_path = '/Users/Ryan/Documents/projects/ncaa-basketball/code/utils'
if utils_path not in sys.path:
sys.path.append(utils_path)
from api import opponentAdjust
from db import get_db
from misc import getRelativeFp
# %% Declare parameters
metrics = [
'PF', 'Margin',
'FGM', 'FGA',
'FG3M', 'FG3A',
'FG2M', 'FG2A',
'FTA', 'FTM',
'Ast', 'ORB',
'DRB', 'TRB',
'TO', 'Stl',
'Blk', 'Foul'
]
prefixes = ['Tm', 'Opp']
attrCols = [
'Season',
'TmName',
'dataPriorTo',
'isRegularSeason',
''
]
# %% Read in raw data
def getTd():
"""Reads in local csv of team-date data as of specified dates
Returns:
DataFrame: team-date data
"""
firstYr = True
for yr in range(2003, 2021):
print(f"Reading in {yr}'s data")
fp = getRelativeFp(__file__, f'../data/predate_games/{yr}.csv')
if firstYr:
raw = pd.read_csv(fp)
firstYr = False
else:
raw = raw.append(pd.read_csv(fp))
return raw
# %% Add additional columns to team-date data
def addColsToTd(_td, _metrics=metrics, _prefixes=prefixes):
"""Add additional columns to the team-date DataFrame
Args:
_td (DataFrame): Team-date DataFrame
_metrics (list, optional): list of metrics to pass to opponent-
adjusting function. Defaults to metrics.
_prefixes (list, optional): list of prefixes to pass to opponent-
adjusting function. Defaults to prefixes.
Returns:
DataFrame: Team-date data with additional columns
"""
_td = opponentAdjust(
_td,
_prefixes,
_metrics,
includeOARankFields=False,
includeNormRankFields=False,
includeNormFields=True
)
_td['TmWinPct'] = _td['TmWin'] / _td['TmGame']
_td['TmPossPerGame'] = _td['TmPoss'] / _td['TmGame']
return _td
# %% Clean team-date data
def cleanTd(_td, minGames=10, _metrics=metrics, _prefixes=prefixes):
"""Removes extra columns, removes data prior to specified number of games,
changes datetime data type
Args:
_td (DataFrame): team-date data
minGames (int, optional): Minimum games played to keep data.
Defaults to 10.
_metrics (list, optional): Metrics to drop raw data of.
Defaults to metrics.
_prefixes (list, optional): Prefixes to drop raw data of.
Defaults to prefixes.
Returns:
DataFrame: Cleaned data
"""
# Make list of columns to ignore
colsToDrop = [
'OppGame',
'GameOT'
]
for metr in _metrics + ['Mins', 'Win', 'Poss']:
for pref in _prefixes:
colsToDrop.append(f'{pref}{metr}')
colsToDrop.append(f'OppSum_{pref}{metr}')
keptCols = [
col for col in _td.columns
if (col not in colsToDrop) & (col[:7] != 'OppSum_')
]
_td = _td[keptCols]
# Limit team-dates to only those with >= minGames
_td = _td.loc[_td['TmGame'] >= minGames]
# Change field to datetime
_td['dataPriorTo'] = pd.to_datetime(_td['dataPriorTo'])
return _td
# %% Get game data for analysis
def getGames():
"""Get 2003+ game data from database
Returns:
DataFrame: Game data
"""
db = get_db()
q = {'Season': {'$gte': 2003}}
fields = {
'_id': 0,
'TmName': 1,
'OppName': 1,
'Season': 1,
'GameDate': 1,
'TmMargin': 1,
'GameVegasLine': 1,
'TmLoc': 1
}
raw = pd.DataFrame(
list(
db.games.find(
q,
fields
)
)
)
return raw
# %% De-dupe, remove outliers
def cleanGames(_games):
"""De-dupes game records, keeping only home teams and lower-name neutral games,
rename a few columns, remove outlier lines, and change datatypes.
Args:
_games (DataFrame): game data
Returns:
DataFrame: Cleaned game data
"""
# _games = _games.loc[(
# (_games['TmLoc'] == 'H') |
# ((_games['TmLoc'] == 'N') & (_games['TmName'] < _games['OppName']))
# )]
_games = _games.loc[np.abs(_games['GameVegasLine']) <= 50]
_games.rename(inplace=True, columns={
'GameDate': 'dataPriorTo'
})
_games['dataPriorTo'] = pd.to_datetime(_games['dataPriorTo'])
_games['Tm_isHome'] = np.where(_games['TmLoc'] == 'H', 1, 0)
return _games
# %% Add add'l columns to games
def addColsToGames(_games):
# GameVegasLine is the expected Tm margin. Positive means vegas favored Tm.
# If TmVegasMargin > 0: TmMargin > GameVegasLine: Team outperformed vegas - bet on team.
# If TmVegasMargin < 0: TmMargin < GameVegasLine, team did worse than vegas expected - bet on Opp
_games['tmVegasMargin'] = _games['TmMargin'] - _games['GameVegasLine']
_games['tmAtsWinner'] = (_games['tmVegasMargin'] > 0) * 1
return _games
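# Tiny illustrative example of the two columns added above (hypothetical
# numbers, not real games): a team favored by 5 (GameVegasLine = +5) that wins
# by 7 covers the spread, while one that wins by only 3 does not.
def _vegas_margin_toy_example():
    toy = pd.DataFrame({'TmMargin': [7, 3, -2], 'GameVegasLine': [5, 5, -4]})
    toy['tmVegasMargin'] = toy['TmMargin'] - toy['GameVegasLine']
    toy['tmAtsWinner'] = (toy['tmVegasMargin'] > 0) * 1
    return toy  # tmVegasMargin: [2, -2, 2] -> tmAtsWinner: [1, 0, 1]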
# %% Merge in team-date data to game records
def addTdToGames(_td, _games):
tdCols = ['TmName', 'Season', 'dataPriorTo']
dfTm = _td.copy()
dfTm.columns = [
f'Tm_{x}' if x not in tdCols else x for x in dfTm.columns
]
dfOpp = _td.copy()
dfOpp.columns = [
f'Opp_{x}' if x not in tdCols else x for x in dfOpp.columns
]
dfOpp.rename(inplace=True, columns={'TmName': 'OppName'})
_games = pd.merge(
left=_games,
right=dfTm,
on=['TmName', 'Season', 'dataPriorTo'],
how='inner'
)
_games = pd.merge(
left=_games,
right=dfOpp,
on=['OppName', 'Season', 'dataPriorTo'],
how='inner'
)
return _games
# %% Eval margin predictions
def evalModel(predMargin, actualMargin, methodName, verbose=False):
print(f"{methodName}:")
# Accuracy of margin
R2 = r2_score(
y_true=actualMargin,
y_pred=predMargin
)
MAE = np.mean(np.abs(actualMargin - predMargin))
# Correctly picking winner
sumWins = np.sum(
(predMargin * actualMargin > 0)*1
)
sumLosses = np.sum(
(predMargin * actualMargin < 0)*1
)
sumTies = np.sum(
(predMargin * actualMargin == 0)*1
)
if verbose:
print(f"MAE: {MAE:.2f}")
print(f"R^2: {R2:.4f}")
print(f"Correct Winner Record: {sumWins:,.0f} - {sumLosses:,.0f} - {sumTies:,.0f}")
print(f"Win Pct: {sumWins/len(predMargin):.3%}")
print(f"Win Pct excluding pushes: {sumWins/(sumWins + sumLosses):.3%}")
print('\n')
return R2, MAE
# %% Create function for classification model
def evalClassificationModel(predClass, actualClass, isPush, methodName, predProb, showCurve=False, verbose=False):
print(f"{methodName}:")
predWin = ((predClass == actualClass) & (isPush == 0))*1
predLoss = ((predClass != actualClass) & (isPush == 0))*1
predPush = (isPush == 1)*1
w = np.sum(predWin)/(np.sum(predWin)+np.sum(predLoss))
b_auc = roc_auc_score(actualClass, predClass)
p_auc = roc_auc_score(actualClass, predProb)
if verbose:
print(f"Record: {np.sum(predWin)} - {np.sum(predLoss)} - {np.sum(predPush)}")
print(f"Net wins: {np.sum(predWin) - np.sum(predLoss)}")
print(f"Win Percent: {w:.2%}")
print(f"Binary AUC: {b_auc:.2%}")
print(f"Probability AUC: {p_auc:.2%}")
if showCurve:
fpr, tpr, thresholds = roc_curve(actualClass, predProb)
fig = go.Figure(go.Scatter(x=fpr, y=tpr))
fig.show()
return w, b_auc, p_auc
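# Quick sanity reference for the AUC values reported above (toy labels and
# scores, unrelated to the basketball data): AUC is the probability that a
# randomly chosen positive is scored higher than a randomly chosen negative.
def _auc_toy_example():
    y_true = [0, 0, 1, 1]
    y_score = [0.1, 0.4, 0.35, 0.8]
    # 3 of the 4 positive/negative pairs are ranked correctly -> AUC = 0.75
    return roc_auc_score(y_true, y_score)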
# %% Main function
if __name__ == '__main__':
# Get team-date data
td = addColsToTd(getTd())
td = cleanTd(td)
# Get games data, add team details
games = cleanGames(getGames())
games = addColsToGames(games)
gamesCols = games.columns
games = addTdToGames(td, games)
gamesX = games[[col for col in games.columns if col not in gamesCols]]
gamesy = games[gamesCols]
####################################
## Predicting Game Margin
####################################
# Set baseline: Vegas lines to predict margin
evalModel(
predMargin=gamesy['GameVegasLine'],
actualMargin=gamesy['TmMargin'],
methodName='Vegas',
verbose=True
)
# Now, using Marginper40 diffs
evalModel(
predMargin=gamesX['Tm_TmMarginper40'] - gamesX['Opp_TmMarginper40'],
actualMargin=gamesy['TmMargin'],
methodName='Margin per 40 Difference',
verbose=True
)
# OA Margin per 40
evalModel(
predMargin=gamesX['Tm_OA_TmMarginper40'] - gamesX['Opp_OA_TmMarginper40'],
actualMargin=gamesy['TmMargin'],
methodName='OA Margin per 40 Difference',
verbose=True
)
# Now, adding a few different home court advantages to margin and OAM
buffs = [a/2 for a in range(-5,16)]
mov_r= []
mov_m = []
oa_r = []
oa_m = []
for b in buffs:
# print(a/2)
r, m = evalModel(
predMargin=gamesX['Tm_TmMarginper40'] - gamesX['Opp_TmMarginper40'] + b* gamesy['Tm_isHome'],
actualMargin=gamesy['TmMargin'],
methodName=f'Margin per 40 Difference + {b}'
)
mov_r.append(r)
mov_m.append(m)
r, m = evalModel(
predMargin=gamesX['Tm_OA_TmMarginper40'] - gamesX['Opp_OA_TmMarginper40'] + b* gamesy['Tm_isHome'],
actualMargin=gamesy['TmMargin'],
methodName=f'OA Margin per 40 Difference + {b}'
)
oa_r.append(r)
oa_m.append(m)
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, subplot_titles=('R2 by Home Court Advantage', 'MAE by Home Court Advantage'))
fig.add_trace(
go.Scatter(x=buffs,y=mov_r, name='MoV - R2', legendgroup='MOV', line_color='steelblue'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_r, name='OA MoV - R2', legendgroup='OA', line_color='crimson'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=mov_m, name='MoV - MAE', legendgroup='MOV', line_color='steelblue'),
row=2, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_m, name='OA MoV - MAE', legendgroup='OA', line_color='crimson'),
row=2, col=1
)
fig.add_trace(
go.Scatter(x=buffs, y=[0.3504]*len(buffs), name='Vegas - R2', legendgroup='v', line_color='black'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs, y=[8.31]*len(buffs), name='Vegas - MAE', legendgroup='v', line_color='black'),
row=2, col=1
)
fig.update_xaxes(
title_text='Home Court Advantage Value',
row=2
)
fig.update_yaxes(title_text='R2', row=1)
fig.update_yaxes(title_text='MAE', row=2)
fig.show()
# Machine learning section
rf = RandomForestRegressor(
n_estimators=500,
n_jobs=-1,
max_depth=10,
random_state=114492,
min_samples_split=6,
max_features='sqrt',
bootstrap=True,
oob_score=False,
max_samples=0.8,
# verbose=True
)
temp = pd.DataFrame()
for s in sorted(gamesy['Season'].unique()):
print(f"Season {s}")
rf.fit(gamesX.loc[gamesy['Season'] != s], gamesy.loc[gamesy['Season'] != s, 'TmMargin'])
# gb.predict(gamesX.loc[gamesy['Season'] == s)
t = pd.DataFrame(data={
'predMargin': rf.predict(gamesX.loc[gamesy['Season'] == s]),
'actlMargin': gamesy.loc[gamesy['Season'] == s, 'TmMargin'],
'season': s
})
temp = temp.append(t)
evalModel(
predMargin=t['predMargin'],
actualMargin=t['actlMargin'],
methodName=f'Season {s} RF Results',
verbose=True
)
evalModel(
predMargin=temp['predMargin'],
actualMargin=temp['actlMargin'],
methodName=f'Total RF Results',
verbose=True
)
df = temp.groupby(['season'])
##############
## ATS
##############
# Now, adding a few different home court advantages to margin and OAM
buffs = [a/2 for a in range(-50,50)]
mov_w = []
mov_bauc = []
mov_pauc = []
oa_w = []
oa_bauc = []
oa_pauc = []
for b in buffs:
# print(a/2)
# Check versus vegas
# vegasMinusModel (+) when vegas likes Tm better than model (model bet on Opp)
# vegasMinusModel (-) when vegas likes Tm worse than model (model bet on Tm)
predMargin = gamesX['Tm_TmMarginper40'] - gamesX['Opp_TmMarginper40'] + b* gamesy['Tm_isHome']
vegasMinusModel = gamesy['GameVegasLine'] - predMargin
w, bauc, pauc = evalClassificationModel(
predClass=(vegasMinusModel <= 0) * 1,
actualClass=gamesy['tmAtsWinner'],
isPush=(gamesy['tmVegasMargin'] == 0)*1,
predProb=vegasMinusModel,
methodName=f'Margin per 40 Difference + {b}'
# showCurve=True
)
mov_w.append(w)
mov_bauc.append(bauc)
mov_pauc.append(pauc)
predMargin = gamesX['Tm_OA_TmMarginper40'] - gamesX['Opp_OA_TmMarginper40'] + b* gamesy['Tm_isHome']
vegasMinusModel = gamesy['GameVegasLine'] - predMargin
w, bauc, pauc = evalClassificationModel(
predClass=(vegasMinusModel <= 0) * 1,
actualClass=gamesy['tmAtsWinner'],
isPush=(gamesy['tmVegasMargin'] == 0)*1,
predProb=vegasMinusModel,
methodName=f'Margin per 40 Difference + {b}'
# showCurve=True
)
oa_w.append(w)
oa_bauc.append(bauc)
oa_pauc.append(pauc)
fig = make_subplots(rows=3, cols=1, shared_xaxes=True, subplot_titles=('Win Pct by Home Court Advantage', 'Binary AUC by HCA', 'Probability AUC by HCA'))
fig.add_trace(
go.Scatter(x=buffs,y=mov_w, name='MoV - Win Pct', legendgroup='MOV', line_color='steelblue'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_w, name='OA MoV - Win Pct', legendgroup='OA', line_color='crimson'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=mov_bauc, name='MoV - Binary AUC', legendgroup='MOV', line_color='steelblue'),
row=2, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_bauc, name='OA MoV - Binary AUC', legendgroup='OA', line_color='crimson'),
row=2, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=mov_pauc, name='MoV - Probability AUC', legendgroup='MOV', line_color='steelblue'),
row=3, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_pauc, name='OA MoV - Probability AUC', legendgroup='OA', line_color='crimson'),
row=3, col=1
)
fig.update_xaxes(
title_text='Home Court Advantage Value',
row=3
)
fig.update_yaxes(title_text='Win Pct', row=1)
fig.update_yaxes(title_text='AUC', row=2)
fig.update_yaxes(title_text='AUC', row=3)
fig.show()
# Try Classification based on if Tm wins ATS
gamesX['GameVegasLine'] = gamesy['GameVegasLine']
# Train the winning parameters: 10 depth, 6 min samples
rfc = RandomForestClassifier(
n_estimators=500,
criterion='gini',
max_depth=5,
min_samples_split=2,
max_features='sqrt',
bootstrap=True,
# oob_score=True,
n_jobs=-1,
random_state=114492,
# verbose=True,
max_samples=.8
)
temp = pd.DataFrame()
for s in sorted(gamesy['Season'].unique()):
print(f"Season {s}")
rfc.fit(gamesX.loc[gamesy['Season'] != s], gamesy.loc[gamesy['Season'] != s, 'tmAtsWinner'])
# gb.predict(gamesX.loc[gamesy['Season'] == s)
t = pd.DataFrame(data={
'predClass': rfc.predict(gamesX.loc[gamesy['Season'] == s]),
'predProb': rfc.predict_proba(gamesX.loc[gamesy['Season'] == s])[:,1],
'actlClass': gamesy.loc[gamesy['Season'] == s, 'tmAtsWinner'],
'isPush': ((gamesy.loc[gamesy['Season'] == s, 'tmVegasMargin'])==0)*1
})
temp = temp.append(t)
evalClassificationModel(
predClass=t['predClass'],
actualClass=t['actlClass'],
isPush=t['isPush'],
predProb=t['predProb'],
methodName=f'{s} Season Results',
# showCurve=True
)
evalClassificationModel(
predClass=temp['predClass'],
actualClass=temp['actlClass'],
isPush=temp['isPush'],
predProb=temp['predProb'],
methodName=f'rfc All Season Results',
showCurve=True
)
# Gradient Boosting
gb = GradientBoostingClassifier(
n_estimators=20,
max_depth=3,
random_state=114492,
verbose=True
)
temp = pd.DataFrame()
for s in sorted(gamesy['Season'].unique()):
print(f"Season {s}")
gb.fit(gamesX.loc[gamesy['Season'] != s], gamesy.loc[gamesy['Season'] != s, 'tmAtsWinner'])
# gb.predict(gamesX.loc[gamesy['Season'] == s)
t = pd.DataFrame(data={
'predClass': gb.predict(gamesX.loc[gamesy['Season'] == s]),
'predProb': gb.predict_proba(gamesX.loc[gamesy['Season'] == s])[:,1],
'actlClass': gamesy.loc[gamesy['Season'] == s, 'tmAtsWinner'],
'isPush': ((gamesy.loc[gamesy['Season'] == s, 'tmVegasMargin'])==0)*1
})
temp = temp.append(t)
evalClassificationModel(
predClass=t['predClass'],
actualClass=t['actlClass'],
isPush=t['isPush'],
predProb=t['predProb'],
methodName=f'{s} Season Results',
# showCurve=True
)
evalClassificationModel(
predClass=temp['predClass'],
actualClass=temp['actlClass'],
isPush=temp['isPush'],
predProb=temp['predProb'],
methodName=f'GB All Season Results',
showCurve=True,
verbose=True
)
# print(f"RF Classifier Correct: {np.mean(gamesy["rfc_Correct"]):.2%}")
# games['rfc_correctCumSum'] = np.where(
# games['rfc_betOnTm'] == games['tmAtsWinner'],
# 1,
# -1
# )
# games.sort_values(inplace=True, by=['dataPriorTo'])
# games['modelDateCumSum'] = games['rfc_correctCumSum'].cumsum()
# fig = px.line(games, x='dataPriorTo', y='modelDateCumSum')
# fig.show()
# gamesy['probRounded'] = np.round(rfc.oob_decision_function_[:,1],3)
# temp2 = gamesy.groupby(['probRounded'])['rfc_correctCumSum'].agg({'rfc_correctCumSum': ['sum', 'count']}).reset_index()
# temp2.columns = ['probRounded', 'rfc_correctCumSum', 'recordCnt']
# fig = px.bar(temp2, x='probRounded', y='rfc_correctCumSum')
# fig.update_layout(
# title_text=f"Performance of {predictionCol} by {col}"
# )
# gamesy['placeBet'] = (gamesy['probRounded']<= 0.5)*1
# games['smartWinner'] = games['placeBet']*games['rfc_correctCumSum']
# print(f"Smart logic correct: {np.sum(games["smartWinner"]):.0f}, win pct = {np.sum(games["smartWinner"]==1)/np.sum(games["placeBet"]):.2%}")
# temp = games[['tmAtsWinner', 'rfc_tmAtsWinner_1','rfc_betOnTm']]
# %%
gb = GradientBoostingClassifier(
n_estimators=150,
max_depth=3,
random_state=114492,
verbose=True
)
gb.fit(gamesX, games['tmAtsWinner'])
gamesy['rfc_betOnTm'] = gb.predict(gamesX)
gamesy['rfc_Win'] = ((gamesy['rfc_betOnTm'] == gamesy['tmAtsWinner']) & (gamesy['tmVegasMargin'] != 0))*1
gamesy['rfc_Loss'] = ((gamesy['rfc_betOnTm'] != gamesy['tmAtsWinner']) & (gamesy['tmVegasMargin'] != 0))*1
gamesy['rfc_Push'] = (gamesy['tmVegasMargin'] == 0)*1
print(f"Record: {np.sum(gamesy["rfc_Win"])} - {np.sum(gamesy["rfc_Loss"])} - {np.sum(gamesy["rfc_Push"])}")
print(f"Net wins: {np.sum(gamesy["rfc_Win"]) - np.sum(gamesy["rfc_Loss"])}")
print(f"Win Percent: {np.sum(gamesy["rfc_Win"])/(np.sum(gamesy["rfc_Win"])+np.sum(gamesy["rfc_Loss"])):.2%}")
# %%
|
# %% Imports
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import r2_score, roc_curve, roc_auc_score
from itertools import product
# Import to add project folder to sys path
import sys
utils_path = '/Users/Ryan/Documents/projects/ncaa-basketball/code/utils'
if utils_path not in sys.path:
sys.path.append(utils_path)
from api import opponentAdjust
from db import get_db
from misc import getRelativeFp
# %% Declare parameters
metrics = [
'PF', 'Margin',
'FGM', 'FGA',
'FG3M', 'FG3A',
'FG2M', 'FG2A',
'FTA', 'FTM',
'Ast', 'ORB',
'DRB', 'TRB',
'TO', 'Stl',
'Blk', 'Foul'
]
prefixes = ['Tm', 'Opp']
attrCols = [
'Season',
'TmName',
'dataPriorTo',
'isRegularSeason',
''
]
# %% Read in raw data
def getTd():
"""Reads in local csv of team-date data as of specified dates
Returns:
DataFrame: team-date data
"""
firstYr = True
for yr in range(2003, 2021):
print(f"Reading in {yr}'s data")
fp = getRelativeFp(__file__, f'../data/predate_games/{yr}.csv')
if firstYr:
raw = pd.read_csv(fp)
firstYr = False
else:
raw = raw.append(pd.read_csv(fp))
return raw
# %% Add additional columns to team-date data
def addColsToTd(_td, _metrics=metrics, _prefixes=prefixes):
"""Add additional columns to the team-date DataFrame
Args:
_td (DataFrame): Team-date DataFrame
_metrics (list, optional): list of metrics to pass to opponent-
adjusting function. Defaults to metrics.
_prefixes (list, optional): list of prefixes to pass to opponent-
adjusting function. Defaults to prefixes.
Returns:
DataFrame: Team-date data with additional columns
"""
_td = opponentAdjust(
_td,
_prefixes,
_metrics,
includeOARankFields=False,
includeNormRankFields=False,
includeNormFields=True
)
_td['TmWinPct'] = _td['TmWin'] / _td['TmGame']
_td['TmPossPerGame'] = _td['TmPoss'] / _td['TmGame']
return _td
# %% Clean team-date data
def cleanTd(_td, minGames=10, _metrics=metrics, _prefixes=prefixes):
"""Removes extra columns, removes data prior to specified number of games,
changes datetime data type
Args:
_td (DataFrame): team-date data
minGames (int, optional): Minimum games played to keep data.
Defaults to 10.
_metrics (list, optional): Metrics to drop raw data of.
Defaults to metrics.
_prefixes (list, optional): Prefixes to drop raw data of.
Defaults to prefixes.
Returns:
DataFrame: Cleaned data
"""
# Make list of columns to ignore
colsToDrop = [
'OppGame',
'GameOT'
]
for metr in _metrics + ['Mins', 'Win', 'Poss']:
for pref in _prefixes:
colsToDrop.append(f'{pref}{metr}')
colsToDrop.append(f'OppSum_{pref}{metr}')
keptCols = [
col for col in _td.columns
if (col not in colsToDrop) & (col[:7] != 'OppSum_')
]
_td = _td[keptCols]
# Limit team-dates to only those with >= minGames
_td = _td.loc[_td['TmGame'] >= minGames]
# Change field to datetime
_td['dataPriorTo'] = pd.to_datetime(_td['dataPriorTo'])
return _td
# %% Get game data for analysis
def getGames():
"""Get 2003+ game data from database
Returns:
DataFrame: Game data
"""
db = get_db()
q = {'Season': {'$gte': 2003}}
fields = {
'_id': 0,
'TmName': 1,
'OppName': 1,
'Season': 1,
'GameDate': 1,
'TmMargin': 1,
'GameVegasLine': 1,
'TmLoc': 1
}
raw = pd.DataFrame(
list(
db.games.find(
q,
fields
)
)
)
return raw
# %% De-dupe, remove outliers
def cleanGames(_games):
"""De-dupes game records, keeping only home teams and lower-name neutral games,
rename a few columns, remove outlier lines, and change datatypes.
Args:
_games (DataFrame): game data
Returns:
DataFrame: Cleaned game data
"""
# _games = _games.loc[(
# (_games['TmLoc'] == 'H') |
# ((_games['TmLoc'] == 'N') & (_games['TmName'] < _games['OppName']))
# )]
_games = _games.loc[np.abs(_games['GameVegasLine']) <= 50]
_games.rename(inplace=True, columns={
'GameDate': 'dataPriorTo'
})
_games['dataPriorTo'] = pd.to_datetime(_games['dataPriorTo'])
_games['Tm_isHome'] = np.where(_games['TmLoc'] == 'H', 1, 0)
return _games
# %% Add add'l columns to games
def addColsToGames(_games):
# GameVegasLine is the expected Tm margin. Positive means vegas favored Tm.
# If TmVegasMargin > 0: TmMargin > GameVegasLine: Team outperformed vegas - bet on team.
# If TmVegasMargin < 0: TmMargin < GameVegasLine, team did worse than vegas expected - bet on Opp
_games['tmVegasMargin'] = _games['TmMargin'] - _games['GameVegasLine']
_games['tmAtsWinner'] = (_games['tmVegasMargin'] > 0) * 1
return _games
# %% Merge in team-date data to game records
def addTdToGames(_td, _games):
tdCols = ['TmName', 'Season', 'dataPriorTo']
dfTm = _td.copy()
dfTm.columns = [
f'Tm_{x}' if x not in tdCols else x for x in dfTm.columns
]
dfOpp = _td.copy()
dfOpp.columns = [
f'Opp_{x}' if x not in tdCols else x for x in dfOpp.columns
]
dfOpp.rename(inplace=True, columns={'TmName': 'OppName'})
_games = pd.merge(
left=_games,
right=dfTm,
on=['TmName', 'Season', 'dataPriorTo'],
how='inner'
)
_games = pd.merge(
left=_games,
right=dfOpp,
on=['OppName', 'Season', 'dataPriorTo'],
how='inner'
)
return _games
# %% Eval margin predictions
def evalModel(predMargin, actualMargin, methodName, verbose=False):
print(f"{methodName}:")
# Accuracy of margin
R2 = r2_score(
y_true=actualMargin,
y_pred=predMargin
)
MAE = np.mean(np.abs(actualMargin - predMargin))
# Correctly picking winner
sumWins = np.sum(
(predMargin * actualMargin > 0)*1
)
sumLosses = np.sum(
(predMargin * actualMargin < 0)*1
)
sumTies = np.sum(
(predMargin * actualMargin == 0)*1
)
if verbose:
print(f"MAE: {MAE:.2f}")
print(f"R^2: {R2:.4f}")
print(f"Correct Winner Record: {sumWins:,.0f} - {sumLosses:,.0f} - {sumTies:,.0f}")
print(f"Win Pct: {sumWins/len(predMargin):.3%}")
print(f"Win Pct excluding pushes: {sumWins/(sumWins + sumLosses):.3%}")
print('\n')
return R2, MAE
# %% Create function for classification model
def evalClassificationModel(predClass, actualClass, isPush, methodName, predProb, showCurve=False, verbose=False):
print(f"{methodName}:")
predWin = ((predClass == actualClass) & (isPush == 0))*1
predLoss = ((predClass != actualClass) & (isPush == 0))*1
predPush = (isPush == 1)*1
w = np.sum(predWin)/(np.sum(predWin)+np.sum(predLoss))
b_auc = roc_auc_score(actualClass, predClass)
p_auc = roc_auc_score(actualClass, predProb)
if verbose:
print(f"Record: {np.sum(predWin)} - {np.sum(predLoss)} - {np.sum(predPush)}")
print(f"Net wins: {np.sum(predWin) - np.sum(predLoss)}")
print(f"Win Percent: {w:.2%}")
print(f"Binary AUC: {b_auc:.2%}")
print(f"Probability AUC: {p_auc:.2%}")
if showCurve:
fpr, tpr, thresholds = roc_curve(actualClass, predProb)
fig = go.Figure(go.Scatter(x=fpr, y=tpr))
fig.show()
return w, b_auc, p_auc
# %% Main function
if __name__ == '__main__':
# Get team-date data
td = addColsToTd(getTd())
td = cleanTd(td)
# Get games data, add team details
games = cleanGames(getGames())
games = addColsToGames(games)
gamesCols = games.columns
games = addTdToGames(td, games)
gamesX = games[[col for col in games.columns if col not in gamesCols]]
gamesy = games[gamesCols]
####################################
## Predicting Game Margin
####################################
# Set baseline: Vegas lines to predict margin
evalModel(
predMargin=gamesy['GameVegasLine'],
actualMargin=gamesy['TmMargin'],
methodName='Vegas',
verbose=True
)
# Now, using Marginper40 diffs
evalModel(
predMargin=gamesX['Tm_TmMarginper40'] - gamesX['Opp_TmMarginper40'],
actualMargin=gamesy['TmMargin'],
methodName='Margin per 40 Difference',
verbose=True
)
# OA Margin per 40
evalModel(
predMargin=gamesX['Tm_OA_TmMarginper40'] - gamesX['Opp_OA_TmMarginper40'],
actualMargin=gamesy['TmMargin'],
methodName='OA Margin per 40 Difference',
verbose=True
)
# Now, adding a few different home court advantages to margin and OAM
buffs = [a/2 for a in range(-5,16)]
mov_r= []
mov_m = []
oa_r = []
oa_m = []
for b in buffs:
# print(a/2)
r, m = evalModel(
predMargin=gamesX['Tm_TmMarginper40'] - gamesX['Opp_TmMarginper40'] + b* gamesy['Tm_isHome'],
actualMargin=gamesy['TmMargin'],
methodName=f'Margin per 40 Difference + {b}'
)
mov_r.append(r)
mov_m.append(m)
r, m = evalModel(
predMargin=gamesX['Tm_OA_TmMarginper40'] - gamesX['Opp_OA_TmMarginper40'] + b* gamesy['Tm_isHome'],
actualMargin=gamesy['TmMargin'],
methodName=f'OA Margin per 40 Difference + {b}'
)
oa_r.append(r)
oa_m.append(m)
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, subplot_titles=('R2 by Home Court Advantage', 'MAE by Home Court Advantage'))
fig.add_trace(
go.Scatter(x=buffs,y=mov_r, name='MoV - R2', legendgroup='MOV', line_color='steelblue'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_r, name='OA MoV - R2', legendgroup='OA', line_color='crimson'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=mov_m, name='MoV - MAE', legendgroup='MOV', line_color='steelblue'),
row=2, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_m, name='OA MoV - MAE', legendgroup='OA', line_color='crimson'),
row=2, col=1
)
fig.add_trace(
go.Scatter(x=buffs, y=[0.3504]*len(buffs), name='Vegas - R2', legendgroup='v', line_color='black'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs, y=[8.31]*len(buffs), name='Vegas - MAE', legendgroup='v', line_color='black'),
row=2, col=1
)
fig.update_xaxes(
title_text='Home Court Advantage Value',
row=2
)
fig.update_yaxes(title_text='R2', row=1)
fig.update_yaxes(title_text='MAE', row=2)
fig.show()
# Machine learning section
rf = RandomForestRegressor(
n_estimators=500,
n_jobs=-1,
max_depth=10,
random_state=114492,
min_samples_split=6,
max_features='sqrt',
bootstrap=True,
oob_score=False,
max_samples=0.8,
# verbose=True
)
temp = pd.DataFrame()
for s in sorted(gamesy['Season'].unique()):
print(f"Season {s}")
rf.fit(gamesX.loc[gamesy['Season'] != s], gamesy.loc[gamesy['Season'] != s, 'TmMargin'])
# gb.predict(gamesX.loc[gamesy['Season'] == s)
t = pd.DataFrame(data={
'predMargin': rf.predict(gamesX.loc[gamesy['Season'] == s]),
'actlMargin': gamesy.loc[gamesy['Season'] == s, 'TmMargin'],
'season': s
})
temp = temp.append(t)
evalModel(
predMargin=t['predMargin'],
actualMargin=t['actlMargin'],
methodName=f'Season {s} RF Results',
verbose=True
)
evalModel(
predMargin=temp['predMargin'],
actualMargin=temp['actlMargin'],
methodName=f'Total RF Results',
verbose=True
)
df = temp.groupby(['season'])
##############
## ATS
##############
# Now, adding a few different home court advantages to margin and OAM
buffs = [a/2 for a in range(-50,50)]
mov_w = []
mov_bauc = []
mov_pauc = []
oa_w = []
oa_bauc = []
oa_pauc = []
for b in buffs:
# print(a/2)
# Check versus vegas
# vegasMinusModel (+) when vegas likes Tm better than model (model bet on Opp)
# vegasMinusModel (-) when vegas likes Tm worse than model (model bet on Tm)
predMargin = gamesX['Tm_TmMarginper40'] - gamesX['Opp_TmMarginper40'] + b* gamesy['Tm_isHome']
vegasMinusModel = gamesy['GameVegasLine'] - predMargin
w, bauc, pauc = evalClassificationModel(
predClass=(vegasMinusModel <= 0) * 1,
actualClass=gamesy['tmAtsWinner'],
isPush=(gamesy['tmVegasMargin'] == 0)*1,
predProb=vegasMinusModel,
methodName=f'Margin per 40 Difference + {b}'
# showCurve=True
)
mov_w.append(w)
mov_bauc.append(bauc)
mov_pauc.append(pauc)
predMargin = gamesX['Tm_OA_TmMarginper40'] - gamesX['Opp_OA_TmMarginper40'] + b* gamesy['Tm_isHome']
vegasMinusModel = gamesy['GameVegasLine'] - predMargin
w, bauc, pauc = evalClassificationModel(
predClass=(vegasMinusModel <= 0) * 1,
actualClass=gamesy['tmAtsWinner'],
isPush=(gamesy['tmVegasMargin'] == 0)*1,
predProb=vegasMinusModel,
methodName=f'Margin per 40 Difference + {b}'
# showCurve=True
)
oa_w.append(w)
oa_bauc.append(bauc)
oa_pauc.append(pauc)
fig = make_subplots(rows=3, cols=1, shared_xaxes=True, subplot_titles=('Win Pct by Home Court Advantage', 'Binary AUC by HCA', 'Probability AUC by HCA'))
fig.add_trace(
go.Scatter(x=buffs,y=mov_w, name='MoV - Win Pct', legendgroup='MOV', line_color='steelblue'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_w, name='OA MoV - Win Pct', legendgroup='OA', line_color='crimson'),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=mov_bauc, name='MoV - Binary AUC', legendgroup='MOV', line_color='steelblue'),
row=2, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_bauc, name='OA MoV - Binary AUC', legendgroup='OA', line_color='crimson'),
row=2, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=mov_pauc, name='MoV - Probability AUC', legendgroup='MOV', line_color='steelblue'),
row=3, col=1
)
fig.add_trace(
go.Scatter(x=buffs,y=oa_pauc, name='OA MoV - Probability AUC', legendgroup='OA', line_color='crimson'),
row=3, col=1
)
fig.update_xaxes(
title_text='Home Court Advantage Value',
row=3
)
fig.update_yaxes(title_text='Win Pct', row=1)
fig.update_yaxes(title_text='AUC', row=2)
fig.update_yaxes(title_text='AUC', row=3)
fig.show()
# Try Classification based on if Tm wins ATS
gamesX['GameVegasLine'] = gamesy['GameVegasLine']
# Train the winning parameters: 10 depth, 6 min samples
rfc = RandomForestClassifier(
n_estimators=500,
criterion='gini',
max_depth=5,
min_samples_split=2,
max_features='sqrt',
bootstrap=True,
# oob_score=True,
n_jobs=-1,
random_state=114492,
# verbose=True,
max_samples=.8
)
temp = pd.DataFrame()
for s in sorted(gamesy['Season'].unique()):
print(f"Season {s}")
rfc.fit(gamesX.loc[gamesy['Season'] != s], gamesy.loc[gamesy['Season'] != s, 'tmAtsWinner'])
# gb.predict(gamesX.loc[gamesy['Season'] == s)
t = pd.DataFrame(data={
'predClass': rfc.predict(gamesX.loc[gamesy['Season'] == s]),
'predProb': rfc.predict_proba(gamesX.loc[gamesy['Season'] == s])[:,1],
'actlClass': gamesy.loc[gamesy['Season'] == s, 'tmAtsWinner'],
'isPush': ((gamesy.loc[gamesy['Season'] == s, 'tmVegasMargin'])==0)*1
})
temp = temp.append(t)
evalClassificationModel(
predClass=t['predClass'],
actualClass=t['actlClass'],
isPush=t['isPush'],
predProb=t['predProb'],
methodName=f'{s} Season Results',
# showCurve=True
)
evalClassificationModel(
predClass=temp['predClass'],
actualClass=temp['actlClass'],
isPush=temp['isPush'],
predProb=temp['predProb'],
methodName=f'rfc All Season Results',
showCurve=True
)
# Gradient Boosting
gb = GradientBoostingClassifier(
n_estimators=20,
max_depth=3,
random_state=114492,
verbose=True
)
temp = pd.DataFrame()
for s in sorted(gamesy['Season'].unique()):
print(f"Season {s}")
gb.fit(gamesX.loc[gamesy['Season'] != s], gamesy.loc[gamesy['Season'] != s, 'tmAtsWinner'])
# gb.predict(gamesX.loc[gamesy['Season'] == s)
t = pd.DataFrame(data={
'predClass': gb.predict(gamesX.loc[gamesy['Season'] == s]),
'predProb': gb.predict_proba(gamesX.loc[gamesy['Season'] == s])[:,1],
'actlClass': gamesy.loc[gamesy['Season'] == s, 'tmAtsWinner'],
'isPush': ((gamesy.loc[gamesy['Season'] == s, 'tmVegasMargin'])==0)*1
})
temp = temp.append(t)
evalClassificationModel(
predClass=t['predClass'],
actualClass=t['actlClass'],
isPush=t['isPush'],
predProb=t['predProb'],
methodName=f'{s} Season Results',
# showCurve=True
)
evalClassificationModel(
predClass=temp['predClass'],
actualClass=temp['actlClass'],
isPush=temp['isPush'],
predProb=temp['predProb'],
methodName=f'GB All Season Results',
showCurve=True,
verbose=True
)
# print(f"RF Classifier Correct: {np.mean(gamesy['rfc_Correct']):.2%}")
# games['rfc_correctCumSum'] = np.where(
# games['rfc_betOnTm'] == games['tmAtsWinner'],
# 1,
# -1
# )
# games.sort_values(inplace=True, by=['dataPriorTo'])
# games['modelDateCumSum'] = games['rfc_correctCumSum'].cumsum()
# fig = px.line(games, x='dataPriorTo', y='modelDateCumSum')
# fig.show()
# gamesy['probRounded'] = np.round(rfc.oob_decision_function_[:,1],3)
# temp2 = gamesy.groupby(['probRounded'])['rfc_correctCumSum'].agg({'rfc_correctCumSum': ['sum', 'count']}).reset_index()
# temp2.columns = ['probRounded', 'rfc_correctCumSum', 'recordCnt']
# fig = px.bar(temp2, x='probRounded', y='rfc_correctCumSum')
# fig.update_layout(
# title_text=f"Performance of {predictionCol} by {col}"
# )
# gamesy['placeBet'] = (gamesy['probRounded']<= 0.5)*1
# games['smartWinner'] = games['placeBet']*games['rfc_correctCumSum']
# print(f"Smart logic correct: {np.sum(games['smartWinner']):.0f}, win pct = {np.sum(games['smartWinner']==1)/np.sum(games['placeBet']):.2%}")
# temp = games[['tmAtsWinner', 'rfc_tmAtsWinner_1','rfc_betOnTm']]
# %%
gb = GradientBoostingClassifier(
n_estimators=150,
max_depth=3,
random_state=114492,
verbose=True
)
gb.fit(gamesX, games['tmAtsWinner'])
gamesy['rfc_betOnTm'] = gb.predict(gamesX)
gamesy['rfc_Win'] = ((gamesy['rfc_betOnTm'] == gamesy['tmAtsWinner']) & (gamesy['tmVegasMargin'] != 0))*1
gamesy['rfc_Loss'] = ((gamesy['rfc_betOnTm'] != gamesy['tmAtsWinner']) & (gamesy['tmVegasMargin'] != 0))*1
gamesy['rfc_Push'] = (gamesy['tmVegasMargin'] == 0)*1
print(f"Record: {np.sum(gamesy['rfc_Win'])} - {np.sum(gamesy['rfc_Loss'])} - {np.sum(gamesy['rfc_Push'])}")
print(f"Net wins: {np.sum(gamesy['rfc_Win']) - np.sum(gamesy['rfc_Loss'])}")
print(f"Win Percent: {np.sum(gamesy['rfc_Win'])/(np.sum(gamesy['rfc_Win'])+np.sum(gamesy['rfc_Loss'])):.2%}")
# %%
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import click
import os
import string
import random
from jina.flow import Flow
RANDOM_SEED = 10 # 5
os.environ['PARALLEL'] = str(2)
os.environ['SHARDS'] = str(2)
def get_random_ws(workspace_path, length=8):
random.seed(RANDOM_SEED)
letters = string.ascii_lowercase
dn = ''.join(random.choice(letters) for i in range(length))
return os.path.join(workspace_path, dn)
def print_topk(resp, word):
for d in resp.search.docs:
print(f'Ta-Dah🔮, here is what we found for: {word}')
for idx, match in enumerate(d.matches):
score = match.score.value
if score <= 0.0:
continue
word_def = match.chunks[0].text
word = match.meta_info.decode()
print('> {:>2d}({:.2f}). {}: "{}"'.format(idx, score, word, word_def.strip()))
@click.command()
@click.option('--task', '-t')
@click.option('--num_docs', '-n', default=50)
@click.option('--top_k', '-k', default=5)
def main(task, num_docs, top_k):
workspace_path = '/tmp/jina/urbandict'
os.environ['TMP_WORKSPACE'] = get_random_ws(workspace_path)
print(f'{os.environ["TMP_WORKSPACE"]}')
data_fn = os.environ.get('WASHED_DATA_DIR', os.path.join(workspace_path, 'urbandict-word-defs.csv'))
if task == 'index':
f = Flow().load_config('flow-index.yml')
with f:
f.index_lines(filepath=data_fn, size=num_docs, batch_size=16)
elif task == 'query':
f = Flow().load_config('flow-query.yml')
with f:
while True:
text = input('word definition: ')
if not text:
break
ppr = lambda x: print_topk(x, text)
f.search_lines(lines=[text, ], output_fn=ppr, topk=top_k)
elif task == 'query_restful':
f = Flow().load_config('flow-query.yml')
f.use_rest_gateway()
with f:
f.block()
else:
raise NotImplementedError(
f'unknown task: {task}. A valid task is `index` or `query` or `query_restful`.')
if __name__ == '__main__':
main()
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import click
import os
import string
import random
from jina.flow import Flow
RANDOM_SEED = 10 # 5
os.environ['PARALLEL'] = str(2)
os.environ['SHARDS'] = str(2)
def get_random_ws(workspace_path, length=8):
random.seed(RANDOM_SEED)
letters = string.ascii_lowercase
dn = ''.join(random.choice(letters) for i in range(length))
return os.path.join(workspace_path, dn)
def print_topk(resp, word):
for d in resp.search.docs:
print(f'Ta-Dah🔮, here is what we found for: {word}')
for idx, match in enumerate(d.matches):
score = match.score.value
if score <= 0.0:
continue
word_def = match.chunks[0].text
word = match.meta_info.decode()
print('> {:>2d}({:.2f}). {}: "{}"'.format(idx, score, word, word_def.strip()))
@click.command()
@click.option('--task', '-t')
@click.option('--num_docs', '-n', default=50)
@click.option('--top_k', '-k', default=5)
def main(task, num_docs, top_k):
workspace_path = '/tmp/jina/urbandict'
os.environ['TMP_WORKSPACE'] = get_random_ws(workspace_path)
print(f'{os.environ["TMP_WORKSPACE"]}')
data_fn = os.environ.get('WASHED_DATA_DIR', os.path.join(workspace_path, 'urbandict-word-defs.csv'))
if task == 'index':
f = Flow().load_config('flow-index.yml')
with f:
f.index_lines(filepath=data_fn, size=num_docs, batch_size=16)
elif task == 'query':
f = Flow().load_config('flow-query.yml')
with f:
while True:
text = input('word definition: ')
if not text:
break
ppr = lambda x: print_topk(x, text)
f.search_lines(lines=[text, ], output_fn=ppr, topk=top_k)
elif task == 'query_restful':
f = Flow().load_config('flow-query.yml')
f.use_rest_gateway()
with f:
f.block()
else:
raise NotImplementedError(
f'unknown task: {task}. A valid task is `index` or `query` or `query_restful`.')
if __name__ == '__main__':
main()
|
import itertools as itt
import pathlib as pl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.io import loadmat
import src.data.rasters
from src.data import dPCA as cdPCA
from src.metrics import dprime as cDP
from src.data.load import load, get_site_ids
from src.data.cache import make_cache, get_cache
from src.metrics.reliability import signal_reliability
from src.utils.tools import shuffle_along_axis as shuffle
from src.utils import fits as fit
from src.visualization import fancy_plots as fplt
'''
Since applying the dprime CPN analysis to the NTI data was unsuccessful, the next alternative for comparing Sam's
approach and mine is to perform the CPN and NTI analyses on their respective datasets, on recording sites that have both types of data.
'''
# 1. list sites with both datasets
# list all NTI sites; this has to be done manually
# list all CPN sites, this should be trivial
# check the intersection
# 2. Calculate the dPrime for each site and all possible probes, context pairs and cells (?). The difficult part is
# summarizing the outcome of all of these comparisons.
def cell_dprime(site, probe, meta):
# recs = load(site, remote=True, rasterfs=meta['raster_fs'], recache=False)
recs = load(site, rasterfs=meta['raster_fs'], recache=rec_recache)
if len(recs) > 2:
print(f'\n\n{recs.keys()}\n\n')
rec = recs['trip0']
sig = rec['resp']
# calculates response reliability and selects only good cells to improve the analysis
r_vals, goodcells = signal_reliability(sig, r'\ASTIM_*', threshold=meta['reliability'])
goodcells = goodcells.tolist()
# get the full data raster Context x Probe x Rep x Neuron x Time
raster = src.data.rasters.raster_from_sig(sig, probe, channels=goodcells, contexts=meta['transitions'],
smooth_window=meta['smoothing_window'], raster_fs=meta['raster_fs'],
zscore=meta['zscore'], part='probe')
# trialR shape: Trial x Cell x Context x Probe x Time; R shape: Cell x Context x Probe x Time
trialR, R, _ = cdPCA.format_raster(raster)
trialR, R = trialR.squeeze(axis=3), R.squeeze(axis=2) # squeezes out probe
rep, chn, ctx, tme = trialR.shape
trans_pairs = [f'{x}_{y}' for x, y in itt.combinations(meta['transitions'], 2)]
dprime = cDP.pairwise_dprimes(trialR, observation_axis=0, condition_axis=2) # shape CellPair x Cell x Time
# Shuffles the rasters n times and organizes in an array with the same shape the raster plus one dimension
# with size n containing each shuffle
shuffled = list()
# pbar = ProgressBar()
print(f"\nshuffling {meta["montecarlo"]} times")
for tp in trans_pairs:
shuf_trialR = np.empty([meta['montecarlo'], rep, chn, 2, tme])
shuf_trialR[:] = np.nan
tran_idx = np.array([meta['transitions'].index(t) for t in tp.split('_')])
ctx_shuffle = trialR[:, :, tran_idx, :].copy()
for rr in range(meta['montecarlo']):
shuf_trialR[rr, ...] = shuffle(ctx_shuffle, shuffle_axis=2, indie_axis=0)
shuffled.append(cDP.pairwise_dprimes(shuf_trialR, observation_axis=1, condition_axis=3))
shuffled = np.stack(shuffled, axis=1).squeeze(axis=0).swapaxes(0, 1) # shape Montecarlo x ContextPair x Cell x Time
return dprime, shuffled, goodcells, trans_pairs
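# The shuffled null above relies on `shuffle` (shuffle_along_axis from
# src.utils.tools), whose implementation is not shown here. The sketch below is
# only an assumption of what such a helper typically does, included to make the
# context-label shuffle easier to follow; it is not used by the analysis.
def _shuffle_along_axis_sketch(arr, shuffle_axis, indie_axis):
    """Permute values along `shuffle_axis`, drawing an independent permutation
    for every index of `indie_axis` (illustration only)."""
    rng = np.random.default_rng()
    # put the independent axis first and the shuffled axis second
    out = np.moveaxis(arr.copy(), (indie_axis, shuffle_axis), (0, 1))
    for idx in range(out.shape[0]):
        out[idx] = out[idx][rng.permutation(out.shape[1])]
    return np.moveaxis(out, (0, 1), (indie_axis, shuffle_axis))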
def dPCA_fourway_analysis(site, probe, meta):
# recs = load(site, remote=True, rasterfs=meta['raster_fs'], recache=False)
recs = load(site, rasterfs=meta['raster_fs'], recache=rec_recache)
if len(recs) > 2:
print(f'\n\n{recs.keys()}\n\n')
rec = recs['trip0']
sig = rec['resp']
# calculates response reliability and selects only good cells to improve the analysis
r_vals, goodcells = signal_reliability(sig, r'\ASTIM_*', threshold=meta['reliability'])
goodcells = goodcells.tolist()
# get the full data raster Context x Probe x Rep x Neuron x Time
raster = src.data.rasters.raster_from_sig(sig, probe, channels=goodcells, contexts=meta['transitions'],
smooth_window=meta['smoothing_window'], raster_fs=meta['raster_fs'],
zscore=meta['zscore'])
# trialR shape: Trial x Cell x Context x Probe x Time; R shape: Cell x Context x Probe x Time
trialR, R, _ = cdPCA.format_raster(raster)
trialR, R = trialR.squeeze(axis=3), R.squeeze(axis=2) # squeezes out probe
Re, C, S, T = trialR.shape
# calculates full dPCA. i.e. considering all 4 categories
dPCA_projection, dPCA_transformation = cdPCA.fit_transform(R, trialR)
dprime = cDP.pairwise_dprimes(dPCA_projection, observation_axis=0, condition_axis=1)
# calculates floor (ctx shuffle) and ceiling (simulated data)
sim_dprime = np.empty([meta['montecarlo']] + list(dprime.shape))
shuf_dprime = np.empty([meta['montecarlo']] + list(dprime.shape))
ctx_shuffle = trialR.copy()
# pbar = ProgressBar()
for rr in range(meta['montecarlo']):
# ceiling: simulates data, calculates dprimes
sim_trial = np.random.normal(np.mean(trialR, axis=0), np.std(trialR, axis=0),
size=[Re, C, S, T])
sim_projection = cdPCA.transform(sim_trial, dPCA_transformation)
sim_dprime[rr, ...] = cDP.pairwise_dprimes(sim_projection, observation_axis=0, condition_axis=1)
ctx_shuffle = shuffle(ctx_shuffle, shuffle_axis=2, indie_axis=0)
shuf_projection = cdPCA.transform(ctx_shuffle, dPCA_transformation)
shuf_dprime[rr, ...] = cDP.pairwise_dprimes(shuf_projection, observation_axis=0, condition_axis=1)
return dprime, shuf_dprime, sim_dprime, goodcells
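# `cDP.pairwise_dprimes` is not defined in this file. Presumably it computes,
# per time bin and condition pair, the standard sensitivity index; the one-pair
# sketch below states that formula as an assumption, for reference only.
def _dprime_sketch(a, b):
    """d' between two 1-D arrays of observations:
    (mean(a) - mean(b)) / sqrt(0.5 * (var(a) + var(b)))."""
    return (np.mean(a) - np.mean(b)) / np.sqrt(0.5 * (np.var(a) + np.var(b)))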
# transferable plotting parameters
plt.rcParams['svg.fonttype'] = 'none'
sup_title_size = 30
sub_title_size = 20
ax_lab_size = 15
ax_val_size = 11
meta = {'reliability': 0.1, # r value
'smoothing_window': 0, # ms
'raster_fs': 30,
'transitions': ['silence', 'continuous', 'similar', 'sharp'],
'montecarlo': 1000,
'zscore': False}
dprime_recache = False
rec_recache = False
analysis_name = 'NTI_singel_cell_dprime'
analysis_parameters = '_'.join(['{}-{}'.format(key, str(val)) for key, val in meta.items()])
code_to_name = {'t': 'Probe', 'ct': 'Context'}
full_screen = [19.2, 9.83]
all_probes = [2, 3, 5, 6]
sites = ['ley070a', # good site. A1
'ley072b', # Primary looking responses with strong contextual effects
'AMT028b', # good site
'AMT029a', # Strong response, somehow visible contextual effects
'AMT030a', # low responses, Ok but not as good
# 'AMT031a', # low response, bad
'AMT032a'] # great site. PEG
sites = list(get_site_ids(316).keys())
# problem sites:
# sites = ['AMT031a']
# for site, probe in zip(['AMT029a', 'ley070a'],[5,2]):
# all_sites = ['AMT029a']
# all_sites = ['AMT032a']
# all_probes = [5]
bad_sites = list()
all_pvalues = dict()
all_reals = dict()
all_shuffled = dict()
for site in sites:
this_site_reals = list()
this_site_shuffled = list()
this_site_pvalues = list()
for pp, probe in enumerate(all_probes):
# single cell analysis
object_name = f'200221_{site}_P{probe}_single_cell_dprime'
analysis_parameters = '_'.join(['{}-{}'.format(key, str(val)) for key, val in meta.items()])
analysis_name = 'CPN_singel_cell_dprime'
cache_folder = pl.Path('C:\\', 'users', 'mateo', 'mycache', analysis_name, analysis_parameters)
SC_cache = make_cache(function=cell_dprime,
func_args={'site': site, 'probe': probe, 'meta': meta},
classobj_name=object_name,
cache_folder=cache_folder,
recache=dprime_recache)
dprime, shuf_dprime, cell_names, trans_pairs = get_cache(SC_cache)
this_site_reals.append(dprime)
this_site_shuffled.append(shuf_dprime)
# one-tailed p-value based on the Monte Carlo shuffling
SC_pvalues = np.sum((shuf_dprime >= dprime), axis=0) / meta['montecarlo']
this_site_pvalues.append(SC_pvalues)
this_site_reals = np.stack(this_site_reals, axis=0)
this_site_shuffled = np.stack(this_site_shuffled, axis=0)
this_site_pvalues = np.stack(this_site_pvalues, axis=0)
# reorders data into a dictionary keyed by cell
for cc, cell in enumerate(cell_names):
all_reals[cell] = this_site_reals[:, :, cc, :]
all_shuffled[cell] = this_site_shuffled[:, :, :, cc, :].swapaxes(0, 1)
all_pvalues[cell] = this_site_pvalues[:, :, cc, :]
# stacks the individual site arrays along a new site dimension. Since the sites have dissimilar cell numbers, pads
all_cells = np.array(list(all_pvalues.keys()))
threshold = 0.05
all_signif = {key: (val <= threshold) for key, val in all_pvalues.items()}
# stacks arrays with different time dimensions, padding with NaN
shape = np.insert(np.max(np.stack([arr.shape for arr in all_signif.values()], axis=0), axis=0), 0,
len(all_signif))
signif_array = np.empty(shape)
signif_array[:] = np.nan
for cc, arr in enumerate(all_signif.values()):
t = arr.shape[-1]
signif_array[cc, :, :, :t] = arr
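# Editor's note: the loop above pads only along the last (time) axis, so cells with fewer time bins end with
# trailing NaNs; signif_array has dimensions Cell x Probe x ContextPair x Time.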
# sig_array = np.stack(list(all_signif.values()), axis=0) # dimensions: Cell x Probe x trans_pair x time
# calculates exponential decay for each cell, collapsing across all probes and transitions
nbin = signif_array.shape[-1]
fs = meta['raster_fs']
times = np.linspace(0, nbin / fs, nbin, endpoint=False) * 1000 # units in ms!!!!
collapsed = signif_array.mean(axis=(1, 2))
# organizes in a dataframe with columns r0: y intercept, decay: exponential decay constant, and tau: time to 36% amplitude
df = list()
for cellid, data in zip(all_cells, collapsed):
popt, _, _ = fit.exp_decay(times, data)
df.append({'cellid': cellid,
'r0_au': popt[0],
'decay_ms': popt[1]})
context_fits = pd.DataFrame(df)
context_fits['tau_ms'] = -1/context_fits['decay_ms']
context_fits.set_index(['cellid'], inplace=True)
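# Editor's note (assumed model, inferred from the columns above): fit.exp_decay presumably fits
# y(t) = r0 * exp(decay * t) with decay < 0, so the time constant is tau = -1 / decay, i.e. the time at which
# the mean significance fraction has dropped to ~1/e (36%) of r0.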
# 3. import and parse matlab results for Sam's NTI analysis. These results are in a cell-by-cell format, so it makes
# sense to calculate the dprimes individually for each cell
file = pl.Path('C:\\', 'Users', 'Mateo', 'Documents', 'Science', 'code', 'integration_quilt', 'scrambling-ferrets',
'analysis', 'model_fit_pop_summary').with_suffix('.mat')
best_fits = loadmat(file)['best_fits'].squeeze()
# orders the data in DF
df = list()
for row in best_fits:
df.append({'cellid': row[2][0],
'intper_ms': row[0][0][0],
'delay_ms': row[1][0][0]})
integration_fits = pd.DataFrame(df)
integration_fits.set_index(['cellid'], inplace=True)
# 4. pools together both approaches, selects only the common cells, plots relationships
# join='inner' keeps only the intersection between the two Dfs, i.e. the cells that have both approaches
def savefig(fig, root, name):
root = pl.Path(f'C:\\users\\mateo\\Pictures\\{root}')
if not root.exists(): root.mkdir(parents=True, exist_ok=True)
png = root.joinpath(name).with_suffix('.png')
fig.savefig(png, transparent=False, dpi=100)
# svg = root.joinpath(name).with_suffix('.svg')
# fig.savefig(svg, transparent=True)
DF = pd.concat([context_fits, integration_fits], axis=1, join='inner')
# filter out anomalous outliers, i.e. taus over 1 second due to poor fitting
ff_good = DF['tau_ms'] < 1000
filtered = DF.loc[ff_good, :]
fig_root = 'sam_vs_mat'
x = filtered['tau_ms']
y = filtered['intper_ms']
fig, ax = plt.subplots(figsize=full_screen)
ax.scatter(x, y)
_ = fplt.lin_reg(x,y, ax=ax)
ax.set_xlabel(x.name)
ax.set_ylabel(y.name)
ax.legend()
fig.tight_layout(rect=(0, 0, 1, 0.95))
title = 'Sam integration vs Mateo Tau'
fig.suptitle(title)
savefig(fig, fig_root, title)
|
import itertools as itt
import pathlib as pl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.io import loadmat
import src.data.rasters
from src.data import dPCA as cdPCA
from src.metrics import dprime as cDP
from src.data.load import load, get_site_ids
from src.data.cache import make_cache, get_cache
from src.metrics.reliability import signal_reliability
from src.utils.tools import shuffle_along_axis as shuffle
from src.utils import fits as fit
from src.visualization import fancy_plots as fplt
'''
since applying the dprime CPN analysis to the NTI data was unsuccessful, the next alternative to compare Sam's and my
approach is to perform the CPN and NTI analyses on their respective datasets, using recording sites that have both data
'''
# 1. list sites with both datasets
# list all NTI sites; this has to be done manually
# list all CPN sites, this should be trivial
# check the intersection
# 2. Calculates the dPrime for each site and all possible probes, context pairs and cells (?). The difficult part is
# to summarize the outcome of all these comparisons
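# Editor's summary of the array shapes handled below (taken from the inline comments):
#   raster : Context x Probe x Rep x Neuron x Time
#   trialR : Trial x Cell x Context x Probe x Time, squeezed to Trial x Cell x Context x Time
#   dprime : ContextPair x Cell x Time; the shuffles prepend a Montecarlo axis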
def cell_dprime(site, probe, meta):
# recs = load(site, remote=True, rasterfs=meta['raster_fs'], recache=False)
recs = load(site, rasterfs=meta['raster_fs'], recache=rec_recache)
if len(recs) > 2:
print(f'\n\n{recs.keys()}\n\n')
rec = recs['trip0']
sig = rec['resp']
# calculates response reliability and selects only good cells to improve the analysis
r_vals, goodcells = signal_reliability(sig, r'\ASTIM_*', threshold=meta['reliability'])
goodcells = goodcells.tolist()
# get the full data raster Context x Probe x Rep x Neuron x Time
raster = src.data.rasters.raster_from_sig(sig, probe, channels=goodcells, contexts=meta['transitions'],
smooth_window=meta['smoothing_window'], raster_fs=meta['raster_fs'],
zscore=meta['zscore'], part='probe')
# trialR shape: Trial x Cell x Context x Probe x Time; R shape: Cell x Context x Probe x Time
trialR, R, _ = cdPCA.format_raster(raster)
trialR, R = trialR.squeeze(axis=3), R.squeeze(axis=2) # squeezes out probe
rep, chn, ctx, tme = trialR.shape
trans_pairs = [f'{x}_{y}' for x, y in itt.combinations(meta['transitions'], 2)]
dprime = cDP.pairwise_dprimes(trialR, observation_axis=0, condition_axis=2) # shape ContextPair x Cell x Time
# Shuffles the rasters n times and organizes in an array with the same shape the raster plus one dimension
# with size n containing each shuffle
shuffled = list()
# pbar = ProgressBar()
print(f"\nshuffling {meta['montecarlo']} times")
for tp in trans_pairs:
shuf_trialR = np.empty([meta['montecarlo'], rep, chn, 2, tme])
shuf_trialR[:] = np.nan
tran_idx = np.array([meta['transitions'].index(t) for t in tp.split('_')])
ctx_shuffle = trialR[:, :, tran_idx, :].copy()
for rr in range(meta['montecarlo']):
shuf_trialR[rr, ...] = shuffle(ctx_shuffle, shuffle_axis=2, indie_axis=0)
shuffled.append(cDP.pairwise_dprimes(shuf_trialR, observation_axis=1, condition_axis=3))
shuffled = np.stack(shuffled, axis=1).squeeze(axis=0).swapaxes(0, 1) # shape Montecarlo x ContextPair x Cell x Time
return dprime, shuffled, goodcells, trans_pairs
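# Editor's note on the shuffling above: shuffle_along_axis presumably permutes values along shuffle_axis=2
# (the context axis) independently for each index of indie_axis=0 (trials/repetitions), destroying the
# context identity of each trial while preserving the pooled statistics.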
def dPCA_fourway_analysis(site, probe, meta):
# recs = load(site, remote=True, rasterfs=meta['raster_fs'], recache=False)
recs = load(site, rasterfs=meta['raster_fs'], recache=rec_recache)
if len(recs) > 2:
print(f'\n\n{recs.keys()}\n\n')
rec = recs['trip0']
sig = rec['resp']
# calculates response realiability and select only good cells to improve analysis
r_vals, goodcells = signal_reliability(sig, r'\ASTIM_*', threshold=meta['reliability'])
goodcells = goodcells.tolist()
# get the full data raster Context x Probe x Rep x Neuron x Time
raster = src.data.rasters.raster_from_sig(sig, probe, channels=goodcells, contexts=meta['transitions'],
smooth_window=meta['smoothing_window'], raster_fs=meta['raster_fs'],
zscore=meta['zscore'])
# trialR shape: Trial x Cell x Context x Probe x Time; R shape: Cell x Context x Probe x Time
trialR, R, _ = cdPCA.format_raster(raster)
trialR, R = trialR.squeeze(axis=3), R.squeeze(axis=2) # squeezes out probe
Re, C, S, T = trialR.shape
# calculates full dPCA. i.e. considering all 4 categories
dPCA_projection, dPCA_transformation = cdPCA.fit_transform(R, trialR)
dprime = cDP.pairwise_dprimes(dPCA_projection, observation_axis=0, condition_axis=1)
# calculates floor (ctx shuffle) and ceiling (simulated data)
sim_dprime = np.empty([meta['montecarlo']] + list(dprime.shape))
shuf_dprime = np.empty([meta['montecarlo']] + list(dprime.shape))
ctx_shuffle = trialR.copy()
# pbar = ProgressBar()
for rr in range(meta['montecarlo']):
# ceiling: simulates data, calculates dprimes
sim_trial = np.random.normal(np.mean(trialR, axis=0), np.std(trialR, axis=0),
size=[Re, C, S, T])
sim_projection = cdPCA.transform(sim_trial, dPCA_transformation)
sim_dprime[rr, ...] = cDP.pairwise_dprimes(sim_projection, observation_axis=0, condition_axis=1)
ctx_shuffle = shuffle(ctx_shuffle, shuffle_axis=2, indie_axis=0)
shuf_projection = cdPCA.transform(ctx_shuffle, dPCA_transformation)
shuf_dprime[rr, ...] = cDP.pairwise_dprimes(shuf_projection, observation_axis=0, condition_axis=1)
return dprime, shuf_dprime, sim_dprime, goodcells
# transferable plotting parameters
plt.rcParams['svg.fonttype'] = 'none'
sup_title_size = 30
sub_title_size = 20
ax_lab_size = 15
ax_val_size = 11
meta = {'reliability': 0.1, # r value
'smoothing_window': 0, # ms
'raster_fs': 30,
'transitions': ['silence', 'continuous', 'similar', 'sharp'],
'montecarlo': 1000,
'zscore': False}
dprime_recache = False
rec_recache = False
analysis_name = 'NTI_singel_cell_dprime'
analysis_parameters = '_'.join(['{}-{}'.format(key, str(val)) for key, val in meta.items()])
code_to_name = {'t': 'Probe', 'ct': 'Context'}
full_screen = [19.2, 9.83]
all_probes = [2, 3, 5, 6]
sites = ['ley070a', # good site. A1
'ley072b', # Primary looking responses with strong contextual effects
'AMT028b', # good site
'AMT029a', # Strong response, somehow visible contextual effects
'AMT030a', # low responses, Ok but not as good
# 'AMT031a', # low response, bad
'AMT032a'] # great site. PEG
sites = list(get_site_ids(316).keys())
# problem sites:
# sites = ['AMT031a']
# for site, probe in zip(['AMT029a', 'ley070a'],[5,2]):
# all_sites = ['AMT029a']
# all_sites = ['AMT032a']
# all_probes = [5]
bad_sites = list()
all_pvalues = dict()
all_reals = dict()
all_shuffled = dict()
for site in sites:
this_site_reals = list()
this_site_shuffled = list()
this_site_pvalues = list()
for pp, probe in enumerate(all_probes):
# single cell analysis
object_name = f'200221_{site}_P{probe}_single_cell_dprime'
analysis_parameters = '_'.join(['{}-{}'.format(key, str(val)) for key, val in meta.items()])
analysis_name = 'CPN_singel_cell_dprime'
cache_folder = pl.Path('C:\\', 'users', 'mateo', 'mycache', analysis_name, analysis_parameters)
SC_cache = make_cache(function=cell_dprime,
func_args={'site': site, 'probe': probe, 'meta': meta},
classobj_name=object_name,
cache_folder=cache_folder,
recache=dprime_recache)
dprime, shuf_dprime, cell_names, trans_pairs = get_cache(SC_cache)
this_site_reals.append(dprime)
this_site_shuffled.append(shuf_dprime)
# one-tailed p-value based on the Monte Carlo shuffling
SC_pvalues = np.sum((shuf_dprime >= dprime), axis=0) / meta['montecarlo']
this_site_pvalues.append(SC_pvalues)
this_site_reals = np.stack(this_site_reals, axis=0)
this_site_shuffled = np.stack(this_site_shuffled, axis=0)
this_site_pvalues = np.stack(this_site_pvalues, axis=0)
# reorders data into a dictionary keyed by cell
for cc, cell in enumerate(cell_names):
all_reals[cell] = this_site_reals[:, :, cc, :]
all_shuffled[cell] = this_site_shuffled[:, :, :, cc, :].swapaxes(0, 1)
all_pvalues[cell] = this_site_pvalues[:, :, cc, :]
# stacks the individual site arrays along a new site dimension. Since the sites have dissimilar cell numbers, pads
all_cells = np.array(list(all_pvalues.keys()))
threshold = 0.05
all_signif = {key: (val <= threshold) for key, val in all_pvalues.items()}
# stacks arrays with different time dimensions, padding with NaN
shape = np.insert(np.max(np.stack([arr.shape for arr in all_signif.values()], axis=0), axis=0), 0,
len(all_signif))
signif_array = np.empty(shape)
signif_array[:] = np.nan
for cc, arr in enumerate(all_signif.values()):
t = arr.shape[-1]
signif_array[cc, :, :, :t] = arr
# sig_array = np.stack(list(all_signif.values()), axis=0) # dimensions: Cell x Probe x trans_pair x time
# calculates exponential decay for each cell, collapsing across all probes and transitions
nbin = signif_array.shape[-1]
fs = meta['raster_fs']
times = np.linspace(0, nbin / fs, nbin, endpoint=False) * 1000 # units in ms!!!!
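# Editor's worked example for the line above: with raster_fs = 30 Hz each bin lasts 1000/30 ~= 33.3 ms,
# so times = [0.0, 33.3, 66.7, ...] ms across the nbin time bins.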
collapsed = signif_array.mean(axis=(1, 2))
# organizes in a dataframe with columns r0: y intercept, decay: exponential decay constant, and tau: time to 36% amplitude
df = list()
for cellid, data in zip(all_cells, collapsed):
popt, _, _ = fit.exp_decay(times, data)
df.append({'cellid': cellid,
'r0_au': popt[0],
'decay_ms': popt[1]})
context_fits = pd.DataFrame(df)
context_fits['tau_ms'] = -1/context_fits['decay_ms']
context_fits.set_index(['cellid'], inplace=True)
# 3. import and parse matlab results for Sam's NTI analysis. These results are in a cell-by-cell format, so it makes
# sense to calculate the dprimes individually for each cell
file = pl.Path('C:\\', 'Users', 'Mateo', 'Documents', 'Science', 'code', 'integration_quilt', 'scrambling-ferrets',
'analysis', 'model_fit_pop_summary').with_suffix('.mat')
best_fits = loadmat(file)['best_fits'].squeeze()
# orders the data in DF
df = list()
for row in best_fits:
df.append({'cellid': row[2][0],
'intper_ms': row[0][0][0],
'delay_ms': row[1][0][0]})
integration_fits = pd.DataFrame(df)
integration_fits.set_index(['cellid'], inplace=True)
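# Editor's note: each `row` of best_fits is a nested MATLAB cell/struct entry as returned by scipy.io.loadmat,
# hence the indexing above: row[2][0] holds the cell id string, row[0][0][0] the integration period in ms and
# row[1][0][0] the delay in ms (field order assumed from the dict keys used here).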
# 4. pools together both approaches, selects only the common cells, plots relationships
# join='inner' keeps only the intersection between the two Dfs, i.e. the cells that have both approaches
def savefig(fig, root, name):
root = pl.Path(f'C:\\users\\mateo\\Pictures\\{root}')
if not root.exists(): root.mkdir(parents=True, exist_ok=True)
png = root.joinpath(name).with_suffix('.png')
fig.savefig(png, transparent=False, dpi=100)
# svg = root.joinpath(name).with_suffix('.svg')
# fig.savefig(svg, transparent=True)
DF = pd.concat([context_fits, integration_fits], axis=1, join='inner')
# filter out anomalous outliers, i.e. taus over 1 second due to poor fitting
ff_good = DF['tau_ms'] < 1000
filtered = DF.loc[ff_good, :]
fig_root = 'sam_vs_mat'
x = filtered['tau_ms']
y = filtered['intper_ms']
fig, ax = plt.subplots(figsize=full_screen)
ax.scatter(x, y)
_ = fplt.lin_reg(x,y, ax=ax)
ax.set_xlabel(x.name)
ax.set_ylabel(y.name)
ax.legend()
fig.tight_layout(rect=(0, 0, 1, 0.95))
title = 'Sam integration vs Mateo Tau'
fig.suptitle(title)
savefig(fig, fig_root, title)
|
import argparse
import logging
import os
import re
import statistics
import sys
import time
from datetime import timedelta, datetime
from threading import Thread
from typing import Dict, Tuple, Optional, List, Iterable
from tqdm import tqdm
from .env import H2TestEnv, H2Conf
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class LoadTestException(Exception):
pass
class H2LoadLogSummary:
@staticmethod
def from_file(fpath: str, title: str, duration: timedelta) -> 'H2LoadLogSummary':
with open(fpath) as fd:
return H2LoadLogSummary.from_lines(fd.readlines(), title=title, duration=duration)
@staticmethod
def from_lines(lines: Iterable[str], title: str, duration: timedelta) -> 'H2LoadLogSummary':
stati = {}
count = 0
durations = list()
all_durations = timedelta(milliseconds=0)
for line in lines:
parts = re.split(r'\s+', line) # start(us), status(int), duration(us), tbd.
if len(parts) >= 3 and parts[0] and parts[1] and parts[2]:
count += 1
status = int(parts[1])
if status in stati:
stati[status] += 1
else:
stati[status] = 1
durations.append(int(parts[2]))
all_durations += timedelta(microseconds=int(parts[2]))
else:
sys.stderr.write("unrecognized log line: {0}".format(line))
mean_duration = statistics.mean(durations)
return H2LoadLogSummary(title=title, total=count, stati=stati,
duration=duration, all_durations=all_durations,
mean_duration=mean_duration)
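# Editor's note on the parser above: each h2load --log-file line is whitespace-separated with the request
# start time, the HTTP status and the request duration, all treated here as microseconds. mean_duration is
# therefore a plain float in microseconds, which mean_duration_ms later divides by 1000.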
def __init__(self, title: str, total: int, stati: Dict[int, int],
duration: timedelta, all_durations: timedelta,
mean_duration: float):
self._title = title
self._total = total
self._stati = stati
self._duration = duration
self._all_durations = all_durations
self._mean_duration = mean_duration
self._transfered_mb = 0.0
self._exec_result = None
self._expected_responses = 0
@property
def title(self) -> str:
return self._title
@property
def response_count(self) -> int:
return self._total
@property
def duration(self) -> timedelta:
return self._duration
@property
def mean_duration_ms(self) -> float:
return self._mean_duration / 1000.0
@property
def response_durations(self) -> timedelta:
return self._all_durations
@property
def response_stati(self) -> Dict[int, int]:
return self._stati
@property
def expected_responses(self) -> int:
return self._expected_responses
@property
def execution(self) -> ExecResult:
return self._exec_result
def all_200(self) -> bool:
non_200s = [n for n in self._stati.keys() if n != 200]
return len(non_200s) == 0
@property
def throughput_mb(self) -> float:
if self._transfered_mb > 0.0:
return self._transfered_mb / self.duration.total_seconds()
return 0.0
def set_transfered_mb(self, mb: float) -> None:
self._transfered_mb = mb
def set_exec_result(self, result: ExecResult):
self._exec_result = result
def set_expected_responses(self, n: int):
self._expected_responses = n
def get_footnote(self) -> Optional[str]:
note = ""
if 0 < self.expected_responses != self.response_count:
note += "{0}/{1} missing".format(
self.expected_responses - self.response_count,
self.expected_responses
)
if not self.all_200():
note += ", non 200s:"
for status in [n for n in self.response_stati.keys() if n != 200]:
note += " {0}={1}".format(status, self.response_stati[status])
return note if len(note) else None
class H2LoadMonitor:
def __init__(self, fpath: str, expected: int, title: str):
self._fpath = fpath
self._expected = expected
self._title = title
self._tqdm = tqdm(desc=title, total=expected, unit="request", leave=False)
self._running = False
self._lines = ()
self._tail = None
def start(self):
self._tail = Thread(target=self._collect, kwargs={'self': self})
self._running = True
self._tail.start()
def get_summary(self, duration: timedelta) -> H2LoadLogSummary:
self._running = False
self._tail.join()
return H2LoadLogSummary.from_file(self._fpath, title=self._title, duration=duration)
def stop(self):
self._running = False
@staticmethod
def _collect(self) -> None:
first_call = True
while self._running:
try:
with open(self._fpath) as fd:
if first_call:
fd.seek(0, 2)
first_call = False
latest_data = fd.read()
while self._running:
if '\n' not in latest_data:
latest_data += fd.read()
if '\n' not in latest_data:
if not os.path.isfile(self._fpath):
break
time.sleep(0.1)
continue
lines = latest_data.split('\n')
if lines[-1] != '\n':
latest_data = lines[-1]
lines = lines[:-1]
else:
latest_data = None
self._tqdm.update(n=len(lines))
if latest_data is None:
latest_data = fd.read()
except IOError:
time.sleep(0.1)
self._tqdm.close()
def mk_text_file(fpath: str, lines: int):
t110 = ""
for _ in range(11):
t110 += "0123456789"
with open(fpath, "w") as fd:
for i in range(lines):
fd.write("{0:015d}: ".format(i)) # total 128 bytes per line
fd.write(t110)
fd.write("\n")
class LoadTestCase:
@staticmethod
def from_scenario(scenario: Dict, env: H2TestEnv) -> 'UrlsLoadTest':
raise NotImplementedError()
def run(self) -> H2LoadLogSummary:
raise NotImplementedError()
def format_result(self, summary: H2LoadLogSummary) -> str:
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
@staticmethod
def setup_base_conf(env: H2TestEnv, worker_count: int = 5000) -> H2Conf:
conf = H2Conf(env=env)
# ylavic's formula
process_count = int(max(10, min(100, int(worker_count / 100))))
thread_count = int(max(25, int(worker_count / process_count)))
conf.add(f"""
StartServers 1
ServerLimit {int(process_count * 2.5)}
ThreadLimit {thread_count}
ThreadsPerChild {thread_count}
MinSpareThreads {thread_count}
MaxSpareThreads {int(worker_count / 2)}
MaxRequestWorkers {worker_count}
MaxConnectionsPerChild 0
KeepAliveTimeout 60
MaxKeepAliveRequests 0
""")
return conf
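# Editor's worked example of the sizing formula above, for the default worker_count = 5000:
#   process_count = max(10, min(100, int(5000 / 100))) = 50
#   thread_count  = max(25, int(5000 / 50))            = 100
#   ServerLimit = int(50 * 2.5) = 125, ThreadsPerChild = 100,
#   MaxSpareThreads = int(5000 / 2) = 2500, MaxRequestWorkers = 5000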
@staticmethod
def start_server(env: H2TestEnv, cd: timedelta = None):
if cd:
with tqdm(desc="connection cooldown", total=int(cd.total_seconds()), unit="s", leave=False) as t:
end = datetime.now() + cd
while datetime.now() < end:
time.sleep(1)
t.update()
assert env.apache_restart() == 0
@staticmethod
def server_setup(env: H2TestEnv, extras: Dict = None):
conf = LoadTestCase.setup_base_conf(env=env)
if not extras:
extras = {
'base': """
LogLevel ssl:warn
Protocols h2 http/1.1
H2MinWorkers 32
H2MaxWorkers 256
"""
}
extras['base'] += f"""
ProxyPreserveHost on
SSLProxyVerify require
SSLProxyCACertificateFile {env.ca.cert_file}
<Proxy https://127.0.0.1:{env.https_port}/>
SSLProxyEngine on
</Proxy>
<Proxy h2://127.0.0.1:{env.https_port}/>
SSLProxyEngine on
</Proxy>
"""
extras[env.domain_test1] = f"""
Protocols h2 http/1.1
ProxyPass /proxy-h1/ https://127.0.0.1:{env.https_port}/
ProxyPass /proxy-h2/ h2://127.0.0.1:{env.https_port}/
"""
conf.add_vhost_test1(extras=extras)
conf.install()
class UrlsLoadTest(LoadTestCase):
SETUP_DONE = False
def __init__(self, env: H2TestEnv, location: str,
clients: int, requests: int,
file_count: int,
file_sizes: List[int],
measure: str,
protocol: str = 'h2',
max_parallel: int = 1,
threads: int = None, warmup: bool = False):
self.env = env
self._location = location
self._clients = clients
self._measure = measure
self._requests = requests
self._file_count = file_count
self._file_sizes = file_sizes
self._protocol = protocol
self._max_parallel = max_parallel
self._threads = threads if threads is not None else min(2, self._clients)
self._url_file = "{gen_dir}/h2load-urls.txt".format(gen_dir=self.env.gen_dir)
self._warmup = warmup
@staticmethod
def from_scenario(scenario: Dict, env: H2TestEnv) -> 'UrlsLoadTest':
return UrlsLoadTest(
env=env,
location=scenario['location'],
clients=scenario['clients'], requests=scenario['requests'],
file_sizes=scenario['file_sizes'], file_count=scenario['file_count'],
protocol=scenario['protocol'], max_parallel=scenario['max_parallel'],
warmup=scenario['warmup'], measure=scenario['measure']
)
def next_scenario(self, scenario: Dict) -> 'UrlsLoadTest':
return UrlsLoadTest(
env=self.env,
location=scenario['location'],
clients=scenario['clients'], requests=scenario['requests'],
file_sizes=scenario['file_sizes'], file_count=scenario['file_count'],
protocol=scenario['protocol'], max_parallel=scenario['max_parallel'],
warmup=scenario['warmup'], measure=scenario['measure']
)
def _setup(self, cls, extras: Dict = None):
LoadTestCase.server_setup(env=self.env, extras=extras)
docs_a = os.path.join(self.env.server_docs_dir, "test1")
uris = []
for i in range(self._file_count):
fsize = self._file_sizes[i % len(self._file_sizes)]
if fsize is None:
raise Exception("file sizes?: {0} {1}".format(i, fsize))
fname = "{0}-{1}k.txt".format(i, fsize)
fpath = os.path.join(docs_a, fname)
if not os.path.isfile(fpath):
mk_text_file(os.path.join(docs_a, fname), 8 * fsize)
uris.append(f"{self._location}{fname}")
with open(self._url_file, 'w') as fd:
fd.write("\n".join(uris))
fd.write("\n")
self.start_server(env=self.env)
def _teardown(self):
# we shutdown apache at program exit
pass
def shutdown(self):
self._teardown()
def run_test(self, mode: str, path: str) -> H2LoadLogSummary:
monitor = None
try:
log_file = "{gen_dir}/h2load.log".format(gen_dir=self.env.gen_dir)
if os.path.isfile(log_file):
os.remove(log_file)
monitor = H2LoadMonitor(log_file, expected=self._requests,
title=f"{self._protocol}/"
f"{self._file_count / 1024}f/{self._clients}c[{mode}]")
monitor.start()
args = [
'h2load',
'--clients={0}'.format(self._clients),
'--threads={0}'.format(self._threads),
'--requests={0}'.format(self._requests),
'--input-file={0}'.format(self._url_file),
'--log-file={0}'.format(log_file),
'--connect-to=localhost:{0}'.format(self.env.https_port)
]
if self._protocol == 'h1' or self._protocol == 'http/1.1':
args.append('--h1')
elif self._protocol == 'h2':
args.extend(['-m', str(self._max_parallel)])
else:
raise Exception(f"unknown protocol: {self._protocol}")
r = self.env.run(args + [
f'--base-uri=https://{self.env.domain_test1}:{self.env.https_port}{self._location}'
])
if r.exit_code != 0:
raise LoadTestException("h2load returned {0}: {1}".format(r.exit_code, r.stderr))
summary = monitor.get_summary(duration=r.duration)
summary.set_expected_responses(self._requests)
summary.set_exec_result(r)
return summary
finally:
if monitor is not None:
monitor.stop()
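# Editor's note: the args assembled above amount to an h2load call along the lines of (values depend on the
# scenario and test env, shown here only as an illustration):
#   h2load --clients=8 --threads=2 --requests=10000 \
#          --input-file=<gen_dir>/h2load-urls.txt --log-file=<gen_dir>/h2load.log \
#          --connect-to=localhost:<https_port> -m 6 \
#          --base-uri=https://<domain_test1>:<https_port>/
# where -m (max concurrent streams per connection) is only added for h2 runs and --h1 is used for http/1.1.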
def run(self) -> H2LoadLogSummary:
path = self._setup(self.__class__)
try:
if self._warmup:
self.run_test(mode="warmup", path=path)
r = self.run_test(mode="measure", path=path)
# time.sleep(300)
return r
finally:
self._teardown()
def format_result(self, summary: H2LoadLogSummary) -> Tuple[str, Optional[List[str]]]:
if self._measure == 'req/s':
r = "{0:d}".format(round(summary.response_count / summary.duration.total_seconds()))
elif self._measure == 'mean ms/req':
r = "{0:.1f}".format(summary.mean_duration_ms)
elif self._measure == 'mb/s':
reqs = summary.response_count / summary.duration.total_seconds()
mean_size = statistics.mean(self._file_sizes)
r = "{0:d}".format(round(reqs * mean_size / 1024.0))
else:
raise Exception(f"measure '{self._measure}' not defined")
return r, summary.get_footnote()
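# Editor's note on the measures above: 'req/s' divides the response count by the wall-clock duration,
# 'mean ms/req' comes from the per-request durations in the h2load log, and 'mb/s' multiplies the request
# rate by the mean configured file size in KB and divides by 1024 to report MB/s.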
class StressTest(LoadTestCase):
SETUP_DONE = False
def __init__(self, env: H2TestEnv, location: str,
clients: int, requests: int, file_count: int,
file_sizes: List[int],
protocol: str = 'h2',
max_parallel: int = 1,
cooldown: timedelta = None,
threads: int = None, ):
self.env = env
self._location = location
self._clients = clients
self._requests = requests
self._file_count = file_count
self._file_sizes = file_sizes
self._protocol = protocol
self._max_parallel = max_parallel
self._cooldown = cooldown if cooldown else timedelta(seconds=0)
self._threads = threads if threads is not None else min(2, self._clients)
self._url_file = "{gen_dir}/h2load-urls.txt".format(gen_dir=self.env.gen_dir)
self._is_setup = False
@staticmethod
def from_scenario(scenario: Dict, env: H2TestEnv) -> 'UrlsLoadTest':
return StressTest(
env=env,
location=scenario['location'],
clients=scenario['clients'], requests=scenario['requests'],
file_sizes=scenario['file_sizes'], file_count=scenario['file_count'],
protocol=scenario['protocol'], max_parallel=scenario['max_parallel'],
cooldown=scenario['cooldown']
)
def next_scenario(self, scenario: Dict) -> 'UrlsLoadTest':
self._location = scenario['location']
self._clients = scenario['clients']
self._requests = scenario['requests']
self._file_sizes = scenario['file_sizes']
self._file_count = scenario['file_count']
self._protocol = scenario['protocol']
self._max_parallel = scenario['max_parallel']
return self
def _setup(self, cls):
LoadTestCase.server_setup(env=self.env, extras={
'base': f"""
H2MinWorkers 32
H2MaxWorkers 128
H2MaxWorkerIdleSeconds 5
"""
})
if not cls.SETUP_DONE:
with tqdm(desc="setup resources", total=self._file_count, unit="file", leave=False) as t:
docs_a = os.path.join(self.env.server_docs_dir, "test1")
uris = []
for i in range(self._file_count):
fsize = self._file_sizes[i % len(self._file_sizes)]
if fsize is None:
raise Exception("file sizes?: {0} {1}".format(i, fsize))
fname = "{0}-{1}k.txt".format(i, fsize)
mk_text_file(os.path.join(docs_a, fname), 8 * fsize)
uris.append(f"{self._location}{fname}")
t.update()
with open(self._url_file, 'w') as fd:
fd.write("\n".join(uris))
fd.write("\n")
cls.SETUP_DONE = True
self.start_server(env=self.env)
self._is_setup = True
def shutdown(self):
# we shutdown apache at program exit
pass
def run_test(self, mode: str) -> H2LoadLogSummary:
monitor = None
try:
log_file = "{gen_dir}/h2load.log".format(gen_dir=self.env.gen_dir)
if os.path.isfile(log_file):
os.remove(log_file)
monitor = H2LoadMonitor(log_file, expected=self._requests,
title=f"{self._protocol}/"
f"{self._file_count / 1024}f/{self._clients}c[{mode}]")
monitor.start()
args = [
'h2load',
'--clients={0}'.format(self._clients),
'--threads={0}'.format(min(self._clients, 2)),
'--requests={0}'.format(self._requests),
'--input-file={0}'.format(self._url_file),
'--log-file={0}'.format(log_file),
'--connect-to=localhost:{0}'.format(self.env.https_port)
]
if self._protocol == 'h1' or self._protocol == 'http/1.1':
args.append('--h1')
elif self._protocol == 'h2':
args.extend(['-m', str(self._max_parallel)])
else:
raise Exception(f"unknown protocol: {self._protocol}")
r = self.env.run(args + [
f'--base-uri=https://{self.env.domain_test1}:{self.env.https_port}{self._location}'
])
if r.exit_code != 0:
raise LoadTestException("h2load returned {0}: {1}".format(r.exit_code, r.stderr))
summary = monitor.get_summary(duration=r.duration)
summary.set_expected_responses(self._requests)
summary.set_exec_result(r)
return summary
finally:
if monitor is not None:
monitor.stop()
def run(self) -> H2LoadLogSummary:
if not self._is_setup:
self._setup(self.__class__)
elif self._cooldown.total_seconds() > 0:
with tqdm(desc="worker cooldown",
total=int(self._cooldown.total_seconds()),
unit="s", leave=False) as t:
end = datetime.now() + self._cooldown
while datetime.now() < end:
time.sleep(1)
t.update()
return self.run_test(mode="measure")
def format_result(self, summary: H2LoadLogSummary) -> Tuple[str, Optional[List[str]]]:
return "{0:.1f}".format(
summary.response_count / summary.duration.total_seconds()
), summary.get_footnote()
class LoadTest:
@staticmethod
def print_table(table: List[List[str]], foot_notes: List[str] = None):
col_widths = []
col_sep = " "
for row in table[1:]:
for idx, cell in enumerate(row):
if idx >= len(col_widths):
col_widths.append(len(cell))
else:
col_widths[idx] = max(len(cell), col_widths[idx])
row_len = sum(col_widths) + (len(col_widths) * len(col_sep))
print(f"{' '.join(table[0]):^{row_len}}")
for row in table[1:]:
line = ""
for idx, cell in enumerate(row):
line += f"{col_sep if idx > 0 else ''}{cell:>{col_widths[idx]}}"
print(line)
if foot_notes is not None:
for idx, note in enumerate(foot_notes):
print("{0:3d}) {1}".format(idx+1, note))
@classmethod
def main(cls):
parser = argparse.ArgumentParser(prog='load_h1', description="""
Run a range of load tests against the test Apache setup.
""")
parser.add_argument("-p", "--protocol", type=str, default=None,
help="which protocols to test, defaults to all")
parser.add_argument("-v", "--verbose", action='count', default=0,
help="log more output on stderr")
parser.add_argument("names", nargs='*', help="Name(s) of scenarios to run")
args = parser.parse_args()
if args.verbose > 0:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger('').addHandler(console)
scenarios = {
"1k-files": {
"title": "1k files, 1k-10MB, *conn, 10k req ({measure})",
"class": UrlsLoadTest,
"location": "/",
"file_count": 1024,
"file_sizes": [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 10000],
"requests": 10000,
"warmup": True,
"measure": "req/s",
"protocol": 'h2',
"max_parallel": 1,
"row0_title": "protocol max",
"row_title": "{protocol} {max_parallel:3d}",
"rows": [
{"protocol": 'h2', "max_parallel": 1},
{"protocol": 'h2', "max_parallel": 2},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 20},
{"protocol": 'h2', "max_parallel": 50},
{"protocol": 'h1', "max_parallel": 1},
],
"col_title": "{clients}c",
"clients": 1,
"columns": [
{"clients": 1},
{"clients": 4},
{"clients": 8},
{"clients": 16},
{"clients": 32},
],
},
"long": {
"title": "1k files, 10k size, *conn, 100k req, {protocol} ({measure})",
"class": UrlsLoadTest,
"location": "/",
"file_count": 100,
"file_sizes": [1],
"requests": 100000,
"warmup": False,
"measure": "req/s",
"protocol": 'h2',
"max_parallel": 1,
"row0_title": "max requests",
"row_title": "{max_parallel:3d} {requests}",
"rows": [
{"max_parallel": 1, "requests": 100000},
{"max_parallel": 2, "requests": 100000},
#{"max_parallel": 6, "requests": 250000},
#{"max_parallel": 20, "requests": 500000},
#{"max_parallel": 50, "requests": 750000},
],
"col_title": "{clients}c",
"clients": 1,
"columns": [
{"clients": 1},
],
},
"durations": {
"title": "1k files, 64k size, 10k req/conn ({measure})",
"class": UrlsLoadTest,
"location": "/",
"file_count": 1024,
"file_sizes": [64],
"requests": 10000,
"warmup": False,
"measure": "mean ms/req",
"protocol": 'h2',
"max_parallel": 1,
"row0_title": "protocol max",
"row_title": "{protocol} {max_parallel:3d}",
"rows": [
{"protocol": 'h2', "max_parallel": 1},
{"protocol": 'h2', "max_parallel": 2},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 20},
{"protocol": 'h2', "max_parallel": 50},
{"protocol": 'h1', "max_parallel": 1},
],
"col_title": "{clients}c",
"clients": 1,
"columns": [
{"clients": 1, "requests": 10000},
{"clients": 4, "requests": 40000},
{"clients": 8, "requests": 80000},
{"clients": 16, "requests": 160000},
{"clients": 32, "requests": 320000},
],
},
"transfers": {
"title": "net transfer speed, by KB body size, (MB/s)",
"class": UrlsLoadTest,
"location": "/",
"file_count": 1,
"file_sizes": [10, 100, 1000, 10000],
"requests": 10000,
"clients": 1,
"warmup": False,
"measure": "mb/s",
"protocol": 'h2',
"max_parallel": 1,
"row0_title": "protocol c/parallel",
"row_title": "{protocol} {clients}/{max_parallel}",
"rows": [
{"protocol": 'h1', "max_parallel": 1, "clients": 1},
{"protocol": 'h2', "max_parallel": 1, "clients": 1},
{"protocol": 'h2', "max_parallel": 2, "clients": 1},
{"protocol": 'h2', "max_parallel": 6, "clients": 1},
{"protocol": 'h1', "max_parallel": 1, "clients": 2},
{"protocol": 'h2', "max_parallel": 1, "clients": 2},
{"protocol": 'h2', "max_parallel": 2, "clients": 2},
{"protocol": 'h2', "max_parallel": 6, "clients": 2},
{"protocol": 'h1', "max_parallel": 1, "clients": 6},
{"protocol": 'h2', "max_parallel": 1, "clients": 6},
{"protocol": 'h2', "max_parallel": 2, "clients": 6},
{"protocol": 'h2', "max_parallel": 6, "clients": 6},
],
"col_title": "{file_sizes}",
"clients": 1,
"columns": [
{"file_sizes": [10], "requests": 100000},
{"file_sizes": [100], "requests": 50000},
{"file_sizes": [1000], "requests": 20000},
{"file_sizes": [10000], "requests": 5000},
],
},
"bursty": {
"title": "1k files, {clients} clients, {requests} request, (req/s)",
"class": StressTest,
"location": "/",
"file_count": 1024,
"file_sizes": [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 10000],
"requests": 20000,
"protocol": "h2",
"max_parallel": 50,
"clients": 32,
"cooldown": timedelta(seconds=20),
"row0_title": "protocol",
"row_title": "{protocol}",
"rows": [
{"protocol": 'h2', },
],
"col_title": "{run}",
"columns": [
{"run": 1},
{"run": 2},
{"run": 3},
{"run": 4},
{"run": 5},
{"run": 6},
{"run": 7},
{"run": 8},
{"run": 9},
{"run": 10},
{"run": 11},
{"run": 12},
{"run": 13},
{"run": 14},
{"run": 15},
{"run": 16},
{"run": 17},
{"run": 18},
{"run": 19},
{"run": 20},
],
},
"m6": {
"title": "1k files, 1k-10MB, *conn, 10k req ({measure})",
"class": UrlsLoadTest,
"location": "/",
"file_count": 1024,
"file_sizes": [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 10000],
"requests": 5000,
"warmup": True,
"measure": "req/s",
"protocol": 'h2',
"max_parallel": 6,
"row0_title": "protocol max",
"row_title": "{protocol} {max_parallel:3d}",
"rows": [
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
],
"col_title": "{clients}c",
"clients": 1,
"columns": [
{"clients": 1, "requests": 1000},
{"clients": 32, "requests": 16000},
{"clients": 64, "requests": 32000},
{"clients": 128, "requests": 64000},
{"clients": 192, "requests": 96000},
],
},
}
env = H2TestEnv()
rv = 0
try:
log.debug("starting tests")
names = args.names if len(args.names) else sorted(scenarios.keys())
for name in names:
if name not in scenarios:
raise LoadTestException(f"unknown test scenario: {name}")
scenario = scenarios[name]
table = [
[scenario['title'].format(**scenario)],
]
foot_notes = []
headers = [scenario['row0_title']]
for col in scenario['columns']:
headers.append(scenario['col_title'].format(**col))
table.append(headers)
cls.print_table(table)
test = scenario['class'].from_scenario(scenario, env=env)
for row in scenario['rows']:
if args.protocol is not None and row['protocol'] != args.protocol:
continue
row_line = [scenario['row_title'].format(**row)]
table.append(row_line)
for col in scenario['columns']:
t = scenario.copy()
t.update(row)
t.update(col)
test = test.next_scenario(t)
env.apache_error_log_clear()
summary = test.run()
result, fnote = test.format_result(summary)
if fnote:
foot_notes.append(fnote)
row_line.append("{0}{1}".format(result,
f"[{len(foot_notes)}]" if fnote else ""))
cls.print_table(table, foot_notes)
test.shutdown()
except KeyboardInterrupt:
log.warning("aborted")
rv = 1
except LoadTestException as ex:
sys.stderr.write(f"ERROR: {str(ex)}\n")
rv = 1
env.apache_stop()
sys.exit(rv)
if __name__ == "__main__":
LoadTest.main()
|
import argparse
import logging
import os
import re
import statistics
import sys
import time
from datetime import timedelta, datetime
from threading import Thread
from typing import Dict, Tuple, Optional, List, Iterable
from tqdm import tqdm
from .env import H2TestEnv, H2Conf
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class LoadTestException(Exception):
pass
class H2LoadLogSummary:
@staticmethod
def from_file(fpath: str, title: str, duration: timedelta) -> 'H2LoadLogSummary':
with open(fpath) as fd:
return H2LoadLogSummary.from_lines(fd.readlines(), title=title, duration=duration)
@staticmethod
def from_lines(lines: Iterable[str], title: str, duration: timedelta) -> 'H2LoadLogSummary':
stati = {}
count = 0
durations = list()
all_durations = timedelta(milliseconds=0)
for line in lines:
parts = re.split(r'\s+', line) # start(us), status(int), duration(us), tbd.
if len(parts) >= 3 and parts[0] and parts[1] and parts[2]:
count += 1
status = int(parts[1])
if status in stati:
stati[status] += 1
else:
stati[status] = 1
durations.append(int(parts[2]))
all_durations += timedelta(microseconds=int(parts[2]))
else:
sys.stderr.write("unrecognized log line: {0}".format(line))
mean_duration = statistics.mean(durations)
return H2LoadLogSummary(title=title, total=count, stati=stati,
duration=duration, all_durations=all_durations,
mean_duration=mean_duration)
def __init__(self, title: str, total: int, stati: Dict[int, int],
duration: timedelta, all_durations: timedelta,
mean_duration: float):
self._title = title
self._total = total
self._stati = stati
self._duration = duration
self._all_durations = all_durations
self._mean_duration = mean_duration
self._transfered_mb = 0.0
self._exec_result = None
self._expected_responses = 0
@property
def title(self) -> str:
return self._title
@property
def response_count(self) -> int:
return self._total
@property
def duration(self) -> timedelta:
return self._duration
@property
def mean_duration_ms(self) -> float:
return self._mean_duration / 1000.0
@property
def response_durations(self) -> timedelta:
return self._all_durations
@property
def response_stati(self) -> Dict[int, int]:
return self._stati
@property
def expected_responses(self) -> int:
return self._expected_responses
@property
def execution(self) -> ExecResult:
return self._exec_result
def all_200(self) -> bool:
non_200s = [n for n in self._stati.keys() if n != 200]
return len(non_200s) == 0
@property
def throughput_mb(self) -> float:
if self._transfered_mb > 0.0:
return self._transfered_mb / self.duration.total_seconds()
return 0.0
def set_transfered_mb(self, mb: float) -> None:
self._transfered_mb = mb
def set_exec_result(self, result: ExecResult):
self._exec_result = result
def set_expected_responses(self, n: int):
self._expected_responses = n
def get_footnote(self) -> Optional[str]:
note = ""
if 0 < self.expected_responses != self.response_count:
note += "{0}/{1} missing".format(
self.expected_responses - self.response_count,
self.expected_responses
)
if not self.all_200():
note += ", non 200s:"
for status in [n for n in self.response_stati.keys() if n != 200]:
note += " {0}={1}".format(status, self.response_stati[status])
return note if len(note) else None
class H2LoadMonitor:
def __init__(self, fpath: str, expected: int, title: str):
self._fpath = fpath
self._expected = expected
self._title = title
self._tqdm = tqdm(desc=title, total=expected, unit="request", leave=False)
self._running = False
self._lines = ()
self._tail = None
def start(self):
self._tail = Thread(target=self._collect, kwargs={'self': self})
self._running = True
self._tail.start()
def get_summary(self, duration: timedelta) -> H2LoadLogSummary:
self._running = False
self._tail.join()
return H2LoadLogSummary.from_file(self._fpath, title=self._title, duration=duration)
def stop(self):
self._running = False
@staticmethod
def _collect(self) -> None:
first_call = True
while self._running:
try:
with open(self._fpath) as fd:
if first_call:
fd.seek(0, 2)
first_call = False
latest_data = fd.read()
while self._running:
if '\n' not in latest_data:
latest_data += fd.read()
if '\n' not in latest_data:
if not os.path.isfile(self._fpath):
break
time.sleep(0.1)
continue
lines = latest_data.split('\n')
if lines[-1] != '\n':
latest_data = lines[-1]
lines = lines[:-1]
else:
latest_data = None
self._tqdm.update(n=len(lines))
if latest_data is None:
latest_data = fd.read()
except IOError:
time.sleep(0.1)
self._tqdm.close()
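# Editor's note on H2LoadMonitor above: _collect is a staticmethod that receives the monitor explicitly via
# Thread(..., kwargs={'self': self}); it seeks to the end of the log file on first open, then tails it,
# counting newly completed lines into the tqdm bar until stop()/get_summary() clears _running or the log
# file disappears.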
def mk_text_file(fpath: str, lines: int):
t110 = ""
for _ in range(11):
t110 += "0123456789"
with open(fpath, "w") as fd:
for i in range(lines):
fd.write("{0:015d}: ".format(i)) # total 128 bytes per line
fd.write(t110)
fd.write("\n")
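# Editor's note on mk_text_file above: each line is "{0:015d}: " (17 bytes) + 110 digit characters + "\n"
# = 128 bytes, so mk_text_file(path, 8 * fsize) writes 8 * fsize * 128 bytes = fsize KiB, which matches the
# "{i}-{fsize}k.txt" naming used by the tests.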
class LoadTestCase:
@staticmethod
def from_scenario(scenario: Dict, env: H2TestEnv) -> 'UrlsLoadTest':
raise NotImplementedError()
def run(self) -> H2LoadLogSummary:
raise NotImplementedError()
def format_result(self, summary: H2LoadLogSummary) -> str:
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
@staticmethod
def setup_base_conf(env: H2TestEnv, worker_count: int = 5000) -> H2Conf:
conf = H2Conf(env=env)
# ylavic's formula
process_count = int(max(10, min(100, int(worker_count / 100))))
thread_count = int(max(25, int(worker_count / process_count)))
conf.add(f"""
StartServers 1
ServerLimit {int(process_count * 2.5)}
ThreadLimit {thread_count}
ThreadsPerChild {thread_count}
MinSpareThreads {thread_count}
MaxSpareThreads {int(worker_count / 2)}
MaxRequestWorkers {worker_count}
MaxConnectionsPerChild 0
KeepAliveTimeout 60
MaxKeepAliveRequests 0
""")
return conf
@staticmethod
def start_server(env: H2TestEnv, cd: timedelta = None):
if cd:
with tqdm(desc="connection cooldown", total=int(cd.total_seconds()), unit="s", leave=False) as t:
end = datetime.now() + cd
while datetime.now() < end:
time.sleep(1)
t.update()
assert env.apache_restart() == 0
@staticmethod
def server_setup(env: H2TestEnv, extras: Dict = None):
conf = LoadTestCase.setup_base_conf(env=env)
if not extras:
extras = {
'base': """
LogLevel ssl:warn
Protocols h2 http/1.1
H2MinWorkers 32
H2MaxWorkers 256
"""
}
extras['base'] += f"""
ProxyPreserveHost on
SSLProxyVerify require
SSLProxyCACertificateFile {env.ca.cert_file}
<Proxy https://127.0.0.1:{env.https_port}/>
SSLProxyEngine on
</Proxy>
<Proxy h2://127.0.0.1:{env.https_port}/>
SSLProxyEngine on
</Proxy>
"""
extras[env.domain_test1] = f"""
Protocols h2 http/1.1
ProxyPass /proxy-h1/ https://127.0.0.1:{env.https_port}/
ProxyPass /proxy-h2/ h2://127.0.0.1:{env.https_port}/
"""
conf.add_vhost_test1(extras=extras)
conf.install()
class UrlsLoadTest(LoadTestCase):
SETUP_DONE = False
def __init__(self, env: H2TestEnv, location: str,
clients: int, requests: int,
file_count: int,
file_sizes: List[int],
measure: str,
protocol: str = 'h2',
max_parallel: int = 1,
threads: int = None, warmup: bool = False):
self.env = env
self._location = location
self._clients = clients
self._measure = measure
self._requests = requests
self._file_count = file_count
self._file_sizes = file_sizes
self._protocol = protocol
self._max_parallel = max_parallel
self._threads = threads if threads is not None else min(2, self._clients)
self._url_file = "{gen_dir}/h2load-urls.txt".format(gen_dir=self.env.gen_dir)
self._warmup = warmup
@staticmethod
def from_scenario(scenario: Dict, env: H2TestEnv) -> 'UrlsLoadTest':
return UrlsLoadTest(
env=env,
location=scenario['location'],
clients=scenario['clients'], requests=scenario['requests'],
file_sizes=scenario['file_sizes'], file_count=scenario['file_count'],
protocol=scenario['protocol'], max_parallel=scenario['max_parallel'],
warmup=scenario['warmup'], measure=scenario['measure']
)
def next_scenario(self, scenario: Dict) -> 'UrlsLoadTest':
return UrlsLoadTest(
env=self.env,
location=scenario['location'],
clients=scenario['clients'], requests=scenario['requests'],
file_sizes=scenario['file_sizes'], file_count=scenario['file_count'],
protocol=scenario['protocol'], max_parallel=scenario['max_parallel'],
warmup=scenario['warmup'], measure=scenario['measure']
)
def _setup(self, cls, extras: Dict = None):
LoadTestCase.server_setup(env=self.env, extras=extras)
docs_a = os.path.join(self.env.server_docs_dir, "test1")
uris = []
for i in range(self._file_count):
fsize = self._file_sizes[i % len(self._file_sizes)]
if fsize is None:
raise Exception("file sizes?: {0} {1}".format(i, fsize))
fname = "{0}-{1}k.txt".format(i, fsize)
fpath = os.path.join(docs_a, fname)
if not os.path.isfile(fpath):
mk_text_file(os.path.join(docs_a, fname), 8 * fsize)
uris.append(f"{self._location}{fname}")
with open(self._url_file, 'w') as fd:
fd.write("\n".join(uris))
fd.write("\n")
self.start_server(env=self.env)
def _teardown(self):
# we shutdown apache at program exit
pass
def shutdown(self):
self._teardown()
def run_test(self, mode: str, path: str) -> H2LoadLogSummary:
monitor = None
try:
log_file = "{gen_dir}/h2load.log".format(gen_dir=self.env.gen_dir)
if os.path.isfile(log_file):
os.remove(log_file)
monitor = H2LoadMonitor(log_file, expected=self._requests,
title=f"{self._protocol}/"
f"{self._file_count / 1024}f/{self._clients}c[{mode}]")
monitor.start()
args = [
'h2load',
'--clients={0}'.format(self._clients),
'--threads={0}'.format(self._threads),
'--requests={0}'.format(self._requests),
'--input-file={0}'.format(self._url_file),
'--log-file={0}'.format(log_file),
'--connect-to=localhost:{0}'.format(self.env.https_port)
]
if self._protocol == 'h1' or self._protocol == 'http/1.1':
args.append('--h1')
elif self._protocol == 'h2':
args.extend(['-m', str(self._max_parallel)])
else:
raise Exception(f"unknown protocol: {self._protocol}")
r = self.env.run(args + [
f'--base-uri=https://{self.env.domain_test1}:{self.env.https_port}{self._location}'
])
if r.exit_code != 0:
raise LoadTestException("h2load returned {0}: {1}".format(r.exit_code, r.stderr))
summary = monitor.get_summary(duration=r.duration)
summary.set_expected_responses(self._requests)
summary.set_exec_result(r)
return summary
finally:
if monitor is not None:
monitor.stop()
def run(self) -> H2LoadLogSummary:
path = self._setup(self.__class__)
try:
if self._warmup:
self.run_test(mode="warmup", path=path)
r = self.run_test(mode="measure", path=path)
# time.sleep(300)
return r
finally:
self._teardown()
def format_result(self, summary: H2LoadLogSummary) -> Tuple[str, Optional[List[str]]]:
if self._measure == 'req/s':
r = "{0:d}".format(round(summary.response_count / summary.duration.total_seconds()))
elif self._measure == 'mean ms/req':
r = "{0:.1f}".format(summary.mean_duration_ms)
elif self._measure == 'mb/s':
reqs = summary.response_count / summary.duration.total_seconds()
mean_size = statistics.mean(self._file_sizes)
r = "{0:d}".format(round(reqs * mean_size / 1024.0))
else:
raise Exception(f"measure '{self._measure}' not defined")
return r, summary.get_footnote()
class StressTest(LoadTestCase):
SETUP_DONE = False
def __init__(self, env: H2TestEnv, location: str,
clients: int, requests: int, file_count: int,
file_sizes: List[int],
protocol: str = 'h2',
max_parallel: int = 1,
cooldown: timedelta = None,
threads: int = None, ):
self.env = env
self._location = location
self._clients = clients
self._requests = requests
self._file_count = file_count
self._file_sizes = file_sizes
self._protocol = protocol
self._max_parallel = max_parallel
self._cooldown = cooldown if cooldown else timedelta(seconds=0)
self._threads = threads if threads is not None else min(2, self._clients)
self._url_file = "{gen_dir}/h2load-urls.txt".format(gen_dir=self.env.gen_dir)
self._is_setup = False
@staticmethod
def from_scenario(scenario: Dict, env: H2TestEnv) -> 'UrlsLoadTest':
return StressTest(
env=env,
location=scenario['location'],
clients=scenario['clients'], requests=scenario['requests'],
file_sizes=scenario['file_sizes'], file_count=scenario['file_count'],
protocol=scenario['protocol'], max_parallel=scenario['max_parallel'],
cooldown=scenario['cooldown']
)
def next_scenario(self, scenario: Dict) -> 'UrlsLoadTest':
self._location = scenario['location']
self._clients = scenario['clients']
self._requests = scenario['requests']
self._file_sizes = scenario['file_sizes']
self._file_count = scenario['file_count']
self._protocol = scenario['protocol']
self._max_parallel = scenario['max_parallel']
return self
def _setup(self, cls):
LoadTestCase.server_setup(env=self.env, extras={
'base': f"""
H2MinWorkers 32
H2MaxWorkers 128
H2MaxWorkerIdleSeconds 5
"""
})
if not cls.SETUP_DONE:
with tqdm(desc="setup resources", total=self._file_count, unit="file", leave=False) as t:
docs_a = os.path.join(self.env.server_docs_dir, "test1")
uris = []
for i in range(self._file_count):
fsize = self._file_sizes[i % len(self._file_sizes)]
if fsize is None:
raise Exception("file sizes?: {0} {1}".format(i, fsize))
fname = "{0}-{1}k.txt".format(i, fsize)
mk_text_file(os.path.join(docs_a, fname), 8 * fsize)
uris.append(f"{self._location}{fname}")
t.update()
with open(self._url_file, 'w') as fd:
fd.write("\n".join(uris))
fd.write("\n")
cls.SETUP_DONE = True
self.start_server(env=self.env)
self._is_setup = True
def shutdown(self):
# we shutdown apache at program exit
pass
def run_test(self, mode: str) -> H2LoadLogSummary:
monitor = None
try:
log_file = "{gen_dir}/h2load.log".format(gen_dir=self.env.gen_dir)
if os.path.isfile(log_file):
os.remove(log_file)
monitor = H2LoadMonitor(log_file, expected=self._requests,
title=f"{self._protocol}/"
f"{self._file_count / 1024}f/{self._clients}c[{mode}]")
monitor.start()
args = [
'h2load',
'--clients={0}'.format(self._clients),
'--threads={0}'.format(min(self._clients, 2)),
'--requests={0}'.format(self._requests),
'--input-file={0}'.format(self._url_file),
'--log-file={0}'.format(log_file),
'--connect-to=localhost:{0}'.format(self.env.https_port)
]
if self._protocol == 'h1' or self._protocol == 'http/1.1':
args.append('--h1')
elif self._protocol == 'h2':
args.extend(['-m', str(self._max_parallel)])
else:
raise Exception(f"unknown protocol: {self._protocol}")
r = self.env.run(args + [
f'--base-uri=https://{self.env.domain_test1}:{self.env.https_port}{self._location}'
])
if r.exit_code != 0:
raise LoadTestException("h2load returned {0}: {1}".format(r.exit_code, r.stderr))
summary = monitor.get_summary(duration=r.duration)
summary.set_expected_responses(self._requests)
summary.set_exec_result(r)
return summary
finally:
if monitor is not None:
monitor.stop()
def run(self) -> H2LoadLogSummary:
if not self._is_setup:
self._setup(self.__class__)
elif self._cooldown.total_seconds() > 0:
with tqdm(desc="worker cooldown",
total=int(self._cooldown.total_seconds()),
unit="s", leave=False) as t:
end = datetime.now() + self._cooldown
while datetime.now() < end:
time.sleep(1)
t.update()
return self.run_test(mode="measure")
def format_result(self, summary: H2LoadLogSummary) -> Tuple[str, Optional[List[str]]]:
return "{0:.1f}".format(
summary.response_count / summary.duration.total_seconds()
), summary.get_footnote()
class LoadTest:
@staticmethod
def print_table(table: List[List[str]], foot_notes: List[str] = None):
col_widths = []
col_sep = " "
for row in table[1:]:
for idx, cell in enumerate(row):
if idx >= len(col_widths):
col_widths.append(len(cell))
else:
col_widths[idx] = max(len(cell), col_widths[idx])
row_len = sum(col_widths) + (len(col_widths) * len(col_sep))
print(f"{' '.join(table[0]):^{row_len}}")
for row in table[1:]:
line = ""
for idx, cell in enumerate(row):
line += f"{col_sep if idx > 0 else ''}{cell:>{col_widths[idx]}}"
print(line)
if foot_notes is not None:
for idx, note in enumerate(foot_notes):
print("{0:3d}) {1}".format(idx+1, note))
@classmethod
def main(cls):
parser = argparse.ArgumentParser(prog='load_h1', description="""
Run a range of load tests against the test Apache setup.
""")
parser.add_argument("-p", "--protocol", type=str, default=None,
help="which protocols to test, defaults to all")
parser.add_argument("-v", "--verbose", action='count', default=0,
help="log more output on stderr")
parser.add_argument("names", nargs='*', help="Name(s) of scenarios to run")
args = parser.parse_args()
if args.verbose > 0:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger('').addHandler(console)
scenarios = {
"1k-files": {
"title": "1k files, 1k-10MB, *conn, 10k req ({measure})",
"class": UrlsLoadTest,
"location": "/",
"file_count": 1024,
"file_sizes": [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 10000],
"requests": 10000,
"warmup": True,
"measure": "req/s",
"protocol": 'h2',
"max_parallel": 1,
"row0_title": "protocol max",
"row_title": "{protocol} {max_parallel:3d}",
"rows": [
{"protocol": 'h2', "max_parallel": 1},
{"protocol": 'h2', "max_parallel": 2},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 20},
{"protocol": 'h2', "max_parallel": 50},
{"protocol": 'h1', "max_parallel": 1},
],
"col_title": "{clients}c",
"clients": 1,
"columns": [
{"clients": 1},
{"clients": 4},
{"clients": 8},
{"clients": 16},
{"clients": 32},
],
},
"long": {
"title": "1k files, 10k size, *conn, 100k req, {protocol} ({measure})",
"class": UrlsLoadTest,
"location": "/",
"file_count": 100,
"file_sizes": [1],
"requests": 100000,
"warmup": False,
"measure": "req/s",
"protocol": 'h2',
"max_parallel": 1,
"row0_title": "max requests",
"row_title": "{max_parallel:3d} {requests}",
"rows": [
{"max_parallel": 1, "requests": 100000},
{"max_parallel": 2, "requests": 100000},
#{"max_parallel": 6, "requests": 250000},
#{"max_parallel": 20, "requests": 500000},
#{"max_parallel": 50, "requests": 750000},
],
"col_title": "{clients}c",
"clients": 1,
"columns": [
{"clients": 1},
],
},
"durations": {
"title": "1k files, 64k size, 10k req/conn ({measure})",
"class": UrlsLoadTest,
"location": "/",
"file_count": 1024,
"file_sizes": [64],
"requests": 10000,
"warmup": False,
"measure": "mean ms/req",
"protocol": 'h2',
"max_parallel": 1,
"row0_title": "protocol max",
"row_title": "{protocol} {max_parallel:3d}",
"rows": [
{"protocol": 'h2', "max_parallel": 1},
{"protocol": 'h2', "max_parallel": 2},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 20},
{"protocol": 'h2', "max_parallel": 50},
{"protocol": 'h1', "max_parallel": 1},
],
"col_title": "{clients}c",
"clients": 1,
"columns": [
{"clients": 1, "requests": 10000},
{"clients": 4, "requests": 40000},
{"clients": 8, "requests": 80000},
{"clients": 16, "requests": 160000},
{"clients": 32, "requests": 320000},
],
},
"transfers": {
"title": "net transfer speed, by KB body size, (MB/s)",
"class": UrlsLoadTest,
"location": "/",
"file_count": 1,
"file_sizes": [10, 100, 1000, 10000],
"requests": 10000,
"clients": 1,
"warmup": False,
"measure": "mb/s",
"protocol": 'h2',
"max_parallel": 1,
"row0_title": "protocol c/parallel",
"row_title": "{protocol} {clients}/{max_parallel}",
"rows": [
{"protocol": 'h1', "max_parallel": 1, "clients": 1},
{"protocol": 'h2', "max_parallel": 1, "clients": 1},
{"protocol": 'h2', "max_parallel": 2, "clients": 1},
{"protocol": 'h2', "max_parallel": 6, "clients": 1},
{"protocol": 'h1', "max_parallel": 1, "clients": 2},
{"protocol": 'h2', "max_parallel": 1, "clients": 2},
{"protocol": 'h2', "max_parallel": 2, "clients": 2},
{"protocol": 'h2', "max_parallel": 6, "clients": 2},
{"protocol": 'h1', "max_parallel": 1, "clients": 6},
{"protocol": 'h2', "max_parallel": 1, "clients": 6},
{"protocol": 'h2', "max_parallel": 2, "clients": 6},
{"protocol": 'h2', "max_parallel": 6, "clients": 6},
],
"col_title": "{file_sizes}",
"clients": 1,
"columns": [
{"file_sizes": [10], "requests": 100000},
{"file_sizes": [100], "requests": 50000},
{"file_sizes": [1000], "requests": 20000},
{"file_sizes": [10000], "requests": 5000},
],
},
"bursty": {
"title": "1k files, {clients} clients, {requests} request, (req/s)",
"class": StressTest,
"location": "/",
"file_count": 1024,
"file_sizes": [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 10000],
"requests": 20000,
"protocol": "h2",
"max_parallel": 50,
"clients": 32,
"cooldown": timedelta(seconds=20),
"row0_title": "protocol",
"row_title": "{protocol}",
"rows": [
{"protocol": 'h2', },
],
"col_title": "{run}",
"columns": [
{"run": 1},
{"run": 2},
{"run": 3},
{"run": 4},
{"run": 5},
{"run": 6},
{"run": 7},
{"run": 8},
{"run": 9},
{"run": 10},
{"run": 11},
{"run": 12},
{"run": 13},
{"run": 14},
{"run": 15},
{"run": 16},
{"run": 17},
{"run": 18},
{"run": 19},
{"run": 20},
],
},
"m6": {
"title": "1k files, 1k-10MB, *conn, 10k req ({measure})",
"class": UrlsLoadTest,
"location": "/",
"file_count": 1024,
"file_sizes": [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 10000],
"requests": 5000,
"warmup": True,
"measure": "req/s",
"protocol": 'h2',
"max_parallel": 6,
"row0_title": "protocol max",
"row_title": "{protocol} {max_parallel:3d}",
"rows": [
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
{"protocol": 'h2', "max_parallel": 6},
],
"col_title": "{clients}c",
"clients": 1,
"columns": [
{"clients": 1, "requests": 1000},
{"clients": 32, "requests": 16000},
{"clients": 64, "requests": 32000},
{"clients": 128, "requests": 64000},
{"clients": 192, "requests": 96000},
],
},
}
env = H2TestEnv()
rv = 0
try:
log.debug("starting tests")
names = args.names if len(args.names) else sorted(scenarios.keys())
for name in names:
if name not in scenarios:
raise LoadTestException(f"unknown test scenario: {name}")
scenario = scenarios[name]
table = [
[scenario['title'].format(**scenario)],
]
foot_notes = []
headers = [scenario['row0_title']]
for col in scenario['columns']:
headers.append(scenario['col_title'].format(**col))
table.append(headers)
cls.print_table(table)
test = scenario['class'].from_scenario(scenario, env=env)
for row in scenario['rows']:
if args.protocol is not None and row['protocol'] != args.protocol:
continue
row_line = [scenario['row_title'].format(**row)]
table.append(row_line)
for col in scenario['columns']:
t = scenario.copy()
t.update(row)
t.update(col)
test = test.next_scenario(t)
env.apache_error_log_clear()
summary = test.run()
result, fnote = test.format_result(summary)
if fnote:
foot_notes.append(fnote)
row_line.append("{0}{1}".format(result,
f"[{len(foot_notes)}]" if fnote else ""))
cls.print_table(table, foot_notes)
test.shutdown()
except KeyboardInterrupt:
log.warning("aborted")
rv = 1
except LoadTestException as ex:
sys.stderr.write(f"ERROR: {str(ex)}\n")
rv = 1
env.apache_stop()
sys.exit(rv)
if __name__ == "__main__":
LoadTest.main()
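# Illustrative sketch (not part of the original script): how a scenario's
# "row_title" and "col_title" templates above are expanded with str.format(**...)
# when the result table is built in main(). The dicts below are hypothetical.
def _title_format_example():
    row = {"protocol": "h2", "max_parallel": 6}
    col = {"clients": 4}
    return "{protocol} {max_parallel:3d}".format(**row), "{clients}c".format(**col)
# _title_format_example() -> ('h2   6', '4c')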
|
# ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from BasicSR (https://github.com/xinntao/BasicSR)
# Copyright 2018-2020 BasicSR Authors
# ------------------------------------------------------------------------
import random
from pathlib import Path
import numpy as np
import torch
from torch.utils import data as data
from basicsr.data.transforms import augment, paired_random_crop
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.flow_util import dequantize_flow
class REDSDataset(data.Dataset):
"""REDS dataset for training.
The keys are generated from a meta info txt file.
basicsr/data/meta_info/meta_info_REDS_GT.txt
Each line contains:
    1. subfolder (clip) name; 2. frame number; 3. image shape, separated by
a white space.
Examples:
000 100 (720,1280,3)
001 100 (720,1280,3)
...
Key examples: "000/00000000"
GT (gt): Ground-Truth;
LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
Args:
opt (dict): Config for train dataset. It contains the following keys:
dataroot_gt (str): Data root path for gt.
dataroot_lq (str): Data root path for lq.
dataroot_flow (str, optional): Data root path for flow.
meta_info_file (str): Path for meta information file.
val_partition (str): Validation partition types. 'REDS4' or
'official'.
            io_backend (dict): IO backend type and other kwargs.
            num_frame (int): Window size for input frames.
            gt_size (int): Cropped patch size for gt patches.
interval_list (list): Interval list for temporal augmentation.
random_reverse (bool): Random reverse input frames.
use_flip (bool): Use horizontal flips.
use_rot (bool): Use rotation (use vertical flip and transposing h
and w for implementation).
scale (bool): Scale, which will be added automatically.
"""
def __init__(self, opt):
super(REDSDataset, self).__init__()
self.opt = opt
self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(
opt['dataroot_lq'])
self.flow_root = Path(
opt['dataroot_flow']) if opt['dataroot_flow'] is not None else None
assert opt['num_frame'] % 2 == 1, (
            f'num_frame should be odd number, but got {opt["num_frame"]}')
self.num_frame = opt['num_frame']
self.num_half_frames = opt['num_frame'] // 2
self.keys = []
with open(opt['meta_info_file'], 'r') as fin:
for line in fin:
folder, frame_num, _ = line.split(' ')
self.keys.extend(
[f'{folder}/{i:08d}' for i in range(int(frame_num))])
# remove the video clips used in validation
if opt['val_partition'] == 'REDS4':
val_partition = ['000', '011', '015', '020']
elif opt['val_partition'] == 'official':
val_partition = [f'{v:03d}' for v in range(240, 270)]
else:
raise ValueError(
                f'Wrong validation partition {opt["val_partition"]}.'
f"Supported ones are ['official', 'REDS4'].")
self.keys = [
v for v in self.keys if v.split('/')[0] not in val_partition
]
# file client (io backend)
self.file_client = None
self.io_backend_opt = opt['io_backend']
self.is_lmdb = False
if self.io_backend_opt['type'] == 'lmdb':
self.is_lmdb = True
if self.flow_root is not None:
self.io_backend_opt['db_paths'] = [
self.lq_root, self.gt_root, self.flow_root
]
self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
else:
self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
self.io_backend_opt['client_keys'] = ['lq', 'gt']
# temporal augmentation configs
self.interval_list = opt['interval_list']
self.random_reverse = opt['random_reverse']
interval_str = ','.join(str(x) for x in opt['interval_list'])
logger = get_root_logger()
logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
f'random reverse is {self.random_reverse}.')
def __getitem__(self, index):
if self.file_client is None:
self.file_client = FileClient(
self.io_backend_opt.pop('type'), **self.io_backend_opt)
scale = self.opt['scale']
gt_size = self.opt['gt_size']
key = self.keys[index]
clip_name, frame_name = key.split('/') # key example: 000/00000000
center_frame_idx = int(frame_name)
# determine the neighboring frames
interval = random.choice(self.interval_list)
# ensure not exceeding the borders
start_frame_idx = center_frame_idx - self.num_half_frames * interval
end_frame_idx = center_frame_idx + self.num_half_frames * interval
# each clip has 100 frames starting from 0 to 99
while (start_frame_idx < 0) or (end_frame_idx > 99):
center_frame_idx = random.randint(0, 99)
start_frame_idx = (
center_frame_idx - self.num_half_frames * interval)
end_frame_idx = center_frame_idx + self.num_half_frames * interval
frame_name = f'{center_frame_idx:08d}'
neighbor_list = list(
range(center_frame_idx - self.num_half_frames * interval,
center_frame_idx + self.num_half_frames * interval + 1,
interval))
# random reverse
if self.random_reverse and random.random() < 0.5:
neighbor_list.reverse()
assert len(neighbor_list) == self.num_frame, (
f'Wrong length of neighbor list: {len(neighbor_list)}')
# get the GT frame (as the center frame)
if self.is_lmdb:
img_gt_path = f'{clip_name}/{frame_name}'
else:
img_gt_path = self.gt_root / clip_name / f'{frame_name}.png'
img_bytes = self.file_client.get(img_gt_path, 'gt')
img_gt = imfrombytes(img_bytes, float32=True)
# get the neighboring LQ frames
img_lqs = []
for neighbor in neighbor_list:
if self.is_lmdb:
img_lq_path = f'{clip_name}/{neighbor:08d}'
else:
img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'
img_bytes = self.file_client.get(img_lq_path, 'lq')
img_lq = imfrombytes(img_bytes, float32=True)
img_lqs.append(img_lq)
# get flows
if self.flow_root is not None:
img_flows = []
# read previous flows
for i in range(self.num_half_frames, 0, -1):
if self.is_lmdb:
flow_path = f'{clip_name}/{frame_name}_p{i}'
else:
flow_path = (
self.flow_root / clip_name / f'{frame_name}_p{i}.png')
img_bytes = self.file_client.get(flow_path, 'flow')
cat_flow = imfrombytes(
img_bytes, flag='grayscale',
float32=False) # uint8, [0, 255]
dx, dy = np.split(cat_flow, 2, axis=0)
flow = dequantize_flow(
dx, dy, max_val=20,
denorm=False) # we use max_val 20 here.
img_flows.append(flow)
# read next flows
for i in range(1, self.num_half_frames + 1):
if self.is_lmdb:
flow_path = f'{clip_name}/{frame_name}_n{i}'
else:
flow_path = (
self.flow_root / clip_name / f'{frame_name}_n{i}.png')
img_bytes = self.file_client.get(flow_path, 'flow')
cat_flow = imfrombytes(
img_bytes, flag='grayscale',
float32=False) # uint8, [0, 255]
dx, dy = np.split(cat_flow, 2, axis=0)
flow = dequantize_flow(
dx, dy, max_val=20,
denorm=False) # we use max_val 20 here.
img_flows.append(flow)
# for random crop, here, img_flows and img_lqs have the same
# spatial size
img_lqs.extend(img_flows)
# randomly crop
img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,
img_gt_path)
if self.flow_root is not None:
            img_lqs, img_flows = (img_lqs[:self.num_frame],
                                  img_lqs[self.num_frame:])
# augmentation - flip, rotate
img_lqs.append(img_gt)
if self.flow_root is not None:
img_results, img_flows = augment(img_lqs, self.opt['use_flip'],
self.opt['use_rot'], img_flows)
else:
img_results = augment(img_lqs, self.opt['use_flip'],
self.opt['use_rot'])
img_results = img2tensor(img_results)
img_lqs = torch.stack(img_results[0:-1], dim=0)
img_gt = img_results[-1]
if self.flow_root is not None:
img_flows = img2tensor(img_flows)
# add the zero center flow
img_flows.insert(self.num_half_frames,
torch.zeros_like(img_flows[0]))
img_flows = torch.stack(img_flows, dim=0)
# img_lqs: (t, c, h, w)
# img_flows: (t, 2, h, w)
# img_gt: (c, h, w)
# key: str
if self.flow_root is not None:
return {'lq': img_lqs, 'flow': img_flows, 'gt': img_gt, 'key': key}
else:
return {'lq': img_lqs, 'gt': img_gt, 'key': key}
def __len__(self):
return len(self.keys)
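# Illustrative sketch (assumption, not part of the original module): a minimal `opt`
# dict carrying the keys documented in the REDSDataset docstring above. All paths and
# values below are placeholders, not a recommended training configuration.
def _example_reds_opt():
    return dict(
        dataroot_gt='datasets/REDS/train_sharp',
        dataroot_lq='datasets/REDS/train_sharp_bicubic/X4',
        dataroot_flow=None,
        meta_info_file='basicsr/data/meta_info/meta_info_REDS_GT.txt',
        val_partition='REDS4',
        io_backend=dict(type='disk'),
        num_frame=5,
        gt_size=256,
        interval_list=[1],
        random_reverse=False,
        use_flip=True,
        use_rot=True,
        scale=4)
# e.g. dataset = REDSDataset(_example_reds_opt())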
|
# ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from BasicSR (https://github.com/xinntao/BasicSR)
# Copyright 2018-2020 BasicSR Authors
# ------------------------------------------------------------------------
import random
from pathlib import Path
import numpy as np
import torch
from torch.utils import data as data
from basicsr.data.transforms import augment, paired_random_crop
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.flow_util import dequantize_flow
class REDSDataset(data.Dataset):
"""REDS dataset for training.
The keys are generated from a meta info txt file.
basicsr/data/meta_info/meta_info_REDS_GT.txt
Each line contains:
    1. subfolder (clip) name; 2. frame number; 3. image shape, separated by
a white space.
Examples:
000 100 (720,1280,3)
001 100 (720,1280,3)
...
Key examples: "000/00000000"
GT (gt): Ground-Truth;
LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
Args:
opt (dict): Config for train dataset. It contains the following keys:
dataroot_gt (str): Data root path for gt.
dataroot_lq (str): Data root path for lq.
dataroot_flow (str, optional): Data root path for flow.
meta_info_file (str): Path for meta information file.
val_partition (str): Validation partition types. 'REDS4' or
'official'.
            io_backend (dict): IO backend type and other kwargs.
            num_frame (int): Window size for input frames.
            gt_size (int): Cropped patch size for gt patches.
interval_list (list): Interval list for temporal augmentation.
random_reverse (bool): Random reverse input frames.
use_flip (bool): Use horizontal flips.
use_rot (bool): Use rotation (use vertical flip and transposing h
and w for implementation).
scale (bool): Scale, which will be added automatically.
"""
def __init__(self, opt):
super(REDSDataset, self).__init__()
self.opt = opt
self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(
opt['dataroot_lq'])
self.flow_root = Path(
opt['dataroot_flow']) if opt['dataroot_flow'] is not None else None
assert opt['num_frame'] % 2 == 1, (
f'num_frame should be odd number, but got {opt["num_frame"]}')
self.num_frame = opt['num_frame']
self.num_half_frames = opt['num_frame'] // 2
self.keys = []
with open(opt['meta_info_file'], 'r') as fin:
for line in fin:
folder, frame_num, _ = line.split(' ')
self.keys.extend(
[f'{folder}/{i:08d}' for i in range(int(frame_num))])
# remove the video clips used in validation
if opt['val_partition'] == 'REDS4':
val_partition = ['000', '011', '015', '020']
elif opt['val_partition'] == 'official':
val_partition = [f'{v:03d}' for v in range(240, 270)]
else:
raise ValueError(
f'Wrong validation partition {opt["val_partition"]}.'
f"Supported ones are ['official', 'REDS4'].")
self.keys = [
v for v in self.keys if v.split('/')[0] not in val_partition
]
# file client (io backend)
self.file_client = None
self.io_backend_opt = opt['io_backend']
self.is_lmdb = False
if self.io_backend_opt['type'] == 'lmdb':
self.is_lmdb = True
if self.flow_root is not None:
self.io_backend_opt['db_paths'] = [
self.lq_root, self.gt_root, self.flow_root
]
self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
else:
self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
self.io_backend_opt['client_keys'] = ['lq', 'gt']
# temporal augmentation configs
self.interval_list = opt['interval_list']
self.random_reverse = opt['random_reverse']
interval_str = ','.join(str(x) for x in opt['interval_list'])
logger = get_root_logger()
logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
f'random reverse is {self.random_reverse}.')
def __getitem__(self, index):
if self.file_client is None:
self.file_client = FileClient(
self.io_backend_opt.pop('type'), **self.io_backend_opt)
scale = self.opt['scale']
gt_size = self.opt['gt_size']
key = self.keys[index]
clip_name, frame_name = key.split('/') # key example: 000/00000000
center_frame_idx = int(frame_name)
# determine the neighboring frames
interval = random.choice(self.interval_list)
# ensure not exceeding the borders
start_frame_idx = center_frame_idx - self.num_half_frames * interval
end_frame_idx = center_frame_idx + self.num_half_frames * interval
# each clip has 100 frames starting from 0 to 99
while (start_frame_idx < 0) or (end_frame_idx > 99):
center_frame_idx = random.randint(0, 99)
start_frame_idx = (
center_frame_idx - self.num_half_frames * interval)
end_frame_idx = center_frame_idx + self.num_half_frames * interval
frame_name = f'{center_frame_idx:08d}'
neighbor_list = list(
range(center_frame_idx - self.num_half_frames * interval,
center_frame_idx + self.num_half_frames * interval + 1,
interval))
# random reverse
if self.random_reverse and random.random() < 0.5:
neighbor_list.reverse()
assert len(neighbor_list) == self.num_frame, (
f'Wrong length of neighbor list: {len(neighbor_list)}')
# get the GT frame (as the center frame)
if self.is_lmdb:
img_gt_path = f'{clip_name}/{frame_name}'
else:
img_gt_path = self.gt_root / clip_name / f'{frame_name}.png'
img_bytes = self.file_client.get(img_gt_path, 'gt')
img_gt = imfrombytes(img_bytes, float32=True)
# get the neighboring LQ frames
img_lqs = []
for neighbor in neighbor_list:
if self.is_lmdb:
img_lq_path = f'{clip_name}/{neighbor:08d}'
else:
img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'
img_bytes = self.file_client.get(img_lq_path, 'lq')
img_lq = imfrombytes(img_bytes, float32=True)
img_lqs.append(img_lq)
# get flows
if self.flow_root is not None:
img_flows = []
# read previous flows
for i in range(self.num_half_frames, 0, -1):
if self.is_lmdb:
flow_path = f'{clip_name}/{frame_name}_p{i}'
else:
flow_path = (
self.flow_root / clip_name / f'{frame_name}_p{i}.png')
img_bytes = self.file_client.get(flow_path, 'flow')
cat_flow = imfrombytes(
img_bytes, flag='grayscale',
float32=False) # uint8, [0, 255]
dx, dy = np.split(cat_flow, 2, axis=0)
flow = dequantize_flow(
dx, dy, max_val=20,
denorm=False) # we use max_val 20 here.
img_flows.append(flow)
# read next flows
for i in range(1, self.num_half_frames + 1):
if self.is_lmdb:
flow_path = f'{clip_name}/{frame_name}_n{i}'
else:
flow_path = (
self.flow_root / clip_name / f'{frame_name}_n{i}.png')
img_bytes = self.file_client.get(flow_path, 'flow')
cat_flow = imfrombytes(
img_bytes, flag='grayscale',
float32=False) # uint8, [0, 255]
dx, dy = np.split(cat_flow, 2, axis=0)
flow = dequantize_flow(
dx, dy, max_val=20,
denorm=False) # we use max_val 20 here.
img_flows.append(flow)
# for random crop, here, img_flows and img_lqs have the same
# spatial size
img_lqs.extend(img_flows)
# randomly crop
img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,
img_gt_path)
if self.flow_root is not None:
            img_lqs, img_flows = (img_lqs[:self.num_frame],
                                  img_lqs[self.num_frame:])
# augmentation - flip, rotate
img_lqs.append(img_gt)
if self.flow_root is not None:
img_results, img_flows = augment(img_lqs, self.opt['use_flip'],
self.opt['use_rot'], img_flows)
else:
img_results = augment(img_lqs, self.opt['use_flip'],
self.opt['use_rot'])
img_results = img2tensor(img_results)
img_lqs = torch.stack(img_results[0:-1], dim=0)
img_gt = img_results[-1]
if self.flow_root is not None:
img_flows = img2tensor(img_flows)
# add the zero center flow
img_flows.insert(self.num_half_frames,
torch.zeros_like(img_flows[0]))
img_flows = torch.stack(img_flows, dim=0)
# img_lqs: (t, c, h, w)
# img_flows: (t, 2, h, w)
# img_gt: (c, h, w)
# key: str
if self.flow_root is not None:
return {'lq': img_lqs, 'flow': img_flows, 'gt': img_gt, 'key': key}
else:
return {'lq': img_lqs, 'gt': img_gt, 'key': key}
def __len__(self):
return len(self.keys)
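# Illustrative sketch (not part of the original module): how __getitem__ above builds
# the temporal window of LQ frame indices around a center frame. Arguments are
# hypothetical example values.
def _example_neighbor_window(center_frame_idx=50, num_half_frames=2, interval=3):
    return list(
        range(center_frame_idx - num_half_frames * interval,
              center_frame_idx + num_half_frames * interval + 1,
              interval))
# _example_neighbor_window() -> [44, 47, 50, 53, 56]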
|
import os
import sys
import re
import json
import platform
try:
from pathlib import Path
from colorclass import Color
from terminaltables import SingleTable
import semver
except ImportError:
print("ERROR: Need to install required modules.")
print("python3 -m pip install colorclass terminaltables semver")
sys.exit(1)
VERSION_REGEX = re.compile(r'^(\d+)\.(\d+)\.(\d+)$')
VERSION_REGEX_NOCAP = r'\d+\.\d+\.\d+'
COMPARATOR_REGEX = r'(?:<|>)=?'
RANGE = f'({VERSION_REGEX_NOCAP})\\s+' + \
f'({COMPARATOR_REGEX})\\s+' + \
'v\\s+' + \
f'({COMPARATOR_REGEX})\\s+' + \
f'({VERSION_REGEX_NOCAP})'
VRANGE_REGEX = re.compile(RANGE)
CMP = {
"<": [-1],
"<=": [-1, 0],
">": [1],
">=": [0, 1]
}
# will handle all dependencies
PROJECT_DEPS = {}
def get_versions_from_home(elm_home):
dirs = filter(
lambda d: os.path.isdir(os.path.join(elm_home, d)),
[dir for dir in os.listdir(elm_home)]
)
return [v for v in dirs if re.match(VERSION_REGEX_NOCAP, v)]
def version_in_range(low, low_op, version, high_op, high):
compare_low = semver.compare(low, version) in CMP[low_op]
compare_high = semver.compare(version, high) in CMP[high_op]
return compare_low and compare_high
def get_highest_version_from_dir(dir, cmp_version):
low, low_op, high_op, high = VRANGE_REGEX.findall(cmp_version)[0]
all_versions = [v for v in get_versions_from_home(dir)]
return max(list(filter(
lambda v: version_in_range(low, low_op, v, high_op, high),
all_versions
)))
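# Illustrative sketch (not part of the original script): an elm.json version range such
# as "1.0.0 <= v < 2.0.0" is parsed by VRANGE_REGEX and then checked via semver.compare.
# The range and version below are hypothetical.
def _example_version_check():
    low, low_op, high_op, high = VRANGE_REGEX.findall("1.0.0 <= v < 2.0.0")[0]
    return version_in_range(low, low_op, "1.5.0", high_op, high)  # -> True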
def add_dep_to_dict(pkg_home, who, what, pkg, version, type):
with open(
os.path.join(pkg_home, who, what, version, "elm.json"), "r"
) as dep_file:
license = json.load(dep_file)["license"]
PROJECT_DEPS[pkg] = {
"version": version,
"license": license,
"type": type
}
def get_project_dependencies(json_directory):
json_path = os.path.join(
json_directory if json_directory else os.getcwd(),
"elm.json"
)
if platform.system() == "Windows":
ELM_HOME = os.path.join(str(Path.home()), "AppData", "Roaming", "elm")
else:
ELM_HOME = os.path.join(str(Path.home()), ".elm")
ELM_HOME = os.getenv("ELM_HOME", ELM_HOME)
with open(json_path, "r") as elm_file:
json_data = json.load(elm_file)
dependencies = json_data["dependencies"]
type = json_data["type"]
elm_version = json_data["elm-version"]
if type == "package":
elm_version = get_highest_version_from_dir(ELM_HOME, elm_version)
package_home = os.path.join(ELM_HOME, elm_version, "packages")
if not os.path.exists(package_home):
print(f"I'm unable to find your package home: {package_home}")
        sys.exit(1)
if type == "application":
for type in ["direct", "indirect"]:
deps = dependencies[type]
for pkg, ver in deps.items():
who, what = pkg.split("/")
                add_dep_to_dict(package_home, who, what, pkg, ver, type)
elif type == "package":
for pkg, ver in dependencies.items():
who, what = pkg.split("/")
high_ver = get_highest_version_from_dir(
os.path.join(package_home, who, what),
ver
)
add_dep_to_dict(package_home, who, what, pkg, high_ver, "direct")
else:
print(f"""Unknown Elm project type of {type}.
Expected your elm.json to have either:
\"type\": \"application\"
or
\"type\": \"package\"
""")
        sys.exit(1)
return PROJECT_DEPS
def output_tables(deps):
# Build Table Headers
lsc_count_data = [
[Color("{red}License{/red}"), Color("{red}Count{/red}")]
]
large_table_data = [
[
Color("{red}Package{/red}"),
Color("{red}Version{/red}"),
Color("{red}License{/red}"),
Color("{red}Type{/red}")
]
]
# Build table bodies
packages = list(deps.keys())
lsc_count_data = {"total": 0, "direct": 0, "indirect": 0}
for pkg in packages:
pkg_data = deps[pkg]
license = pkg_data["license"]
if license not in lsc_count_data.keys():
lsc_count_data[license] = 0
lsc_count_data[license] += 1
lsc_count_data["total"] += 1
lsc_count_data[pkg_data["type"]] += 1
large_table_data.append(
[pkg, pkg_data["version"], license, pkg_data["type"]]
)
for l, c in lsc_count_data.items():
if l not in ["total", "direct", "indirect"]:
lsc_count_data.append([l, str(c)])
# Format Tables
lsc_table = SingleTable(lsc_count_data)
lsc_table.inner_row_border = True
lsc_table.justify_columns = {0: 'center', 1: 'center'}
print("Dependencies:")
print(f"Total: {lsc_count_data["total"]}")
print(f"Direct: {lsc_count_data["direct"]}")
print(f"Indirect: {lsc_count_data["indirect"]}")
print(lsc_table.table)
large_table = SingleTable(large_table_data)
large_table.inner_row_border = True
large_table.justify_columns = {
0: 'center',
1: 'center',
2: 'center',
3: 'center'
}
print(large_table.table)
|
import os
import sys
import re
import json
import platform
try:
from pathlib import Path
from colorclass import Color
from terminaltables import SingleTable
import semver
except ImportError:
print("ERROR: Need to install required modules.")
print("python3 -m pip install colorclass terminaltables semver")
sys.exit(1)
VERSION_REGEX = re.compile(r'^(\d+)\.(\d+)\.(\d+)$')
VERSION_REGEX_NOCAP = r'\d+\.\d+\.\d+'
COMPARATOR_REGEX = r'(?:<|>)=?'
RANGE = f'({VERSION_REGEX_NOCAP})\\s+' + \
f'({COMPARATOR_REGEX})\\s+' + \
'v\\s+' + \
f'({COMPARATOR_REGEX})\\s+' + \
f'({VERSION_REGEX_NOCAP})'
VRANGE_REGEX = re.compile(RANGE)
CMP = {
"<": [-1],
"<=": [-1, 0],
">": [1],
">=": [0, 1]
}
# will handle all dependencies
PROJECT_DEPS = {}
def get_versions_from_home(elm_home):
dirs = filter(
lambda d: os.path.isdir(os.path.join(elm_home, d)),
[dir for dir in os.listdir(elm_home)]
)
return [v for v in dirs if re.match(VERSION_REGEX_NOCAP, v)]
def version_in_range(low, low_op, version, high_op, high):
compare_low = semver.compare(low, version) in CMP[low_op]
compare_high = semver.compare(version, high) in CMP[high_op]
return compare_low and compare_high
def get_highest_version_from_dir(dir, cmp_version):
low, low_op, high_op, high = VRANGE_REGEX.findall(cmp_version)[0]
all_versions = [v for v in get_versions_from_home(dir)]
return max(list(filter(
lambda v: version_in_range(low, low_op, v, high_op, high),
all_versions
)))
def add_dep_to_dict(pkg_home, who, what, pkg, version, type):
with open(
os.path.join(pkg_home, who, what, version, "elm.json"), "r"
) as dep_file:
license = json.load(dep_file)["license"]
PROJECT_DEPS[pkg] = {
"version": version,
"license": license,
"type": type
}
def get_project_dependencies(json_directory):
json_path = os.path.join(
json_directory if json_directory else os.getcwd(),
"elm.json"
)
if platform.system() == "Windows":
ELM_HOME = os.path.join(str(Path.home()), "AppData", "Roaming", "elm")
else:
ELM_HOME = os.path.join(str(Path.home()), ".elm")
ELM_HOME = os.getenv("ELM_HOME", ELM_HOME)
with open(json_path, "r") as elm_file:
json_data = json.load(elm_file)
dependencies = json_data["dependencies"]
type = json_data["type"]
elm_version = json_data["elm-version"]
if type == "package":
elm_version = get_highest_version_from_dir(ELM_HOME, elm_version)
package_home = os.path.join(ELM_HOME, elm_version, "packages")
if not os.path.exists(package_home):
print(f"I'm unable to find your package home: {package_home}")
        sys.exit(1)
if type == "application":
for type in ["direct", "indirect"]:
deps = dependencies[type]
for pkg, ver in deps.items():
who, what = pkg.split("/")
                add_dep_to_dict(package_home, who, what, pkg, ver, type)
elif type == "package":
for pkg, ver in dependencies.items():
who, what = pkg.split("/")
high_ver = get_highest_version_from_dir(
os.path.join(package_home, who, what),
ver
)
add_dep_to_dict(package_home, who, what, pkg, high_ver, "direct")
else:
print(f"""Unknown Elm project type of {type}.
Expected your elm.json to have either:
\"type\": \"application\"
or
\"type\": \"package\"
""")
        sys.exit(1)
return PROJECT_DEPS
def output_tables(deps):
# Build Table Headers
lsc_count_data = [
[Color("{red}License{/red}"), Color("{red}Count{/red}")]
]
large_table_data = [
[
Color("{red}Package{/red}"),
Color("{red}Version{/red}"),
Color("{red}License{/red}"),
Color("{red}Type{/red}")
]
]
# Build table bodies
packages = list(deps.keys())
lsc_count_data = {"total": 0, "direct": 0, "indirect": 0}
for pkg in packages:
pkg_data = deps[pkg]
license = pkg_data["license"]
if license not in lsc_count_data.keys():
lsc_count_data[license] = 0
lsc_count_data[license] += 1
lsc_count_data["total"] += 1
lsc_count_data[pkg_data["type"]] += 1
large_table_data.append(
[pkg, pkg_data["version"], license, pkg_data["type"]]
)
for l, c in lsc_count_data.items():
if l not in ["total", "direct", "indirect"]:
lsc_count_data.append([l, str(c)])
# Format Tables
lsc_table = SingleTable(lsc_count_data)
lsc_table.inner_row_border = True
lsc_table.justify_columns = {0: 'center', 1: 'center'}
print("Dependencies:")
print(f"Total: {lsc_count_data['total']}")
print(f"Direct: {lsc_count_data['direct']}")
print(f"Indirect: {lsc_count_data['indirect']}")
print(lsc_table.table)
large_table = SingleTable(large_table_data)
large_table.inner_row_border = True
large_table.justify_columns = {
0: 'center',
1: 'center',
2: 'center',
3: 'center'
}
print(large_table.table)
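# Illustrative usage sketch (assumption; the original module defines no entry point):
# read elm.json from an optional directory argument and print both summary tables.
if __name__ == "__main__":
    _elm_json_dir = sys.argv[1] if len(sys.argv) > 1 else None
    output_tables(get_project_dependencies(_elm_json_dir))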
|
"""
Merges the 3 Tesla Dashcam and Sentry camera video files into 1 video. It
then further concatenates the files together to make 1 movie.
"""
import argparse
import logging
import os
import sys
from datetime import datetime, timedelta, timezone
from fnmatch import fnmatch
from glob import glob
from pathlib import Path
from re import search
from shlex import split as shlex_split
from shutil import which
from subprocess import CalledProcessError, run
from tempfile import mkstemp
from time import sleep, time as timestamp
from typing import List, Optional
import requests
from dateutil.parser import isoparse
from psutil import disk_partitions
from tzlocal import get_localzone
_LOGGER = logging.getLogger(__name__)
# TODO: Move everything into classes and separate files. For example,
# update class, font class (for timestamp), folder class, clip class (
# combining front, left, and right info), file class (for individual file).
# Clip class would then have to merge the camera clips, folder class would
# have to concatenate the merged clips. Settings class to take in all settings
# TODO: Create kind of logger or output classes for output. That then allows
# different ones to be created based on where it should go to (stdout,
# log file, ...).
VERSION = {"major": 0, "minor": 1, "patch": 16, "beta": -1}
VERSION_STR = "v{major}.{minor}.{patch}".format(
major=VERSION["major"], minor=VERSION["minor"], patch=VERSION["patch"]
)
if VERSION["beta"] > -1:
VERSION_STR = VERSION_STR + "b{beta}".format(beta=VERSION["beta"])
MONITOR_SLEEP_TIME = 5
GITHUB = {
"URL": "https://api.github.com",
"owner": "ehendrix23",
"repo": "tesla_dashcam",
}
FFMPEG = {
"darwin": "ffmpeg",
"win32": "ffmpeg.exe",
"cygwin": "ffmpeg",
"linux": "ffmpeg",
"freebsd11": "ffmpeg",
}
# noinspection PyPep8
MOVIE_HOMEDIR = {
"darwin": "Movies/Tesla_Dashcam",
"win32": "Videos\Tesla_Dashcam",
"cygwin": "Videos/Tesla_Dashcam",
"linux": "Videos/Tesla_Dashcam",
"freebsd11": "Videos/Tesla_Dashcam",
}
DEFAULT_CLIP_HEIGHT = 960
DEFAULT_CLIP_WIDTH = 1280
MOVIE_QUALITY = {
"HIGH": "18",
"MEDIUM": "20",
"LOW": "23",
"LOWER": "28",
"LOWEST": "33",
}
MOVIE_ENCODING = {
"x264": "libx264",
"x264_nvidia": "h264_nvenc",
"x264_mac": "h264_videotoolbox",
"x264_intel": "h264_qsv",
"x264_RPi": "h264_omx",
"x265": "libx265",
"x265_nvidia": "hevc_nvenc",
"x265_mac": "hevc_videotoolbox",
"x265_intel": "hevc_qsv",
"x265_RPi": "h265",
}
DEFAULT_FONT = {
"darwin": "/Library/Fonts/Arial Unicode.ttf",
"win32": "/Windows/Fonts/arial.ttf",
"cygwin": "/cygdrive/c/Windows/Fonts/arial.ttf",
"linux": "/usr/share/fonts/truetype/freefont/FreeSans.ttf",
"freebsd11": "/usr/share/local/fonts/freefont-ttf/FreeSans.ttf",
}
HALIGN = {"LEFT": "10", "CENTER": "(w/2-text_w/2)", "RIGHT": "(w-text_w)"}
VALIGN = {"TOP": "10", "MIDDLE": "(h/2-(text_h/2))", "BOTTOM": "(h-(text_h*2))"}
TOASTER_INSTANCE = None
class Font(object):
""" Font Class
"""
def __init__(self, layout, font=None, size=None, color=None):
self._layout = layout
self._font = font
self._size = size
self._color = color
self._halign = None
self._valign = None
self._xpos = None
self._ypos = None
@property
def font(self):
return self._font
@font.setter
def font(self, value):
self._font = value
@property
def size(self):
if hasattr(self._layout, "_font_size"):
return getattr(self._layout, "_font_size")()
return (
int(max(16, 16 * self._layout.scale)) if self._size is None else self._size
)
@size.setter
def size(self, value):
self._size = value
@property
def color(self):
return self._color
@color.setter
def color(self, value):
self._color = value
@property
def halign(self):
if hasattr(self._layout, "_font_halign"):
return getattr(self._layout, "_font_halign")()
return HALIGN.get(self._halign, self._halign)
@halign.setter
def halign(self, value):
self._halign = value
@property
def valign(self):
if hasattr(self._layout, "_font_valign"):
return getattr(self._layout, "_font_valign")()
return VALIGN.get(self._valign, self._valign)
@valign.setter
def valign(self, value):
self._valign = value
@property
def xpos(self):
return self._xpos
@xpos.setter
def xpos(self, value):
self._xpos = value
@property
def ypos(self):
return self._ypos
@ypos.setter
def ypos(self, value):
self._ypos = value
class Camera(object):
""" Camera Class
"""
def __init__(self, layout, camera):
self._layout = layout
self._camera = camera
self._include = True
self._width = 1280
self._height = 960
self._xpos = 0
self._ypos = 0
self._scale = 0
self._options = ""
@property
def camera(self):
return self._camera
@camera.setter
def camera(self, value):
self._camera = value
@property
def include(self):
return self._include
@include.setter
def include(self, value):
self._include = value
@property
def width(self):
return (
getattr(self._layout, "_" + self._camera + "_width")()
if hasattr(self._layout, "_" + self._camera + "_width")
else int(self._width * self.scale * self.include)
)
@width.setter
def width(self, value):
self._width = value
@property
def height(self):
return (
getattr(self._layout, "_" + self._camera + "_height")()
if hasattr(self._layout, "_" + self._camera + "_height")
else int(self._height * self.scale * self.include)
)
@height.setter
def height(self, value):
self._height = value
@property
def xpos(self):
if hasattr(self._layout, "_" + self._camera + "_xpos"):
return getattr(self._layout, "_" + self._camera + "_xpos")() * self.include
return self._xpos * self.include
@xpos.setter
def xpos(self, value):
self._xpos = value
@property
def ypos(self):
if hasattr(self._layout, "_" + self._camera + "_ypos"):
return getattr(self._layout, "_" + self._camera + "_ypos")() * self.include
return self._ypos * self.include
@ypos.setter
def ypos(self, value):
self._ypos = value
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, value):
if value is None:
self._scale = None
elif len(str(value).split("x")) == 1:
# Scale provided is a multiplier
self._scale = float(str(value).split("x")[0])
else:
# Scale is a resolution.
self.width = int(str(value).split("x")[0])
self.height = int(str(value).split("x")[1])
self._scale = 1
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
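# Illustrative sketch (not part of the original tool): Camera.scale accepts either a
# numeric multiplier or a "WIDTHxHEIGHT" string, as implemented in the setter above.
def _camera_scale_example():
    cam = Camera(layout=None, camera="front")
    cam.scale = 0.5             # multiplier: the default 1280x960 clip becomes 640x480
    half = (cam.width, cam.height)
    cam.scale = "640x480"       # explicit resolution: width/height set, scale reset to 1
    explicit = (cam.width, cam.height)
    return half, explicit       # -> ((640, 480), (640, 480))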
class MovieLayout(object):
""" Main Layout class
"""
def __init__(self):
self._cameras = {
"Front": Camera(layout=self, camera="front"),
"Left": Camera(layout=self, camera="left"),
"Right": Camera(layout=self, camera="right"),
"Rear": Camera(layout=self, camera="rear"),
}
self._font = Font(layout=self)
self._swap_left_right = False
self._swap_front_rear = False
self._perspective = False
self._font.halign = "CENTER"
self._font.valign = "BOTTOM"
def cameras(self, camera):
return self._cameras.get(camera, self._cameras)
@property
def font(self):
return self._font
@font.setter
def font(self, value):
self._font = value
@property
def swap_left_right(self):
return self._swap_left_right
@swap_left_right.setter
def swap_left_right(self, value):
self._swap_left_right = value
@property
def swap_front_rear(self):
return self._swap_front_rear
@swap_front_rear.setter
def swap_front_rear(self, value):
self._swap_front_rear = value
@property
def perspective(self):
return self._perspective
@perspective.setter
def perspective(self, new_perspective):
self._perspective = new_perspective
if self._perspective:
self.cameras("Left").options = (
", pad=iw+4:3/2*ih:-1:ih/8:0x00000000, "
"perspective=x0=0:y0=1*H/5:x1=W:y1=-3/44*H:"
"x2=0:y2=6*H/5:x3=7/8*W:y3=5*H/6:sense=destination"
)
self.cameras("Right").options = (
", pad=iw+4:3/2*ih:-1:ih/8:0x00000000,"
"perspective=x0=0:y1=1*H/5:x1=W:y0=-3/44*H:"
"x2=1/8*W:y3=6*H/5:x3=W:y2=5*H/6:sense=destination"
)
else:
self.cameras("Left").options = ""
self.cameras("Right").options = ""
@property
def scale(self):
# Return scale of new video based on 1280x960 video = scale:1
return (self.video_height * self.video_width) / (1280 * 960)
@scale.setter
def scale(self, scale):
self.cameras("Front").scale = scale
self.cameras("Left").scale = scale
self.cameras("Right").scale = scale
self.cameras("Rear").scale = scale
@property
def video_width(self):
return int(
max(
self.cameras("Front").xpos + self.cameras("Front").width,
self.cameras("Left").xpos + self.cameras("Left").width,
self.cameras("Right").xpos + self.cameras("Right").width,
self.cameras("Rear").xpos + self.cameras("Rear").width,
)
)
@property
def video_height(self):
        perspective_adjustment = 3 / 2 if self.perspective else 1
        return int(
            max(
                self.cameras("Front").ypos + self.cameras("Front").height,
                perspective_adjustment * self.cameras("Left").ypos
                + self.cameras("Left").height,
                perspective_adjustment * self.cameras("Right").ypos
+ self.cameras("Right").height,
self.cameras("Rear").ypos + self.cameras("Rear").height,
)
)
@property
def center_xpos(self):
return int(self.video_width / 2)
@property
def center_ypos(self):
return int(self.video_height / 2)
class FullScreen(MovieLayout):
""" FullScreen Movie Layout
[FRONT_CAMERA]
[LEFT_CAMERA][REAR_CAMERA][RIGHT_CAMERA]
"""
def __init__(self):
super().__init__()
self.scale = 1 / 2
@property
def video_width(self):
return int(
max(
self.cameras("Front").width,
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width,
)
)
@property
def video_height(self):
        perspective_adjustment = 3 / 2 if self.perspective else 1
        return int(
            self.cameras("Front").height
            + max(
                perspective_adjustment * self.cameras("Left").height,
                self.cameras("Rear").height,
                perspective_adjustment * self.cameras("Right").height,
)
)
def _front_height(self):
# For height keep same ratio of 4/3
return int(self.cameras("Front").width / 4 * 3)
def _front_xpos(self):
# Make sure that front is placed in the middle
return (
max(
0,
self.center_xpos
- int(
(
self.cameras("Left").width
+ self.cameras("Front").width
+ self.cameras("Right").width
)
/ 2
)
+ self.cameras("Left").width,
)
* self.cameras("Front").include
)
def _left_xpos(self):
return (
max(
0,
self.center_xpos
- int(
(
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width
)
/ 2
),
)
* self.cameras("Left").include
)
def _left_ypos(self):
return (
self.cameras("Front").ypos + self.cameras("Front").height
) * self.cameras("Left").include
def _rear_xpos(self):
return (
max(
0,
self.center_xpos
- int(
(
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width
)
/ 2
)
+ self.cameras("Left").width,
)
* self.cameras("Rear").include
)
def _rear_ypos(self):
return (
self.cameras("Front").ypos + self.cameras("Front").height
) * self.cameras("Rear").include
def _right_xpos(self):
return (
max(
0,
self.center_xpos
- int(
(
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width
)
/ 2
)
+ self.cameras("Left").width
+ self.cameras("Rear").width,
)
* self.cameras("Right").include
)
def _right_ypos(self):
return (
self.cameras("Front").ypos + self.cameras("Front").height
) * self.cameras("Right").include
# noinspection PyProtectedMember
class WideScreen(FullScreen):
""" WideScreen Movie Layout
[ FRONT_CAMERA ]
[LEFT_CAMERA][REAR_CAMERA][RIGHT_CAMERA]
"""
def __init__(self):
super().__init__()
self.scale = 1 / 2
        # Set front scale to None so we know if it was overridden or not.
self.cameras("Front").scale = None
# Only front_width has to be adjusted as by default width would be left+rear+right instead of normal scale.
def _front_width(self):
return (
(
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width
)
* self.cameras("Front").include
if self.cameras("Front").scale is None
else int(
(
self.cameras("Front")._width
* self.cameras("Front").scale
* self.cameras("Front").include
)
)
)
class Cross(FullScreen):
""" Cross Movie Layout
[FRONT_CAMERA]
[LEFT_CAMERA][RIGHT_CAMERA]
[REAR_CAMERA]
"""
def __init__(self):
super().__init__()
self.scale = 1 / 2
@property
def video_width(self):
return max(
self.cameras("Front").width,
self.cameras("Left").width + self.cameras("Right").width,
self.cameras("Rear").width,
)
@property
def video_height(self):
if self.perspective:
height = int(
max(
3 / 2 * self.cameras("Left").height,
3 / 2 * self.cameras("Right").height,
)
)
if (
self.cameras("Left").include
and self.cameras("Left").scale >= self.cameras("Rear").scale
and self.cameras("Right").include
and self.cameras("Right").scale >= self.cameras("Rear").scale
and self.cameras("Rear").include
):
height = int(height / 3 * 2)
height += self.cameras("Rear").height
else:
height = (
max(self.cameras("Left").height, self.cameras("Right").height)
+ self.cameras("Rear").height
)
return int(height + self.cameras("Front").height)
def _front_xpos(self):
return (
int(max(0, self.center_xpos - (self.cameras("Front").width / 2)))
* self.cameras("Front").include
)
def _left_xpos(self):
return (
max(
0,
self.center_xpos
- int((self.cameras("Left").width + self.cameras("Right").width) / 2),
)
* self.cameras("Left").include
)
def _left_ypos(self):
return (
self.cameras("Front").height
+ int(
(
max(self.cameras("Left").height, self.cameras("Right").height)
- self.cameras("Left").height
)
/ 2
)
) * self.cameras("Left").include
def _right_xpos(self):
return (
max(
0,
self.center_xpos
- int((self.cameras("Left").width + self.cameras("Right").width) / 2)
+ self.cameras("Left").width,
)
* self.cameras("Right").include
)
def _right_ypos(self):
return (
self.cameras("Front").height
+ int(
(
max(self.cameras("Left").height, self.cameras("Right").height)
- self.cameras("Right").height
)
/ 2
)
) * self.cameras("Right").include
def _rear_xpos(self):
return (
int(max(0, self.center_xpos - (self.cameras("Rear").width / 2)))
* self.cameras("Rear").include
)
def _rear_ypos(self):
return int(max(0, self.video_height - self.cameras("Rear").height))
# noinspection PyProtectedMember
class Diamond(Cross):
""" Diamond Movie Layout
[FRONT_CAMERA]
[LEFT_CAMERA] [RIGHT_CAMERA]
[REAR_CAMERA]
"""
def __init__(self):
super().__init__()
self.scale = 1 / 2
self._font.valign = "MIDDLE"
def _font_halign(self):
if self._font._halign == "CENTER":
# Change alignment to left or right if one of the left/right cameras is excluded.
if (self.cameras("Left").include and not self.cameras("Right").include) or (
self.cameras("Right").include and not self.cameras("Left").include
):
x_pos = int(
max(
self.cameras("Front").xpos + self.cameras("Front").width / 2,
self.cameras("Rear").xpos + self.cameras("Rear").width / 2,
)
)
return f"({x_pos} - text_w / 2)"
return HALIGN.get(self._font._halign, self._font._halign)
def _font_valign(self):
if self._font._valign == "MIDDLE":
if self.cameras("Front").include:
return (
                    f'({self.cameras("Front").ypos + self.cameras("Front").height} + 5)'
)
elif self.cameras("Rear").include:
                return f'({self.cameras("Rear").ypos} - 5 - text_h)'
return VALIGN.get(self._font._valign, self._font._valign)
def _font_size(self):
# For this layout the video height has to include font size. But default for calculating
# font size is based on video height.
        # Thus overriding font size to get video height without font size to figure out scaling.
if self.font._size is None:
scale = (
self._video_height(include_fontsize=False)
* self.video_width
/ (1280 * 960)
)
return int(max(16, 16 * scale))
else:
return self.font.size
@property
def video_width(self):
return (
max(self.cameras("Front").width, self.cameras("Rear").width)
+ self.cameras("Left").width
+ self.cameras("Right").width
)
def _video_height(self, include_fontsize=True):
perspective = 3 / 2 if self.perspective else 1
fontsize = self.font.size if include_fontsize else 0
return int(
max(
perspective
* max(self.cameras("Left").height, self.cameras("Right").height),
self.cameras("Front").height + self.cameras("Rear").height + fontsize,
)
)
@property
def video_height(self):
return self._video_height(include_fontsize=True)
def _front_xpos(self):
return (
self.cameras("Left").width
+ int(
(
max(self.cameras("Front").width, self.cameras("Rear").width)
- self.cameras("Front").width
)
/ 2
)
) * self.cameras("Front").include
def _left_xpos(self):
return 0
def _left_ypos(self):
return max(0, self.center_ypos - int(self.cameras("Left").height / 2))
def _right_xpos(self):
return max(
self.cameras("Front").xpos + self.cameras("Front").width,
self.cameras("Rear").xpos + self.cameras("Rear").width,
)
def _right_ypos(self):
return max(0, self.center_ypos - int(self.cameras("Right").height / 2))
def _rear_xpos(self):
return (
self.cameras("Left").width
+ int(
(
max(self.cameras("Front").width, self.cameras("Rear").width)
- self.cameras("Rear").width
)
/ 2
)
) * self.cameras("Rear").include
class MyArgumentParser(argparse.ArgumentParser):
def convert_arg_line_to_args(self, arg_line):
# Remove comments.
return shlex_split(arg_line, comments=True)
def args_to_dict(self, arguments, default):
argument_list = []
if arguments is None:
return argument_list
for argument in arguments:
argument_dict = {}
for argument_value in argument:
if "=" in argument_value:
key = argument_value.split("=")[0].lower()
value = (
argument_value.split("=")[1].strip()
if argument_value.split("=")[1].strip() != ""
else None
)
else:
key = default
value = argument_value
argument_dict.update({key: value})
argument_list.append(argument_dict)
return argument_list
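# Illustrative sketch (not part of the original tool): args_to_dict turns "key=value"
# tokens into per-argument dicts, with bare tokens falling back to `default`. The
# argument values below are hypothetical.
def _args_to_dict_example():
    parser = MyArgumentParser()
    return parser.args_to_dict([["camera=front", "1.5x"]], default="scale")
# -> [{'camera': 'front', 'scale': '1.5x'}]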
# noinspection PyCallByClass,PyProtectedMember
class SmartFormatter(argparse.HelpFormatter):
""" Formatter for argument help. """
def _split_lines(self, text, width):
""" Provide raw output allowing for prettier help output """
if text.startswith("R|"):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def _get_help_string(self, action):
""" Call default help string """
return argparse.ArgumentDefaultsHelpFormatter._get_help_string(self, action)
def search_dict(
match_value: object = None, key: str = None, search_list: List[dict] = None
) -> Optional[dict]:
"""
Returns the 1st element in a list containing dictionaries
where the value of key provided matches the value provided.
:param match_value: value to match upon (search for)
:type match_value: object
:param key: dictionary key to use for the match
:type key: str
:param search_list: List containing dictionary objects in which to search
:type search_list: List[dict]
:return: Dictionary object that matches
:rtype: dict
"""
if key is None or search_list is None:
return None
if match_value is None:
return next(
(element for element in search_list if element.get(key) is None), None
)
return next(
(element for element in search_list if element.get(key) == match_value), None
)
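# Illustrative sketch (not part of the original tool): search_dict returns the first
# dict whose `key` equals `match_value`. The list below is hypothetical.
def _search_dict_example():
    folders = [{"name": "SavedClips"}, {"name": "SentryClips", "count": 3}]
    return search_dict(match_value="SentryClips", key="name", search_list=folders)
# -> {'name': 'SentryClips', 'count': 3}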
def check_latest_release(include_beta):
""" Checks GitHub for latest release """
url = "{url}/repos/{owner}/{repo}/releases".format(
url=GITHUB["URL"], owner=GITHUB["owner"], repo=GITHUB["repo"]
)
if not include_beta:
url = url + "/latest"
try:
releases = requests.get(url)
except requests.exceptions.RequestException as exc:
print("Unable to check for latest release: {exc}".format(exc=exc))
return None
release_data = releases.json()
# If we include betas then we would have received a list, thus get 1st
# element as that is the latest release.
if include_beta:
release_data = release_data[0]
return release_data
def get_tesladashcam_folder():
""" Check if there is a drive mounted with the Tesla DashCam folder."""
for partition in disk_partitions(all=False):
if "cdrom" in partition.opts or partition.fstype == "":
continue
teslacamfolder = os.path.join(partition.mountpoint, "TeslaCam")
if os.path.isdir(teslacamfolder):
_LOGGER.debug(f"Folder TeslaCam found on partition {partition.mountpoint}.")
return teslacamfolder, partition.mountpoint
_LOGGER.debug(f"No TeslaCam folder on partition {partition.mountpoint}.")
return None, None
def get_movie_files(source_folder, exclude_subdirs, video_settings):
""" Find all the clip files within folder (and subfolder if requested) """
folder_list = {}
total_folders = 0
for pathname in source_folder:
if os.path.isdir(pathname):
isfile = False
if exclude_subdirs:
# Retrieve all the video files in current path:
search_path = os.path.join(pathname, "*.mp4")
files = [
filename
for filename in glob(search_path)
if not os.path.basename(filename).startswith(".")
]
print(f"Discovered {len(files)} files in {pathname}")
else:
                # Search all subfolders.
files = []
for folder, _, filenames in os.walk(pathname, followlinks=True):
total_folders = total_folders + 1
for filename in (
filename
for filename in filenames
if not os.path.basename(filename).startswith(".")
and fnmatch(filename, "*.mp4")
):
files.append(os.path.join(folder, filename))
print(
f"Discovered {total_folders} folders containing total of {len(files)} files in {pathname}"
)
else:
files = [pathname]
isfile = True
# Now go through and get timestamps etc..
for file in sorted(files):
# Strip path so that we just have the filename.
movie_folder, movie_filename = os.path.split(file)
# And now get the timestamp of the filename.
filename_timestamp = movie_filename.rsplit("-", 1)[0]
movie_file_list = folder_list.get(movie_folder, {})
# Check if we already processed this timestamp.
if movie_file_list.get(filename_timestamp) is not None:
# Already processed this timestamp, moving on.
continue
_LOGGER.debug(
f"Checking camera files in folder {movie_folder} with timestamp {filename_timestamp}"
)
video_info = {
"front_camera": {
"filename": None,
"duration": None,
"timestamp": None,
"include": False,
},
"left_camera": {
"filename": None,
"duration": None,
"timestamp": None,
"include": False,
},
"right_camera": {
"filename": None,
"duration": None,
"timestamp": None,
"include": False,
},
"rear_camera": {
"filename": None,
"duration": None,
"timestamp": None,
"include": False,
},
}
front_filename = str(filename_timestamp) + "-front.mp4"
front_path = os.path.join(movie_folder, front_filename)
left_filename = str(filename_timestamp) + "-left_repeater.mp4"
left_path = os.path.join(movie_folder, left_filename)
right_filename = str(filename_timestamp) + "-right_repeater.mp4"
right_path = os.path.join(movie_folder, right_filename)
rear_filename = str(filename_timestamp) + "-back.mp4"
rear_path = os.path.join(movie_folder, rear_filename)
# Get meta data for each video to determine creation time and duration.
metadata = get_metadata(
video_settings["ffmpeg_exec"],
[front_path, left_path, right_path, rear_path],
)
# Move on to next one if nothing received.
if not metadata:
continue
# Get the longest duration:
duration = 0
video_timestamp = None
for item in metadata:
_, filename = os.path.split(item["filename"])
if filename == front_filename:
camera = "front_camera"
video_filename = front_filename
include_clip = (
item["include"]
if video_settings["video_layout"].cameras("Front").include
else False
)
elif filename == left_filename:
camera = "left_camera"
video_filename = left_filename
include_clip = (
item["include"]
if video_settings["video_layout"].cameras("Left").include
else False
)
elif filename == right_filename:
camera = "right_camera"
video_filename = right_filename
include_clip = (
item["include"]
if video_settings["video_layout"].cameras("Right").include
else False
)
elif filename == rear_filename:
camera = "rear_camera"
video_filename = rear_filename
include_clip = (
item["include"]
if video_settings["video_layout"].cameras("Rear").include
else False
)
else:
continue
# Store duration and timestamp
video_info[camera].update(
filename=video_filename,
duration=item["duration"],
timestamp=item["timestamp"],
include=include_clip,
)
# Only check duration and timestamp if this file is not corrupt and if we include this camera
# in our output.
if include_clip:
# Figure out which one has the longest duration
duration = (
item["duration"] if item["duration"] > duration else duration
)
# Figure out starting timestamp
if video_timestamp is None:
video_timestamp = item["timestamp"]
else:
video_timestamp = (
item["timestamp"]
if item["timestamp"] < video_timestamp
else video_timestamp
)
if video_timestamp is None:
# Firmware version 2019.16 changed filename timestamp format.
if len(filename_timestamp) == 16:
# This is for before version 2019.16
video_timestamp = datetime.strptime(
filename_timestamp, "%Y-%m-%d_%H-%M"
)
video_timestamp = video_timestamp.astimezone(get_localzone())
else:
# This is for version 2019.16 and later
video_timestamp = datetime.strptime(
filename_timestamp, "%Y-%m-%d_%H-%M-%S"
)
video_timestamp = video_timestamp.astimezone(timezone.utc)
movie_info = {
"movie_folder": movie_folder,
"timestamp": video_timestamp,
"duration": duration,
"video_info": video_info,
"file_only": isfile,
}
movie_file_list.update({filename_timestamp: movie_info})
folder_list.update({movie_folder: movie_file_list})
return folder_list
def get_metadata(ffmpeg, filenames):
""" Retrieve the meta data for the clip (i.e. timestamp, duration) """
# Get meta data for each video to determine creation time and duration.
ffmpeg_command = [ffmpeg]
metadata = []
for camera_file in filenames:
if os.path.isfile(camera_file):
ffmpeg_command.append("-i")
ffmpeg_command.append(camera_file)
metadata.append(
{
"filename": camera_file,
"timestamp": None,
"duration": 0,
"include": False,
}
)
else:
_LOGGER.debug(f"File {camera_file} does not exist, skipping.")
# Don't run ffmpeg if nothing to check for.
if not metadata:
return metadata
ffmpeg_command.append("-hide_banner")
command_result = run(ffmpeg_command, capture_output=True, text=True)
metadata_iterator = iter(metadata)
input_counter = 0
video_timestamp = None
wait_for_input_line = True
metadata_item = {}
for line in command_result.stderr.splitlines():
if search("^Input #", line) is not None:
            # If the filename was not yet appended then it is a corrupt file; in that
            # case it is still added to the list but marked as not to be included for processing.
metadata_item = next(metadata_iterator)
input_counter += 1
video_timestamp = None
wait_for_input_line = False
continue
if wait_for_input_line:
continue
if search("^ *creation_time ", line) is not None:
line_split = line.split(":", 1)
video_timestamp = datetime.strptime(
line_split[1].strip(), "%Y-%m-%dT%H:%M:%S.%f%z"
)
continue
if search("^ *Duration: ", line) is not None:
line_split = line.split(",")
line_split = line_split[0].split(":", 1)
duration_list = line_split[1].split(":")
duration = (
int(duration_list[0]) * 60 * 60
+ int(duration_list[1]) * 60
+ int(duration_list[2].split(".")[0])
+ (float(duration_list[2].split(".")[1]) / 100)
)
            # File will only be processed if duration is greater than 0
include = duration > 0
metadata_item.update(
{"timestamp": video_timestamp, "duration": duration, "include": include}
)
wait_for_input_line = True
return metadata
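# Illustrative sketch (not part of the original tool): how a ffmpeg stderr line such as
# "Duration: 00:01:00.03, ..." maps to seconds in get_metadata() above. The line below
# is a hypothetical example.
def _duration_parse_example(
        line="  Duration: 00:01:00.03, start: 0.000000, bitrate: 9736 kb/s"):
    hh_mm_ss = line.split(",")[0].split(":", 1)[1].split(":")
    return (int(hh_mm_ss[0]) * 60 * 60
            + int(hh_mm_ss[1]) * 60
            + int(hh_mm_ss[2].split(".")[0])
            + float(hh_mm_ss[2].split(".")[1]) / 100)  # -> 60.03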
def create_intermediate_movie(
filename_timestamp,
video,
folder_timestamps,
video_settings,
clip_number,
total_clips,
):
""" Create intermediate movie files. This is the merging of the 3 camera
video files into 1 video file. """
    # We first stack (combine) the 3 different camera video files into 1
# and then we concatenate.
front_camera = (
os.path.join(
video["movie_folder"], video["video_info"]["front_camera"]["filename"]
)
if (
video["video_info"]["front_camera"]["filename"] is not None
and video["video_info"]["front_camera"]["include"]
)
else None
)
left_camera = (
os.path.join(
video["movie_folder"], video["video_info"]["left_camera"]["filename"]
)
if (
video["video_info"]["left_camera"]["filename"] is not None
and video["video_info"]["left_camera"]["include"]
)
else None
)
right_camera = (
os.path.join(
video["movie_folder"], video["video_info"]["right_camera"]["filename"]
)
if (
video["video_info"]["right_camera"]["filename"] is not None
and video["video_info"]["right_camera"]["include"]
)
else None
)
rear_camera = (
os.path.join(
video["movie_folder"], video["video_info"]["rear_camera"]["filename"]
)
if (
video["video_info"]["rear_camera"]["filename"] is not None
and video["video_info"]["rear_camera"]["include"]
)
else None
)
if (
front_camera is None
and left_camera is None
and right_camera is None
and rear_camera is None
):
_LOGGER.debug(
            f'No front, left, right, and rear camera clip exists for {video["timestamp"]}'
)
return None, 0, True
if video_settings["video_layout"].swap_left_right:
left_camera, right_camera = right_camera, left_camera
if video_settings["video_layout"].swap_front_rear:
front_camera, rear_camera = rear_camera, front_camera
# Determine if this clip is to be included based on potential start and end timestamp/offsets that were provided.
# Clip starting time is between the start&end times we're looking for
# or Clip end time is between the start&end time we're looking for.
# or Starting time is between start&end clip time
# or End time is between start&end clip time
starting_timestmp = video["timestamp"]
ending_timestmp = starting_timestmp + timedelta(seconds=video["duration"])
if not (
folder_timestamps[0] <= starting_timestmp <= folder_timestamps[1]
or folder_timestamps[0] <= ending_timestmp <= folder_timestamps[1]
or starting_timestmp <= folder_timestamps[0] <= ending_timestmp
or starting_timestmp <= folder_timestamps[1] <= ending_timestmp
):
# This clip is not in-between the timestamps we want, skip it.
_LOGGER.debug(
f"Clip timestamp from {starting_timestmp} to {ending_timestmp} not "
f"between {folder_timestamps[0]} and {folder_timestamps[1]}"
)
return None, 0, True
# Determine if we need to do an offset of the starting timestamp
starting_offset = 0
ffmpeg_offset_command = []
clip_duration = video["duration"]
# This clip falls in between the start and end timestamps to include.
# Set offsets if required
if video["timestamp"] < folder_timestamps[0]:
# Starting timestamp is within this clip.
starting_offset = (folder_timestamps[0] - video["timestamp"]).total_seconds()
starting_timestmp = folder_timestamps[0]
ffmpeg_offset_command = ["-ss", str(starting_offset)]
clip_duration = video["duration"] - starting_offset
# Adjust duration if end of clip's timestamp is after ending timestamp we need.
if video["timestamp"] + timedelta(seconds=video["duration"]) > folder_timestamps[1]:
# Duration has to be cut.
clip_duration = (
folder_timestamps[1]
- (video["timestamp"] + timedelta(seconds=starting_offset))
).total_seconds()
ffmpeg_offset_command += ["-t", str(clip_duration)]
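# Worked example: a 60 second clip that starts 10 seconds before the requested window
# gets "-ss 10"; if it also runs past the window end, the remaining duration is trimmed
# with a matching "-t" value.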
# Confirm if files exist, if not replace with nullsrc
input_count = 0
if left_camera is not None and os.path.isfile(left_camera):
ffmpeg_left_command = ffmpeg_offset_command + ["-i", left_camera]
ffmpeg_left_camera = ";[0:v] " + video_settings["left_camera"]
input_count += 1
else:
ffmpeg_left_command = []
ffmpeg_left_camera = (
video_settings["background"].format(
duration=clip_duration,
speed=video_settings["movie_speed"],
width=video_settings["video_layout"].cameras("Left").width,
height=video_settings["video_layout"].cameras("Left").height,
)
+ "[left]"
if video_settings["video_layout"].cameras("Left").include
else ""
)
if front_camera is not None and os.path.isfile(front_camera):
ffmpeg_front_command = ffmpeg_offset_command + ["-i", front_camera]
ffmpeg_front_camera = (
";[" + str(input_count) + ":v] " + video_settings["front_camera"]
)
input_count += 1
else:
ffmpeg_front_command = []
ffmpeg_front_camera = (
video_settings["background"].format(
duration=clip_duration,
speed=video_settings["movie_speed"],
width=video_settings["video_layout"].cameras("Front").width,
height=video_settings["video_layout"].cameras("Front").height,
)
+ "[front]"
if video_settings["video_layout"].cameras("Front").include
else ""
)
if right_camera is not None and os.path.isfile(right_camera):
ffmpeg_right_command = ffmpeg_offset_command + ["-i", right_camera]
ffmpeg_right_camera = (
";[" + str(input_count) + ":v] " + video_settings["right_camera"]
)
input_count += 1
else:
ffmpeg_right_command = []
ffmpeg_right_camera = (
video_settings["background"].format(
duration=clip_duration,
speed=video_settings["movie_speed"],
width=video_settings["video_layout"].cameras("Right").width,
height=video_settings["video_layout"].cameras("Right").height,
)
+ "[right]"
if video_settings["video_layout"].cameras("Right").include
else ""
)
if rear_camera is not None and os.path.isfile(rear_camera):
ffmpeg_rear_command = ffmpeg_offset_command + ["-i", rear_camera]
ffmpeg_rear_camera = (
";[" + str(input_count) + ":v] " + video_settings["rear_camera"]
)
input_count += 1
else:
ffmpeg_rear_command = []
ffmpeg_rear_camera = (
video_settings["background"].format(
duration=clip_duration,
speed=video_settings["movie_speed"],
width=video_settings["video_layout"].cameras("Rear").width,
height=video_settings["video_layout"].cameras("Rear").height,
)
+ "[rear]"
if video_settings["video_layout"].cameras("Rear").include
else ""
)
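# At this point every camera slot is either a real "-i <file>" input or, if the file
# is missing, a generated background of the same duration (when that camera is part
# of the layout).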
local_timestamp = video["timestamp"].astimezone(get_localzone())
# Check if the target video file already exists when skip_existing is set.
file_already_exist = False
if video_settings["skip_existing"]:
temp_movie_name = (
os.path.join(video_settings["target_folder"], filename_timestamp) + ".mp4"
)
if os.path.isfile(temp_movie_name):
file_already_exist = True
elif (
not video_settings["keep_intermediate"]
and video_settings["temp_dir"] is not None
):
temp_movie_name = (
os.path.join(video_settings["temp_dir"], filename_timestamp) + ".mp4"
)
if os.path.isfile(temp_movie_name):
file_already_exist = True
if file_already_exist:
print(
"\t\tSkipping clip {clip_number}/{total_clips} from {timestamp} "
"and {duration} seconds as it already exist.".format(
clip_number=clip_number + 1,
total_clips=total_clips,
timestamp=local_timestamp.strftime("%x %X"),
duration=int(clip_duration),
)
)
# Get actual duration of our new video, required for chapters when concatenating.
metadata = get_metadata(video_settings["ffmpeg_exec"], [temp_movie_name])
duration = metadata[0]["duration"] if metadata else video["duration"]
return temp_movie_name, duration, True
else:
target_folder = (
video_settings["temp_dir"]
if not video_settings["keep_intermediate"]
and video_settings["temp_dir"] is not None
else video_settings["target_folder"]
)
temp_movie_name = os.path.join(target_folder, filename_timestamp) + ".mp4"
print(
"\t\tProcessing clip {clip_number}/{total_clips} from {timestamp} "
"and {duration} seconds long.".format(
clip_number=clip_number + 1,
total_clips=total_clips,
timestamp=local_timestamp.strftime("%x %X"),
duration=int(clip_duration),
)
)
epoch_timestamp = int(starting_timestmp.timestamp())
ffmpeg_filter = (
video_settings["base"].format(
duration=clip_duration, speed=video_settings["movie_speed"]
)
+ ffmpeg_left_camera
+ ffmpeg_front_camera
+ ffmpeg_right_camera
+ ffmpeg_rear_camera
+ video_settings["clip_positions"]
+ video_settings["timestamp_text"].format(epoch_time=epoch_timestamp)
+ video_settings["ffmpeg_speed"]
+ video_settings["ffmpeg_motiononly"]
)
ffmpeg_command = (
[video_settings["ffmpeg_exec"]]
+ ["-loglevel", "error"]
+ ffmpeg_left_command
+ ffmpeg_front_command
+ ffmpeg_right_command
+ ffmpeg_rear_command
+ ["-filter_complex", ffmpeg_filter]
+ ["-map", f"[{video_settings["input_clip"]}]"]
+ video_settings["other_params"]
)
ffmpeg_command = ffmpeg_command + ["-y", temp_movie_name]
_LOGGER.debug(f"FFMPEG Command: {ffmpeg_command}")
# Run the command.
try:
run(ffmpeg_command, capture_output=True, check=True)
except CalledProcessError as exc:
print(
"\t\t\tError trying to create clip for {base_name}. RC: {rc}\n"
"\t\t\tCommand: {command}\n"
"\t\t\tError: {stderr}\n\n".format(
base_name=os.path.join(video["movie_folder"], filename_timestamp),
rc=exc.returncode,
command=exc.cmd,
stderr=exc.stderr,
)
)
return None, 0, False
# Get actual duration of our new video, required for chapters when concatenating.
metadata = get_metadata(video_settings["ffmpeg_exec"], [temp_movie_name])
duration = metadata[0]["duration"] if metadata else video["duration"]
return temp_movie_name, duration, True
def create_movie(clips_list, movie_filename, video_settings, chapter_offset):
""" Concatenate provided movie files into 1."""
# Just return if there are no clips.
if not clips_list:
_LOGGER.debug("Clip list is empty")
return None, None
# Go through the list of clips to create the command and content for chapter meta file.
ffmpeg_join_filehandle, ffmpeg_join_filename = mkstemp(suffix=".txt", text=True)
total_clips = 0
meta_content = ""
meta_start = 0
total_videoduration = 0
chapter_offset = chapter_offset * 1000000000
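# Chapter start/end values below are expressed in nanoseconds to match the
# FFMETADATA "TIMEBASE=1/1000000000" used when writing the chapter entries.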
with os.fdopen(ffmpeg_join_filehandle, "w") as fp:
# Loop through the list sorted by video timestamp.
for video_clip in sorted(
clips_list, key=lambda video: video["video_timestamp"]
):
if not os.path.isfile(video_clip["video_filename"]):
print(
"\t\tFile {} does not exist anymore, skipping.".format(
video_clip["video_filename"]
)
)
continue
# Add this file in our join list.
fp.write(
"file '"
+ video_clip["video_filename"]
+ "'{linesep}".format(linesep=os.linesep)
)
total_clips = total_clips + 1
title = video_clip["video_timestamp"].astimezone(get_localzone())
# For duration need to also calculate if video was sped-up or slowed down.
video_duration = int(video_clip["video_duration"] * 1000000000)
total_videoduration += video_duration
chapter_start = meta_start
if video_duration > abs(chapter_offset):
if chapter_offset < 0:
chapter_start = meta_start + video_duration + chapter_offset
elif chapter_offset > 0:
chapter_start = chapter_start + chapter_offset
# We need to add an initial chapter if our "1st" chapter is not at the beginning of the movie.
if total_clips == 1 and chapter_start > 0:
meta_content = (
"[CHAPTER]{linesep}"
"TIMEBASE=1/1000000000{linesep}"
"START={start}{linesep}"
"END={end}{linesep}"
"title={title}{linesep}".format(
linesep=os.linesep,
start=0,
end=chapter_start - 1,
title="Start",
)
)
meta_content = (
meta_content + "[CHAPTER]{linesep}"
"TIMEBASE=1/1000000000{linesep}"
"START={start}{linesep}"
"END={end}{linesep}"
"title={title}{linesep}".format(
linesep=os.linesep,
start=chapter_start,
end=meta_start + video_duration,
title=title.strftime("%x %X"),
)
)
meta_start = meta_start + 1 + video_duration
if total_clips == 0:
print("\t\tError: No valid clips to merge found.")
return None, None
# Write out the meta data file.
meta_content = ";FFMETADATA1" + os.linesep + meta_content
ffmpeg_meta_filehandle, ffmpeg_meta_filename = mkstemp(suffix=".txt", text=True)
with os.fdopen(ffmpeg_meta_filehandle, "w") as fp:
fp.write(meta_content)
ffmpeg_params = [
"-f",
"concat",
"-safe",
"0",
"-i",
ffmpeg_join_filename,
"-i",
ffmpeg_meta_filename,
"-map_metadata",
"1",
"-map_chapters",
"1",
]
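# "-map_metadata 1" and "-map_chapters 1" pull metadata and chapters from input index 1,
# which is the generated FFMETADATA file (input 0 is the concat list).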
if video_settings["movflags_faststart"]:
ffmpeg_params = ffmpeg_params + ["-movflags", "+faststart"]
ffmpeg_params = ffmpeg_params + ["-c", "copy"]
ffmpeg_params = ffmpeg_params + [
"-metadata",
f"description=Created by tesla_dashcam {VERSION_STR}",
]
ffmpeg_command = (
[video_settings["ffmpeg_exec"]]
+ ["-loglevel", "error"]
+ ffmpeg_params
+ ["-y", movie_filename]
)
_LOGGER.debug(f"FFMPEG Command: {ffmpeg_command}")
try:
run(ffmpeg_command, capture_output=True, check=True)
except CalledProcessError as exc:
print(
"\t\tError trying to create movie {base_name}. RC: {rc}\n"
"\t\tCommand: {command}\n"
"\t\tError: {stderr}\n\n".format(
base_name=movie_filename,
rc=exc.returncode,
command=exc.cmd,
stderr=exc.stderr,
)
)
movie_filename = None
duration = 0
else:
# Get actual duration of our new video, required for chapters when concatenating.
metadata = get_metadata(video_settings["ffmpeg_exec"], [movie_filename])
duration = metadata[0]["duration"] if metadata else total_videoduration
# Remove temp join file.
# noinspection PyBroadException,PyPep8
try:
os.remove(ffmpeg_join_filename)
except:
_LOGGER.debug(f"Failed to remove {ffmpeg_join_filename}")
pass
# Remove temp join file.
# noinspection PyBroadException,PyPep8
try:
os.remove(ffmpeg_meta_filename)
except:
_LOGGER.debug(f"Failed to remove {ffmpeg_meta_filename}")
pass
return movie_filename, duration
def make_folder(parameter, folder):
# Create folder if not already existing.
if not os.path.isdir(folder):
current_path, add_folder = os.path.split(folder)
if add_folder == "":
current_path, add_folder = os.path.split(current_path)
# If the path in which to create the folder does not exist then exit.
if not os.path.isdir(current_path):
print(
f"Path {current_path} for parameter {parameter} does not exist, please provide a valid path."
)
return False
try:
os.mkdir(folder)
except OSError:
print(
f"Error creating folder {add_folder} at location {current_path} for parameter {parameter}"
)
return False
return True
def delete_intermediate(movie_files):
""" Delete the files provided in list """
for file in movie_files:
if file is not None:
if os.path.isfile(file):
try:
os.remove(file)
except OSError as exc:
print("\t\tError trying to remove file {}: {}".format(file, exc))
elif os.path.isdir(file):
# This is more specific for Mac but won't hurt on other platforms.
if os.path.exists(os.path.join(file, ".DS_Store")):
# noinspection PyBroadException,PyPep8
try:
os.remove(os.path.join(file, ".DS_Store"))
except:
_LOGGER.debug(f"Failed to remove .DS_Store from {file}")
pass
try:
os.rmdir(file)
except OSError as exc:
print("\t\tError trying to remove folder {}: {}".format(file, exc))
def process_folders(folders, video_settings, delete_source):
""" Process all clips found within folders. """
start_time = timestamp()
total_clips = 0
for folder_number, folder_name in enumerate(sorted(folders)):
total_clips = total_clips + len(folders[folder_name])
print(
"There are {total_folders} folders with {total_clips} clips to "
"process.".format(total_folders=len(folders), total_clips=total_clips)
)
# Loop through all the folders.
dashcam_clips = []
for folder_number, folder_name in enumerate(sorted(folders)):
files = folders[folder_name]
# Ensure the clips are sorted based on video timestamp.
sorted_video_clips = sorted(files, key=lambda video: files[video]["timestamp"])
# Get the start and ending timestamps, we add duration to
# last timestamp to get true ending.
first_clip_tmstp = files[sorted_video_clips[0]]["timestamp"]
last_clip_tmstp = files[sorted_video_clips[-1]]["timestamp"] + timedelta(
seconds=files[sorted_video_clips[-1]]["duration"]
)
# Skip this folder if it does not fall within the provided timestamps.
if (
video_settings["start_timestamp"] is not None
and last_clip_tmstp < video_settings["start_timestamp"]
):
# Clips from this folder are from before start timestamp requested.
_LOGGER.debug(
f"Clips in folder end at {last_clip_tmstp} which is still before "
f"start timestamp {video_settings['start_timestamp']}"
)
continue
if (
video_settings["end_timestamp"] is not None
and first_clip_tmstp > video_settings["end_timestamp"]
):
# Clips from this folder are from after end timestamp requested.
_LOGGER.debug(
f"Clips in folder start at {first_clip_tmstp} which is after "
f"end timestamp {video_settings['end_timestamp']}"
)
continue
# Determine the starting and ending timestamps for the clips in this folder based on start/end timestamps
# provided and offsets.
folder_start_timestmp = (
timedelta(seconds=video_settings["start_offset"]) + first_clip_tmstp
)
# Use provided start timestamp if it is after folder timestamp + offset
folder_start_timestmp = (
video_settings["start_timestamp"]
if video_settings["start_timestamp"] is not None
and video_settings["start_timestamp"] > folder_start_timestmp
else folder_start_timestmp
)
# Figure out potential end timestamp for clip based on offset and end timestamp.
folder_end_timestmp = last_clip_tmstp - timedelta(
seconds=video_settings["end_offset"]
)
# Use provided end timestamp if it is before folder timestamp - offset
folder_end_timestmp = (
video_settings["end_timestamp"]
if video_settings["end_timestamp"] is not None
and video_settings["end_timestamp"] < folder_end_timestmp
else folder_end_timestmp
)
# Put them together to create the filename for the folder.
movie_filename = (
folder_start_timestmp.astimezone(get_localzone()).strftime(
"%Y-%m-%dT%H-%M-%S"
)
+ "_"
+ folder_end_timestmp.astimezone(get_localzone()).strftime(
"%Y-%m-%dT%H-%M-%S"
)
)
# Now add full path to it.
movie_filename = (
os.path.join(video_settings["target_folder"], movie_filename) + ".mp4"
)
# Do not process the files from this folder if we are to skip it because
# the target movie file already exists.
if video_settings["skip_existing"] and os.path.isfile(movie_filename):
print(
"\tSkipping folder {folder} as {filename} is already "
"created ({folder_number}/{total_folders})".format(
folder=folder_name,
filename=movie_filename,
folder_number=folder_number + 1,
total_folders=len(folders),
)
)
# Get actual duration of our new video, required for chapters when concatenating.
metadata = get_metadata(video_settings["ffmpeg_exec"], [movie_filename])
movie_duration = metadata[0]["duration"] if metadata else 0
dashcam_clips.append(
{
"video_timestamp": first_clip_tmstp,
"video_filename": movie_filename,
"video_duration": movie_duration,
}
)
continue
print(
"\tProcessing {total_clips} clips in folder {folder} "
"({folder_number}/{total_folders})".format(
total_clips=len(files),
folder=folder_name,
folder_number=folder_number + 1,
total_folders=len(folders),
)
)
# Loop through all the files within the folder.
folder_clips = []
delete_folder_clips = []
delete_folder_files = delete_source
delete_file_list = []
folder_timestamp = None
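# folder_timestamp ends up holding the timestamp of the first clip in this folder
# and is later used as the video timestamp of the merged folder movie.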
for clip_number, filename_timestamp in enumerate(sorted_video_clips):
video_timestamp_info = files[filename_timestamp]
folder_timestamp = (
video_timestamp_info["timestamp"]
if folder_timestamp is None
else folder_timestamp
)
clip_name, clip_duration, files_processed = create_intermediate_movie(
filename_timestamp,
video_timestamp_info,
(folder_start_timestmp, folder_end_timestmp),
video_settings,
clip_number,
len(files),
)
if clip_name is not None:
if video_timestamp_info["file_only"]:
# When file only there is no concatenation at the folder
# level, will only happen at the higher level if requested.
dashcam_clips.append(
{
"video_timestamp": video_timestamp_info["timestamp"],
"video_filename": clip_name,
"video_duration": clip_duration,
}
)
else:
# Movie was created, store name for concatenation.
folder_clips.append(
{
"video_timestamp": video_timestamp_info["timestamp"],
"video_filename": clip_name,
"video_duration": clip_duration,
}
)
# Add clip for deletion only if its name is not the
# same as the resulting movie filename
if clip_name != movie_filename:
delete_folder_clips.append(clip_name)
elif not files_processed:
delete_folder_files = False
if files_processed:
# Add the files to our list for removal.
video_info = video_timestamp_info["video_info"]
if video_info["front_camera"]["filename"] is not None:
delete_file_list.append(
os.path.join(
video_timestamp_info["movie_folder"],
video_info["front_camera"]["filename"],
)
)
if video_info["left_camera"]["filename"] is not None:
delete_file_list.append(
os.path.join(
video_timestamp_info["movie_folder"],
video_info["left_camera"]["filename"],
)
)
if video_info["right_camera"]["filename"] is not None:
delete_file_list.append(
os.path.join(
video_timestamp_info["movie_folder"],
video_info["right_camera"]["filename"],
)
)
if video_info["rear_camera"]["filename"] is not None:
delete_file_list.append(
os.path.join(
video_timestamp_info["movie_folder"],
video_info["rear_camera"]["filename"],
)
)
# All clips in folder have been processed, merge those clips
# together now.
movie_name = None
movie_duration = 0
if folder_clips:
print("\t\tCreating movie {}, please be patient.".format(movie_filename))
movie_name, movie_duration = create_movie(
folder_clips, movie_filename, video_settings, 0
)
# Delete the source files if stated to delete.
# We only do so if there were no issues in processing the clips
if delete_folder_files and (
(folder_clips and movie_name is not None) or not folder_clips
):
print(
"\t\tDeleting files and folder {folder_name}".format(
folder_name=folder_name
)
)
delete_intermediate(delete_file_list)
# And delete the folder
delete_intermediate([folder_name])
# Add this one to our list for final concatenation
if movie_name is not None:
dashcam_clips.append(
{
"video_timestamp": folder_timestamp,
"video_filename": movie_name,
"video_duration": movie_duration,
}
)
# Delete the intermediate files we created.
if not video_settings["keep_intermediate"]:
delete_intermediate(delete_folder_clips)
print(
"\tMovie {base_name} for folder {folder_name} with duration {duration} is "
"ready.".format(
base_name=movie_name,
folder_name=folder_name,
duration=str(timedelta(seconds=int(movie_duration))),
)
)
# Now that we have gone through all the folders merge.
# We only do this if merge is enabled OR if we only have 1 clip and for
# output a specific filename was provided.
movie_name = None
movie_duration = 0
if dashcam_clips:
if video_settings["merge_subdirs"] or (
len(folders) == 1 and video_settings["target_filename"] is not None
):
if video_settings["movie_filename"] is not None:
movie_filename = video_settings["movie_filename"]
elif video_settings["target_filename"] is not None:
movie_filename = video_settings["target_filename"]
else:
folder, movie_filename = os.path.split(video_settings["target_folder"])
# If there was a trailing separator provided then it will be
# empty, redo split then.
if movie_filename == "":
movie_filename = os.path.split(folder)[1]
movie_filename = os.path.join(
video_settings["target_folder"], movie_filename
)
# Make sure it ends in .mp4
if os.path.splitext(movie_filename)[1] != ".mp4":
movie_filename = movie_filename + ".mp4"
print("\tCreating movie {}, please be patient.".format(movie_filename))
movie_name, movie_duration = create_movie(
dashcam_clips,
movie_filename,
video_settings,
video_settings["chapter_offset"],
)
if movie_name is not None:
print(
"Movie {base_name} with duration {duration} has been created, enjoy.".format(
base_name=movie_name,
duration=str(timedelta(seconds=int(movie_duration))),
)
)
else:
print(
"All folders have been processed, resulting movie files are "
"located in {target_folder}".format(
target_folder=video_settings["target_folder"]
)
)
else:
print("No clips found.")
end_time = timestamp()
real = int((end_time - start_time))
print("Total processing time: {real}".format(real=str(timedelta(seconds=real))))
if video_settings["notification"]:
if movie_name is not None:
notify(
"TeslaCam",
"Completed",
"{total_folders} folder{folders} with {total_clips} "
"clip{clips} have been processed, movie {movie_name} has "
"been created.".format(
folders="" if len(folders) < 2 else "s",
total_folders=len(folders),
clips="" if total_clips < 2 else "s",
total_clips=total_clips,
movie_name=movie_name,
),
)
else:
notify(
"TeslaCam",
"Completed",
"{total_folders} folder{folders} with {total_clips} "
"clip{clips} have been processed, {target_folder} contains "
"resulting files.".format(
folders="" if len(folders) < 2 else "s",
total_folders=len(folders),
clips="" if total_clips < 2 else "s",
total_clips=total_clips,
target_folder=video_settings["target_folder"],
),
)
print()
def resource_path(relative_path):
""" Return absolute path for provided relative item based on location
of program.
"""
# If compiled with pyinstaller then sys._MEIPASS points to the location
# of the bundle. Otherwise path of python script is used.
base_path = getattr(sys, "_MEIPASS", str(Path(__file__).parent))
return os.path.join(base_path, relative_path)
def notify_macos(title, subtitle, message):
""" Notification on MacOS """
try:
run(
[
"osascript",
'-e display notification "{message}" with title "{title}" '
'subtitle "{subtitle}"'
"".format(message=message, title=title, subtitle=subtitle),
]
)
except Exception as exc:
print("Failed in notifification: ", exc)
def notify_windows(title, subtitle, message):
""" Notification on Windows """
# Section commented out, waiting to see if it really does not work on Windows 7
# This works only on Windows 10 or Windows Server 2016/2019. Skipping for everything else
# from platform import win32_ver
# if win32_ver()[0] != 10:
# return
global TOASTER_INSTANCE
# noinspection PyBroadException
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
from win10toast import ToastNotifier
if TOASTER_INSTANCE is None:
TOASTER_INSTANCE = ToastNotifier()
TOASTER_INSTANCE.show_toast(
threaded=True,
title="{} {}".format(title, subtitle),
msg=message,
duration=5,
icon_path=resource_path("tesla_dashcam.ico"),
)
except Exception:
pass
def notify_linux(title, subtitle, message):
""" Notification on Linux """
try:
run(
[
"notify-send",
'"{title} {subtitle}"'.format(title=title, subtitle=subtitle),
'"{}"'.format(message),
]
)
except Exception as exc:
print("Failed in notifification: ", exc)
def notify(title, subtitle, message):
""" Call function to send notification based on OS """
if sys.platform == "darwin":
notify_macos(title, subtitle, message)
elif sys.platform == "win32":
notify_windows(title, subtitle, message)
elif sys.platform == "linux":
notify_linux(title, subtitle, message)
def main():
""" Main function """
loglevels = dict(
(logging.getLevelName(level), level) for level in [10, 20, 30, 40, 50]
)
internal_ffmpeg = getattr(sys, "frozen", None) is not None
ffmpeg_default = resource_path(FFMPEG.get(sys.platform, "ffmpeg"))
movie_folder = os.path.join(str(Path.home()), MOVIE_HOMEDIR.get(sys.platform), "")
# Check if ffmpeg exist, if not then hope it is in default path or
# provided.
if not os.path.isfile(ffmpeg_default):
internal_ffmpeg = False
ffmpeg_default = FFMPEG.get(sys.platform, "ffmpeg")
epilog = (
"This program leverages ffmpeg which is included. See https://ffmpeg.org/ for more information on ffmpeg"
if internal_ffmpeg
else "This program requires ffmpeg which can be downloaded from: https://ffmpeg.org/download.html"
)
parser = MyArgumentParser(
description="tesla_dashcam - Tesla DashCam & Sentry Video Creator",
epilog=epilog,
formatter_class=SmartFormatter,
fromfile_prefix_chars="@",
)
parser.add_argument(
"source",
type=str,
nargs="*",
help="Folder(s) (events) containing the saved camera files. Filenames can be provided as well to manage "
"individual clips.",
)
parser.add_argument(
"--version", action="version", version=" %(prog)s " + VERSION_STR
)
parser.add_argument(
"--loglevel",
default="INFO",
choices=list(loglevels.keys()),
help="Logging level.",
)
parser.add_argument(
"--temp_dir", required=False, type=str, help="R|Path to store temporary files."
)
parser.add_argument(
"--no-notification",
dest="system_notification",
action="store_false",
help="Do not create a notification upon " "completion.",
)
input_group = parser.add_argument_group(
title="Video Input",
description="Options related to what clips and events to process.",
)
input_group.add_argument(
"--skip_existing",
dest="skip_existing",
action="store_true",
help="Skip creating encoded video file if it already exist. Note that only existence is checked, not if "
"layout etc. are the same.",
)
input_group.add_argument(
"--delete_source",
dest="delete_source",
action="store_true",
help="Delete the processed files upon completion.",
)
input_group.add_argument(
"--exclude_subdirs",
dest="exclude_subdirs",
action="store_true",
help="Do not search sub folders (events) for video files to process.",
)
monitor_group = parser.add_argument_group(
title="Trigger Monitor",
description="Parameters for monitoring of insertion of TeslaCam drive, folder, or file existence.",
)
monitor_group.add_argument(
"--monitor",
dest="monitor",
action="store_true",
help="Enable monitoring for drive to be attached with TeslaCam folder.",
)
monitor_group.add_argument(
"--monitor_once",
dest="monitor_once",
action="store_true",
help="Enable monitoring and exit once drive with TeslaCam folder has been attached and files processed.",
)
monitor_group.add_argument(
"--monitor_trigger",
required=False,
type=str,
help="Trigger file to look for instead of waiting for drive to be attached. Once file is discovered then "
"processing will start, file will be deleted when processing has been completed. If source is not "
"provided then folder where file is located will be used as source.",
)
layout_group = parser.add_argument_group(
title="Video Layout",
description="Set what the layout of the resulting video should be",
)
layout_group.add_argument(
"--layout",
required=False,
choices=["WIDESCREEN", "FULLSCREEN", "PERSPECTIVE", "CROSS", "DIAMOND"],
default="FULLSCREEN",
help="R|Layout of the created video.\n"
" FULLSCREEN: Front camera center top, "
"side cameras underneath it with rear camera between side camera.\n"
" WIDESCREEN: Front camera on top with side and rear cameras smaller underneath it.\n"
" PERSPECTIVE: Similar to FULLSCREEN but then with side cameras in perspective.\n"
" CROSS: Front camera center top, side cameras underneath, and rear camera center bottom.\n"
" DIAMOND: Front camera center top, side cameras below front camera left and right of front, "
"and rear camera center bottom.\n",
)
layout_group.add_argument(
"--perspective",
dest="perspective",
action="store_true",
help="Show side cameras in perspective.",
)
layout_group.set_defaults(perspective=False)
layout_group.add_argument(
"--scale",
dest="clip_scale",
type=str,
nargs="+",
action="append",
help="R|Set camera clip scale for all clips, scale of 1 is 1280x960 camera clip.\n"
"If provided with value then it is default for all cameras, to set the scale for a specific "
"camera provide camera=<front, left, right,rear> <scale>\n"
"for example:\n"
" --scale 0.5 all are 640x480\n"
" --scale 640x480 all are 640x480\n"
" --scale 0.5 --scale camera=front 1 all are 640x480 except front at 1280x960\n"
" --scale camera=left .25 --scale camera=right 320x240 left and right are set to 320x240\n"
"Defaults:\n"
" WIDESCREEN: 1/2 (front 1280x960, others 640x480, video is 1920x1920)\n"
" FULLSCREEN: 1/2 (640x480, video is 1920x960)\n"
" CROSS: 1/2 (640x480, video is 1280x1440)\n"
" DIAMOND: 1/2 (640x480, video is 1920x976)\n",
)
layout_group.add_argument(
"--mirror",
dest="rear_or_mirror",
action="store_const",
const=1,
help="Video from side and rear cameras as if being viewed through the mirror. Default when not providing "
"parameter --no-front. Cannot be used in combination with --rear.",
)
layout_group.add_argument(
"--rear",
dest="rear_or_mirror",
action="store_const",
const=0,
help="Video from side and rear cameras as if looking backwards. Default when providing parameter --no-front. "
"Cannot be used in combination with --mirror.",
)
layout_group.add_argument(
"--swap",
dest="swap_leftright",
action="store_const",
const=1,
help="Swap left and right cameras in output, default when side and rear cameras are as if looking backwards. "
"See --rear parameter.",
)
layout_group.add_argument(
"--no-swap",
dest="swap_leftright",
action="store_const",
const=0,
help="Do not swap left and right cameras, default when side and rear cameras are as if looking through a "
"mirror. Also see --mirror parameter",
)
layout_group.add_argument(
"--swap_frontrear",
dest="swap_frontrear",
action="store_true",
help="Swap front and rear cameras in output.",
)
layout_group.add_argument(
"--background",
dest="background",
default="black",
help="Background color for video. Can be a color string or RGB value. Also see --fontcolor.",
)
camera_group = parser.add_argument_group(
title="Camera Exclusion", description="Exclude one or more cameras:"
)
camera_group.add_argument(
"--no-front",
dest="no_front",
action="store_true",
help="Exclude front camera from video.",
)
camera_group.add_argument(
"--no-left",
dest="no_left",
action="store_true",
help="Exclude left camera from video.",
)
camera_group.add_argument(
"--no-right",
dest="no_right",
action="store_true",
help="Exclude right camera from video.",
)
camera_group.add_argument(
"--no-rear",
dest="no_rear",
action="store_true",
help="Exclude rear camera from video.",
)
timestamp_group = parser.add_argument_group(
title="Timestamp",
description="Options on how to show date/time in resulting video:",
)
timestamp_group.add_argument(
"--no-timestamp",
dest="no_timestamp",
action="store_true",
help="Do not show timestamp in video",
)
timestamp_group.add_argument(
"--halign",
required=False,
choices=["LEFT", "CENTER", "RIGHT"],
help="Horizontal alignment for timestamp",
)
timestamp_group.add_argument(
"--valign",
required=False,
choices=["TOP", "MIDDLE", "BOTTOM"],
help="Vertical Alignment for timestamp",
)
timestamp_group.add_argument(
"--font",
required=False,
type=str,
default=DEFAULT_FONT.get(sys.platform, None),
help="Fully qualified filename (.ttf) to the font to be chosen for timestamp.",
)
timestamp_group.add_argument(
"--fontsize",
required=False,
type=int,
help="Font size for timestamp. Default is scaled based on resulting video size.",
)
timestamp_group.add_argument(
"--fontcolor",
required=False,
type=str,
default="white",
help="R|Font color for timestamp. Any color is accepted as a color string or RGB value.\n"
"Some potential values are:\n"
" white\n"
" yellowgreen\n"
" yellowgreen@0.9\n"
" Red\n:"
" 0x2E8B57\n"
"For more information on this see ffmpeg documentation for color: https://ffmpeg.org/ffmpeg-utils.html#Color",
)
filter_group = parser.add_argument_group(
title="Timestamp Restriction",
description="Restrict video to be between start and/or end timestamps. Timestamp to be provided in a ISO-8601 "
"format (see https://fits.gsfc.nasa.gov/iso-time.html for examples)",
)
filter_group.add_argument(
"--start_timestamp", dest="start_timestamp", type=str, help="Starting timestamp"
)
filter_group.add_argument(
"--end_timestamp",
dest="end_timestamp",
type=str,
# type=lambda d: datetime.strptime(d, "%Y-%m-%d_%H-%M-%S").datetime(),
help="Ending timestamp",
)
offset_group = parser.add_argument_group(
title="Event offsets", description="Start and/or end offsets for events"
)
offset_group.add_argument(
"--start_offset",
dest="start_offset",
type=int,
help="Skip x number of seconds from start of event for resulting video.",
)
offset_group.add_argument(
"--end_offset",
dest="end_offset",
type=int,
help="Ignore the last x seconds of the event for resulting video",
)
output_group = parser.add_argument_group(
title="Video Output", description="Options related to resulting video creation."
)
output_group.add_argument(
"--output",
required=False,
default=movie_folder,
type=str,
help="R|Path/Filename for the new movie file. Event files will be stored in same folder."
+ os.linesep,
)
output_group.add_argument(
"--motion_only",
dest="motion_only",
action="store_true",
help="Fast-forward through video when there is no motion.",
)
output_group.add_argument(
"--slowdown",
dest="slow_down",
type=float,
default=argparse.SUPPRESS,
help="Slow down video output. Accepts a number that is then used as multiplier, providing 2 means half the "
"speed.",
)
output_group.add_argument(
"--speedup",
dest="speed_up",
type=float,
default=argparse.SUPPRESS,
help="Speed up the video. Accepts a number that is then used as a multiplier, providing 2 means "
"twice the speed.",
)
output_group.add_argument(
"--chapter_offset",
dest="chapter_offset",
type=int,
default=0,
help="Offset in seconds for chapters in merged video. Negative offset is # of seconds before the end of the "
"subdir video, positive offset if # of seconds after the start of the subdir video.",
)
output_group.add_argument(
"--merge",
dest="merge_subdirs",
action="store_true",
help="Merge the video files from different folders (events) into 1 big video file.",
)
output_group.add_argument(
"--keep-intermediate",
dest="keep_intermediate",
action="store_true",
help="Do not remove the clip video files that are created",
)
advancedencoding_group = parser.add_argument_group(
title="Advanced encoding settings", description="Advanced options for encoding"
)
gpu_help = (
"R|Use GPU acceleration, only enable if supported by hardware.\n"
" MAC: All MACs with Haswell CPU or later support this (Macs after 2013).\n"
" See following link as well: \n"
" https://en.wikipedia.org/wiki/List_of_Macintosh_models_grouped_by_CPU_type#Haswell\n"
)
if sys.platform == "darwin":
advancedencoding_group.add_argument(
"--no-gpu", dest="gpu", action="store_true", help=gpu_help
)
else:
advancedencoding_group.add_argument(
"--gpu", dest="gpu", action="store_true", help=gpu_help
)
advancedencoding_group.add_argument(
"--gpu_type",
choices=["nvidia", "intel", "RPi"],
help="Type of graphics card (GPU) in the system. This determines the encoder that will be used."
"This parameter is mandatory if --gpu is provided.",
)
advancedencoding_group.add_argument(
"--no-faststart",
dest="faststart",
action="store_true",
help="Do not enable flag faststart on the resulting video files. Use this when using a network share and "
"errors occur during encoding.",
)
advancedencoding_group.add_argument(
"--quality",
required=False,
choices=["LOWEST", "LOWER", "LOW", "MEDIUM", "HIGH"],
default="LOWER",
help="Define the quality setting for the video, higher quality means bigger file size but might "
"not be noticeable.",
)
advancedencoding_group.add_argument(
"--compression",
required=False,
choices=[
"ultrafast",
"superfast",
"veryfast",
"faster",
"fast",
"medium",
"slow",
"slower",
"veryslow",
],
default="medium",
help="Speed to optimize video. Faster speed results in a bigger file. This does not impact the quality of "
"the video, just how much time is used to compress it.",
)
advancedencoding_group.add_argument(
"--fps",
required=False,
type=int,
default=24,
help="Frames per second for resulting video. Tesla records at about 33fps hence going higher wouldn't do "
"much as frames would just be duplicated. Default is 24fps which is the standard for movies and TV shows",
)
if internal_ffmpeg:
advancedencoding_group.add_argument(
"--ffmpeg",
required=False,
type=str,
help="Full path and filename for alternative " "ffmpeg.",
)
else:
advancedencoding_group.add_argument(
"--ffmpeg",
required=False,
type=str,
default=ffmpeg_default,
help="Path and filename for ffmpeg. Specify if ffmpeg is not within path.",
)
advancedencoding_group.add_argument(
"--encoding",
required=False,
choices=["x264", "x265"],
default=argparse.SUPPRESS,
help="R|Encoding to use for video creation.\n"
" x264: standard encoding, can be viewed on most devices but results in bigger file.\n"
" x265: newer encoding standard but not all devices support this yet.\n",
)
advancedencoding_group.add_argument(
"--enc",
required=False,
type=str,
default=argparse.SUPPRESS,
help="R|Provide a custom encoder for video creation. Cannot be used in combination with --encoding.\n"
"Note: when using this option the --gpu option is ignored. To use GPU hardware acceleration specify an "
"encoding that provides this.",
)
update_check_group = parser.add_argument_group(
title="Update Check", description="Check for updates"
)
update_check_group.add_argument(
"--check_for_update",
dest="check_for_updates",
action="store_true",
help="Check for update and exit.",
)
update_check_group.add_argument(
"--no-check_for_update",
dest="no_check_for_updates",
action="store_true",
help="A check for new updates is performed every time. With this parameter that can be disabled",
)
update_check_group.add_argument(
"--include_test",
dest="include_beta",
action="store_true",
help="Include test (beta) releases when checking for updates.",
)
args = parser.parse_args()
logging.basicConfig(
level=loglevels[args.loglevel],
format="%(asctime)s:%(levelname)s:\t%(name)s\t%(message)s",
)
_LOGGER.debug(f"Arguments : {args}")
# Check that any mutual exclusive items are not both provided.
if "speed_up" in args and "slow_down" in args:
print(
"Option --speed_up and option --slow_down cannot be used together, only use one of them."
)
return 1
if "enc" in args and "encoding" in args:
print(
"Option --enc and option --encoding cannot be used together, only use one of them."
)
return 1
if not args.no_check_for_updates or args.check_for_updates:
release_info = check_latest_release(args.include_beta)
if release_info is not None:
new_version = False
if release_info.get("tag_name") is not None:
github_version = release_info.get("tag_name").split(".")
if len(github_version) == 3:
# Release tags normally start with v. If that is the case
# then strip the v.
try:
major_version = int(github_version[0])
except ValueError:
major_version = int(github_version[0][1:])
minor_version = int(github_version[1])
if release_info.get("prerelease"):
# Drafts will have b and then beta number.
patch_version = int(github_version[2].split("b")[0])
beta_version = int(github_version[2].split("b")[1])
else:
patch_version = int(github_version[2])
beta_version = -1
if major_version == VERSION["major"]:
if minor_version == VERSION["minor"]:
if patch_version == VERSION["patch"]:
if beta_version > VERSION["beta"] or (
beta_version == -1 and VERSION["beta"] != -1
):
new_version = True
elif patch_version > VERSION["patch"]:
new_version = True
elif minor_version > VERSION["minor"]:
new_version = True
elif major_version > VERSION["major"]:
new_version = True
if new_version:
beta = ""
if release_info.get("prerelease"):
beta = "beta "
release_notes = ""
if not args.check_for_updates:
if args.system_notification:
notify(
"TeslaCam",
"Update available",
"New {beta}release {release} is available. You are "
"on version {version}".format(
beta=beta,
release=release_info.get("tag_name"),
version=VERSION_STR,
),
)
release_notes = (
"Use --check_for_update to get latest " "release notes."
)
print(
"New {beta}release {release} is available for download "
"({url}). You are currently on {version}. {rel_note}".format(
beta=beta,
release=release_info.get("tag_name"),
url=release_info.get("html_url"),
version=VERSION_STR,
rel_note=release_notes,
)
)
if args.check_for_updates:
print(
"You can download the new release from: {url}".format(
url=release_info.get("html_url")
)
)
print(
"Release Notes:\n {release_notes}".format(
release_notes=release_info.get("body")
)
)
return
else:
if args.check_for_updates:
print(
"{version} is the latest release available.".format(
version=VERSION_STR
)
)
return
else:
print("Did not retrieve latest version info.")
ffmpeg = ffmpeg_default if getattr(args, "ffmpeg", None) is None else args.ffmpeg
if which(ffmpeg) is None:
print(
f"ffmpeg is a requirement, unable to find {ffmpeg} executable. Please ensure it exist and is located"
f"within PATH environment or provide full path using parameter --ffmpeg."
)
if args.layout == "PERSPECTIVE":
layout_settings = FullScreen()
layout_settings.perspective = True
else:
if args.layout == "WIDESCREEN":
layout_settings = WideScreen()
elif args.layout == "FULLSCREEN":
layout_settings = FullScreen()
elif args.layout == "CROSS":
layout_settings = Cross()
elif args.layout == "DIAMOND":
layout_settings = Diamond()
else:
layout_settings = FullScreen()
layout_settings.perspective = args.perspective
layout_settings.cameras("Front").include = not args.no_front
layout_settings.cameras("Left").include = not args.no_left
layout_settings.cameras("Right").include = not args.no_right
layout_settings.cameras("Rear").include = not args.no_rear
# Check if either rear or mirror argument has been provided.
# If front camera then default to mirror, if no front camera then default to rear.
side_camera_as_mirror = (
layout_settings.cameras("Front").include
if args.rear_or_mirror is None
else args.rear_or_mirror
)
mirror_sides = ", hflip" if side_camera_as_mirror else ""
# For scale first set the main clip one if provided, this than allows camera specific ones to override for
# that camera.
scaling = parser.args_to_dict(args.clip_scale, "scale")
main_scale = search_dict(None, "camera", scaling)
if main_scale is not None:
layout_settings.scale = main_scale.get("scale", layout_settings.scale)
for scale in scaling:
if scale.get("camera", "").lower() in ["front", "left", "right", "rear"]:
camera_scale = scale.get("scale")
if camera_scale is not None:
layout_settings.cameras(
scale["camera"].lower().capitalize()
).scale = camera_scale
layout_settings.font.halign = (
args.halign if args.halign is not None else layout_settings.font.halign
)
layout_settings.font.valign = (
args.valign if args.valign is not None else layout_settings.font.valign
)
# Determine if left and right cameras should be swapped or not.
# No more arguments related to cameras (i.e .scale, include or not) can be processed from now on.
# Up till now Left means left camera and Right means Right camera.
# From this point forward Left can mean Right camera if we're swapping output.
layout_settings.swap_left_right = (
not side_camera_as_mirror
if args.swap_leftright is None
else args.swap_leftright
)
layout_settings.swap_front_rear = args.swap_frontrear
layout_settings.font.font = args.font
layout_settings.font.color = args.fontcolor
if args.fontsize is not None and args.fontsize > 0:
layout_settings.font.size = args.fontsize
black_base = "color=duration={duration}:"
black_size = f"s={{width}}x{{height}}:c={args.background}, fps={args.fps} "
ffmpeg_base = (
black_base
+ black_size.format(
width=layout_settings.video_width, height=layout_settings.video_height
)
+ "[base]"
)
ffmpeg_black_video = ";" + black_base + black_size
input_clip = "base"
ffmpeg_video_position = ""
ffmpeg_left_camera = ""
camera = "Left"
if layout_settings.cameras(camera).include:
ffmpeg_left_camera = (
"setpts=PTS-STARTPTS, "
"scale={clip_width}x{clip_height} {mirror}{options}"
" [left]".format(
clip_width=layout_settings.cameras(camera).width,
clip_height=layout_settings.cameras(camera).height,
mirror=mirror_sides,
options=layout_settings.cameras(camera).options,
)
)
ffmpeg_video_position = (
ffmpeg_video_position
+ ";[{input_clip}][left] overlay=eof_action=pass:repeatlast=0:"
"x={x_pos}:y={y_pos} [left1]".format(
input_clip=input_clip,
x_pos=layout_settings.cameras(camera).xpos,
y_pos=layout_settings.cameras(camera).ypos,
)
)
input_clip = "left1"
ffmpeg_front_camera = ""
camera = "Front"
if layout_settings.cameras(camera).include:
ffmpeg_front_camera = (
"setpts=PTS-STARTPTS, "
"scale={clip_width}x{clip_height} {options}"
" [front]".format(
clip_width=layout_settings.cameras(camera).width,
clip_height=layout_settings.cameras(camera).height,
options=layout_settings.cameras(camera).options,
)
)
ffmpeg_video_position = (
ffmpeg_video_position
+ ";[{input_clip}][front] overlay=eof_action=pass:repeatlast=0:"
"x={x_pos}:y={y_pos} [front1]".format(
input_clip=input_clip,
x_pos=layout_settings.cameras(camera).xpos,
y_pos=layout_settings.cameras(camera).ypos,
)
)
input_clip = "front1"
ffmpeg_right_camera = ""
camera = "Right"
if layout_settings.cameras(camera).include:
ffmpeg_right_camera = (
"setpts=PTS-STARTPTS, "
"scale={clip_width}x{clip_height} {mirror}{options}"
" [right]".format(
clip_width=layout_settings.cameras(camera).width,
clip_height=layout_settings.cameras(camera).height,
mirror=mirror_sides,
options=layout_settings.cameras(camera).options,
)
)
ffmpeg_video_position = (
ffmpeg_video_position
+ ";[{input_clip}][right] overlay=eof_action=pass:repeatlast=0:"
"x={x_pos}:y={y_pos} [right1]".format(
input_clip=input_clip,
x_pos=layout_settings.cameras(camera).xpos,
y_pos=layout_settings.cameras(camera).ypos,
)
)
input_clip = "right1"
ffmpeg_rear_camera = ""
camera = "Rear"
if layout_settings.cameras(camera).include:
ffmpeg_rear_camera = (
"setpts=PTS-STARTPTS, "
# "crop=512:798:225:26, "
"scale={clip_width}x{clip_height} {mirror}{options}"
" [rear]".format(
clip_width=layout_settings.cameras(camera).width,
clip_height=layout_settings.cameras(camera).height,
mirror=mirror_sides,
options=layout_settings.cameras(camera).options,
)
)
ffmpeg_video_position = (
ffmpeg_video_position
+ ";[{input_clip}][rear] overlay=eof_action=pass:repeatlast=0:"
"x={x_pos}:y={y_pos} [rear1]".format(
input_clip=input_clip,
x_pos=layout_settings.cameras(camera).xpos,
y_pos=layout_settings.cameras(camera).ypos,
)
)
input_clip = "rear1"
filter_counter = 0
filter_string = ";[{input_clip}] {filter} [tmp{filter_counter}]"
ffmpeg_timestamp = ""
if not args.no_timestamp:
if layout_settings.font.font is None:
print(
f"Unable to get a font file for platform {sys.platform}. Please provide valid font file using "
f"--font or disable timestamp using --no-timestamp."
)
return
# noinspection PyPep8
temp_font_file = (
f"c:\{layout_settings.font.font}"
if sys.platform == "win32"
else layout_settings.font.font
)
if not os.path.isfile(temp_font_file):
print(
f"Font file {temp_font_file} does not exist. Provide a valid font file using --font or"
f" disable timestamp using --no-timestamp"
)
if sys.platform == "linux":
print(
"You can also install the fonts using for example: apt-get install ttf-freefont"
)
return
# noinspection PyPep8,PyPep8,PyPep8
ffmpeg_timestamp = (
ffmpeg_timestamp + f"drawtext=fontfile={layout_settings.font.font}:"
f"fontcolor={layout_settings.font.color}:fontsize={layout_settings.font.size}:"
"borderw=2:bordercolor=black@1.0:"
f"x={layout_settings.font.halign}:y={layout_settings.font.valign}:"
"text='%{{pts\:localtime\:{epoch_time}\:%x %X}}'"
)
ffmpeg_timestamp = filter_string.format(
input_clip=input_clip,
filter=ffmpeg_timestamp,
filter_counter=filter_counter,
)
input_clip = f"tmp{filter_counter}"
filter_counter += 1
speed = args.slow_down if "slow_down" in args else ""
speed = round(1 / args.speed_up, 4) if "speed_up" in args else speed
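# Example: --speedup 2 results in speed 0.5 and a "setpts=0.5*PTS" filter below,
# i.e. presentation timestamps are halved and playback runs twice as fast.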
ffmpeg_speed = ""
if speed != "":
ffmpeg_speed = filter_string.format(
input_clip=input_clip,
filter=f"setpts={speed}*PTS",
filter_counter=filter_counter,
)
input_clip = f"tmp{filter_counter}"
filter_counter += 1
ffmpeg_motiononly = ""
if args.motion_only:
ffmpeg_motiononly = filter_string.format(
input_clip=input_clip,
filter=f"mpdecimate=hi=64*48, setpts=N/FRAME_RATE/TB",
filter_counter=filter_counter,
)
input_clip = f"tmp{filter_counter}"
filter_counter += 1
ffmpeg_params = ["-preset", args.compression, "-crf", MOVIE_QUALITY[args.quality]]
use_gpu = not args.gpu if sys.platform == "darwin" else args.gpu
video_encoding = []
if not "enc" in args:
encoding = args.encoding if "encoding" in args else "x264"
# GPU acceleration enabled
if use_gpu:
print("GPU acceleration is enabled")
if sys.platform == "darwin":
video_encoding = video_encoding + ["-allow_sw", "1"]
encoding = encoding + "_mac"
else:
if args.gpu_type is None:
print(
"Parameter --gpu_type is mandatory when parameter --use_gpu is used."
)
return
encoding = encoding + "_" + args.gpu_type
bit_rate = str(int(10000 * layout_settings.scale)) + "K"
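# A target bitrate is added for GPU encoders; 10000K is scaled by the layout scale
# (for example a 0.5 scale gives "-b:v 5000K").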
video_encoding = video_encoding + ["-b:v", bit_rate]
video_encoding = video_encoding + ["-c:v", MOVIE_ENCODING[encoding]]
else:
video_encoding = video_encoding + ["-c:v", args.enc]
ffmpeg_params = ffmpeg_params + video_encoding
# Set metadata
ffmpeg_params = ffmpeg_params + [
"-metadata",
f"description=Created by tesla_dashcam {VERSION_STR}",
]
# Determine the target folder and filename.
# If no extension then assume it is a folder.
if (
os.path.splitext(args.output)[1] is not None
and os.path.splitext(args.output)[1] != ""
):
target_folder, target_filename = os.path.split(args.output)
if target_folder is None or target_folder == "":
# If nothing in target_filename then no folder was given,
# setting default movie folder
target_folder = movie_folder
target_filename = args.output
else:
# Folder only provided.
target_folder = args.output
target_filename = None
# Convert target folder to absolute path if relative path has been provided.
target_folder = os.path.abspath(target_folder)
# Ensure the folder exists or, if it does not, that it can be created.
if not make_folder("--output", target_folder):
return
temp_folder = args.temp_dir
if temp_folder is not None:
# Convert temp folder to absolute path if relative path has been provided
temp_folder = os.path.abspath(args.temp_dir)
if not make_folder("--temp_dir", temp_folder):
return
# Set the run type based on arguments.
runtype = "RUN"
if args.monitor:
runtype = "MONITOR"
elif args.monitor_once:
runtype = "MONITOR_ONCE"
monitor_file = args.monitor_trigger
# If no source provided then set to MONITOR_ONCE and we're only going to
# take SavedClips and SentryClips
source_list = args.source
if not source_list:
source_list = ["SavedClips", "SentryClips"]
if runtype == "RUN":
runtype = "MONITOR_ONCE"
start_timestamp = None
if args.start_timestamp is not None:
start_timestamp = isoparse(args.start_timestamp)
if start_timestamp.tzinfo is None:
start_timestamp = start_timestamp.astimezone(get_localzone())
end_timestamp = None
if args.end_timestamp is not None:
end_timestamp = isoparse(args.end_timestamp)
if end_timestamp.tzinfo is None:
end_timestamp = end_timestamp.astimezone(get_localzone())
start_offset = abs(args.start_offset) if args.start_offset is not None else 0
end_offset = abs(args.end_offset) if args.end_offset is not None else 0
video_settings = {
"source_folder": source_list,
"output": args.output,
"target_folder": target_folder,
"target_filename": target_filename,
"temp_dir": temp_folder,
"run_type": runtype,
"merge_subdirs": args.merge_subdirs,
"chapter_offset": args.chapter_offset,
"movie_filename": None,
"keep_intermediate": args.keep_intermediate,
"notification": args.system_notification,
"movie_layout": args.layout,
"movie_speed": speed,
"video_encoding": video_encoding,
"movie_encoding": args.encoding if "encoding" in args else "x264",
"movie_compression": args.compression,
"movie_quality": args.quality,
"background": ffmpeg_black_video,
"ffmpeg_exec": ffmpeg,
"base": ffmpeg_base,
"video_layout": layout_settings,
"clip_positions": ffmpeg_video_position,
"timestamp_text": ffmpeg_timestamp,
"ffmpeg_speed": ffmpeg_speed,
"ffmpeg_motiononly": ffmpeg_motiononly,
"movflags_faststart": not args.faststart,
"input_clip": input_clip,
"other_params": ffmpeg_params,
"left_camera": ffmpeg_left_camera,
"front_camera": ffmpeg_front_camera,
"right_camera": ffmpeg_right_camera,
"rear_camera": ffmpeg_rear_camera,
"start_timestamp": start_timestamp,
"start_offset": start_offset,
"end_timestamp": end_timestamp,
"end_offset": end_offset,
"skip_existing": args.skip_existing,
}
_LOGGER.debug(f"Video Settings {video_settings}")
_LOGGER.debug(f"Layout Settings {layout_settings}")
# If we constantly run and monitor for drive added or not.
if video_settings["run_type"] in ["MONITOR", "MONITOR_ONCE"]:
video_settings.update({"skip_existing": True})
trigger_exist = False
if monitor_file is None:
print("Monitoring for TeslaCam Drive to be inserted. Press CTRL-C to stop")
else:
print(
"Monitoring for trigger {} to exist. Press CTRL-C to stop".format(
monitor_file
)
)
while True:
try:
# Monitoring for disk to be inserted and not for a file.
if monitor_file is None:
source_folder, source_partition = get_tesladashcam_folder()
if source_folder is None:
# Nothing found, sleep for 1 minute and check again.
if trigger_exist:
print("TeslaCam drive has been ejected.")
print(
"Monitoring for TeslaCam Drive to be inserted. "
"Press CTRL-C to stop"
)
sleep(MONITOR_SLEEP_TIME)
trigger_exist = False
continue
# As long as TeslaCam drive is still attached we're going to
# keep on waiting.
if trigger_exist:
_LOGGER.debug(f"TeslaCam Drive still attached")
sleep(MONITOR_SLEEP_TIME)
continue
# Got a folder, append what was provided as source unless
# . was provided in which case everything is done.
source_folder_list = []
for folder in video_settings["source_folder"]:
if folder == ".":
source_folder_list.append(folder)
else:
source_folder_list.append(
os.path.join(source_folder, folder)
)
message = "TeslaCam folder found on {partition}.".format(
partition=source_partition
)
else:
# Wait till trigger file exist (can also be folder).
if not os.path.exists(monitor_file):
_LOGGER.debug(f"Trigger file {monitor_file} does not exist.")
sleep(MONITOR_SLEEP_TIME)
trigger_exist = False
continue
if trigger_exist:
sleep(MONITOR_SLEEP_TIME)
continue
message = "Trigger {} exist.".format(monitor_file)
trigger_exist = True
# Set monitor path, make sure what was provided is a file first otherwise get path.
monitor_path = monitor_file
if os.path.isfile(monitor_file):
monitor_path, _ = os.path.split(monitor_file)
# If . is provided then source folder is path where monitor file exist.
source_folder_list = []
for folder in video_settings["source_folder"]:
if folder == ".":
source_folder_list.append(monitor_path)
else:
# If source path provided is absolute then use that for source path
if os.path.isabs(folder):
source_folder_list.append(folder)
else:
# Path provided is relative, hence based on path of trigger file.
source_folder_list.append(
os.path.join(monitor_path, folder)
)
print(message)
if args.system_notification:
notify("TeslaCam", "Started", message)
if len(source_folder_list) == 1:
print(f"Retrieving all files from {source_folder_list[0]}")
else:
print(f"Retrieving all files from: ")
for folder in source_folder_list:
print(f" {folder}")
folders = get_movie_files(
source_folder_list, args.exclude_subdirs, video_settings
)
if video_settings["run_type"] == "MONITOR":
# We will continue to monitor hence we need to
# ensure we always have a unique final movie name.
movie_filename = (
datetime.today().strftime("%Y-%m-%d_%H_%M")
if video_settings["target_filename"] is None
else os.path.splitext(video_settings["target_filename"])[0]
+ "_"
+ datetime.today().strftime("%Y-%m-%d_%H_%M")
+ os.path.splitext(video_settings["target_filename"])[1]
)
video_settings.update({"movie_filename": movie_filename})
else:
# Set filename to right now if no filename provided.
movie_filename = (
datetime.today().strftime("%Y-%m-%d_%H_%M")
if video_settings["target_filename"] is None
else video_settings["target_filename"]
)
video_settings.update({"movie_filename": movie_filename})
process_folders(folders, video_settings, args.delete_source)
print("Processing of movies has completed.")
if args.system_notification:
notify(
"TeslaCam", "Completed", "Processing of movies has completed."
)
# Stop if we're only to monitor once and then exit.
if video_settings["run_type"] == "MONITOR_ONCE":
if monitor_file is not None:
if os.path.isfile(monitor_file):
try:
os.remove(monitor_file)
except OSError as exc:
print(
"Error trying to remove trigger file {}: {}".format(
monitor_file, exc
)
)
                print("Exiting monitoring as asked to process once.")
break
if monitor_file is None:
trigger_exist = True
print(
"Waiting for TeslaCam Drive to be ejected. Press "
"CTRL-C to stop"
)
else:
if os.path.isfile(monitor_file):
try:
os.remove(monitor_file)
except OSError as exc:
print(
"Error trying to remove trigger file {}: {}".format(
monitor_file, exc
)
)
break
trigger_exist = False
print(
"Monitoring for trigger {}. Press CTRL-C to stop".format(
monitor_file
)
)
else:
print(
"Waiting for trigger {} to be removed. Press CTRL-C to stop".format(
monitor_file
)
)
except KeyboardInterrupt:
print("Monitoring stopped due to CTRL-C.")
break
else:
folders = get_movie_files(
video_settings["source_folder"], args.exclude_subdirs, video_settings
)
# Set filename to right now if no filename provided.
movie_filename = (
datetime.today().strftime("%Y-%m-%d_%H_%M")
if video_settings["target_filename"] is None
else video_settings["target_filename"]
)
video_settings.update({"movie_filename": movie_filename})
process_folders(folders, video_settings, args.delete_source)
if sys.version_info < (3, 7):
print(
f"Python version 3.7 or higher is required, you have: {sys.version}. Please update your Python version."
)
sys.exit(1)
if __name__ == "__main__":
sys.exit(main())
"""
Merges the Tesla Dashcam and Sentry camera video files into 1 video. It
then further concatenates the files together to make 1 movie.
"""
import argparse
import logging
import os
import sys
from datetime import datetime, timedelta, timezone
from fnmatch import fnmatch
from glob import glob
from pathlib import Path
from re import search
from shlex import split as shlex_split
from shutil import which
from subprocess import CalledProcessError, run
from tempfile import mkstemp
from time import sleep, time as timestamp
from typing import List, Optional
import requests
from dateutil.parser import isoparse
from psutil import disk_partitions
from tzlocal import get_localzone
_LOGGER = logging.getLogger(__name__)
# TODO: Move everything into classes and separate files. For example,
# update class, font class (for timestamp), folder class, clip class (
# combining front, left, and right info), file class (for individual file).
# Clip class would then have to merge the camera clips, folder class would
# have to concatenate the merged clips. Settings class to take in all settings
# TODO: Create kind of logger or output classes for output. That then allows
# different ones to be created based on where it should go to (stdout,
# log file, ...).
VERSION = {"major": 0, "minor": 1, "patch": 16, "beta": -1}
VERSION_STR = "v{major}.{minor}.{patch}".format(
major=VERSION["major"], minor=VERSION["minor"], patch=VERSION["patch"]
)
if VERSION["beta"] > -1:
VERSION_STR = VERSION_STR + "b{beta}".format(beta=VERSION["beta"])
MONITOR_SLEEP_TIME = 5
GITHUB = {
"URL": "https://api.github.com",
"owner": "ehendrix23",
"repo": "tesla_dashcam",
}
FFMPEG = {
"darwin": "ffmpeg",
"win32": "ffmpeg.exe",
"cygwin": "ffmpeg",
"linux": "ffmpeg",
"freebsd11": "ffmpeg",
}
# noinspection PyPep8
MOVIE_HOMEDIR = {
    "darwin": "Movies/Tesla_Dashcam",
    "win32": "Videos\\Tesla_Dashcam",
"cygwin": "Videos/Tesla_Dashcam",
"linux": "Videos/Tesla_Dashcam",
"freebsd11": "Videos/Tesla_Dashcam",
}
DEFAULT_CLIP_HEIGHT = 960
DEFAULT_CLIP_WIDTH = 1280
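# Names map to numeric encoder quality values (lower number = higher quality and
# larger files); presumably passed to ffmpeg as a constant-quality (CRF-style)
# setting elsewhere in this script.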
MOVIE_QUALITY = {
"HIGH": "18",
"MEDIUM": "20",
"LOW": "23",
"LOWER": "28",
"LOWEST": "33",
}
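# Maps a codec/hardware combination to the encoder name handed to ffmpeg, e.g.
# libx264 for software encoding, h264_nvenc for NVIDIA NVENC, h264_videotoolbox
# for Apple VideoToolbox and h264_qsv for Intel QuickSync.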
MOVIE_ENCODING = {
"x264": "libx264",
"x264_nvidia": "h264_nvenc",
"x264_mac": "h264_videotoolbox",
"x264_intel": "h264_qsv",
"x264_RPi": "h264_omx",
"x265": "libx265",
"x265_nvidia": "hevc_nvenc",
"x265_mac": "hevc_videotoolbox",
"x265_intel": "hevc_qsv",
"x265_RPi": "h265",
}
DEFAULT_FONT = {
"darwin": "/Library/Fonts/Arial Unicode.ttf",
"win32": "/Windows/Fonts/arial.ttf",
"cygwin": "/cygdrive/c/Windows/Fonts/arial.ttf",
"linux": "/usr/share/fonts/truetype/freefont/FreeSans.ttf",
"freebsd11": "/usr/share/local/fonts/freefont-ttf/FreeSans.ttf",
}
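# Horizontal/vertical alignment expressions, presumably used in the ffmpeg drawtext
# filter elsewhere in this script; w, h, text_w and text_h are drawtext variables
# for the frame size and the rendered text size.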
HALIGN = {"LEFT": "10", "CENTER": "(w/2-text_w/2)", "RIGHT": "(w-text_w)"}
VALIGN = {"TOP": "10", "MIDDLE": "(h/2-(text_h/2))", "BOTTOM": "(h-(text_h*2))"}
TOASTER_INSTANCE = None
class Font(object):
""" Font Class
"""
def __init__(self, layout, font=None, size=None, color=None):
self._layout = layout
self._font = font
self._size = size
self._color = color
self._halign = None
self._valign = None
self._xpos = None
self._ypos = None
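        # The properties below first look for layout-specific overrides named
        # _font_size, _font_halign and _font_valign on the layout object (see the
        # Diamond layout) and otherwise fall back to the values stored here.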
@property
def font(self):
return self._font
@font.setter
def font(self, value):
self._font = value
@property
def size(self):
if hasattr(self._layout, "_font_size"):
return getattr(self._layout, "_font_size")()
return (
int(max(16, 16 * self._layout.scale)) if self._size is None else self._size
)
@size.setter
def size(self, value):
self._size = value
@property
def color(self):
return self._color
@color.setter
def color(self, value):
self._color = value
@property
def halign(self):
if hasattr(self._layout, "_font_halign"):
return getattr(self._layout, "_font_halign")()
return HALIGN.get(self._halign, self._halign)
@halign.setter
def halign(self, value):
self._halign = value
@property
def valign(self):
if hasattr(self._layout, "_font_valign"):
return getattr(self._layout, "_font_valign")()
return VALIGN.get(self._valign, self._valign)
@valign.setter
def valign(self, value):
self._valign = value
@property
def xpos(self):
return self._xpos
@xpos.setter
def xpos(self, value):
self._xpos = value
@property
def ypos(self):
return self._ypos
@ypos.setter
def ypos(self, value):
self._ypos = value
class Camera(object):
""" Camera Class
"""
def __init__(self, layout, camera):
self._layout = layout
self._camera = camera
self._include = True
self._width = 1280
self._height = 960
self._xpos = 0
self._ypos = 0
self._scale = 0
self._options = ""
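        # Layout classes can override the geometry of an individual camera by defining
        # methods named _<camera>_width, _<camera>_height, _<camera>_xpos or
        # _<camera>_ypos; the properties below fall back to the stored values, scaled
        # and zeroed out when the camera is excluded.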
@property
def camera(self):
return self._camera
@camera.setter
def camera(self, value):
self._camera = value
@property
def include(self):
return self._include
@include.setter
def include(self, value):
self._include = value
@property
def width(self):
return (
getattr(self._layout, "_" + self._camera + "_width")()
if hasattr(self._layout, "_" + self._camera + "_width")
else int(self._width * self.scale * self.include)
)
@width.setter
def width(self, value):
self._width = value
@property
def height(self):
return (
getattr(self._layout, "_" + self._camera + "_height")()
if hasattr(self._layout, "_" + self._camera + "_height")
else int(self._height * self.scale * self.include)
)
@height.setter
def height(self, value):
self._height = value
@property
def xpos(self):
if hasattr(self._layout, "_" + self._camera + "_xpos"):
return getattr(self._layout, "_" + self._camera + "_xpos")() * self.include
return self._xpos * self.include
@xpos.setter
def xpos(self, value):
self._xpos = value
@property
def ypos(self):
if hasattr(self._layout, "_" + self._camera + "_ypos"):
return getattr(self._layout, "_" + self._camera + "_ypos")() * self.include
return self._ypos * self.include
@ypos.setter
def ypos(self, value):
self._ypos = value
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, value):
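        # Illustrative examples: a value of "0.5" is treated as a multiplier of the
        # default 1280x960 clip size, whereas "640x480" sets an explicit resolution
        # and resets the multiplier to 1.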
if value is None:
self._scale = None
elif len(str(value).split("x")) == 1:
# Scale provided is a multiplier
self._scale = float(str(value).split("x")[0])
else:
# Scale is a resolution.
self.width = int(str(value).split("x")[0])
self.height = int(str(value).split("x")[1])
self._scale = 1
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
class MovieLayout(object):
""" Main Layout class
"""
def __init__(self):
self._cameras = {
"Front": Camera(layout=self, camera="front"),
"Left": Camera(layout=self, camera="left"),
"Right": Camera(layout=self, camera="right"),
"Rear": Camera(layout=self, camera="rear"),
}
self._font = Font(layout=self)
self._swap_left_right = False
self._swap_front_rear = False
self._perspective = False
self._font.halign = "CENTER"
self._font.valign = "BOTTOM"
def cameras(self, camera):
return self._cameras.get(camera, self._cameras)
@property
def font(self):
return self._font
@font.setter
def font(self, value):
self._font = value
@property
def swap_left_right(self):
return self._swap_left_right
@swap_left_right.setter
def swap_left_right(self, value):
self._swap_left_right = value
@property
def swap_front_rear(self):
return self._swap_front_rear
@swap_front_rear.setter
def swap_front_rear(self, value):
self._swap_front_rear = value
@property
def perspective(self):
return self._perspective
@perspective.setter
def perspective(self, new_perspective):
self._perspective = new_perspective
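        # When enabled, the side cameras get extra padding and are run through the
        # ffmpeg "perspective" filter so they appear angled; the corner coordinates
        # below are expressed in the padded frame width (W) and height (H).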
if self._perspective:
self.cameras("Left").options = (
", pad=iw+4:3/2*ih:-1:ih/8:0x00000000, "
"perspective=x0=0:y0=1*H/5:x1=W:y1=-3/44*H:"
"x2=0:y2=6*H/5:x3=7/8*W:y3=5*H/6:sense=destination"
)
self.cameras("Right").options = (
", pad=iw+4:3/2*ih:-1:ih/8:0x00000000,"
"perspective=x0=0:y1=1*H/5:x1=W:y0=-3/44*H:"
"x2=1/8*W:y3=6*H/5:x3=W:y2=5*H/6:sense=destination"
)
else:
self.cameras("Left").options = ""
self.cameras("Right").options = ""
@property
def scale(self):
# Return scale of new video based on 1280x960 video = scale:1
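        # e.g. a 1920x960 output yields (1920 * 960) / (1280 * 960) = 1.5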
return (self.video_height * self.video_width) / (1280 * 960)
@scale.setter
def scale(self, scale):
self.cameras("Front").scale = scale
self.cameras("Left").scale = scale
self.cameras("Right").scale = scale
self.cameras("Rear").scale = scale
@property
def video_width(self):
return int(
max(
self.cameras("Front").xpos + self.cameras("Front").width,
self.cameras("Left").xpos + self.cameras("Left").width,
self.cameras("Right").xpos + self.cameras("Right").width,
self.cameras("Rear").xpos + self.cameras("Rear").width,
)
)
@property
def video_height(self):
        perspective_adjustment = 3 / 2 if self.perspective else 1
        return int(
            max(
                self.cameras("Front").ypos + self.cameras("Front").height,
                perspective_adjustment * self.cameras("Left").ypos
                + self.cameras("Left").height,
                perspective_adjustment * self.cameras("Right").ypos
+ self.cameras("Right").height,
self.cameras("Rear").ypos + self.cameras("Rear").height,
)
)
@property
def center_xpos(self):
return int(self.video_width / 2)
@property
def center_ypos(self):
return int(self.video_height / 2)
class FullScreen(MovieLayout):
""" FullScreen Movie Layout
[FRONT_CAMERA]
[LEFT_CAMERA][REAR_CAMERA][RIGHT_CAMERA]
"""
def __init__(self):
super().__init__()
self.scale = 1 / 2
@property
def video_width(self):
return int(
max(
self.cameras("Front").width,
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width,
)
)
@property
def video_height(self):
        perspective_adjustment = 3 / 2 if self.perspective else 1
        return int(
            self.cameras("Front").height
            + max(
                perspective_adjustment * self.cameras("Left").height,
                self.cameras("Rear").height,
                perspective_adjustment * self.cameras("Right").height,
)
)
def _front_height(self):
# For height keep same ratio of 4/3
return int(self.cameras("Front").width / 4 * 3)
def _front_xpos(self):
# Make sure that front is placed in the middle
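        # e.g. at the default 1/2 scale (all cameras 640x480) this is
        # max(0, 960 - 1920 / 2 + 640) = 640, centering the 640px wide front clip
        # on the 1920px wide video.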
return (
max(
0,
self.center_xpos
- int(
(
self.cameras("Left").width
+ self.cameras("Front").width
+ self.cameras("Right").width
)
/ 2
)
+ self.cameras("Left").width,
)
* self.cameras("Front").include
)
def _left_xpos(self):
return (
max(
0,
self.center_xpos
- int(
(
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width
)
/ 2
),
)
* self.cameras("Left").include
)
def _left_ypos(self):
return (
self.cameras("Front").ypos + self.cameras("Front").height
) * self.cameras("Left").include
def _rear_xpos(self):
return (
max(
0,
self.center_xpos
- int(
(
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width
)
/ 2
)
+ self.cameras("Left").width,
)
* self.cameras("Rear").include
)
def _rear_ypos(self):
return (
self.cameras("Front").ypos + self.cameras("Front").height
) * self.cameras("Rear").include
def _right_xpos(self):
return (
max(
0,
self.center_xpos
- int(
(
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width
)
/ 2
)
+ self.cameras("Left").width
+ self.cameras("Rear").width,
)
* self.cameras("Right").include
)
def _right_ypos(self):
return (
self.cameras("Front").ypos + self.cameras("Front").height
) * self.cameras("Right").include
# noinspection PyProtectedMember
class WideScreen(FullScreen):
""" WideScreen Movie Layout
[ FRONT_CAMERA ]
[LEFT_CAMERA][REAR_CAMERA][RIGHT_CAMERA]
"""
def __init__(self):
super().__init__()
self.scale = 1 / 2
        # Set front scale to None so we know if it was overridden or not.
self.cameras("Front").scale = None
    # Only the front width has to be adjusted: by default it spans left + rear + right instead of using the normal scale.
def _front_width(self):
return (
(
self.cameras("Left").width
+ self.cameras("Rear").width
+ self.cameras("Right").width
)
* self.cameras("Front").include
if self.cameras("Front").scale is None
else int(
(
self.cameras("Front")._width
* self.cameras("Front").scale
* self.cameras("Front").include
)
)
)
class Cross(FullScreen):
""" Cross Movie Layout
[FRONT_CAMERA]
[LEFT_CAMERA][RIGHT_CAMERA]
[REAR_CAMERA]
"""
def __init__(self):
super().__init__()
self.scale = 1 / 2
@property
def video_width(self):
return max(
self.cameras("Front").width,
self.cameras("Left").width + self.cameras("Right").width,
self.cameras("Rear").width,
)
@property
def video_height(self):
if self.perspective:
height = int(
max(
3 / 2 * self.cameras("Left").height,
3 / 2 * self.cameras("Right").height,
)
)
if (
self.cameras("Left").include
and self.cameras("Left").scale >= self.cameras("Rear").scale
and self.cameras("Right").include
and self.cameras("Right").scale >= self.cameras("Rear").scale
and self.cameras("Rear").include
):
height = int(height / 3 * 2)
height += self.cameras("Rear").height
else:
height = (
max(self.cameras("Left").height, self.cameras("Right").height)
+ self.cameras("Rear").height
)
return int(height + self.cameras("Front").height)
def _front_xpos(self):
return (
int(max(0, self.center_xpos - (self.cameras("Front").width / 2)))
* self.cameras("Front").include
)
def _left_xpos(self):
return (
max(
0,
self.center_xpos
- int((self.cameras("Left").width + self.cameras("Right").width) / 2),
)
* self.cameras("Left").include
)
def _left_ypos(self):
return (
self.cameras("Front").height
+ int(
(
max(self.cameras("Left").height, self.cameras("Right").height)
- self.cameras("Left").height
)
/ 2
)
) * self.cameras("Left").include
def _right_xpos(self):
return (
max(
0,
self.center_xpos
- int((self.cameras("Left").width + self.cameras("Right").width) / 2)
+ self.cameras("Left").width,
)
* self.cameras("Right").include
)
def _right_ypos(self):
return (
self.cameras("Front").height
+ int(
(
max(self.cameras("Left").height, self.cameras("Right").height)
- self.cameras("Right").height
)
/ 2
)
) * self.cameras("Right").include
def _rear_xpos(self):
return (
int(max(0, self.center_xpos - (self.cameras("Rear").width / 2)))
* self.cameras("Rear").include
)
def _rear_ypos(self):
return int(max(0, self.video_height - self.cameras("Rear").height))
# noinspection PyProtectedMember
class Diamond(Cross):
""" Diamond Movie Layout
[FRONT_CAMERA]
[LEFT_CAMERA] [RIGHT_CAMERA]
[REAR_CAMERA]
"""
def __init__(self):
super().__init__()
self.scale = 1 / 2
self._font.valign = "MIDDLE"
def _font_halign(self):
if self._font._halign == "CENTER":
# Change alignment to left or right if one of the left/right cameras is excluded.
if (self.cameras("Left").include and not self.cameras("Right").include) or (
self.cameras("Right").include and not self.cameras("Left").include
):
x_pos = int(
max(
self.cameras("Front").xpos + self.cameras("Front").width / 2,
self.cameras("Rear").xpos + self.cameras("Rear").width / 2,
)
)
return f"({x_pos} - text_w / 2)"
return HALIGN.get(self._font._halign, self._font._halign)
def _font_valign(self):
if self._font._valign == "MIDDLE":
if self.cameras("Front").include:
return (
f'({self.cameras("Front").ypos + self.cameras("Front").height} + 5)'
)
elif self.cameras("Rear").include:
return f'({self.cameras("Rear").ypos} - 5 - text_h)'
return VALIGN.get(self._font._valign, self._font._valign)
def _font_size(self):
        # For this layout the video height has to include the font size, but by default
        # the font size is calculated from the video height. To break that circular
        # dependency, compute the scale from the video height without the font size.
if self.font._size is None:
scale = (
self._video_height(include_fontsize=False)
* self.video_width
/ (1280 * 960)
)
return int(max(16, 16 * scale))
else:
return self.font.size
@property
def video_width(self):
return (
max(self.cameras("Front").width, self.cameras("Rear").width)
+ self.cameras("Left").width
+ self.cameras("Right").width
)
def _video_height(self, include_fontsize=True):
perspective = 3 / 2 if self.perspective else 1
fontsize = self.font.size if include_fontsize else 0
return int(
max(
perspective
* max(self.cameras("Left").height, self.cameras("Right").height),
self.cameras("Front").height + self.cameras("Rear").height + fontsize,
)
)
@property
def video_height(self):
return self._video_height(include_fontsize=True)
def _front_xpos(self):
return (
self.cameras("Left").width
+ int(
(
max(self.cameras("Front").width, self.cameras("Rear").width)
- self.cameras("Front").width
)
/ 2
)
) * self.cameras("Front").include
def _left_xpos(self):
return 0
def _left_ypos(self):
return max(0, self.center_ypos - int(self.cameras("Left").height / 2))
def _right_xpos(self):
return max(
self.cameras("Front").xpos + self.cameras("Front").width,
self.cameras("Rear").xpos + self.cameras("Rear").width,
)
def _right_ypos(self):
return max(0, self.center_ypos - int(self.cameras("Right").height / 2))
def _rear_xpos(self):
return (
self.cameras("Left").width
+ int(
(
max(self.cameras("Front").width, self.cameras("Rear").width)
- self.cameras("Rear").width
)
/ 2
)
) * self.cameras("Rear").include
class MyArgumentParser(argparse.ArgumentParser):
def convert_arg_line_to_args(self, arg_line):
# Remove comments.
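        # Illustrative example: a line such as
        #   --layout FULLSCREEN  # use the full screen layout
        # in an @parameter file becomes ["--layout", "FULLSCREEN"]; the trailing
        # comment is stripped by shlex.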
return shlex_split(arg_line, comments=True)
def args_to_dict(self, arguments, default):
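        # Illustrative example: with default="scale", the argument group
        # ["camera=front", "1"] becomes {"camera": "front", "scale": "1"}.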
argument_list = []
if arguments is None:
return argument_list
for argument in arguments:
argument_dict = {}
for argument_value in argument:
if "=" in argument_value:
key = argument_value.split("=")[0].lower()
value = (
argument_value.split("=")[1].strip()
if argument_value.split("=")[1].strip() != ""
else None
)
else:
key = default
value = argument_value
argument_dict.update({key: value})
argument_list.append(argument_dict)
return argument_list
# noinspection PyCallByClass,PyProtectedMember
class SmartFormatter(argparse.HelpFormatter):
""" Formatter for argument help. """
def _split_lines(self, text, width):
""" Provide raw output allowing for prettier help output """
if text.startswith("R|"):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def _get_help_string(self, action):
""" Call default help string """
return argparse.ArgumentDefaultsHelpFormatter._get_help_string(self, action)
def search_dict(
match_value: object = None, key: str = None, search_list: List[dict] = None
) -> Optional[dict]:
"""
Returns the 1st element in a list containing dictionaries
where the value of key provided matches the value provided.
:param match_value: value to match upon (search for)
:type match_value: object
:param key: dictionary key to use for the match
:type key: str
:param search_list: List containing dictionary objects in which to search
:type search_list: List[dict]
:return: Dictionary object that matches
:rtype: dict
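    Example (illustrative):
        >>> search_dict(2, "id", [{"id": 1}, {"id": 2}])
        {'id': 2}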
"""
if key is None or search_list is None:
return None
if match_value is None:
return next(
(element for element in search_list if element.get(key) is None), None
)
return next(
(element for element in search_list if element.get(key) == match_value), None
)
def check_latest_release(include_beta):
""" Checks GitHub for latest release """
url = "{url}/repos/{owner}/{repo}/releases".format(
url=GITHUB["URL"], owner=GITHUB["owner"], repo=GITHUB["repo"]
)
if not include_beta:
url = url + "/latest"
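        # e.g. https://api.github.com/repos/ehendrix23/tesla_dashcam/releases/latest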
try:
releases = requests.get(url)
except requests.exceptions.RequestException as exc:
print("Unable to check for latest release: {exc}".format(exc=exc))
return None
release_data = releases.json()
# If we include betas then we would have received a list, thus get 1st
# element as that is the latest release.
if include_beta:
release_data = release_data[0]
return release_data
def get_tesladashcam_folder():
""" Check if there is a drive mounted with the Tesla DashCam folder."""
for partition in disk_partitions(all=False):
if "cdrom" in partition.opts or partition.fstype == "":
continue
teslacamfolder = os.path.join(partition.mountpoint, "TeslaCam")
if os.path.isdir(teslacamfolder):
_LOGGER.debug(f"Folder TeslaCam found on partition {partition.mountpoint}.")
return teslacamfolder, partition.mountpoint
_LOGGER.debug(f"No TeslaCam folder on partition {partition.mountpoint}.")
return None, None
def get_movie_files(source_folder, exclude_subdirs, video_settings):
""" Find all the clip files within folder (and subfolder if requested) """
folder_list = {}
total_folders = 0
for pathname in source_folder:
if os.path.isdir(pathname):
isfile = False
if exclude_subdirs:
# Retrieve all the video files in current path:
search_path = os.path.join(pathname, "*.mp4")
files = [
filename
for filename in glob(search_path)
if not os.path.basename(filename).startswith(".")
]
print(f"Discovered {len(files)} files in {pathname}")
else:
                # Search all sub folders.
files = []
for folder, _, filenames in os.walk(pathname, followlinks=True):
total_folders = total_folders + 1
for filename in (
filename
for filename in filenames
if not os.path.basename(filename).startswith(".")
and fnmatch(filename, "*.mp4")
):
files.append(os.path.join(folder, filename))
print(
f"Discovered {total_folders} folders containing total of {len(files)} files in {pathname}"
)
else:
files = [pathname]
isfile = True
# Now go through and get timestamps etc..
for file in sorted(files):
# Strip path so that we just have the filename.
movie_folder, movie_filename = os.path.split(file)
# And now get the timestamp of the filename.
filename_timestamp = movie_filename.rsplit("-", 1)[0]
movie_file_list = folder_list.get(movie_folder, {})
# Check if we already processed this timestamp.
if movie_file_list.get(filename_timestamp) is not None:
# Already processed this timestamp, moving on.
continue
_LOGGER.debug(
f"Checking camera files in folder {movie_folder} with timestamp {filename_timestamp}"
)
video_info = {
"front_camera": {
"filename": None,
"duration": None,
"timestamp": None,
"include": False,
},
"left_camera": {
"filename": None,
"duration": None,
"timestamp": None,
"include": False,
},
"right_camera": {
"filename": None,
"duration": None,
"timestamp": None,
"include": False,
},
"rear_camera": {
"filename": None,
"duration": None,
"timestamp": None,
"include": False,
},
}
front_filename = str(filename_timestamp) + "-front.mp4"
front_path = os.path.join(movie_folder, front_filename)
left_filename = str(filename_timestamp) + "-left_repeater.mp4"
left_path = os.path.join(movie_folder, left_filename)
right_filename = str(filename_timestamp) + "-right_repeater.mp4"
right_path = os.path.join(movie_folder, right_filename)
rear_filename = str(filename_timestamp) + "-back.mp4"
rear_path = os.path.join(movie_folder, rear_filename)
# Get meta data for each video to determine creation time and duration.
metadata = get_metadata(
video_settings["ffmpeg_exec"],
[front_path, left_path, right_path, rear_path],
)
# Move on to next one if nothing received.
if not metadata:
continue
# Get the longest duration:
duration = 0
video_timestamp = None
for item in metadata:
_, filename = os.path.split(item["filename"])
if filename == front_filename:
camera = "front_camera"
video_filename = front_filename
include_clip = (
item["include"]
if video_settings["video_layout"].cameras("Front").include
else False
)
elif filename == left_filename:
camera = "left_camera"
video_filename = left_filename
include_clip = (
item["include"]
if video_settings["video_layout"].cameras("Left").include
else False
)
elif filename == right_filename:
camera = "right_camera"
video_filename = right_filename
include_clip = (
item["include"]
if video_settings["video_layout"].cameras("Right").include
else False
)
elif filename == rear_filename:
camera = "rear_camera"
video_filename = rear_filename
include_clip = (
item["include"]
if video_settings["video_layout"].cameras("Rear").include
else False
)
else:
continue
# Store duration and timestamp
video_info[camera].update(
filename=video_filename,
duration=item["duration"],
timestamp=item["timestamp"],
include=include_clip,
)
# Only check duration and timestamp if this file is not corrupt and if we include this camera
# in our output.
if include_clip:
# Figure out which one has the longest duration
duration = (
item["duration"] if item["duration"] > duration else duration
)
# Figure out starting timestamp
if video_timestamp is None:
video_timestamp = item["timestamp"]
else:
video_timestamp = (
item["timestamp"]
if item["timestamp"] < video_timestamp
else video_timestamp
)
if video_timestamp is None:
# Firmware version 2019.16 changed filename timestamp format.
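                # e.g. "2019-05-21_10-15" (before 2019.16) versus
                # "2019-05-21_10-15-30" (2019.16 and later).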
if len(filename_timestamp) == 16:
# This is for before version 2019.16
video_timestamp = datetime.strptime(
filename_timestamp, "%Y-%m-%d_%H-%M"
)
video_timestamp = video_timestamp.astimezone(get_localzone())
else:
# This is for version 2019.16 and later
video_timestamp = datetime.strptime(
filename_timestamp, "%Y-%m-%d_%H-%M-%S"
)
video_timestamp = video_timestamp.astimezone(timezone.utc)
movie_info = {
"movie_folder": movie_folder,
"timestamp": video_timestamp,
"duration": duration,
"video_info": video_info,
"file_only": isfile,
}
movie_file_list.update({filename_timestamp: movie_info})
folder_list.update({movie_folder: movie_file_list})
return folder_list
def get_metadata(ffmpeg, filenames):
""" Retrieve the meta data for the clip (i.e. timestamp, duration) """
# Get meta data for each video to determine creation time and duration.
ffmpeg_command = [ffmpeg]
metadata = []
for camera_file in filenames:
if os.path.isfile(camera_file):
ffmpeg_command.append("-i")
ffmpeg_command.append(camera_file)
metadata.append(
{
"filename": camera_file,
"timestamp": None,
"duration": 0,
"include": False,
}
)
else:
_LOGGER.debug(f"File {camera_file} does not exist, skipping.")
# Don't run ffmpeg if nothing to check for.
if not metadata:
return metadata
ffmpeg_command.append("-hide_banner")
command_result = run(ffmpeg_command, capture_output=True, text=True)
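    # ffmpeg writes the clip information to stderr; the loop below scans for lines
    # that look roughly like this (illustrative):
    #   Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '...-front.mp4':
    #       creation_time   : 2019-05-21T10:00:00.000000Z
    #     Duration: 00:01:00.04, start: 0.000000, bitrate: 1234 kb/s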
metadata_iterator = iter(metadata)
input_counter = 0
video_timestamp = None
wait_for_input_line = True
metadata_item = {}
for line in command_result.stderr.splitlines():
if search("^Input #", line) is not None:
            # Each "Input #" line starts the metadata for the next file in our list. A
            # corrupt file will not get a usable duration, so its entry stays marked as
            # not to be included for processing.
metadata_item = next(metadata_iterator)
input_counter += 1
video_timestamp = None
wait_for_input_line = False
continue
if wait_for_input_line:
continue
if search("^ *creation_time ", line) is not None:
line_split = line.split(":", 1)
video_timestamp = datetime.strptime(
line_split[1].strip(), "%Y-%m-%dT%H:%M:%S.%f%z"
)
continue
if search("^ *Duration: ", line) is not None:
line_split = line.split(",")
line_split = line_split[0].split(":", 1)
duration_list = line_split[1].split(":")
duration = (
int(duration_list[0]) * 60 * 60
+ int(duration_list[1]) * 60
+ int(duration_list[2].split(".")[0])
+ (float(duration_list[2].split(".")[1]) / 100)
)
            # File will only be processed if duration is greater than 0
include = duration > 0
metadata_item.update(
{"timestamp": video_timestamp, "duration": duration, "include": include}
)
wait_for_input_line = True
return metadata
def create_intermediate_movie(
filename_timestamp,
video,
folder_timestamps,
video_settings,
clip_number,
total_clips,
):
    """ Create intermediate movie files. This is the merging of the camera
    video files (front, left, right, rear) into 1 video file. """
    # We first stack (combine) the different camera video files into 1 clip
    # and then the resulting clips are concatenated.
front_camera = (
os.path.join(
video["movie_folder"], video["video_info"]["front_camera"]["filename"]
)
if (
video["video_info"]["front_camera"]["filename"] is not None
and video["video_info"]["front_camera"]["include"]
)
else None
)
left_camera = (
os.path.join(
video["movie_folder"], video["video_info"]["left_camera"]["filename"]
)
if (
video["video_info"]["left_camera"]["filename"] is not None
and video["video_info"]["left_camera"]["include"]
)
else None
)
right_camera = (
os.path.join(
video["movie_folder"], video["video_info"]["right_camera"]["filename"]
)
if (
video["video_info"]["right_camera"]["filename"] is not None
and video["video_info"]["right_camera"]["include"]
)
else None
)
rear_camera = (
os.path.join(
video["movie_folder"], video["video_info"]["rear_camera"]["filename"]
)
if (
video["video_info"]["rear_camera"]["filename"] is not None
and video["video_info"]["rear_camera"]["include"]
)
else None
)
if (
front_camera is None
and left_camera is None
and right_camera is None
and rear_camera is None
):
_LOGGER.debug(
f'No front, left, right, and rear camera clip exist for {video["timestamp"]}'
)
return None, 0, True
if video_settings["video_layout"].swap_left_right:
left_camera, right_camera = right_camera, left_camera
if video_settings["video_layout"].swap_front_rear:
front_camera, rear_camera = rear_camera, front_camera
# Determine if this clip is to be included based on potential start and end timestamp/offsets that were provided.
# Clip starting time is between the start&end times we're looking for
# or Clip end time is between the start&end time we're looking for.
# or Starting time is between start&end clip time
# or End time is between start&end clip time
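    # In other words: keep the clip when its [start, end] interval overlaps the
    # requested [folder start, folder end] window.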
starting_timestmp = video["timestamp"]
ending_timestmp = starting_timestmp + timedelta(seconds=video["duration"])
if not (
folder_timestamps[0] <= starting_timestmp <= folder_timestamps[1]
or folder_timestamps[0] <= ending_timestmp <= folder_timestamps[1]
or starting_timestmp <= folder_timestamps[0] <= ending_timestmp
or starting_timestmp <= folder_timestamps[1] <= ending_timestmp
):
# This clip is not in-between the timestamps we want, skip it.
_LOGGER.debug(
f"Clip timestamp from {starting_timestmp} to {ending_timestmp} not "
f"between {folder_timestamps[0]} and {folder_timestamps[1]}"
)
return None, 0, True
# Determine if we need to do an offset of the starting timestamp
starting_offset = 0
ffmpeg_offset_command = []
clip_duration = video["duration"]
# This clip falls in between the start and end timestamps to include.
# Set offsets if required
if video["timestamp"] < folder_timestamps[0]:
        # Starting timestamp is within this clip.
starting_offset = (folder_timestamps[0] - video["timestamp"]).total_seconds()
starting_timestmp = folder_timestamps[0]
ffmpeg_offset_command = ["-ss", str(starting_offset)]
clip_duration = video["duration"] - starting_offset
# Adjust duration if end of clip's timestamp is after ending timestamp we need.
if video["timestamp"] + timedelta(seconds=video["duration"]) > folder_timestamps[1]:
# Duration has to be cut.
clip_duration = (
folder_timestamps[1]
- (video["timestamp"] + timedelta(seconds=starting_offset))
).total_seconds()
ffmpeg_offset_command += ["-t", str(clip_duration)]
# Confirm if files exist, if not replace with nullsrc
input_count = 0
if left_camera is not None and os.path.isfile(left_camera):
ffmpeg_left_command = ffmpeg_offset_command + ["-i", left_camera]
ffmpeg_left_camera = ";[0:v] " + video_settings["left_camera"]
input_count += 1
else:
ffmpeg_left_command = []
ffmpeg_left_camera = (
video_settings["background"].format(
duration=clip_duration,
speed=video_settings["movie_speed"],
width=video_settings["video_layout"].cameras("Left").width,
height=video_settings["video_layout"].cameras("Left").height,
)
+ "[left]"
if video_settings["video_layout"].cameras("Left").include
else ""
)
if front_camera is not None and os.path.isfile(front_camera):
ffmpeg_front_command = ffmpeg_offset_command + ["-i", front_camera]
ffmpeg_front_camera = (
";[" + str(input_count) + ":v] " + video_settings["front_camera"]
)
input_count += 1
else:
ffmpeg_front_command = []
ffmpeg_front_camera = (
video_settings["background"].format(
duration=clip_duration,
speed=video_settings["movie_speed"],
width=video_settings["video_layout"].cameras("Front").width,
height=video_settings["video_layout"].cameras("Front").height,
)
+ "[front]"
if video_settings["video_layout"].cameras("Front").include
else ""
)
if right_camera is not None and os.path.isfile(right_camera):
ffmpeg_right_command = ffmpeg_offset_command + ["-i", right_camera]
ffmpeg_right_camera = (
";[" + str(input_count) + ":v] " + video_settings["right_camera"]
)
input_count += 1
else:
ffmpeg_right_command = []
ffmpeg_right_camera = (
video_settings["background"].format(
duration=clip_duration,
speed=video_settings["movie_speed"],
width=video_settings["video_layout"].cameras("Right").width,
height=video_settings["video_layout"].cameras("Right").height,
)
+ "[right]"
if video_settings["video_layout"].cameras("Right").include
else ""
)
if rear_camera is not None and os.path.isfile(rear_camera):
ffmpeg_rear_command = ffmpeg_offset_command + ["-i", rear_camera]
ffmpeg_rear_camera = (
";[" + str(input_count) + ":v] " + video_settings["rear_camera"]
)
input_count += 1
else:
ffmpeg_rear_command = []
ffmpeg_rear_camera = (
video_settings["background"].format(
duration=clip_duration,
speed=video_settings["movie_speed"],
width=video_settings["video_layout"].cameras("Rear").width,
height=video_settings["video_layout"].cameras("Rear").height,
)
+ "[rear]"
if video_settings["video_layout"].cameras("Rear").include
else ""
)
local_timestamp = video["timestamp"].astimezone(get_localzone())
    # Check if the target video file already exists when skipping existing files.
file_already_exist = False
if video_settings["skip_existing"]:
temp_movie_name = (
os.path.join(video_settings["target_folder"], filename_timestamp) + ".mp4"
)
if os.path.isfile(temp_movie_name):
file_already_exist = True
elif (
not video_settings["keep_intermediate"]
and video_settings["temp_dir"] is not None
):
temp_movie_name = (
os.path.join(video_settings["temp_dir"], filename_timestamp) + ".mp4"
)
if os.path.isfile(temp_movie_name):
file_already_exist = True
if file_already_exist:
        print(
            "\t\tSkipping clip {clip_number}/{total_clips} from {timestamp} "
            "and {duration} seconds as it already exists.".format(
clip_number=clip_number + 1,
total_clips=total_clips,
timestamp=local_timestamp.strftime("%x %X"),
duration=int(clip_duration),
)
)
# Get actual duration of our new video, required for chapters when concatenating.
metadata = get_metadata(video_settings["ffmpeg_exec"], [temp_movie_name])
duration = metadata[0]["duration"] if metadata else video["duration"]
return temp_movie_name, duration, True
else:
target_folder = (
video_settings["temp_dir"]
if not video_settings["keep_intermediate"]
and video_settings["temp_dir"] is not None
else video_settings["target_folder"]
)
temp_movie_name = os.path.join(target_folder, filename_timestamp) + ".mp4"
print(
"\t\tProcessing clip {clip_number}/{total_clips} from {timestamp} "
"and {duration} seconds long.".format(
clip_number=clip_number + 1,
total_clips=total_clips,
timestamp=local_timestamp.strftime("%x %X"),
duration=int(clip_duration),
)
)
epoch_timestamp = int(starting_timestmp.timestamp())
ffmpeg_filter = (
video_settings["base"].format(
duration=clip_duration, speed=video_settings["movie_speed"]
)
+ ffmpeg_left_camera
+ ffmpeg_front_camera
+ ffmpeg_right_camera
+ ffmpeg_rear_camera
+ video_settings["clip_positions"]
+ video_settings["timestamp_text"].format(epoch_time=epoch_timestamp)
+ video_settings["ffmpeg_speed"]
+ video_settings["ffmpeg_motiononly"]
)
ffmpeg_command = (
[video_settings["ffmpeg_exec"]]
+ ["-loglevel", "error"]
+ ffmpeg_left_command
+ ffmpeg_front_command
+ ffmpeg_right_command
+ ffmpeg_rear_command
+ ["-filter_complex", ffmpeg_filter]
+ ["-map", f"[{video_settings['input_clip']}]"]
+ video_settings["other_params"]
)
ffmpeg_command = ffmpeg_command + ["-y", temp_movie_name]
_LOGGER.debug(f"FFMPEG Command: {ffmpeg_command}")
# Run the command.
try:
run(ffmpeg_command, capture_output=True, check=True)
except CalledProcessError as exc:
print(
"\t\t\tError trying to create clip for {base_name}. RC: {rc}\n"
"\t\t\tCommand: {command}\n"
"\t\t\tError: {stderr}\n\n".format(
base_name=os.path.join(video["movie_folder"], filename_timestamp),
rc=exc.returncode,
command=exc.cmd,
stderr=exc.stderr,
)
)
return None, 0, False
# Get actual duration of our new video, required for chapters when concatenating.
metadata = get_metadata(video_settings["ffmpeg_exec"], [temp_movie_name])
duration = metadata[0]["duration"] if metadata else video["duration"]
return temp_movie_name, duration, True
def create_movie(clips_list, movie_filename, video_settings, chapter_offset):
""" Concatenate provided movie files into 1."""
# Just return if there are no clips.
if not clips_list:
_LOGGER.debug("Clip list is empty")
return None, None
# Go through the list of clips to create the command and content for chapter meta file.
ffmpeg_join_filehandle, ffmpeg_join_filename = mkstemp(suffix=".txt", text=True)
total_clips = 0
meta_content = ""
meta_start = 0
total_videoduration = 0
chapter_offset = chapter_offset * 1000000000
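    # Chapter offsets and start/end times are expressed in nanoseconds to match the
    # TIMEBASE=1/1000000000 written below. The join file uses the ffmpeg concat
    # demuxer format, one line per clip, e.g. (illustrative):
    #   file '/path/to/2019-05-21_10-00-00.mp4'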
with os.fdopen(ffmpeg_join_filehandle, "w") as fp:
# Loop through the list sorted by video timestamp.
for video_clip in sorted(
clips_list, key=lambda video: video["video_timestamp"]
):
if not os.path.isfile(video_clip["video_filename"]):
print(
"\t\tFile {} does not exist anymore, skipping.".format(
video_clip["video_filename"]
)
)
continue
# Add this file in our join list.
fp.write(
"file '"
+ video_clip["video_filename"]
+ "'{linesep}".format(linesep=os.linesep)
)
total_clips = total_clips + 1
title = video_clip["video_timestamp"].astimezone(get_localzone())
# For duration need to also calculate if video was sped-up or slowed down.
video_duration = int(video_clip["video_duration"] * 1000000000)
total_videoduration += video_duration
chapter_start = meta_start
if video_duration > abs(chapter_offset):
if chapter_offset < 0:
chapter_start = meta_start + video_duration + chapter_offset
elif chapter_offset > 0:
chapter_start = chapter_start + chapter_offset
# We need to add an initial chapter if our "1st" chapter is not at the beginning of the movie.
if total_clips == 1 and chapter_start > 0:
meta_content = (
"[CHAPTER]{linesep}"
"TIMEBASE=1/1000000000{linesep}"
"START={start}{linesep}"
"END={end}{linesep}"
"title={title}{linesep}".format(
linesep=os.linesep,
start=0,
end=chapter_start - 1,
title="Start",
)
)
meta_content = (
meta_content + "[CHAPTER]{linesep}"
"TIMEBASE=1/1000000000{linesep}"
"START={start}{linesep}"
"END={end}{linesep}"
"title={title}{linesep}".format(
linesep=os.linesep,
start=chapter_start,
end=meta_start + video_duration,
title=title.strftime("%x %X"),
)
)
meta_start = meta_start + 1 + video_duration
if total_clips == 0:
print("\t\tError: No valid clips to merge found.")
return None, None
# Write out the meta data file.
meta_content = ";FFMETADATA1" + os.linesep + meta_content
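    # The resulting metadata file looks roughly like this (illustrative):
    #   ;FFMETADATA1
    #   [CHAPTER]
    #   TIMEBASE=1/1000000000
    #   START=0
    #   END=59990000000
    #   title=05/21/19 10:00:00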
ffmpeg_meta_filehandle, ffmpeg_meta_filename = mkstemp(suffix=".txt", text=True)
with os.fdopen(ffmpeg_meta_filehandle, "w") as fp:
fp.write(meta_content)
ffmpeg_params = [
"-f",
"concat",
"-safe",
"0",
"-i",
ffmpeg_join_filename,
"-i",
ffmpeg_meta_filename,
"-map_metadata",
"1",
"-map_chapters",
"1",
]
if video_settings["movflags_faststart"]:
ffmpeg_params = ffmpeg_params + ["-movflags", "+faststart"]
ffmpeg_params = ffmpeg_params + ["-c", "copy"]
ffmpeg_params = ffmpeg_params + [
"-metadata",
f"description=Created by tesla_dashcam {VERSION_STR}",
]
ffmpeg_command = (
[video_settings["ffmpeg_exec"]]
+ ["-loglevel", "error"]
+ ffmpeg_params
+ ["-y", movie_filename]
)
_LOGGER.debug(f"FFMPEG Command: {ffmpeg_command}")
try:
run(ffmpeg_command, capture_output=True, check=True)
except CalledProcessError as exc:
print(
"\t\tError trying to create movie {base_name}. RC: {rc}\n"
"\t\tCommand: {command}\n"
"\t\tError: {stderr}\n\n".format(
base_name=movie_filename,
rc=exc.returncode,
command=exc.cmd,
stderr=exc.stderr,
)
)
movie_filename = None
duration = 0
else:
# Get actual duration of our new video, required for chapters when concatenating.
metadata = get_metadata(video_settings["ffmpeg_exec"], [movie_filename])
duration = metadata[0]["duration"] if metadata else total_videoduration
# Remove temp join file.
# noinspection PyBroadException,PyPep8
try:
os.remove(ffmpeg_join_filename)
except:
_LOGGER.debug(f"Failed to remove {ffmpeg_join_filename}")
pass
    # Remove temp metadata file.
# noinspection PyBroadException,PyPep8
try:
os.remove(ffmpeg_meta_filename)
except:
_LOGGER.debug(f"Failed to remove {ffmpeg_meta_filename}")
pass
return movie_filename, duration
def make_folder(parameter, folder):
# Create folder if not already existing.
if not os.path.isdir(folder):
current_path, add_folder = os.path.split(folder)
if add_folder == "":
current_path, add_folder = os.path.split(current_path)
        # If the path in which to create the folder does not exist then exit.
if not os.path.isdir(current_path):
print(
f"Path {current_path} for parameter {parameter} does not exist, please provide a valid path."
)
return False
try:
os.mkdir(folder)
except OSError:
print(
f"Error creating folder {add_folder} at location {current_path} for parameter {parameter}"
)
return False
return True
def delete_intermediate(movie_files):
""" Delete the files provided in list """
for file in movie_files:
if file is not None:
if os.path.isfile(file):
try:
os.remove(file)
except OSError as exc:
print("\t\tError trying to remove file {}: {}".format(file, exc))
elif os.path.isdir(file):
# This is more specific for Mac but won't hurt on other platforms.
if os.path.exists(os.path.join(file, ".DS_Store")):
# noinspection PyBroadException,PyPep8
try:
os.remove(os.path.join(file, ".DS_Store"))
except:
_LOGGER.debug(f"Failed to remove .DS_Store from {file}")
pass
try:
os.rmdir(file)
except OSError as exc:
print("\t\tError trying to remove folder {}: {}".format(file, exc))
def process_folders(folders, video_settings, delete_source):
""" Process all clips found within folders. """
start_time = timestamp()
total_clips = 0
for folder_number, folder_name in enumerate(sorted(folders)):
total_clips = total_clips + len(folders[folder_name])
print(
"There are {total_folders} folders with {total_clips} clips to "
"process.".format(total_folders=len(folders), total_clips=total_clips)
)
# Loop through all the folders.
dashcam_clips = []
for folder_number, folder_name in enumerate(sorted(folders)):
files = folders[folder_name]
# Ensure the clips are sorted based on video timestamp.
sorted_video_clips = sorted(files, key=lambda video: files[video]["timestamp"])
# Get the start and ending timestamps, we add duration to
# last timestamp to get true ending.
first_clip_tmstp = files[sorted_video_clips[0]]["timestamp"]
last_clip_tmstp = files[sorted_video_clips[-1]]["timestamp"] + timedelta(
seconds=files[sorted_video_clips[-1]]["duration"]
)
        # Skip this folder if it does not fall within the provided timestamps.
if (
video_settings["start_timestamp"] is not None
and last_clip_tmstp < video_settings["start_timestamp"]
):
# Clips from this folder are from before start timestamp requested.
_LOGGER.debug(
f"Clips in folder end at {last_clip_tmstp} which is still before "
f'start timestamp {video_settings["start_timestamp"]}'
)
continue
if (
video_settings["end_timestamp"] is not None
and first_clip_tmstp > video_settings["end_timestamp"]
):
# Clips from this folder are from after end timestamp requested.
_LOGGER.debug(
f"Clips in folder start at {first_clip_tmstp} which is after "
f'end timestamp {video_settings["end_timestamp"]}'
)
continue
# Determine the starting and ending timestamps for the clips in this folder based on start/end timestamps
# provided and offsets.
folder_start_timestmp = (
timedelta(seconds=video_settings["start_offset"]) + first_clip_tmstp
)
# Use provided start timestamp if it is after folder timestamp + offset
folder_start_timestmp = (
video_settings["start_timestamp"]
if video_settings["start_timestamp"] is not None
and video_settings["start_timestamp"] > folder_start_timestmp
else folder_start_timestmp
)
# Figure out potential end timestamp for clip based on offset and end timestamp.
folder_end_timestmp = last_clip_tmstp - timedelta(
seconds=video_settings["end_offset"]
)
# Use provided end timestamp if it is before folder timestamp - offset
folder_end_timestmp = (
video_settings["end_timestamp"]
if video_settings["end_timestamp"] is not None
and video_settings["end_timestamp"] < folder_end_timestmp
else folder_end_timestmp
)
# Put them together to create the filename for the folder.
movie_filename = (
folder_start_timestmp.astimezone(get_localzone()).strftime(
"%Y-%m-%dT%H-%M-%S"
)
+ "_"
+ folder_end_timestmp.astimezone(get_localzone()).strftime(
"%Y-%m-%dT%H-%M-%S"
)
)
# Now add full path to it.
movie_filename = (
os.path.join(video_settings["target_folder"], movie_filename) + ".mp4"
)
        # Do not process the files from this folder if we are skipping existing
        # files and the target movie file already exists.
if video_settings["skip_existing"] and os.path.isfile(movie_filename):
print(
"\tSkipping folder {folder} as {filename} is already "
"created ({folder_number}/{total_folders})".format(
folder=folder_name,
filename=movie_filename,
folder_number=folder_number + 1,
total_folders=len(folders),
)
)
# Get actual duration of our new video, required for chapters when concatenating.
metadata = get_metadata(video_settings["ffmpeg_exec"], [movie_filename])
movie_duration = metadata[0]["duration"] if metadata else 0
dashcam_clips.append(
{
"video_timestamp": first_clip_tmstp,
"video_filename": movie_filename,
"video_duration": movie_duration,
}
)
continue
print(
"\tProcessing {total_clips} clips in folder {folder} "
"({folder_number}/{total_folders})".format(
total_clips=len(files),
folder=folder_name,
folder_number=folder_number + 1,
total_folders=len(folders),
)
)
# Loop through all the files within the folder.
folder_clips = []
delete_folder_clips = []
delete_folder_files = delete_source
delete_file_list = []
folder_timestamp = None
for clip_number, filename_timestamp in enumerate(sorted_video_clips):
video_timestamp_info = files[filename_timestamp]
folder_timestamp = (
video_timestamp_info["timestamp"]
if folder_timestamp is None
else folder_timestamp
)
clip_name, clip_duration, files_processed = create_intermediate_movie(
filename_timestamp,
video_timestamp_info,
(folder_start_timestmp, folder_end_timestmp),
video_settings,
clip_number,
len(files),
)
if clip_name is not None:
if video_timestamp_info["file_only"]:
                    # When processing a single file there is no concatenation at the
                    # folder level; it will only happen at the higher level if requested.
dashcam_clips.append(
{
"video_timestamp": video_timestamp_info["timestamp"],
"video_filename": clip_name,
"video_duration": clip_duration,
}
)
else:
# Movie was created, store name for concatenation.
folder_clips.append(
{
"video_timestamp": video_timestamp_info["timestamp"],
"video_filename": clip_name,
"video_duration": clip_duration,
}
)
                    # Add clip for deletion only if its name is not the
                    # same as the resulting movie filename
if clip_name != movie_filename:
delete_folder_clips.append(clip_name)
elif not files_processed:
delete_folder_files = False
if files_processed:
# Add the files to our list for removal.
video_info = video_timestamp_info["video_info"]
if video_info["front_camera"]["filename"] is not None:
delete_file_list.append(
os.path.join(
video_timestamp_info["movie_folder"],
video_info["front_camera"]["filename"],
)
)
if video_info["left_camera"]["filename"] is not None:
delete_file_list.append(
os.path.join(
video_timestamp_info["movie_folder"],
video_info["left_camera"]["filename"],
)
)
if video_info["right_camera"]["filename"] is not None:
delete_file_list.append(
os.path.join(
video_timestamp_info["movie_folder"],
video_info["right_camera"]["filename"],
)
)
if video_info["rear_camera"]["filename"] is not None:
delete_file_list.append(
os.path.join(
video_timestamp_info["movie_folder"],
video_info["rear_camera"]["filename"],
)
)
# All clips in folder have been processed, merge those clips
# together now.
movie_name = None
movie_duration = 0
if folder_clips:
print("\t\tCreating movie {}, please be patient.".format(movie_filename))
movie_name, movie_duration = create_movie(
folder_clips, movie_filename, video_settings, 0
)
# Delete the source files if stated to delete.
# We only do so if there were no issues in processing the clips
if delete_folder_files and (
(folder_clips and movie_name is not None) or not folder_clips
):
print(
"\t\tDeleting files and folder {folder_name}".format(
folder_name=folder_name
)
)
delete_intermediate(delete_file_list)
# And delete the folder
delete_intermediate([folder_name])
# Add this one to our list for final concatenation
if movie_name is not None:
dashcam_clips.append(
{
"video_timestamp": folder_timestamp,
"video_filename": movie_name,
"video_duration": movie_duration,
}
)
# Delete the intermediate files we created.
if not video_settings["keep_intermediate"]:
delete_intermediate(delete_folder_clips)
print(
"\tMovie {base_name} for folder {folder_name} with duration {duration} is "
"ready.".format(
base_name=movie_name,
folder_name=folder_name,
duration=str(timedelta(seconds=int(movie_duration))),
)
)
# Now that we have gone through all the folders merge.
    # We only do this if merge is enabled OR if there is only 1 folder and a
    # specific output filename was provided.
movie_name = None
movie_duration = 0
if dashcam_clips:
if video_settings["merge_subdirs"] or (
len(folders) == 1 and video_settings["target_filename"] is not None
):
if video_settings["movie_filename"] is not None:
movie_filename = video_settings["movie_filename"]
elif video_settings["target_filename"] is not None:
movie_filename = video_settings["target_filename"]
else:
folder, movie_filename = os.path.split(video_settings["target_folder"])
                # If a trailing separator was provided then the filename will be
                # empty; redo the split in that case.
if movie_filename == "":
movie_filename = os.path.split(folder)[1]
movie_filename = os.path.join(
video_settings["target_folder"], movie_filename
)
# Make sure it ends in .mp4
if os.path.splitext(movie_filename)[1] != ".mp4":
movie_filename = movie_filename + ".mp4"
print("\tCreating movie {}, please be patient.".format(movie_filename))
movie_name, movie_duration = create_movie(
dashcam_clips,
movie_filename,
video_settings,
video_settings["chapter_offset"],
)
if movie_name is not None:
print(
"Movie {base_name} with duration {duration} has been created, enjoy.".format(
base_name=movie_name,
duration=str(timedelta(seconds=int(movie_duration))),
)
)
else:
print(
"All folders have been processed, resulting movie files are "
"located in {target_folder}".format(
target_folder=video_settings["target_folder"]
)
)
else:
print("No clips found.")
end_time = timestamp()
real = int((end_time - start_time))
print("Total processing time: {real}".format(real=str(timedelta(seconds=real))))
if video_settings["notification"]:
if movie_name is not None:
notify(
"TeslaCam",
"Completed",
"{total_folders} folder{folders} with {total_clips} "
"clip{clips} have been processed, movie {movie_name} has "
"been created.".format(
folders="" if len(folders) < 2 else "s",
total_folders=len(folders),
clips="" if total_clips < 2 else "s",
total_clips=total_clips,
                    movie_name=movie_name,
),
)
else:
notify(
"TeslaCam",
"Completed",
"{total_folders} folder{folders} with {total_clips} "
"clip{clips} have been processed, {target_folder} contains "
"resulting files.".format(
folders="" if len(folders) < 2 else "s",
total_folders=len(folders),
clips="" if total_clips < 2 else "s",
total_clips=total_clips,
target_folder=video_settings["target_folder"],
),
)
print()
def resource_path(relative_path):
""" Return absolute path for provided relative item based on location
of program.
"""
# If compiled with pyinstaller then sys._MEIPASS points to the location
# of the bundle. Otherwise path of python script is used.
base_path = getattr(sys, "_MEIPASS", str(Path(__file__).parent))
return os.path.join(base_path, relative_path)
def notify_macos(title, subtitle, message):
""" Notification on MacOS """
try:
run(
[
"osascript",
'-e display notification "{message}" with title "{title}" '
'subtitle "{subtitle}"'
"".format(message=message, title=title, subtitle=subtitle),
]
)
except Exception as exc:
        print("Failed in notification: ", exc)
def notify_windows(title, subtitle, message):
""" Notification on Windows """
# Section commented out, waiting to see if it really does not work on Windows 7
    # This works only on Windows 10 or Windows Server 2016/2019. Skipping for everything else
# from platform import win32_ver
# if win32_ver()[0] != 10:
# return
global TOASTER_INSTANCE
# noinspection PyBroadException
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
from win10toast import ToastNotifier
if TOASTER_INSTANCE is None:
TOASTER_INSTANCE = ToastNotifier()
TOASTER_INSTANCE.show_toast(
threaded=True,
title="{} {}".format(title, subtitle),
msg=message,
duration=5,
icon_path=resource_path("tesla_dashcam.ico"),
)
except Exception:
pass
def notify_linux(title, subtitle, message):
""" Notification on Linux """
try:
run(
[
"notify-send",
'"{title} {subtitle}"'.format(title=title, subtitle=subtitle),
'"{}"'.format(message),
]
)
except Exception as exc:
        print("Failed in notification: ", exc)
def notify(title, subtitle, message):
""" Call function to send notification based on OS """
if sys.platform == "darwin":
notify_macos(title, subtitle, message)
elif sys.platform == "win32":
notify_windows(title, subtitle, message)
elif sys.platform == "linux":
notify_linux(title, subtitle, message)
def main() -> None:
""" Main function """
loglevels = dict(
(logging.getLevelName(level), level) for level in [10, 20, 30, 40, 50]
)
internal_ffmpeg = getattr(sys, "frozen", None) is not None
ffmpeg_default = resource_path(FFMPEG.get(sys.platform, "ffmpeg"))
movie_folder = os.path.join(str(Path.home()), MOVIE_HOMEDIR.get(sys.platform), "")
    # Check if ffmpeg exists; if not then hope it is in the default path or
    # provided.
if not os.path.isfile(ffmpeg_default):
internal_ffmpeg = False
ffmpeg_default = FFMPEG.get(sys.platform, "ffmpeg")
epilog = (
"This program leverages ffmpeg which is included. See https://ffmpeg.org/ for more information on ffmpeg"
if internal_ffmpeg
else "This program requires ffmpeg which can be downloaded from: https://ffmpeg.org/download.html"
)
parser = MyArgumentParser(
description="tesla_dashcam - Tesla DashCam & Sentry Video Creator",
epilog=epilog,
formatter_class=SmartFormatter,
fromfile_prefix_chars="@",
)
parser.add_argument(
"source",
type=str,
nargs="*",
help="Folder(s) (events) containing the saved camera files. Filenames can be provided as well to manage "
"individual clips.",
)
parser.add_argument(
"--version", action="version", version=" %(prog)s " + VERSION_STR
)
parser.add_argument(
"--loglevel",
default="INFO",
choices=list(loglevels.keys()),
help="Logging level.",
)
parser.add_argument(
"--temp_dir", required=False, type=str, help="R|Path to store temporary files."
)
parser.add_argument(
"--no-notification",
dest="system_notification",
action="store_false",
help="Do not create a notification upon " "completion.",
)
input_group = parser.add_argument_group(
title="Video Input",
description="Options related to what clips and events to process.",
)
input_group.add_argument(
"--skip_existing",
dest="skip_existing",
action="store_true",
        help="Skip creating encoded video file if it already exists. Note that only existence is checked, not if "
"layout etc. are the same.",
)
input_group.add_argument(
"--delete_source",
dest="delete_source",
action="store_true",
help="Delete the processed files upon completion.",
)
input_group.add_argument(
"--exclude_subdirs",
dest="exclude_subdirs",
action="store_true",
help="Do not search sub folders (events) for video files to process.",
)
monitor_group = parser.add_argument_group(
title="Trigger Monitor",
description="Parameters for monitoring of insertion of TeslaCam drive, folder, or file existence.",
)
monitor_group.add_argument(
"--monitor",
dest="monitor",
action="store_true",
help="Enable monitoring for drive to be attached with TeslaCam folder.",
)
monitor_group.add_argument(
"--monitor_once",
dest="monitor_once",
action="store_true",
help="Enable monitoring and exit once drive with TeslaCam folder has been attached and files processed.",
)
monitor_group.add_argument(
"--monitor_trigger",
required=False,
type=str,
help="Trigger file to look for instead of waiting for drive to be attached. Once file is discovered then "
"processing will start, file will be deleted when processing has been completed. If source is not "
"provided then folder where file is located will be used as source.",
)
layout_group = parser.add_argument_group(
title="Video Layout",
description="Set what the layout of the resulting video should be",
)
layout_group.add_argument(
"--layout",
required=False,
choices=["WIDESCREEN", "FULLSCREEN", "PERSPECTIVE", "CROSS", "DIAMOND"],
default="FULLSCREEN",
help="R|Layout of the created video.\n"
" FULLSCREEN: Front camera center top, "
"side cameras underneath it with rear camera between side camera.\n"
" WIDESCREEN: Front camera on top with side and rear cameras smaller underneath it.\n"
" PERSPECTIVE: Similar to FULLSCREEN but then with side cameras in perspective.\n"
" CROSS: Front camera center top, side cameras underneath, and rear camera center bottom.\n"
" DIAMOND: Front camera center top, side cameras below front camera left and right of front, "
"and rear camera center bottom.\n",
)
layout_group.add_argument(
"--perspective",
dest="perspective",
action="store_true",
help="Show side cameras in perspective.",
)
layout_group.set_defaults(perspective=False)
layout_group.add_argument(
"--scale",
dest="clip_scale",
type=str,
nargs="+",
action="append",
help="R|Set camera clip scale for all clips, scale of 1 is 1280x960 camera clip.\n"
"If provided with value then it is default for all cameras, to set the scale for a specific "
"camera provide camera=<front, left, right,rear> <scale>\n"
"for example:\n"
" --scale 0.5 all are 640x480\n"
" --scale 640x480 all are 640x480\n"
" --scale 0.5 --scale camera=front 1 all are 640x480 except front at 1280x960\n"
" --scale camera=left .25 --scale camera=right 320x240 left and right are set to 320x240\n"
"Defaults:\n"
" WIDESCREEN: 1/2 (front 1280x960, others 640x480, video is 1920x1920)\n"
" FULLSCREEN: 1/2 (640x480, video is 1920x960)\n"
" CROSS: 1/2 (640x480, video is 1280x1440)\n"
" DIAMOND: 1/2 (640x480, video is 1920x976)\n",
)
layout_group.add_argument(
"--mirror",
dest="rear_or_mirror",
action="store_const",
const=1,
help="Video from side and rear cameras as if being viewed through the mirror. Default when not providing "
"parameter --no-front. Cannot be used in combination with --rear.",
)
layout_group.add_argument(
"--rear",
dest="rear_or_mirror",
action="store_const",
const=0,
help="Video from side and rear cameras as if looking backwards. Default when providing parameter --no-front. "
"Cannot be used in combination with --mirror.",
)
layout_group.add_argument(
"--swap",
dest="swap_leftright",
action="store_const",
const=1,
help="Swap left and right cameras in output, default when side and rear cameras are as if looking backwards. "
"See --rear parameter.",
)
layout_group.add_argument(
"--no-swap",
dest="swap_leftright",
action="store_const",
const=0,
help="Do not swap left and right cameras, default when side and rear cameras are as if looking through a "
"mirror. Also see --mirror parameter",
)
layout_group.add_argument(
"--swap_frontrear",
dest="swap_frontrear",
action="store_true",
help="Swap front and rear cameras in output.",
)
layout_group.add_argument(
"--background",
dest="background",
default="black",
help="Background color for video. Can be a color string or RGB value. Also see --fontcolor.",
)
camera_group = parser.add_argument_group(
title="Camera Exclusion", description="Exclude one or more cameras:"
)
camera_group.add_argument(
"--no-front",
dest="no_front",
action="store_true",
help="Exclude front camera from video.",
)
camera_group.add_argument(
"--no-left",
dest="no_left",
action="store_true",
help="Exclude left camera from video.",
)
camera_group.add_argument(
"--no-right",
dest="no_right",
action="store_true",
help="Exclude right camera from video.",
)
camera_group.add_argument(
"--no-rear",
dest="no_rear",
action="store_true",
help="Exclude rear camera from video.",
)
timestamp_group = parser.add_argument_group(
title="Timestamp",
description="Options on how to show date/time in resulting video:",
)
timestamp_group.add_argument(
"--no-timestamp",
dest="no_timestamp",
action="store_true",
help="Do not show timestamp in video",
)
timestamp_group.add_argument(
"--halign",
required=False,
choices=["LEFT", "CENTER", "RIGHT"],
help="Horizontal alignment for timestamp",
)
timestamp_group.add_argument(
"--valign",
required=False,
choices=["TOP", "MIDDLE", "BOTTOM"],
help="Vertical Alignment for timestamp",
)
timestamp_group.add_argument(
"--font",
required=False,
type=str,
default=DEFAULT_FONT.get(sys.platform, None),
help="Fully qualified filename (.ttf) to the font to be chosen for timestamp.",
)
timestamp_group.add_argument(
"--fontsize",
required=False,
type=int,
help="Font size for timestamp. Default is scaled based on resulting video size.",
)
timestamp_group.add_argument(
"--fontcolor",
required=False,
type=str,
default="white",
help="R|Font color for timestamp. Any color is accepted as a color string or RGB value.\n"
"Some potential values are:\n"
" white\n"
" yellowgreen\n"
" yellowgreen@0.9\n"
" Red\n:"
" 0x2E8B57\n"
"For more information on this see ffmpeg documentation for color: https://ffmpeg.org/ffmpeg-utils.html#Color",
)
filter_group = parser.add_argument_group(
title="Timestamp Restriction",
description="Restrict video to be between start and/or end timestamps. Timestamp to be provided in a ISO-8601 "
"format (see https://fits.gsfc.nasa.gov/iso-time.html for examples)",
)
filter_group.add_argument(
"--start_timestamp", dest="start_timestamp", type=str, help="Starting timestamp"
)
filter_group.add_argument(
"--end_timestamp",
dest="end_timestamp",
type=str,
# type=lambda d: datetime.strptime(d, "%Y-%m-%d_%H-%M-%S").datetime(),
help="Ending timestamp",
)
offset_group = parser.add_argument_group(
title="Event offsets", description="Start and/or end offsets for events"
)
offset_group.add_argument(
"--start_offset",
dest="start_offset",
type=int,
help="Skip x number of seconds from start of event for resulting video.",
)
offset_group.add_argument(
"--end_offset",
dest="end_offset",
type=int,
help="Ignore the last x seconds of the event for resulting video",
)
output_group = parser.add_argument_group(
title="Video Output", description="Options related to resulting video creation."
)
output_group.add_argument(
"--output",
required=False,
default=movie_folder,
type=str,
help="R|Path/Filename for the new movie file. Event files will be stored in same folder."
+ os.linesep,
)
output_group.add_argument(
"--motion_only",
dest="motion_only",
action="store_true",
help="Fast-forward through video when there is no motion.",
)
output_group.add_argument(
"--slowdown",
dest="slow_down",
type=float,
default=argparse.SUPPRESS,
help="Slow down video output. Accepts a number that is then used as multiplier, providing 2 means half the "
"speed.",
)
output_group.add_argument(
"--speedup",
dest="speed_up",
type=float,
default=argparse.SUPPRESS,
help="Speed up the video. Accepts a number that is then used as a multiplier, providing 2 means "
"twice the speed.",
)
output_group.add_argument(
"--chapter_offset",
dest="chapter_offset",
type=int,
default=0,
help="Offset in seconds for chapters in merged video. Negative offset is # of seconds before the end of the "
"subdir video, positive offset if # of seconds after the start of the subdir video.",
)
output_group.add_argument(
"--merge",
dest="merge_subdirs",
action="store_true",
help="Merge the video files from different folders (events) into 1 big video file.",
)
output_group.add_argument(
"--keep-intermediate",
dest="keep_intermediate",
action="store_true",
help="Do not remove the clip video files that are created",
)
advancedencoding_group = parser.add_argument_group(
title="Advanced encoding settings", description="Advanced options for encoding"
)
gpu_help = (
"R|Use GPU acceleration, only enable if supported by hardware.\n"
" MAC: All MACs with Haswell CPU or later support this (Macs after 2013).\n"
" See following link as well: \n"
" https://en.wikipedia.org/wiki/List_of_Macintosh_models_grouped_by_CPU_type#Haswell\n"
)
if sys.platform == "darwin":
advancedencoding_group.add_argument(
"--no-gpu", dest="gpu", action="store_true", help=gpu_help
)
else:
advancedencoding_group.add_argument(
"--gpu", dest="gpu", action="store_true", help=gpu_help
)
advancedencoding_group.add_argument(
"--gpu_type",
choices=["nvidia", "intel", "RPi"],
help="Type of graphics card (GPU) in the system. This determines the encoder that will be used."
"This parameter is mandatory if --gpu is provided.",
)
advancedencoding_group.add_argument(
"--no-faststart",
dest="faststart",
action="store_true",
help="Do not enable flag faststart on the resulting video files. Use this when using a network share and "
"errors occur during encoding.",
)
advancedencoding_group.add_argument(
"--quality",
required=False,
choices=["LOWEST", "LOWER", "LOW", "MEDIUM", "HIGH"],
default="LOWER",
help="Define the quality setting for the video, higher quality means bigger file size but might "
"not be noticeable.",
)
advancedencoding_group.add_argument(
"--compression",
required=False,
choices=[
"ultrafast",
"superfast",
"veryfast",
"faster",
"fast",
"medium",
"slow",
"slower",
"veryslow",
],
default="medium",
help="Speed to optimize video. Faster speed results in a bigger file. This does not impact the quality of "
"the video, just how much time is used to compress it.",
)
advancedencoding_group.add_argument(
"--fps",
required=False,
type=int,
default=24,
help="Frames per second for resulting video. Tesla records at about 33fps hence going higher wouldn't do "
"much as frames would just be duplicated. Default is 24fps which is the standard for movies and TV shows",
)
if internal_ffmpeg:
advancedencoding_group.add_argument(
"--ffmpeg",
required=False,
type=str,
help="Full path and filename for alternative " "ffmpeg.",
)
else:
advancedencoding_group.add_argument(
"--ffmpeg",
required=False,
type=str,
default=ffmpeg_default,
help="Path and filename for ffmpeg. Specify if ffmpeg is not within path.",
)
advancedencoding_group.add_argument(
"--encoding",
required=False,
choices=["x264", "x265"],
default=argparse.SUPPRESS,
help="R|Encoding to use for video creation.\n"
" x264: standard encoding, can be viewed on most devices but results in bigger file.\n"
" x265: newer encoding standard but not all devices support this yet.\n",
)
advancedencoding_group.add_argument(
"--enc",
required=False,
type=str,
default=argparse.SUPPRESS,
help="R|Provide a custom encoder for video creation. Cannot be used in combination with --encoding.\n"
"Note: when using this option the --gpu option is ignored. To use GPU hardware acceleration specify an "
"encoding that provides this.",
)
update_check_group = parser.add_argument_group(
title="Update Check", description="Check for updates"
)
update_check_group.add_argument(
"--check_for_update",
dest="check_for_updates",
action="store_true",
help="Check for update and exit.",
)
update_check_group.add_argument(
"--no-check_for_update",
dest="no_check_for_updates",
action="store_true",
help="A check for new updates is performed every time. With this parameter that can be disabled",
)
update_check_group.add_argument(
"--include_test",
dest="include_beta",
action="store_true",
help="Include test (beta) releases when checking for updates.",
)
args = parser.parse_args()
logging.basicConfig(
level=loglevels[args.loglevel],
format="%(asctime)s:%(levelname)s:\t%(name)s\t%(message)s",
)
_LOGGER.debug(f"Arguments : {args}")
# Check that any mutual exclusive items are not both provided.
if "speed_up" in args and "slow_down" in args:
print(
"Option --speed_up and option --slow_down cannot be used together, only use one of them."
)
return 1
if "enc" in args and "encoding" in args:
print(
"Option --enc and option --encoding cannot be used together, only use one of them."
)
return 1
if not args.no_check_for_updates or args.check_for_updates:
release_info = check_latest_release(args.include_beta)
if release_info is not None:
new_version = False
if release_info.get("tag_name") is not None:
github_version = release_info.get("tag_name").split(".")
if len(github_version) == 3:
# Release tags normally start with v. If that is the case
# then strip the v.
try:
major_version = int(github_version[0])
except ValueError:
major_version = int(github_version[0][1:])
minor_version = int(github_version[1])
if release_info.get("prerelease"):
# Drafts will have b and then beta number.
patch_version = int(github_version[2].split("b")[0])
beta_version = int(github_version[2].split("b")[1])
else:
patch_version = int(github_version[2])
beta_version = -1
if major_version == VERSION["major"]:
if minor_version == VERSION["minor"]:
if patch_version == VERSION["patch"]:
if beta_version > VERSION["beta"] or (
beta_version == -1 and VERSION["beta"] != -1
):
new_version = True
elif patch_version > VERSION["patch"]:
new_version = True
elif minor_version > VERSION["minor"]:
new_version = True
elif major_version > VERSION["major"]:
new_version = True
if new_version:
beta = ""
if release_info.get("prerelease"):
beta = "beta "
release_notes = ""
if not args.check_for_updates:
if args.system_notification:
notify(
"TeslaCam",
"Update available",
"New {beta}release {release} is available. You are "
"on version {version}".format(
beta=beta,
release=release_info.get("tag_name"),
version=VERSION_STR,
),
)
release_notes = (
"Use --check_for_update to get latest " "release notes."
)
print(
"New {beta}release {release} is available for download "
"({url}). You are currently on {version}. {rel_note}".format(
beta=beta,
release=release_info.get("tag_name"),
url=release_info.get("html_url"),
version=VERSION_STR,
rel_note=release_notes,
)
)
if args.check_for_updates:
print(
"You can download the new release from: {url}".format(
url=release_info.get("html_url")
)
)
print(
"Release Notes:\n {release_notes}".format(
release_notes=release_info.get("body")
)
)
return
else:
if args.check_for_updates:
print(
"{version} is the latest release available.".format(
version=VERSION_STR
)
)
return
else:
print("Did not retrieve latest version info.")
ffmpeg = ffmpeg_default if getattr(args, "ffmpeg", None) is None else args.ffmpeg
if which(ffmpeg) is None:
print(
f"ffmpeg is a requirement, unable to find {ffmpeg} executable. Please ensure it exist and is located"
f"within PATH environment or provide full path using parameter --ffmpeg."
)
if args.layout == "PERSPECTIVE":
layout_settings = FullScreen()
layout_settings.perspective = True
else:
if args.layout == "WIDESCREEN":
layout_settings = WideScreen()
elif args.layout == "FULLSCREEN":
layout_settings = FullScreen()
elif args.layout == "CROSS":
layout_settings = Cross()
elif args.layout == "DIAMOND":
layout_settings = Diamond()
else:
layout_settings = FullScreen()
layout_settings.perspective = args.perspective
layout_settings.cameras("Front").include = not args.no_front
layout_settings.cameras("Left").include = not args.no_left
layout_settings.cameras("Right").include = not args.no_right
layout_settings.cameras("Rear").include = not args.no_rear
# Check if either rear or mirror argument has been provided.
# If front camera then default to mirror, if no front camera then default to rear.
side_camera_as_mirror = (
layout_settings.cameras("Front").include
if args.rear_or_mirror is None
else args.rear_or_mirror
)
mirror_sides = ", hflip" if side_camera_as_mirror else ""
# For scale first set the main clip one if provided, this than allows camera specific ones to override for
# that camera.
scaling = parser.args_to_dict(args.clip_scale, "scale")
main_scale = search_dict(None, "camera", scaling)
if main_scale is not None:
layout_settings.scale = main_scale.get("scale", layout_settings.scale)
for scale in scaling:
if scale.get("camera", "").lower() in ["front", "left", "right", "rear"]:
camera_scale = scale.get("scale")
if camera_scale is not None:
layout_settings.cameras(
scale["camera"].lower().capitalize()
).scale = camera_scale
layout_settings.font.halign = (
args.halign if args.halign is not None else layout_settings.font.halign
)
layout_settings.font.valign = (
args.valign if args.valign is not None else layout_settings.font.valign
)
# Determine if left and right cameras should be swapped or not.
# No more arguments related to cameras (i.e .scale, include or not) can be processed from now on.
# Up till now Left means left camera and Right means Right camera.
# From this point forward Left can mean Right camera if we're swapping output.
layout_settings.swap_left_right = (
not side_camera_as_mirror
if args.swap_leftright is None
else args.swap_leftright
)
layout_settings.swap_front_rear = args.swap_frontrear
layout_settings.font.font = args.font
layout_settings.font.color = args.fontcolor
if args.fontsize is not None and args.fontsize > 0:
layout_settings.font.size = args.fontsize
black_base = "color=duration={duration}:"
black_size = f"s={{width}}x{{height}}:c={args.background}, fps={args.fps} "
ffmpeg_base = (
black_base
+ black_size.format(
width=layout_settings.video_width, height=layout_settings.video_height
)
+ "[base]"
)
ffmpeg_black_video = ";" + black_base + black_size
input_clip = "base"
ffmpeg_video_position = ""
ffmpeg_left_camera = ""
camera = "Left"
if layout_settings.cameras(camera).include:
ffmpeg_left_camera = (
"setpts=PTS-STARTPTS, "
"scale={clip_width}x{clip_height} {mirror}{options}"
" [left]".format(
clip_width=layout_settings.cameras(camera).width,
clip_height=layout_settings.cameras(camera).height,
mirror=mirror_sides,
options=layout_settings.cameras(camera).options,
)
)
ffmpeg_video_position = (
ffmpeg_video_position
+ ";[{input_clip}][left] overlay=eof_action=pass:repeatlast=0:"
"x={x_pos}:y={y_pos} [left1]".format(
input_clip=input_clip,
x_pos=layout_settings.cameras(camera).xpos,
y_pos=layout_settings.cameras(camera).ypos,
)
)
input_clip = "left1"
ffmpeg_front_camera = ""
camera = "Front"
if layout_settings.cameras(camera).include:
ffmpeg_front_camera = (
"setpts=PTS-STARTPTS, "
"scale={clip_width}x{clip_height} {options}"
" [front]".format(
clip_width=layout_settings.cameras(camera).width,
clip_height=layout_settings.cameras(camera).height,
options=layout_settings.cameras(camera).options,
)
)
ffmpeg_video_position = (
ffmpeg_video_position
+ ";[{input_clip}][front] overlay=eof_action=pass:repeatlast=0:"
"x={x_pos}:y={y_pos} [front1]".format(
input_clip=input_clip,
x_pos=layout_settings.cameras(camera).xpos,
y_pos=layout_settings.cameras(camera).ypos,
)
)
input_clip = "front1"
ffmpeg_right_camera = ""
camera = "Right"
if layout_settings.cameras(camera).include:
ffmpeg_right_camera = (
"setpts=PTS-STARTPTS, "
"scale={clip_width}x{clip_height} {mirror}{options}"
" [right]".format(
clip_width=layout_settings.cameras(camera).width,
clip_height=layout_settings.cameras(camera).height,
mirror=mirror_sides,
options=layout_settings.cameras(camera).options,
)
)
ffmpeg_video_position = (
ffmpeg_video_position
+ ";[{input_clip}][right] overlay=eof_action=pass:repeatlast=0:"
"x={x_pos}:y={y_pos} [right1]".format(
input_clip=input_clip,
x_pos=layout_settings.cameras(camera).xpos,
y_pos=layout_settings.cameras(camera).ypos,
)
)
input_clip = "right1"
ffmpeg_rear_camera = ""
camera = "Rear"
if layout_settings.cameras(camera).include:
ffmpeg_rear_camera = (
"setpts=PTS-STARTPTS, "
# "crop=512:798:225:26, "
"scale={clip_width}x{clip_height} {mirror}{options}"
" [rear]".format(
clip_width=layout_settings.cameras(camera).width,
clip_height=layout_settings.cameras(camera).height,
mirror=mirror_sides,
options=layout_settings.cameras(camera).options,
)
)
ffmpeg_video_position = (
ffmpeg_video_position
+ ";[{input_clip}][rear] overlay=eof_action=pass:repeatlast=0:"
"x={x_pos}:y={y_pos} [rear1]".format(
input_clip=input_clip,
x_pos=layout_settings.cameras(camera).xpos,
y_pos=layout_settings.cameras(camera).ypos,
)
)
input_clip = "rear1"
filter_counter = 0
filter_string = ";[{input_clip}] {filter} [tmp{filter_counter}]"
ffmpeg_timestamp = ""
if not args.no_timestamp:
if layout_settings.font.font is None:
print(
f"Unable to get a font file for platform {sys.platform}. Please provide valid font file using "
f"--font or disable timestamp using --no-timestamp."
)
return
# noinspection PyPep8
temp_font_file = (
f"c:\{layout_settings.font.font}"
if sys.platform == "win32"
else layout_settings.font.font
)
if not os.path.isfile(temp_font_file):
print(
f"Font file {temp_font_file} does not exist. Provide a valid font file using --font or"
f" disable timestamp using --no-timestamp"
)
if sys.platform == "linux":
print(
"You can also install the fonts using for example: apt-get install ttf-freefont"
)
return
# noinspection PyPep8,PyPep8,PyPep8
ffmpeg_timestamp = (
ffmpeg_timestamp + f"drawtext=fontfile={layout_settings.font.font}:"
f"fontcolor={layout_settings.font.color}:fontsize={layout_settings.font.size}:"
"borderw=2:bordercolor=black@1.0:"
f"x={layout_settings.font.halign}:y={layout_settings.font.valign}:"
"text='%{{pts\:localtime\:{epoch_time}\:%x %X}}'"
)
ffmpeg_timestamp = filter_string.format(
input_clip=input_clip,
filter=ffmpeg_timestamp,
filter_counter=filter_counter,
)
input_clip = f"tmp{filter_counter}"
filter_counter += 1
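# Illustrative example of the drawtext filter assembled above (actual values depend on the chosen
# font, color, size and alignment); once the epoch time is filled in it looks roughly like
#   ;[<input>] drawtext=fontfile=<font.ttf>:fontcolor=white:fontsize=<n>:borderw=2:bordercolor=black@1.0:
#   x=<halign>:y=<valign>:text='%{pts\:localtime\:<epoch>\:%x %X}' [tmp0]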
speed = args.slow_down if "slow_down" in args else ""
speed = round(1 / args.speed_up, 4) if "speed_up" in args else speed
ffmpeg_speed = ""
if speed != "":
ffmpeg_speed = filter_string.format(
input_clip=input_clip,
filter=f"setpts={speed}*PTS",
filter_counter=filter_counter,
)
input_clip = f"tmp{filter_counter}"
filter_counter += 1
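# Example: --speedup 2 yields speed = round(1 / 2, 4) = 0.5 and a "setpts=0.5*PTS" filter (twice as
# fast), while --slowdown 2 yields speed = 2 and "setpts=2*PTS" (half speed).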
ffmpeg_motiononly = ""
if args.motion_only:
ffmpeg_motiononly = filter_string.format(
input_clip=input_clip,
filter=f"mpdecimate=hi=64*48, setpts=N/FRAME_RATE/TB",
filter_counter=filter_counter,
)
input_clip = f"tmp{filter_counter}"
filter_counter += 1
ffmpeg_params = ["-preset", args.compression, "-crf", MOVIE_QUALITY[args.quality]]
use_gpu = not args.gpu if sys.platform == "darwin" else args.gpu
video_encoding = []
if not "enc" in args:
encoding = args.encoding if "encoding" in args else "x264"
# GPU acceleration enabled
if use_gpu:
print("GPU acceleration is enabled")
if sys.platform == "darwin":
video_encoding = video_encoding + ["-allow_sw", "1"]
encoding = encoding + "_mac"
else:
if args.gpu_type is None:
print(
"Parameter --gpu_type is mandatory when parameter --use_gpu is used."
)
return
encoding = encoding + "_" + args.gpu_type
bit_rate = str(int(10000 * layout_settings.scale)) + "K"
video_encoding = video_encoding + ["-b:v", bit_rate]
video_encoding = video_encoding + ["-c:v", MOVIE_ENCODING[encoding]]
else:
video_encoding = video_encoding + ["-c:v", args.enc]
ffmpeg_params = ffmpeg_params + video_encoding
# Set metadata
ffmpeg_params = ffmpeg_params + [
"-metadata",
f"description=Created by tesla_dashcam {VERSION_STR}",
]
# Determine the target folder and filename.
# If no extension then assume it is a folder.
if (
os.path.splitext(args.output)[1] is not None
and os.path.splitext(args.output)[1] != ""
):
target_folder, target_filename = os.path.split(args.output)
if target_folder is None or target_folder == "":
# If nothing in target_filename then no folder was given,
# setting default movie folder
target_folder = movie_folder
target_filename = args.output
else:
# Folder only provided.
target_folder = args.output
target_filename = None
# Convert target folder to absolute path if relative path has been provided.
target_folder = os.path.abspath(target_folder)
# Ensure the target folder exists, creating it if necessary; abort if it cannot be created.
if not make_folder("--output", target_folder):
return
temp_folder = args.temp_dir
if temp_folder is not None:
# Convert temp folder to absolute path if relative path has been provided
temp_folder = os.path.abspath(args.temp_dir)
if not make_folder("--temp_dir", temp_folder):
return
# Set the run type based on arguments.
runtype = "RUN"
if args.monitor:
runtype = "MONITOR"
elif args.monitor_once:
runtype = "MONITOR_ONCE"
monitor_file = args.monitor_trigger
# If no source provided then set to MONITOR_ONCE and we're only going to
# take SavedClips and SentryClips
source_list = args.source
if not source_list:
source_list = ["SavedClips", "SentryClips"]
if runtype == "RUN":
runtype = "MONITOR_ONCE"
start_timestamp = None
if args.start_timestamp is not None:
start_timestamp = isoparse(args.start_timestamp)
if start_timestamp.tzinfo is None:
start_timestamp = start_timestamp.astimezone(get_localzone())
end_timestamp = None
if args.end_timestamp is not None:
end_timestamp = isoparse(args.end_timestamp)
if end_timestamp.tzinfo is None:
end_timestamp = end_timestamp.astimezone(get_localzone())
start_offset = abs(args.start_offset) if args.start_offset is not None else 0
end_offset = abs(args.end_offset) if args.end_offset is not None else 0
video_settings = {
"source_folder": source_list,
"output": args.output,
"target_folder": target_folder,
"target_filename": target_filename,
"temp_dir": temp_folder,
"run_type": runtype,
"merge_subdirs": args.merge_subdirs,
"chapter_offset": args.chapter_offset,
"movie_filename": None,
"keep_intermediate": args.keep_intermediate,
"notification": args.system_notification,
"movie_layout": args.layout,
"movie_speed": speed,
"video_encoding": video_encoding,
"movie_encoding": args.encoding if "encoding" in args else "x264",
"movie_compression": args.compression,
"movie_quality": args.quality,
"background": ffmpeg_black_video,
"ffmpeg_exec": ffmpeg,
"base": ffmpeg_base,
"video_layout": layout_settings,
"clip_positions": ffmpeg_video_position,
"timestamp_text": ffmpeg_timestamp,
"ffmpeg_speed": ffmpeg_speed,
"ffmpeg_motiononly": ffmpeg_motiononly,
"movflags_faststart": not args.faststart,
"input_clip": input_clip,
"other_params": ffmpeg_params,
"left_camera": ffmpeg_left_camera,
"front_camera": ffmpeg_front_camera,
"right_camera": ffmpeg_right_camera,
"rear_camera": ffmpeg_rear_camera,
"start_timestamp": start_timestamp,
"start_offset": start_offset,
"end_timestamp": end_timestamp,
"end_offset": end_offset,
"skip_existing": args.skip_existing,
}
_LOGGER.debug(f"Video Settings {video_settings}")
_LOGGER.debug(f"Layout Settings {layout_settings}")
# If we constantly run and monitor for drive added or not.
if video_settings["run_type"] in ["MONITOR", "MONITOR_ONCE"]:
video_settings.update({"skip_existing": True})
trigger_exist = False
if monitor_file is None:
print("Monitoring for TeslaCam Drive to be inserted. Press CTRL-C to stop")
else:
print(
"Monitoring for trigger {} to exist. Press CTRL-C to stop".format(
monitor_file
)
)
while True:
try:
# Monitoring for disk to be inserted and not for a file.
if monitor_file is None:
source_folder, source_partition = get_tesladashcam_folder()
if source_folder is None:
# Nothing found, sleep for 1 minute and check again.
if trigger_exist:
print("TeslaCam drive has been ejected.")
print(
"Monitoring for TeslaCam Drive to be inserted. "
"Press CTRL-C to stop"
)
sleep(MONITOR_SLEEP_TIME)
trigger_exist = False
continue
# As long as TeslaCam drive is still attached we're going to
# keep on waiting.
if trigger_exist:
_LOGGER.debug(f"TeslaCam Drive still attached")
sleep(MONITOR_SLEEP_TIME)
continue
# Got a folder, append what was provided as source unless
# . was provided in which case everything is done.
source_folder_list = []
for folder in video_settings["source_folder"]:
if folder == ".":
source_folder_list.append(folder)
else:
source_folder_list.append(
os.path.join(source_folder, folder)
)
message = "TeslaCam folder found on {partition}.".format(
partition=source_partition
)
else:
# Wait till trigger file exist (can also be folder).
if not os.path.exists(monitor_file):
_LOGGER.debug(f"Trigger file {monitor_file} does not exist.")
sleep(MONITOR_SLEEP_TIME)
trigger_exist = False
continue
if trigger_exist:
sleep(MONITOR_SLEEP_TIME)
continue
message = "Trigger {} exist.".format(monitor_file)
trigger_exist = True
# Set monitor path, make sure what was provided is a file first otherwise get path.
monitor_path = monitor_file
if os.path.isfile(monitor_file):
monitor_path, _ = os.path.split(monitor_file)
# If . is provided then source folder is path where monitor file exist.
source_folder_list = []
for folder in video_settings["source_folder"]:
if folder == ".":
source_folder_list.append(monitor_path)
else:
# If source path provided is absolute then use that for source path
if os.path.isabs(folder):
source_folder_list.append(folder)
else:
# Path provided is relative, hence based on path of trigger file.
source_folder_list.append(
os.path.join(monitor_path, folder)
)
print(message)
if args.system_notification:
notify("TeslaCam", "Started", message)
if len(source_folder_list) == 1:
print(f"Retrieving all files from {source_folder_list[0]}")
else:
print(f"Retrieving all files from: ")
for folder in source_folder_list:
print(f" {folder}")
folders = get_movie_files(
source_folder_list, args.exclude_subdirs, video_settings
)
if video_settings["run_type"] == "MONITOR":
# We will continue to monitor hence we need to
# ensure we always have a unique final movie name.
movie_filename = (
datetime.today().strftime("%Y-%m-%d_%H_%M")
if video_settings["target_filename"] is None
else os.path.splitext(video_settings["target_filename"])[0]
+ "_"
+ datetime.today().strftime("%Y-%m-%d_%H_%M")
+ os.path.splitext(video_settings["target_filename"])[1]
)
video_settings.update({"movie_filename": movie_filename})
else:
# Set filename to right now if no filename provided.
movie_filename = (
datetime.today().strftime("%Y-%m-%d_%H_%M")
if video_settings["target_filename"] is None
else video_settings["target_filename"]
)
video_settings.update({"movie_filename": movie_filename})
process_folders(folders, video_settings, args.delete_source)
print("Processing of movies has completed.")
if args.system_notification:
notify(
"TeslaCam", "Completed", "Processing of movies has completed."
)
# Stop if we're only to monitor once and then exit.
if video_settings["run_type"] == "MONITOR_ONCE":
if monitor_file is not None:
if os.path.isfile(monitor_file):
try:
os.remove(monitor_file)
except OSError as exc:
print(
"Error trying to remove trigger file {}: {}".format(
monitor_file, exc
)
)
print("Exiting monitoring as asked process once.")
break
if monitor_file is None:
trigger_exist = True
print(
"Waiting for TeslaCam Drive to be ejected. Press "
"CTRL-C to stop"
)
else:
if os.path.isfile(monitor_file):
try:
os.remove(monitor_file)
except OSError as exc:
print(
"Error trying to remove trigger file {}: {}".format(
monitor_file, exc
)
)
break
trigger_exist = False
print(
"Monitoring for trigger {}. Press CTRL-C to stop".format(
monitor_file
)
)
else:
print(
"Waiting for trigger {} to be removed. Press CTRL-C to stop".format(
monitor_file
)
)
except KeyboardInterrupt:
print("Monitoring stopped due to CTRL-C.")
break
else:
folders = get_movie_files(
video_settings["source_folder"], args.exclude_subdirs, video_settings
)
# Set filename to right now if no filename provided.
movie_filename = (
datetime.today().strftime("%Y-%m-%d_%H_%M")
if video_settings["target_filename"] is None
else video_settings["target_filename"]
)
video_settings.update({"movie_filename": movie_filename})
process_folders(folders, video_settings, args.delete_source)
if sys.version_info < (3, 7):
print(
f"Python version 3.7 or higher is required, you have: {sys.version}. Please update your Python version."
)
sys.exit(1)
if __name__ == "__main__":
sys.exit(main())
# coding=utf-8
from time import sleep
import readchar
import math
import numpy
import json
import pigpio
from turtle_draw import BrachioGraphTurtle
try:
pigpio.exceptions = False
rpi = pigpio.pi()
rpi.set_PWM_frequency(18, 50)
pigpio.exceptions = True
force_virtual = False
except Exception:
print("pigpio daemon is not available; running in virtual mode")
force_virtual = True
import tqdm
class BrachioGraph:
def __init__(
self,
# ----------------- geometry of the plotter -----------------
inner_arm=8, # the lengths of the arms
outer_arm=8,
bounds=[-8, 4, 6, 13], # the maximum rectangular drawing area
# ----------------- naive calculation values -----------------
servo_1_parked_pw=1500, # pulse-widths when parked
servo_2_parked_pw=1500,
servo_1_degree_ms=-10, # milliseconds pulse-width per degree
servo_2_degree_ms=10, # reversed for the mounting of the shoulder servo
servo_1_parked_angle=-90, # the arm angle in the parked position
servo_2_parked_angle=90,
# ----------------- hysteresis -----------------
hysteresis_correction_1=0, # hardware error compensation
hysteresis_correction_2=0,
# ----------------- servo angles and pulse-widths in lists -----------------
servo_1_angle_pws=[], # pulse-widths for various angles
servo_2_angle_pws=[],
# ----------------- servo angles and pulse-widths in lists (bi-directional) ------
servo_1_angle_pws_bidi = [], # bi-directional pulse-widths for various angles
servo_2_angle_pws_bidi = [],
# ----------------- the pen -----------------
pw_up=1500, # pulse-widths for pen up/down
pw_down=1100,
# ----------------- misc -----------------
wait=None, # default wait time between operations
virtual = False, # run in virtual mode
turtle = False
):
# set the geometry
self.inner_arm = inner_arm
self.outer_arm = outer_arm
self.virtual = virtual or force_virtual
self.turtle = turtle
if self.turtle:
self.reset_turtle()
# the box bounds describe a rectangle that we can safely draw in
self.bounds = bounds
# if pulse-widths to angles are supplied for each servo, we will feed them to
# numpy.polyfit(), to produce a function for each one. Otherwise, we will use a simple
# approximation based on a centre of travel of 1500µS and 10µS per degree
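# Illustrative only: a calibration list such as
#   servo_1_angle_pws=[[-135, 1950], [-90, 1500], [-45, 1060], [0, 610]]
# is fitted below with a cubic polynomial mapping angle -> pulse-width; without such a list the
# naive linear mapping defined further down is used instead.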
self.servo_1_parked_pw = servo_1_parked_pw
self.servo_1_degree_ms = servo_1_degree_ms
self.servo_1_parked_angle = servo_1_parked_angle
self.hysteresis_correction_1 = hysteresis_correction_1
self.servo_2_parked_pw = servo_2_parked_pw
self.servo_2_degree_ms = servo_2_degree_ms
self.servo_2_parked_angle = servo_2_parked_angle
self.hysteresis_correction_2 = hysteresis_correction_2
# set some initial values required for moving methods
self.previous_pw_1 = self.previous_pw_2 = 0
self.active_hysteresis_correction_1 = self.active_hysteresis_correction_2 = 0
self.reset_report()
# Set the x and y position state, so it knows its current x/y position.
self.x = -self.inner_arm
self.y = self.outer_arm
if servo_1_angle_pws_bidi:
servo_1_angle_pws = []
differences = []
for angle, pws in servo_1_angle_pws_bidi.items():
pw = (pws['acw'] + pws['cw']) / 2
servo_1_angle_pws.append([angle, pw])
differences.append((pws['acw'] - pws['cw']) / 2)
self.hysteresis_correction_1 = numpy.mean(differences)
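# Illustrative example: bi-directional measurements such as
#   servo_1_angle_pws_bidi={-90: {"acw": 1510, "cw": 1490}, ...}
# average to a pulse-width of 1500 for that angle, and (1510 - 1490) / 2 = 10 becomes the
# hysteresis correction applied according to the direction of travel.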
if servo_1_angle_pws:
servo_1_array = numpy.array(servo_1_angle_pws)
self.angles_to_pw_1 = numpy.poly1d(
numpy.polyfit(
servo_1_array[:,0],
servo_1_array[:,1],
3
)
)
else:
self.angles_to_pw_1 = self.naive_angles_to_pulse_widths_1
if servo_2_angle_pws_bidi:
servo_2_angle_pws = []
differences = []
for angle, pws in servo_2_angle_pws_bidi.items():
pw = (pws['acw'] + pws['cw']) / 2
servo_2_angle_pws.append([angle, pw])
differences.append((pws['acw'] - pws['cw']) / 2)
self.hysteresis_correction_2 = numpy.mean(differences)
print(servo_2_angle_pws)
if servo_2_angle_pws:
servo_2_array = numpy.array(servo_2_angle_pws)
self.angles_to_pw_2 = numpy.poly1d(
numpy.polyfit(
servo_2_array[:,0],
servo_2_array[:,1],
3
)
)
else:
self.angles_to_pw_2 = self.naive_angles_to_pulse_widths_2
# create the pen object
self.pen = Pen(bg=self, pw_up=pw_up, pw_down=pw_down, virtual=self.virtual)
if self.virtual:
print("Initialising virtual BrachioGraph")
self.virtual_pw_1 = self.angles_to_pw_1(-90)
self.virtual_pw_2 = self.angles_to_pw_2(90)
# by default in virtual mode, we use a wait factor of 0 for speed
self.wait = wait or 0
else:
# instantiate this Raspberry Pi as a pigpio.pi() instance
self.rpi = pigpio.pi()
# the pulse frequency should be no higher than 100Hz - higher values could (supposedly) damage the servos
self.rpi.set_PWM_frequency(14, 50)
self.rpi.set_PWM_frequency(15, 50)
# by default we use a wait factor of 0.1 for accuracy
self.wait = wait or .1
self.set_angles(-90, 90)
if self.turtle:
self.turtle.showturtle()
self.status()
# methods in this class:
# drawing
# line-processing
# test patterns
# pen-moving
# angles-to-pulse-widths
# hardware-related
# trigonometric
# calibration
# manual driving
# reporting
# ----------------- drawing methods -----------------
def plot_file(self, filename="", wait=0, interpolate=10, bounds=None):
"""Passes the lines in the supplied JSON file to ``plot_lines()``"""
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "File plotting is only possible when BrachioGraph.bounds is set."
with open(filename, "r") as line_file:
lines = json.load(line_file)
self.plot_lines(lines=lines, wait=wait, interpolate=interpolate, bounds=bounds, flip=True)
def plot_lines(self, lines=[], wait=0, interpolate=10, rotate=False, flip=False, bounds=None):
"""Passes each segment of each line in lines to ``draw_line()``"""
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "Line plotting is only possible when BrachioGraph.bounds is set."
lines = self.rotate_and_scale_lines(lines=lines, bounds=bounds, flip=True)
for line in tqdm.tqdm(lines, desc="Lines", leave=False):
x, y = line[0]
# only if we are not within 1mm of the start of the line, lift pen and go there
if (round(self.x, 1), round(self.y, 1)) != (round(x, 1), round(y, 1)):
self.xy(x, y, wait=wait, interpolate=interpolate)
for point in tqdm.tqdm(line[1:], desc="Segments", leave=False):
x, y = point
self.xy(x, y, wait=wait, interpolate=interpolate, draw=True)
self.park()
def draw_line(self, start=(0, 0), end=(0, 0), wait=0, interpolate=10, both=False):
"""Draws a straight line between two points"""
wait = wait or self.wait
start_x, start_y = start
end_x, end_y = end
self.xy(x=start_x, y=start_y, wait=wait, interpolate=interpolate)
self.xy(x=end_x, y=end_y, wait=wait, interpolate=interpolate, draw=True)
if both:
self.xy(x=start_x, y=start_y, wait=wait, interpolate=interpolate, draw=True)
# ----------------- line-processing methods -----------------
def rotate_and_scale_lines(self, lines=[], rotate=False, flip=False, bounds=None):
rotate, x_mid_point, y_mid_point, box_x_mid_point, box_y_mid_point, divider = self.analyse_lines(
lines=lines, rotate=rotate, bounds=bounds
)
for line in lines:
for point in line:
if rotate:
point[0], point[1] = point[1], point[0]
x = point[0]
x = x - x_mid_point # shift x values so that they have zero as their mid-point
x = x / divider # scale x values to fit in our box width
if flip ^ rotate: # flip before moving back into drawing pane
x = -x
x = x + box_x_mid_point # shift x values so that they have the box x midpoint as their endpoint
y = point[1]
y = y - y_mid_point
y = y / divider
y = y + box_y_mid_point
point[0], point[1] = x, y
return lines
def analyse_lines(self, lines=[], rotate=False, bounds=None):
# lines is a tuple itself containing a number of tuples, each of which contains a number of 2-tuples
#
# [ # |
# [ # |
# [3, 4], # | # |
# [2, 4], # | # |
# [1, 5], # a single point in a line # | a list of points defining a line # |
# [3, 5], # | # |
# [3, 7], # | # |
# ], # |
# [ # | all the lines
# [...], # |
# [...], # |
# ], # |
# [ # |
# [...], # |
# [...], # |
# ], # |
# ] # |
# First, we create a pair of empty sets for all the x and y values in all of the lines of the plot data.
x_values_in_lines = set()
y_values_in_lines = set()
# Loop over each line and all the points in each line, to get sets of all the x and y values:
for line in lines:
x_values_in_line, y_values_in_line = zip(*line)
x_values_in_lines.update(x_values_in_line)
y_values_in_lines.update(y_values_in_line)
# Identify the minimum and maximum values.
min_x, max_x = min(x_values_in_lines), max(x_values_in_lines)
min_y, max_y = min(y_values_in_lines), max(y_values_in_lines)
# Identify the range they span.
x_range, y_range = max_x - min_x, max_y - min_y
box_x_range, box_y_range = bounds[2] - bounds[0], bounds[3] - bounds[1]
# And their mid-points.
x_mid_point, y_mid_point = (max_x + min_x) / 2, (max_y + min_y) / 2
box_x_mid_point, box_y_mid_point = (bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2
# Get a 'divider' value for each range - the value by which we must divide all x and y so that they will
# fit safely inside the drawing range of the plotter.
# If both image and box are in portrait orientation, or both in landscape, we don't need to rotate the plot.
if (x_range >= y_range and box_x_range >= box_y_range) or (x_range <= y_range and box_x_range <= box_y_range):
divider = max((x_range / box_x_range), (y_range / box_y_range))
rotate = False
else:
divider = max((x_range / box_y_range), (y_range / box_x_range))
rotate = True
x_mid_point, y_mid_point = y_mid_point, x_mid_point
return rotate, x_mid_point, y_mid_point, box_x_mid_point, box_y_mid_point, divider
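# Worked example (illustrative): for lines spanning x 0..100 and y 0..50 (landscape) with the default
# bounds [-8, 4, 6, 13] (a 14 x 9 box, also landscape), no rotation is needed and
# divider = max(100 / 14, 50 / 9) ~= 7.14, so every coordinate is divided by about 7.14 before being
# recentred on the box mid-point.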
# ----------------- test pattern methods -----------------
def test_pattern(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):
self.vertical_lines(
bounds=bounds, lines=lines, wait=wait, interpolate=interpolate, repeat=repeat, reverse=reverse, both=both
)
self.horizontal_lines(
bounds=bounds, lines=lines, wait=wait, interpolate=interpolate, repeat=repeat, reverse=reverse, both=both
)
def vertical_lines(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "Plotting a test pattern is only possible when BrachioGraph.bounds is set."
if not reverse:
top_y = bounds[1]
bottom_y = bounds[3]
else:
bottom_y = bounds[1]
top_y = bounds[3]
for n in range(repeat):
step = (bounds[2] - bounds[0]) / lines
x = bounds[0]
while x <= bounds[2]:
self.draw_line((x, top_y), (x, bottom_y), interpolate=interpolate, both=both)
x = x + step
self.park()
def horizontal_lines(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "Plotting a test pattern is only possible when BrachioGraph.bounds is set."
if not reverse:
min_x = bounds[0]
max_x = bounds[2]
else:
max_x = bounds[0]
min_x = bounds[2]
for n in range(repeat):
step = (bounds[3] - bounds[1]) / lines
y = bounds[1]
while y <= bounds[3]:
self.draw_line((min_x, y), (max_x, y), interpolate=interpolate, both=both)
y = y + step
self.park()
def box(self, bounds=None, wait=0, interpolate=10, repeat=1, reverse=False):
"""Draw a box marked out by the ``bounds``."""
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "Box drawing is only possible when BrachioGraph.bounds is set."
self.xy(bounds[0], bounds[1], wait, interpolate)
for r in tqdm.trange(repeat, desc='Iteration', leave=False):
if not reverse:
self.xy(bounds[2], bounds[1], wait, interpolate, draw=True)
self.xy(bounds[2], bounds[3], wait, interpolate, draw=True)
self.xy(bounds[0], bounds[3], wait, interpolate, draw=True)
self.xy(bounds[0], bounds[1], wait, interpolate, draw=True)
else:
self.xy(bounds[0], bounds[3], wait, interpolate, draw=True)
self.xy(bounds[2], bounds[3], wait, interpolate, draw=True)
self.xy(bounds[2], bounds[1], wait, interpolate, draw=True)
self.xy(bounds[0], bounds[1], wait, interpolate, draw=True)
self.park()
def test_arcs(self):
self.park()
elbow_angle = 120
self.move_angles(angle_2=elbow_angle)
for angle_1 in range(-135, 15, 15):
self.move_angles(angle_1=angle_1, draw=True)
for angle_2 in range(elbow_angle, elbow_angle+16):
self.move_angles(angle_2=angle_2, draw=True)
for angle_2 in range(elbow_angle+16, elbow_angle-16, -1):
self.move_angles(angle_2=angle_2, draw=True)
for angle_2 in range(elbow_angle-16, elbow_angle+1):
self.move_angles(angle_2=angle_2, draw=True)
# ----------------- pen-moving methods -----------------
def xy(self, x=None, y=None, wait=0, interpolate=10, draw=False):
"""Moves the pen to the xy position; optionally draws while doing it."""
wait = wait or self.wait
if draw:
self.pen.down()
else:
self.pen.up()
x = x if x is not None else self.x
y = y if y is not None else self.y
(angle_1, angle_2) = self.xy_to_angles(x, y)
# calculate how many steps we need for this move, and the x/y length of each
(x_length, y_length) = (x - self.x, y - self.y)
length = math.sqrt(x_length ** 2 + y_length **2)
no_of_steps = int(length * interpolate) or 1
if no_of_steps < 100:
disable_tqdm = True
else:
disable_tqdm = False
(length_of_step_x, length_of_step_y) = (x_length/no_of_steps, y_length/no_of_steps)
for step in tqdm.tqdm(range(no_of_steps), desc='Interpolation', leave=False, disable=disable_tqdm):
self.x = self.x + length_of_step_x
self.y = self.y + length_of_step_y
angle_1, angle_2 = self.xy_to_angles(self.x, self.y)
self.set_angles(angle_1, angle_2)
if step + 1 < no_of_steps:
sleep(length * wait/no_of_steps)
sleep(length * wait/10)
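# Example (illustrative): a straight move of 5 units with interpolate=10 is split into
# int(5 * 10) = 50 small steps, each converted back to angles and sent to set_angles(); moves
# shorter than 10 units (fewer than 100 steps) suppress the progress bar.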
def move_angles(self, angle_1=None, angle_2=None, wait=0, interpolate=10, draw=False):
"""Moves the servo motors to the specified angles step-by-step, calling set_angles() for each step."""
wait = wait or self.wait
if draw:
self.pen.down()
else:
self.pen.up()
diff_1 = diff_2 = 0
if angle_1 is not None:
diff_1 = angle_1 - self.angle_1
if angle_2 is not None:
diff_2 = angle_2 - self.angle_2
length = math.sqrt(diff_1 ** 2 + diff_2 **2)
no_of_steps = int(length * interpolate) or 1
if no_of_steps < 100:
disable_tqdm = True
else:
disable_tqdm = False
(length_of_step_1, length_of_step_2) = (diff_1/no_of_steps, diff_2/no_of_steps)
for step in tqdm.tqdm(range(no_of_steps), desc='Interpolation', leave=False, disable=disable_tqdm):
self.angle_1 = self.angle_1 + length_of_step_1
self.angle_2 = self.angle_2 + length_of_step_2
self.set_angles(self.angle_1, self.angle_2)
if step + 1 < no_of_steps:
sleep(length * wait/no_of_steps)
sleep(length * wait/10)
def set_angles(self, angle_1=None, angle_2=None):
"""Moves the servo motors to the specified angles immediately. Relies upon getting accurate pulse-width
values.
Calls set_pulse_widths().
Sets self.x and self.y.
"""
pw_1 = pw_2 = None
if angle_1 is not None:
pw_1 = self.angles_to_pw_1(angle_1)
if pw_1 > self.previous_pw_1:
self.active_hysteresis_correction_1 = self.hysteresis_correction_1
elif pw_1 < self.previous_pw_1:
self.active_hysteresis_correction_1 = - self.hysteresis_correction_1
self.previous_pw_1 = pw_1
pw_1 = pw_1 + self.active_hysteresis_correction_1
self.angle_1 = angle_1
self.angles_used_1.add(int(angle_1))
self.pulse_widths_used_1.add(int(pw_1))
if angle_2 is not None:
pw_2 = self.angles_to_pw_2(angle_2)
if pw_2 > self.previous_pw_2:
self.active_hysteresis_correction_2 = self.hysteresis_correction_2
elif pw_2 < self.previous_pw_2:
self.active_hysteresis_correction_2 = - self.hysteresis_correction_2
self.previous_pw_2 = pw_2
pw_2 = pw_2 + self.active_hysteresis_correction_2
self.angle_2 = angle_2
self.angles_used_2.add(int(angle_2))
self.pulse_widths_used_2.add(int(pw_2))
if self.turtle:
x, y = self.angles_to_xy(self.angle_1, self.angle_2)
self.turtle.setx(x * self.turtle.multiplier)
self.turtle.sety(y * self.turtle.multiplier)
self.set_pulse_widths(pw_1, pw_2)
self.x, self.y = self.angles_to_xy(self.angle_1, self.angle_2)
# ----------------- angles-to-pulse-widths methods -----------------
def naive_angles_to_pulse_widths_1(self, angle):
return (angle - self.servo_1_parked_angle) * self.servo_1_degree_ms + self.servo_1_parked_pw
def naive_angles_to_pulse_widths_2(self, angle):
return (angle - self.servo_2_parked_angle) * self.servo_2_degree_ms + self.servo_2_parked_pw
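# Worked example with the default values (parked pulse-width 1500, -10/+10 per degree, parked
# angles -90 and 90): servo 1 at 0 degrees -> (0 - -90) * -10 + 1500 = 600, and servo 2 at
# 0 degrees -> (0 - 90) * 10 + 1500 = 600. Purely illustrative; calibrated pulse-widths are
# preferable in practice.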
# ----------------- hardware-related methods -----------------
def set_pulse_widths(self, pw_1=None, pw_2=None):
"""Applies the supplied pulse-width values to the servos, or pretends to, if we're in virtual
mode."""
if self.virtual:
if pw_1:
if 500 < pw_1 < 2500:
self.virtual_pw_1 = pw_1
else:
raise ValueError
if pw_2:
if 500 < pw_2 < 2500:
self.virtual_pw_2 = pw_2
else:
raise ValueError
else:
if pw_1:
self.rpi.set_servo_pulsewidth(14, pw_1)
if pw_2:
self.rpi.set_servo_pulsewidth(15, pw_2)
def get_pulse_widths(self):
"""Returns the actual pulse-widths values; if in virtual mode, returns the nominal values - i.e. the
values that they might be.
"""
if self.virtual:
actual_pulse_width_1 = self.virtual_pw_1
actual_pulse_width_2 = self.virtual_pw_2
else:
actual_pulse_width_1 = self.rpi.get_servo_pulsewidth(14)
actual_pulse_width_2 = self.rpi.get_servo_pulsewidth(15)
return (actual_pulse_width_1, actual_pulse_width_2)
def park(self):
"""Park the plotter with the inner arm at -90˚ and the outer arm at 90˚ to it.
This corresponds to an x/y position:
* x: ``-inner_arm``
* y: ``outer_arm``
"""
if self.virtual:
print("Parking")
self.pen.up()
self.xy(-self.inner_arm, self.outer_arm)
sleep(1)
def quiet(self, servos=[14, 15, 18]):
"""Stop sending pulses to the servos, so that they are no longer energised (and so that they
stop buzzing).
"""
if self.virtual:
print("Going quiet")
else:
for servo in servos:
self.rpi.set_servo_pulsewidth(servo, 0)
# ----------------- trigonometric methods -----------------
# Every x/y position of the plotter corresponds to a pair of angles of the arms. These methods
# calculate:
#
# the angles required to reach any x/y position
# the x/y position represented by any pair of angles
def xy_to_angles(self, x=0, y=0):
"""Return the servo angles required to reach any x/y position."""
hypotenuse = math.sqrt(x**2+y**2)
if hypotenuse > self.inner_arm + self.outer_arm:
raise Exception(f"Cannot reach {hypotenuse}; total arm length is {self.inner_arm + self.outer_arm}")
hypotenuse_angle = math.asin(x/hypotenuse)
inner_angle = math.acos(
(hypotenuse**2+self.inner_arm**2-self.outer_arm**2)/(2*hypotenuse*self.inner_arm)
)
outer_angle = math.acos(
(self.inner_arm**2+self.outer_arm**2-hypotenuse**2)/(2*self.inner_arm*self.outer_arm)
)
shoulder_motor_angle = hypotenuse_angle - inner_angle
elbow_motor_angle = math.pi - outer_angle
return (math.degrees(shoulder_motor_angle), math.degrees(elbow_motor_angle))
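# Worked example: with inner_arm = outer_arm = 8, the parked position x=-8, y=8 gives
# hypotenuse = sqrt(128) ~= 11.31, hypotenuse_angle = asin(-8 / 11.31) = -45 degrees,
# inner_angle = 45 degrees and outer_angle = 90 degrees, i.e. shoulder -90 and elbow 90 -
# matching the default parked angles. angles_to_xy() below is the inverse calculation.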
def angles_to_xy(self, shoulder_motor_angle, elbow_motor_angle):
"""Return the x/y co-ordinates represented by a pair of servo angles."""
elbow_motor_angle = math.radians(elbow_motor_angle)
shoulder_motor_angle = math.radians(shoulder_motor_angle)
hypotenuse = math.sqrt(
(self.inner_arm ** 2 + self.outer_arm ** 2 - 2 * self.inner_arm * self.outer_arm * math.cos(
math.pi - elbow_motor_angle)
)
)
base_angle = math.acos(
(hypotenuse ** 2 + self.inner_arm ** 2 - self.outer_arm ** 2) / (2 * hypotenuse * self.inner_arm)
)
inner_angle = base_angle + shoulder_motor_angle
x = math.sin(inner_angle) * hypotenuse
y = math.cos(inner_angle) * hypotenuse
return(x, y)
# ----------------- calibration -----------------
def auto_calibrate(self):
self.park()
for elbow in range(90, 136):
self.set_angles(None, elbow)
sleep(.01)
for shoulder in range(-90, -140, -1):
self.set_angles(shoulder, None)
sleep(.01)
def calibrate(self, servo=1):
pin = {1: 14, 2: 15}[servo]
servo_centre = {1: self.servo_1_parked_pw, 2: self.servo_2_parked_pw}.get(servo)
servo_angle_pws = []
texts = {
"arm-name": {1: "inner", 2: "outer"},
"nominal-centre": {1: 0, 2: 90},
"mount-arm": {
1: "(straight ahead)",
2: "(i.e. to the right) to the inner arm)"
},
"safe-guess": {1: -60, 2: 90}
}
pw = servo_centre
print(f"Calibrating servo {servo}, for the {texts["arm-name"][servo]} arm.")
print(f"See https://brachiograph.art/how-to/calibrate.html")
print()
self.rpi.set_servo_pulsewidth(pin, pw)
print(f"The servo is now at {pw}µS, in the centre of its range of movement.")
print("Attach the protractor to the base, with its centre at the axis of the servo.")
print(f"Mount the arm at a position as close as possible to {texts["nominal-centre"][servo]}˚ {texts["mount-arm"][servo]}.")
print("Now drive the arm to a known angle, as marked on the protractor.")
print("When the arm reaches the angle, press 1 and record the angle. Do this for as many angles as possible.")
print()
print("When you have done all the angles, press 2.")
print("Press 0 to exit at any time.")
while True:
key = readchar.readchar()
if key == "0":
return
elif key == "1":
angle = float(input("Enter the angle: "))
servo_angle_pws.append([angle, pw])
elif key == "2":
break
elif key=="a":
pw = pw - 10
elif key=="s":
pw = pw + 10
elif key=="A":
pw = pw - 1
elif key=="S":
pw = pw + 1
else:
continue
print(pw)
self.rpi.set_servo_pulsewidth(pin, pw)
print(f"------------------------")
print(f"Recorded angles servo {servo}")
print(f"------------------------")
print(f" angle | pulse-width ")
print(f"---------+--------------")
servo_angle_pws.sort()
for [angle, pw] in servo_angle_pws:
print(f" {angle:>6.1f} | {pw:>4.0f}")
servo_array = numpy.array(servo_angle_pws)
pw = int(numpy.poly1d(
numpy.polyfit(
servo_array[:,0],
servo_array[:,1],
3
)
)(0))
self.rpi.set_servo_pulsewidth(pin, pw)
print()
print(f"The servo is now at {int(pw)}µS, which should correspond to {texts["nominal-centre"][servo]}˚.")
print("If necessary, remount the arm at the centre of its optimal sweep for your drawing area.")
print()
print(f"Alternatively as a rule of thumb, if the arms are of equal length, use the position closest to {texts["safe-guess"][servo]}˚.")
print("Carefully count how many spline positions you had to move the arm by to get it there.")
print("Multiply that by the number of degrees for each spline to get the angle by which you moved it.")
offset = float(input("Enter the angle by which you moved the arm (anti-clockwise is negative): "))
print(f"---------------------------")
print(f"Calculated angles {texts["arm-name"][servo]} arm")
print(f"---------------------------")
print(f" angle | pulse-width ")
print(f"----------+----------------")
servo_angle_including_offset_pws = []
for [angle, pw] in servo_angle_pws:
angle_including_offset = round(angle + offset, 1)
servo_angle_including_offset_pws.append([angle_including_offset, pw])
print(f" {angle:>6.1f} | {pw:>4.0f}")
print()
print("Use this list of angles and pulse-widths in your BrachioGraph definition:")
print()
print(f"servo_{servo}_angle_pws={servo_angle_including_offset_pws}")
# ----------------- manual driving methods -----------------
def drive(self):
# adjust the pulse-widths using the keyboard
pw_1, pw_2 = self.get_pulse_widths()
self.set_pulse_widths(pw_1, pw_2)
while True:
key = readchar.readchar()
if key == "0":
return
elif key=="a":
pw_1 = pw_1 - 10
elif key=="s":
pw_1 = pw_1 + 10
elif key=="A":
pw_1 = pw_1 - 2
elif key=="S":
pw_1 = pw_1 + 2
elif key=="k":
pw_2 = pw_2 - 10
elif key=="l":
pw_2 = pw_2 + 10
elif key=="K":
pw_2 = pw_2 - 2
elif key=="L":
pw_2 = pw_2 + 2
print(pw_1, pw_2)
self.set_pulse_widths(pw_1, pw_2)
def drive_xy(self):
# move the pen up/down and left/right using the keyboard
while True:
key = readchar.readchar()
if key == "0":
return
elif key=="a":
self.x = self.x - 1
elif key=="s":
self.x = self.x + 1
elif key=="A":
self.x = self.x - .1
elif key=="S":
self.x = self.x + .1
elif key=="k":
self.y = self.y - 1
elif key=="l":
self.y = self.y + 1
elif key=="K":
self.y = self.y - .1
elif key=="L":
self.y = self.y + .1
print(self.x, self.y)
self.xy(self.x, self.y)
# ----------------- reporting methods -----------------
def status(self):
print("------------------------------------------")
print(" | Servo 1 | Servo 2 ")
print(" | Shoulder| Elbow ")
print("----------------------|---------|---------")
pw_1, pw_2 = self.get_pulse_widths()
print(f"{"pulse-width |":>23}", f"{pw_1:>7.0f}", "|", f"{pw_2:>7.0f}")
angle_1, angle_2 = self.angle_1, self.angle_2
print(f"{"angle |":>23}", f"{angle_1:>7.0f}", "|", f"{angle_2:>7.0f}")
h1, h2 = self.hysteresis_correction_1, self.hysteresis_correction_2
print(f"{"hysteresis correction |":>23}", f"{h1:>7.1f}", "|", f"{h2:>7.1f}")
print("------------------------------------------")
print(f"{"x/y location |":>23}", f"{self.x:>7.1f}", "|", f"{self.y:>7.1f}")
print()
print("------------------------------------------")
print("pen:", self.pen.position)
bl = self.bounds[0], self.bounds[1]
tr = self.bounds[2], self.bounds[3]
print("------------------------------------------")
print("bottom left:", bl, "top right:", tr)
print("------------------------------------------")
def report(self):
print(f" -----------------|-----------------")
print(f" Servo 1 | Servo 2 ")
print(f" -----------------|-----------------")
h1, h2 = self.hysteresis_correction_1, self.hysteresis_correction_2
print(f"hysteresis {h1:>2.1f} | {h2:>2.1f}")
pw_1, pw_2 = self.get_pulse_widths()
print(f"pulse-width {pw_1:<4.0f} | {pw_2:<4.0f}")
angle_1, angle_2 = self.angle_1, self.angle_2
if angle_1 and angle_2:
print(f" angle {angle_1:>4.0f} | {angle_2:>4.0f}")
print(f" -----------------|-----------------")
print(f" min max mid | min max mid")
print(f" -----------------|-----------------")
if self.angles_used_1 and self.angles_used_2 and self.pulse_widths_used_1 and self.pulse_widths_used_2:
min1 = min(self.pulse_widths_used_1)
max1 = max(self.pulse_widths_used_1)
mid1 = (min1 + max1) / 2
min2 = min(self.pulse_widths_used_2)
max2 = max(self.pulse_widths_used_2)
mid2 = (min2 + max2) / 2
print(f"pulse-widths {min1:>4.0f} {max1:>4.0f} {mid1:>4.0f} | {min2:>4.0f} {max2:>4.0f} {mid2:>4.0f}")
min1 = min(self.angles_used_1)
max1 = max(self.angles_used_1)
mid1 = (min1 + max1) / 2
min2 = min(self.angles_used_2)
max2 = max(self.angles_used_2)
mid2 = (min2 + max2) / 2
print(f" angles {min1:>4.0f} {max1:>4.0f} {mid1:>4.0f} | {min2:>4.0f} {max2:>4.0f} {mid2:>4.0f}")
else:
print("No data recorded yet. Try calling the BrachioGraph.box() method first.")
def reset_report(self):
self.angle_1 = self.angle_2 = None
# Create sets for recording movement of the plotter.
self.angles_used_1 = set()
self.angles_used_2 = set()
self.pulse_widths_used_1 = set()
self.pulse_widths_used_2 = set()
@property
def bl(self):
return (self.bounds[0], self.bounds[1])
@property
def tl(self):
return (self.bounds[0], self.bounds[3])
@property
def tr(self):
return (self.bounds[2], self.bounds[3])
@property
def br(self):
return (self.bounds[2], self.bounds[1])
def reset_turtle(self):
self.turtle = BrachioGraphTurtle(
inner_arm=self.inner_arm, # the length of the inner arm (blue)
shoulder_centre_angle=-90, # the starting angle of the inner arm, relative to straight ahead
shoulder_sweep=180, # the arc covered by the shoulder motor
outer_arm=self.outer_arm, # the length of the outer arm (red)
elbow_centre_angle=90, # the centre of the outer arm relative to the inner arm
elbow_sweep=180, # the arc covered by the elbow motor
window_size=800, # width and height of the turtle canvas
speed=0, # how fast to draw
)
self.turtle.draw_grid()
class Pen:
def __init__(self, bg, pw_up=1700, pw_down=1300, pin=18, transition_time=0.25, virtual=False):
self.bg = bg
self.pin = pin
self.pw_up = pw_up
self.pw_down = pw_down
self.transition_time = transition_time
self.virtual = virtual
if self.virtual:
print("Initialising virtual Pen")
else:
self.rpi = pigpio.pi()
self.rpi.set_PWM_frequency(self.pin, 50)
self.up()
sleep(0.3)
self.down()
sleep(0.3)
self.up()
sleep(0.3)
def down(self):
if self.virtual:
self.virtual_pw = self.pw_down
else:
self.rpi.set_servo_pulsewidth(self.pin, self.pw_down)
sleep(self.transition_time)
if self.bg.turtle:
self.bg.turtle.down()
self.bg.turtle.color('blue')
self.bg.turtle.width(1)
self.position = "down"
def up(self):
if self.virtual:
self.virtual_pw = self.pw_up
else:
self.rpi.set_servo_pulsewidth(self.pin, self.pw_up)
sleep(self.transition_time)
if self.bg.turtle:
self.bg.turtle.up()
self.position = "up"
# for convenience, a quick way to set pen motor pulse-widths
def pw(self, pulse_width):
if self.virtual:
self.virtual_pw = pulse_width
else:
self.rpi.set_servo_pulsewidth(self.pin, pulse_width)
def calibrate(self):
print(f"Calibrating the pen-lifting servo.")
print(f"See https://brachiograph.art/how-to/calibrate.html")
pw_1, pw_2 = self.bg.get_pulse_widths()
pw_3 = self.pw_up
while True:
self.bg.set_pulse_widths(pw_1, pw_2)
self.pw(pw_3)
key = readchar.readchar()
if key == "0":
break
elif key=="a":
pw_1 = pw_1 - 10
continue
elif key=="s":
pw_1 = pw_1 + 10
continue
elif key=="k":
pw_2 = pw_2 - 10
continue
elif key=="l":
pw_2 = pw_2 + 10
continue
elif key=="t":
if pw_3 == self.pw_up:
pw_3 = self.pw_down
else:
pw_3 = self.pw_up
continue
elif key=="z":
pw_3 = pw_3 - 10
print(pw_3)
continue
elif key=="x":
pw_3 = pw_3 + 10
print(pw_3)
continue
elif key=="u":
self.pw_up = pw_3
elif key=="d":
self.pw_down = pw_3
else:
continue
mid = (self.pw_up + self.pw_down) / 2
print(f"Pen-up pulse-width: {self.pw_up}µS, pen-down pulse-width: {self.pw_down}µS, mid-point: {mid}")
print()
print("Use these values in your BrachioGraph definition:")
print()
print(f"pen_up={self.pw_up}, pen_down={self.pw_down}")
|
# coding=utf-8
from time import sleep
import readchar
import math
import numpy
import json
import pigpio
from turtle_draw import BrachioGraphTurtle
try:
pigpio.exceptions = False
rpi = pigpio.pi()
rpi.set_PWM_frequency(18, 50)
pigpio.exceptions = True
force_virtual = False
except:
print("pigpio daemon is not available; running in virtual mode")
force_virtual = True
import tqdm
class BrachioGraph:
def __init__(
self,
# ----------------- geometry of the plotter -----------------
inner_arm=8, # the lengths of the arms
outer_arm=8,
bounds=[-8, 4, 6, 13], # the maximum rectangular drawing area
# ----------------- naive calculation values -----------------
servo_1_parked_pw=1500, # pulse-widths when parked
servo_2_parked_pw=1500,
servo_1_degree_ms=-10, # milliseconds pulse-width per degree
servo_2_degree_ms=10, # reversed for the mounting of the shoulder servo
servo_1_parked_angle=-90, # the arm angle in the parked position
servo_2_parked_angle=90,
# ----------------- hysteresis -----------------
hysteresis_correction_1=0, # hardware error compensation
hysteresis_correction_2=0,
# ----------------- servo angles and pulse-widths in lists -----------------
servo_1_angle_pws=[], # pulse-widths for various angles
servo_2_angle_pws=[],
# ----------------- servo angles and pulse-widths in lists (bi-directional) ------
servo_1_angle_pws_bidi=[], # bi-directional pulse-widths for various angles
servo_2_angle_pws_bidi=[],
# ----------------- the pen -----------------
pw_up=1500, # pulse-widths for pen up/down
pw_down=1100,
# ----------------- misc -----------------
wait=None, # default wait time between operations
virtual=False, # run in virtual mode
turtle=False
):
# set the geometry
self.inner_arm = inner_arm
self.outer_arm = outer_arm
self.virtual = virtual or force_virtual
self.turtle = turtle
if self.turtle:
self.reset_turtle()
# the box bounds describe a rectangle that we can safely draw in
self.bounds = bounds
# if pulse-widths to angles are supplied for each servo, we will feed them to
# numpy.polyfit(), to produce a function for each one. Otherwise, we will use a simple
# approximation based on a centre of travel of 1500µS and 10µS per degree
self.servo_1_parked_pw = servo_1_parked_pw
self.servo_1_degree_ms = servo_1_degree_ms
self.servo_1_parked_angle = servo_1_parked_angle
self.hysteresis_correction_1 = hysteresis_correction_1
self.servo_2_parked_pw = servo_2_parked_pw
self.servo_2_degree_ms = servo_2_degree_ms
self.servo_2_parked_angle = servo_2_parked_angle
self.hysteresis_correction_2 = hysteresis_correction_2
# set some initial values required for moving methods
self.previous_pw_1 = self.previous_pw_2 = 0
self.active_hysteresis_correction_1 = self.active_hysteresis_correction_2 = 0
self.reset_report()
# Set the x and y position state, so it knows its current x/y position.
self.x = -self.inner_arm
self.y = self.outer_arm
if servo_1_angle_pws_bidi:
servo_1_angle_pws = []
differences = []
for angle, pws in servo_1_angle_pws_bidi.items():
pw = (pws['acw'] + pws['cw']) / 2
servo_1_angle_pws.append([angle, pw])
differences.append((pws['acw'] - pws['cw']) / 2)
self.hysteresis_correction_1 = numpy.mean(differences)
if servo_1_angle_pws:
servo_1_array = numpy.array(servo_1_angle_pws)
self.angles_to_pw_1 = numpy.poly1d(
numpy.polyfit(
servo_1_array[:,0],
servo_1_array[:,1],
3
)
)
else:
self.angles_to_pw_1 = self.naive_angles_to_pulse_widths_1
if servo_2_angle_pws_bidi:
servo_2_angle_pws = []
differences = []
for angle, pws in servo_2_angle_pws_bidi.items():
pw = (pws['acw'] + pws['cw']) / 2
servo_2_angle_pws.append([angle, pw])
differences.append((pws['acw'] - pws['cw']) / 2)
self.hysteresis_correction_2 = numpy.mean(differences)
print(servo_2_angle_pws)
if servo_2_angle_pws:
servo_2_array = numpy.array(servo_2_angle_pws)
self.angles_to_pw_2 = numpy.poly1d(
numpy.polyfit(
servo_2_array[:,0],
servo_2_array[:,1],
3
)
)
else:
self.angles_to_pw_2 = self.naive_angles_to_pulse_widths_2
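# Illustrative sketch (added for clarity, not part of the original code): given a calibration list
# such as [[-90, 2400], [-45, 1950], [0, 1500], [45, 1050]], numpy.polyfit returns the coefficients
# of a cubic fitted to the angle/pulse-width pairs, and numpy.poly1d wraps them in a callable:
#     angles_to_pw = numpy.poly1d(numpy.polyfit([-90, -45, 0, 45], [2400, 1950, 1500, 1050], 3))
#     angles_to_pw(-90)   # ~2400.0
# The bi-directional lists above additionally yield a hysteresis correction, taken as the mean
# half-difference between the anti-clockwise ('acw') and clockwise ('cw') readings for each angle.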
# create the pen object
self.pen = Pen(bg=self, pw_up=pw_up, pw_down=pw_down, virtual=self.virtual)
if self.virtual:
print("Initialising virtual BrachioGraph")
self.virtual_pw_1 = self.angles_to_pw_1(-90)
self.virtual_pw_2 = self.angles_to_pw_2(90)
# by default in virtual mode, we use a wait factor of 0 for speed
self.wait = wait or 0
else:
# instantiate this Raspberry Pi as a pigpio.pi() instance
self.rpi = pigpio.pi()
# the pulse frequency should be no higher than 100Hz - higher values could (supposedly) damage the servos
self.rpi.set_PWM_frequency(14, 50)
self.rpi.set_PWM_frequency(15, 50)
# by default we use a wait factor of 0.1 for accuracy
self.wait = wait or .1
self.set_angles(-90, 90)
if self.turtle:
self.turtle.showturtle()
self.status()
# methods in this class:
# drawing
# line-processing
# test patterns
# pen-moving
# angles-to-pulse-widths
# hardware-related
# trigonometric
# calibration
# manual driving
# reporting
# ----------------- drawing methods -----------------
def plot_file(self, filename="", wait=0, interpolate=10, bounds=None):
"""Passes the lines in the supplied JSON file to ``plot_lines()``"""
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "File plotting is only possible when BrachioGraph.bounds is set."
with open(filename, "r") as line_file:
lines = json.load(line_file)
self.plot_lines(lines=lines, wait=wait, interpolate=interpolate, bounds=bounds, flip=True)
def plot_lines(self, lines=[], wait=0, interpolate=10, rotate=False, flip=False, bounds=None):
"""Passes each segment of each line in lines to ``draw_line()``"""
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "Line plotting is only possible when BrachioGraph.bounds is set."
lines = self.rotate_and_scale_lines(lines=lines, bounds=bounds, flip=True)
for line in tqdm.tqdm(lines, desc="Lines", leave=False):
x, y = line[0]
# only if we are not within 1mm of the start of the line, lift pen and go there
if (round(self.x, 1), round(self.y, 1)) != (round(x, 1), round(y, 1)):
self.xy(x, y, wait=wait, interpolate=interpolate)
for point in tqdm.tqdm(line[1:], desc="Segments", leave=False):
x, y = point
self.xy(x, y, wait=wait, interpolate=interpolate, draw=True)
self.park()
def draw_line(self, start=(0, 0), end=(0, 0), wait=0, interpolate=10, both=False):
"""Draws a straight line between two points"""
wait = wait or self.wait
start_x, start_y = start
end_x, end_y = end
self.xy(x=start_x, y=start_y, wait=wait, interpolate=interpolate)
self.xy(x=end_x, y=end_y, wait=wait, interpolate=interpolate, draw=True)
if both:
self.xy(x=start_x, y=start_y, wait=wait, interpolate=interpolate, draw=True)
# ----------------- line-processing methods -----------------
def rotate_and_scale_lines(self, lines=[], rotate=False, flip=False, bounds=None):
rotate, x_mid_point, y_mid_point, box_x_mid_point, box_y_mid_point, divider = self.analyse_lines(
lines=lines, rotate=rotate, bounds=bounds
)
for line in lines:
for point in line:
if rotate:
point[0], point[1] = point[1], point[0]
x = point[0]
x = x - x_mid_point # shift x values so that they have zero as their mid-point
x = x / divider # scale x values to fit in our box width
if flip ^ rotate: # flip before moving back into drawing pane
x = -x
x = x + box_x_mid_point # shift x values so that they have the box x midpoint as their endpoint
y = point[1]
y = y - y_mid_point
y = y / divider
y = y + box_y_mid_point
point[0], point[1] = x, y
return lines
def analyse_lines(self, lines=[], rotate=False, bounds=None):
# lines is a tuple itself containing a number of tuples, each of which contains a number of 2-tuples
#
# [ # |
# [ # |
# [3, 4], # | # |
# [2, 4], # | # |
# [1, 5], # a single point in a line # | a list of points defining a line # |
# [3, 5], # | # |
# [3, 7], # | # |
# ], # |
# [ # | all the lines
# [...], # |
# [...], # |
# ], # |
# [ # |
# [...], # |
# [...], # |
# ], # |
# ] # |
# First, we create a pair of empty sets for all the x and y values in all of the lines of the plot data.
x_values_in_lines = set()
y_values_in_lines = set()
# Loop over each line and all the points in each line, to get sets of all the x and y values:
for line in lines:
x_values_in_line, y_values_in_line = zip(*line)
x_values_in_lines.update(x_values_in_line)
y_values_in_lines.update(y_values_in_line)
# Identify the minimum and maximum values.
min_x, max_x = min(x_values_in_lines), max(x_values_in_lines)
min_y, max_y = min(y_values_in_lines), max(y_values_in_lines)
# Identify the range they span.
x_range, y_range = max_x - min_x, max_y - min_y
box_x_range, box_y_range = bounds[2] - bounds[0], bounds[3] - bounds[1]
# And their mid-points.
x_mid_point, y_mid_point = (max_x + min_x) / 2, (max_y + min_y) / 2
box_x_mid_point, box_y_mid_point = (bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2
# Get a 'divider' value for each range - the value by which we must divide all x and y so that they will
# fit safely inside the drawing range of the plotter.
# If both image and box are in portrait orientation, or both in landscape, we don't need to rotate the plot.
if (x_range >= y_range and box_x_range >= box_y_range) or (x_range <= y_range and box_x_range <= box_y_range):
divider = max((x_range / box_x_range), (y_range / box_y_range))
rotate = False
else:
divider = max((x_range / box_y_range), (y_range / box_x_range))
rotate = True
x_mid_point, y_mid_point = y_mid_point, x_mid_point
return rotate, x_mid_point, y_mid_point, box_x_mid_point, box_y_mid_point, divider
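# Illustrative sketch (assumed input, not from the original file): plot_lines() expects the nested
# structure described in the comment above, e.g. two strokes forming an "L":
#     lines = [
#         [[0, 0], [0, 5]],    # one line: a list of [x, y] points
#         [[0, 0], [3, 0]],
#     ]
#     bg.plot_lines(lines)     # rotated/scaled/centred to fit bg.bounds before drawing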
# ----------------- test pattern methods -----------------
def test_pattern(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):
self.vertical_lines(
bounds=bounds, lines=lines, wait=wait, interpolate=interpolate, repeat=repeat, reverse=reverse, both=both
)
self.horizontal_lines(
bounds=bounds, lines=lines, wait=wait, interpolate=interpolate, repeat=repeat, reverse=reverse, both=both
)
def vertical_lines(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "Plotting a test pattern is only possible when BrachioGraph.bounds is set."
if not reverse:
top_y = self.bounds[1]
bottom_y = self.bounds[3]
else:
bottom_y = self.bounds[1]
top_y = self.bounds[3]
for n in range(repeat):
step = (self.bounds[2] - self.bounds[0]) / lines
x = self.bounds[0]
while x <= self.bounds[2]:
self.draw_line((x, top_y), (x, bottom_y), interpolate=interpolate, both=both)
x = x + step
self.park()
def horizontal_lines(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "Plotting a test pattern is only possible when BrachioGraph.bounds is set."
if not reverse:
min_x = self.bounds[0]
max_x = self.bounds[2]
else:
max_x = self.bounds[0]
min_x = self.bounds[2]
for n in range(repeat):
step = (self.bounds[3] - self.bounds[1]) / lines
y = self.bounds[1]
while y <= self.bounds[3]:
self.draw_line((min_x, y), (max_x, y), interpolate=interpolate, both=both)
y = y + step
self.park()
def box(self, bounds=None, wait=0, interpolate=10, repeat=1, reverse=False):
"""Draw a box marked out by the ``bounds``."""
wait = wait or self.wait
bounds = bounds or self.bounds
if not bounds:
return "Box drawing is only possible when BrachioGraph.bounds is set."
self.xy(bounds[0], bounds[1], wait, interpolate)
for r in tqdm.trange(repeat, desc='Iteration', leave=False):
if not reverse:
self.xy(bounds[2], bounds[1], wait, interpolate, draw=True)
self.xy(bounds[2], bounds[3], wait, interpolate, draw=True)
self.xy(bounds[0], bounds[3], wait, interpolate, draw=True)
self.xy(bounds[0], bounds[1], wait, interpolate, draw=True)
else:
self.xy(bounds[0], bounds[3], wait, interpolate, draw=True)
self.xy(bounds[2], bounds[3], wait, interpolate, draw=True)
self.xy(bounds[2], bounds[1], wait, interpolate, draw=True)
self.xy(bounds[0], bounds[1], wait, interpolate, draw=True)
self.park()
def test_arcs(self):
self.park()
elbow_angle = 120
self.move_angles(angle_2=elbow_angle)
for angle_1 in range(-135, 15, 15):
self.move_angles(angle_1=angle_1, draw=True)
for angle_2 in range(elbow_angle, elbow_angle+16):
self.move_angles(angle_2=angle_2, draw=True)
for angle_2 in range(elbow_angle+16, elbow_angle-16, -1):
self.move_angles(angle_2=angle_2, draw=True)
for angle_2 in range(elbow_angle-16, elbow_angle+1):
self.move_angles(angle_2=angle_2, draw=True)
# ----------------- pen-moving methods -----------------
def xy(self, x=None, y=None, wait=0, interpolate=10, draw=False):
"""Moves the pen to the xy position; optionally draws while doing it."""
wait = wait or self.wait
if draw:
self.pen.down()
else:
self.pen.up()
x = x or self.x
y = y or self.y
(angle_1, angle_2) = self.xy_to_angles(x, y)
# calculate how many steps we need for this move, and the x/y length of each
(x_length, y_length) = (x - self.x, y - self.y)
length = math.sqrt(x_length ** 2 + y_length **2)
no_of_steps = int(length * interpolate) or 1
if no_of_steps < 100:
disable_tqdm = True
else:
disable_tqdm = False
(length_of_step_x, length_of_step_y) = (x_length/no_of_steps, y_length/no_of_steps)
for step in tqdm.tqdm(range(no_of_steps), desc='Interpolation', leave=False, disable=disable_tqdm):
self.x = self.x + length_of_step_x
self.y = self.y + length_of_step_y
angle_1, angle_2 = self.xy_to_angles(self.x, self.y)
self.set_angles(angle_1, angle_2)
if step + 1 < no_of_steps:
sleep(length * wait/no_of_steps)
sleep(length * wait/10)
def move_angles(self, angle_1=None, angle_2=None, wait=0, interpolate=10, draw=False):
"""Moves the servo motors to the specified angles step-by-step, calling set_angles() for each step."""
wait = wait or self.wait
if draw:
self.pen.down()
else:
self.pen.up()
diff_1 = diff_2 = 0
if angle_1 is not None:
diff_1 = angle_1 - self.angle_1
if angle_2 is not None:
diff_2 = angle_2 - self.angle_2
length = math.sqrt(diff_1 ** 2 + diff_2 **2)
no_of_steps = int(length * interpolate) or 1
if no_of_steps < 100:
disable_tqdm = True
else:
disable_tqdm = False
(length_of_step_1, length_of_step_2) = (diff_1/no_of_steps, diff_2/no_of_steps)
for step in tqdm.tqdm(range(no_of_steps), desc='Interpolation', leave=False, disable=disable_tqdm):
self.angle_1 = self.angle_1 + length_of_step_1
self.angle_2 = self.angle_2 + length_of_step_2
self.set_angles(self.angle_1, self.angle_2)
if step + 1 < no_of_steps:
sleep(length * wait/no_of_steps)
sleep(length * wait/10)
def set_angles(self, angle_1=None, angle_2=None):
"""Moves the servo motors to the specified angles immediately. Relies upon getting accurate pulse-width
values.
Calls set_pulse_widths().
Sets current_x, current_y.
"""
pw_1 = pw_2 = None
if angle_1 is not None:
pw_1 = self.angles_to_pw_1(angle_1)
if pw_1 > self.previous_pw_1:
self.active_hysteresis_correction_1 = self.hysteresis_correction_1
elif pw_1 < self.previous_pw_1:
self.active_hysteresis_correction_1 = - self.hysteresis_correction_1
self.previous_pw_1 = pw_1
pw_1 = pw_1 + self.active_hysteresis_correction_1
self.angle_1 = angle_1
self.angles_used_1.add(int(angle_1))
self.pulse_widths_used_1.add(int(pw_1))
if angle_2 is not None:
pw_2 = self.angles_to_pw_2(angle_2)
if pw_2 > self.previous_pw_2:
self.active_hysteresis_correction_2 = self.hysteresis_correction_2
elif pw_2 < self.previous_pw_2:
self.active_hysteresis_correction_2 = - self.hysteresis_correction_2
self.previous_pw_2 = pw_2
pw_2 = pw_2 + self.active_hysteresis_correction_2
self.angle_2 = angle_2
self.angles_used_2.add(int(angle_2))
self.pulse_widths_used_2.add(int(pw_2))
if self.turtle:
x, y = self.angles_to_xy(self.angle_1, self.angle_2)
self.turtle.setx(x * self.turtle.multiplier)
self.turtle.sety(y * self.turtle.multiplier)
self.set_pulse_widths(pw_1, pw_2)
self.x, self.y = self.angles_to_xy(self.angle_1, self.angle_2)
# ----------------- angles-to-pulse-widths methods -----------------
def naive_angles_to_pulse_widths_1(self, angle):
return (angle - self.servo_1_parked_angle) * self.servo_1_degree_ms + self.servo_1_parked_pw
def naive_angles_to_pulse_widths_2(self, angle):
return (angle - self.servo_2_parked_angle) * self.servo_2_degree_ms + self.servo_2_parked_pw
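# Worked example (using the default values above): for servo 1, with servo_1_parked_angle=-90,
# servo_1_degree_ms=-10 and servo_1_parked_pw=1500, an angle of -45 maps to
# (-45 - -90) * -10 + 1500 = 1050µs; servo 2 uses a slope of +10µs per degree, reflecting the
# direction in which it is mounted.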
# ----------------- hardware-related methods -----------------
def set_pulse_widths(self, pw_1=None, pw_2=None):
"""Applies the supplied pulse-width values to the servos, or pretends to, if we're in virtual
mode."""
if self.virtual:
if pw_1:
if 500 < pw_1 < 2500:
self.virtual_pw_1 = pw_1
else:
raise ValueError
if pw_2:
if 500 < pw_2 < 2500:
self.virtual_pw_2 = pw_2
else:
raise ValueError
else:
if pw_1:
self.rpi.set_servo_pulsewidth(14, pw_1)
if pw_2:
self.rpi.set_servo_pulsewidth(15, pw_2)
def get_pulse_widths(self):
"""Returns the actual pulse-widths values; if in virtual mode, returns the nominal values - i.e. the
values that they might be.
"""
if self.virtual:
actual_pulse_width_1 = self.virtual_pw_1
actual_pulse_width_2 = self.virtual_pw_2
else:
actual_pulse_width_1 = self.rpi.get_servo_pulsewidth(14)
actual_pulse_width_2 = self.rpi.get_servo_pulsewidth(15)
return (actual_pulse_width_1, actual_pulse_width_2)
def park(self):
"""Park the plotter with the inner arm at -90˚ and the outer arm at 90˚ to it.
This corresponds to an x/y position:
* x: ``-inner_arm``
* y: ``outer_arm``
"""
if self.virtual:
print("Parking")
self.pen.up()
self.xy(-self.inner_arm, self.outer_arm)
sleep(1)
def quiet(self, servos=[14, 15, 18]):
"""Stop sending pulses to the servos, so that they are no longer energised (and so that they
stop buzzing).
"""
if self.virtual:
print("Going quiet")
else:
for servo in servos:
self.rpi.set_servo_pulsewidth(servo, 0)
# ----------------- trigonometric methods -----------------
# Every x/y position of the plotter corresponds to a pair of angles of the arms. These methods
# calculate:
#
# the angles required to reach any x/y position
# the x/y position represented by any pair of angles
def xy_to_angles(self, x=0, y=0):
"""Return the servo angles required to reach any x/y position."""
hypotenuse = math.sqrt(x**2+y**2)
if hypotenuse > self.inner_arm + self.outer_arm:
raise Exception(f"Cannot reach {hypotenuse}; total arm length is {self.inner_arm + self.outer_arm}")
hypotenuse_angle = math.asin(x/hypotenuse)
inner_angle = math.acos(
(hypotenuse**2+self.inner_arm**2-self.outer_arm**2)/(2*hypotenuse*self.inner_arm)
)
outer_angle = math.acos(
(self.inner_arm**2+self.outer_arm**2-hypotenuse**2)/(2*self.inner_arm*self.outer_arm)
)
shoulder_motor_angle = hypotenuse_angle - inner_angle
elbow_motor_angle = math.pi - outer_angle
return (math.degrees(shoulder_motor_angle), math.degrees(elbow_motor_angle))
def angles_to_xy(self, shoulder_motor_angle, elbow_motor_angle):
"""Return the x/y co-ordinates represented by a pair of servo angles."""
elbow_motor_angle = math.radians(elbow_motor_angle)
shoulder_motor_angle = math.radians(shoulder_motor_angle)
hypotenuse = math.sqrt(
(self.inner_arm ** 2 + self.outer_arm ** 2 - 2 * self.inner_arm * self.outer_arm * math.cos(
math.pi - elbow_motor_angle)
)
)
base_angle = math.acos(
(hypotenuse ** 2 + self.inner_arm ** 2 - self.outer_arm ** 2) / (2 * hypotenuse * self.inner_arm)
)
inner_angle = base_angle + shoulder_motor_angle
x = math.sin(inner_angle) * hypotenuse
y = math.cos(inner_angle) * hypotenuse
return(x, y)
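# Illustrative round trip (assuming the default arm lengths, inner_arm=8 and outer_arm=8): the
# parked position x=-8, y=8 corresponds to a shoulder angle of -90 and an elbow angle of 90, so
# (values rounded):
#     xy_to_angles(-8, 8)     # -> (-90.0, 90.0)
#     angles_to_xy(-90, 90)   # -> (-8.0, 8.0)
# The two methods are inverses of each other within floating-point error.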
# ----------------- calibration -----------------
def auto_calibrate(self):
self.park()
for elbow in range(90, 136):
self.set_angles(None, elbow)
sleep(.01)
for shoulder in range(-90, -140, -1):
self.set_angles(shoulder, None)
sleep(.01)
def calibrate(self, servo=1):
pin = {1: 14, 2: 15}[servo]
servo_centre = {1: self.servo_1_parked_pw, 2: self.servo_2_parked_pw}.get(servo)
servo_angle_pws = []
texts = {
"arm-name": {1: "inner", 2: "outer"},
"nominal-centre": {1: 0, 2: 90},
"mount-arm": {
1: "(straight ahead)",
2: "(i.e. to the right) to the inner arm)"
},
"safe-guess": {1: -60, 2: 90}
}
pw = servo_centre
print(f"Calibrating servo {servo}, for the {texts['arm-name'][servo]} arm.")
print(f"See https://brachiograph.art/how-to/calibrate.html")
print()
self.rpi.set_servo_pulsewidth(pin, pw)
print(f"The servo is now at {pw}µS, in the centre of its range of movement.")
print("Attach the protractor to the base, with its centre at the axis of the servo.")
print(f"Mount the arm at a position as close as possible to {texts['nominal-centre'][servo]}˚ {texts['mount-arm'][servo]}.")
print("Now drive the arm to a known angle, as marked on the protractor.")
print("When the arm reaches the angle, press 1 and record the angle. Do this for as many angles as possible.")
print()
print("When you have done all the angles, press 2.")
print("Press 0 to exit at any time.")
while True:
key = readchar.readchar()
if key == "0":
return
elif key == "1":
angle = float(input("Enter the angle: "))
servo_angle_pws.append([angle, pw])
elif key == "2":
break
elif key=="a":
pw = pw - 10
elif key=="s":
pw = pw + 10
elif key=="A":
pw = pw - 1
elif key=="S":
pw = pw + 1
else:
continue
print(pw)
self.rpi.set_servo_pulsewidth(pin, pw)
print(f"------------------------")
print(f"Recorded angles servo {servo}")
print(f"------------------------")
print(f" angle | pulse-width ")
print(f"---------+--------------")
servo_angle_pws.sort()
for [angle, pw] in servo_angle_pws:
print(f" {angle:>6.1f} | {pw:>4.0f}")
servo_array = numpy.array(servo_angle_pws)
pw = int(numpy.poly1d(
numpy.polyfit(
servo_array[:,0],
servo_array[:,1],
3
)
)(0))
self.rpi.set_servo_pulsewidth(pin, pw)
print()
print(f"The servo is now at {int(pw)}µS, which should correspond to {texts['nominal-centre'][servo]}˚.")
print("If necessary, remount the arm at the centre of its optimal sweep for your drawing area.")
print()
print(f"Alternatively as a rule of thumb, if the arms are of equal length, use the position closest to {texts['safe-guess'][servo]}˚.")
print("Carefully count how many spline positions you had to move the arm by to get it there.")
print("Multiply that by the number of degrees for each spline to get the angle by which you moved it.")
offset = float(input("Enter the angle by which you moved the arm (anti-clockwise is negative): "))
print(f"---------------------------")
print(f"Calculated angles {texts['arm-name'][servo]} arm")
print(f"---------------------------")
print(f" angle | pulse-width ")
print(f"----------+----------------")
servo_angle_including_offset_pws = []
for [angle, pw] in servo_angle_pws:
angle_including_offset = round(angle + offset, 1)
servo_angle_including_offset_pws.append([angle_including_offset, pw])
print(f" {angle:>6.1f} | {pw:>4.0f}")
print()
print("Use this list of angles and pulse-widths in your BrachioGraph definition:")
print()
print(f"servo_{servo}_angle_pws={servo_angle_including_offset_pws}")
# ----------------- manual driving methods -----------------
def drive(self):
# adjust the pulse-widths using the keyboard
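# Key bindings (summarising the branches below): 0 exits; a/s adjust servo 1 by -10/+10µs and
# A/S by -2/+2µs; k/l adjust servo 2 by -10/+10µs and K/L by -2/+2µs.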
pw_1, pw_2 = self.get_pulse_widths()
self.set_pulse_widths(pw_1, pw_2)
while True:
key = readchar.readchar()
if key == "0":
return
elif key=="a":
pw_1 = pw_1 - 10
elif key=="s":
pw_1 = pw_1 + 10
elif key=="A":
pw_1 = pw_1 - 2
elif key=="S":
pw_1 = pw_1 + 2
elif key=="k":
pw_2 = pw_2 - 10
elif key=="l":
pw_2 = pw_2 + 10
elif key=="K":
pw_2 = pw_2 - 2
elif key=="L":
pw_2 = pw_2 + 2
print(pw_1, pw_2)
self.set_pulse_widths(pw_1, pw_2)
def drive_xy(self):
# move the pen up/down and left/right using the keyboard
while True:
key = readchar.readchar()
if key == "0":
return
elif key=="a":
self.x = self.x - 1
elif key=="s":
self.x = self.x + 1
elif key=="A":
self.x = self.x - .1
elif key=="S":
self.x = self.x + .1
elif key=="k":
self.y = self.y - 1
elif key=="l":
self.y = self.y + 1
elif key=="K":
self.y = self.y - .1
elif key=="L":
self.y = self.y + .1
print(self.x, self.y)
self.xy(self.x, self.y)
# ----------------- reporting methods -----------------
def status(self):
print("------------------------------------------")
print(" | Servo 1 | Servo 2 ")
print(" | Shoulder| Elbow ")
print("----------------------|---------|---------")
pw_1, pw_2 = self.get_pulse_widths()
print(f"{'pulse-width |':>23}", f"{pw_1:>7.0f}", "|", f"{pw_2:>7.0f}")
angle_1, angle_2 = self.angle_1, self.angle_2
print(f"{'angle |':>23}", f"{angle_1:>7.0f}", "|", f"{angle_2:>7.0f}")
h1, h2 = self.hysteresis_correction_1, self.hysteresis_correction_2
print(f"{'hysteresis correction |':>23}", f"{h1:>7.1f}", "|", f"{h2:>7.1f}")
print("------------------------------------------")
print(f"{'x/y location |':>23}", f"{self.x:>7.1f}", "|", f"{self.y:>7.1f}")
print()
print("------------------------------------------")
print("pen:", self.pen.position)
bl = self.bounds[0], self.bounds[1]
tr = self.bounds[2], self.bounds[3]
print("------------------------------------------")
print("bottom left:", bl, "top right:", tr)
print("------------------------------------------")
def report(self):
print(f" -----------------|-----------------")
print(f" Servo 1 | Servo 2 ")
print(f" -----------------|-----------------")
h1, h2 = self.hysteresis_correction_1, self.hysteresis_correction_2
print(f"hysteresis {h1:>2.1f} | {h2:>2.1f}")
pw_1, pw_2 = self.get_pulse_widths()
print(f"pulse-width {pw_1:<4.0f} | {pw_2:<4.0f}")
angle_1, angle_2 = self.angle_1, self.angle_2
if angle_1 and angle_2:
print(f" angle {angle_1:>4.0f} | {angle_2:>4.0f}")
print(f" -----------------|-----------------")
print(f" min max mid | min max mid")
print(f" -----------------|-----------------")
if self.angles_used_1 and self.angles_used_2 and self.pulse_widths_used_1 and self.pulse_widths_used_2:
min1 = min(self.pulse_widths_used_1)
max1 = max(self.pulse_widths_used_1)
mid1 = (min1 + max1) / 2
min2 = min(self.pulse_widths_used_2)
max2 = max(self.pulse_widths_used_2)
mid2 = (min2 + max2) / 2
print(f"pulse-widths {min1:>4.0f} {max1:>4.0f} {mid1:>4.0f} | {min2:>4.0f} {max2:>4.0f} {mid2:>4.0f}")
min1 = min(self.angles_used_1)
max1 = max(self.angles_used_1)
mid1 = (min1 + max1) / 2
min2 = min(self.angles_used_2)
max2 = max(self.angles_used_2)
mid2 = (min2 + max2) / 2
print(f" angles {min1:>4.0f} {max1:>4.0f} {mid1:>4.0f} | {min2:>4.0f} {max2:>4.0f} {mid2:>4.0f}")
else:
print("No data recorded yet. Try calling the BrachioGraph.box() method first.")
def reset_report(self):
self.angle_1 = self.angle_2 = None
# Create sets for recording movement of the plotter.
self.angles_used_1 = set()
self.angles_used_2 = set()
self.pulse_widths_used_1 = set()
self.pulse_widths_used_2 = set()
@property
def bl(self):
return (self.bounds[0], self.bounds[1])
@property
def tl(self):
return (self.bounds[0], self.bounds[3])
@property
def tr(self):
return (self.bounds[2], self.bounds[3])
@property
def br(self):
return (self.bounds[2], self.bounds[1])
def reset_turtle(self):
self.turtle = BrachioGraphTurtle(
inner_arm=self.inner_arm, # the length of the inner arm (blue)
shoulder_centre_angle=-90, # the starting angle of the inner arm, relative to straight ahead
shoulder_sweep=180, # the arc covered by the shoulder motor
outer_arm=self.outer_arm, # the length of the outer arm (red)
elbow_centre_angle=90, # the centre of the outer arm relative to the inner arm
elbow_sweep=180, # the arc covered by the elbow motor
window_size=800, # width and height of the turtle canvas
speed=0, # how fast to draw
)
self.turtle.draw_grid()
class Pen:
def __init__(self, bg, pw_up=1700, pw_down=1300, pin=18, transition_time=0.25, virtual=False):
self.bg = bg
self.pin = pin
self.pw_up = pw_up
self.pw_down = pw_down
self.transition_time = transition_time
self.virtual = virtual
if self.virtual:
print("Initialising virtual Pen")
else:
self.rpi = pigpio.pi()
self.rpi.set_PWM_frequency(self.pin, 50)
self.up()
sleep(0.3)
self.down()
sleep(0.3)
self.up()
sleep(0.3)
def down(self):
if self.virtual:
self.virtual_pw = self.pw_down
else:
self.rpi.set_servo_pulsewidth(self.pin, self.pw_down)
sleep(self.transition_time)
if self.bg.turtle:
self.bg.turtle.down()
self.bg.turtle.color('blue')
self.bg.turtle.width(1)
self.position = "down"
def up(self):
if self.virtual:
self.virtual_pw = self.pw_up
else:
self.rpi.set_servo_pulsewidth(self.pin, self.pw_up)
sleep(self.transition_time)
if self.bg.turtle:
self.bg.turtle.up()
self.position = "up"
# for convenience, a quick way to set pen motor pulse-widths
def pw(self, pulse_width):
if self.virtual:
self.virtual_pw = pulse_width
else:
self.rpi.set_servo_pulsewidth(self.pin, pulse_width)
def calibrate(self):
print(f"Calibrating the pen-lifting servo.")
print(f"See https://brachiograph.art/how-to/calibrate.html")
pw_1, pw_2 = self.bg.get_pulse_widths()
pw_3 = self.pw_up
while True:
self.bg.set_pulse_widths(pw_1, pw_2)
self.pw(pw_3)
key = readchar.readchar()
if key == "0":
break
elif key=="a":
pw_1 = pw_1 - 10
continue
elif key=="s":
pw_1 = pw_1 + 10
continue
elif key=="k":
pw_2 = pw_2 - 10
continue
elif key=="l":
pw_2 = pw_2 + 10
continue
elif key=="t":
if pw_3 == self.pw_up:
pw_3 = self.pw_down
else:
pw_3 = self.pw_up
continue
elif key=="z":
pw_3 = pw_3 - 10
print(pw_3)
continue
elif key=="x":
pw_3 = pw_3 + 10
print(pw_3)
continue
elif key=="u":
self.pw_up = pw_3
elif key=="d":
self.pw_down = pw_3
else:
continue
mid = (self.pw_up + self.pw_down) / 2
print(f"Pen-up pulse-width: {self.pw_up}µS, pen-down pulse-width: {self.pw_down}µS, mid-point: {mid}")
print()
print("Use these values in your BrachioGraph definition:")
print()
print(f"pen_up={self.pw_up}, pen_down={self.pw_down}")
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import logging
from dataclasses import dataclass
from typing import Iterable
from pants.backend.python.goals import lockfile
from pants.backend.python.goals.export import ExportPythonTool, ExportPythonToolSentinel
from pants.backend.python.goals.lockfile import GeneratePythonLockfile
from pants.backend.python.subsystems.python_tool_base import PythonToolBase
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
ConsoleScript,
PythonRequirementsField,
PythonSourceField,
)
from pants.backend.python.typecheck.mypy.skip_field import SkipMyPyField
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex_requirements import PexRequirements
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.fs import EMPTY_DIGEST, Digest, DigestContents, FileContent
from pants.engine.rules import Get, MultiGet, collect_rules, rule, rule_helper
from pants.engine.target import (
AllTargets,
AllTargetsRequest,
FieldSet,
Target,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionRule
from pants.option.option_types import (
ArgsListOption,
BoolOption,
FileOption,
SkipOption,
StrListOption,
TargetListOption,
)
from pants.util.docutil import doc_url, git_url
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class MyPyFieldSet(FieldSet):
required_fields = (PythonSourceField,)
sources: PythonSourceField
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipMyPyField).value
# --------------------------------------------------------------------------------------
# Subsystem
# --------------------------------------------------------------------------------------
class MyPy(PythonToolBase):
options_scope = "mypy"
name = "MyPy"
help = "The MyPy Python type checker (http://mypy-lang.org/)."
default_version = "mypy==0.910"
default_main = ConsoleScript("mypy")
# See `mypy/rules.py`. We only use these default constraints in some situations.
register_interpreter_constraints = True
default_interpreter_constraints = ["CPython>=3.7,<4"]
register_lockfile = True
default_lockfile_resource = ("pants.backend.python.typecheck.mypy", "mypy.lock")
default_lockfile_path = "src/python/pants/backend/python/typecheck/mypy/mypy.lock"
default_lockfile_url = git_url(default_lockfile_path)
uses_requirements_from_source_plugins = True
skip = SkipOption("check")
args = ArgsListOption(example="--python-version 3.7 --disallow-any-expr")
config = FileOption(
"--config",
default=None,
advanced=True,
help=lambda cls: softwrap(
f"""
Path to a config file understood by MyPy
(https://mypy.readthedocs.io/en/stable/config_file.html).
Setting this option will disable `[{cls.options_scope}].config_discovery`. Use
this option if the config is located in a non-standard location.
"""
),
)
config_discovery = BoolOption(
"--config-discovery",
default=True,
advanced=True,
help=lambda cls: softwrap(
f"""
If true, Pants will include any relevant config files during runs
(`mypy.ini`, `.mypy.ini`, and `setup.cfg`).
Use `[{cls.options_scope}].config` instead if your config is in a non-standard location.
"""
),
)
_source_plugins = TargetListOption(
"--source-plugins",
advanced=True,
help=softwrap(
"""
An optional list of `python_sources` target addresses to load first-party plugins.
You must also set `plugins = path.to.module` in your `mypy.ini`, and
set the `[mypy].config` option in your `pants.toml`.
To instead load third-party plugins, set the option `[mypy].extra_requirements`
and set the `plugins` option in `mypy.ini`.
Tip: it's often helpful to define a dedicated 'resolve' via
`[python].resolves` for your MyPy plugins such as 'mypy-plugins'
so that the third-party requirements used by your plugin, like `mypy`, do not
mix with the rest of your project. Read that option's help message for more info
on resolves.
"""
),
)
extra_type_stubs = StrListOption(
"--extra-type-stubs",
advanced=True,
help=softwrap(
"""
Extra type stub requirements to install when running MyPy.
Normally, type stubs can be installed as typical requirements, such as putting
them in `requirements.txt` or using a `python_requirement` target.
Alternatively, you can use this option so that the dependencies are solely
used when running MyPy and are not runtime dependencies.
Expects a list of pip-style requirement strings, like
`['types-requests==2.25.9']`.
"""
),
)
@property
def config_request(self) -> ConfigFilesRequest:
# Refer to https://mypy.readthedocs.io/en/stable/config_file.html.
return ConfigFilesRequest(
specified=self.config,
specified_option_name=f"{self.options_scope}.config",
discovery=self.config_discovery,
check_existence=["mypy.ini", ".mypy.ini"],
check_content={"setup.cfg": b"[mypy", "pyproject.toml": b"[tool.mypy"},
)
@property
def source_plugins(self) -> UnparsedAddressInputs:
return UnparsedAddressInputs(self._source_plugins, owning_address=None)
def check_and_warn_if_python_version_configured(self, config: FileContent | None) -> bool:
"""Determine if we can dynamically set `--python-version` and warn if not."""
configured = []
if config and b"python_version" in config.content:
configured.append(
f"`python_version` in {config.path} (which is used because of either config "
"discovery or the `[mypy].config` option)"
)
if "--py2" in self.args:
configured.append("`--py2` in the `--mypy-args` option")
if any(arg.startswith("--python-version") for arg in self.args):
configured.append("`--python-version` in the `--mypy-args` option")
if configured:
formatted_configured = " and you set ".join(configured)
logger.warning(
f"You set {formatted_configured}. Normally, Pants would automatically set this "
"for you based on your code's interpreter constraints "
f"({doc_url("python-interpreter-compatibility")}). Instead, it will "
"use what you set.\n\n"
"(Automatically setting the option allows Pants to partition your targets by their "
"constraints, so that, for example, you can run MyPy on Python 2-only code and "
"Python 3-only code at the same time. This feature may no longer work.)"
)
return bool(configured)
# --------------------------------------------------------------------------------------
# Config files
# --------------------------------------------------------------------------------------
@dataclass(frozen=True)
class MyPyConfigFile:
digest: Digest
_python_version_configured: bool
def python_version_to_autoset(
self, interpreter_constraints: InterpreterConstraints, interpreter_universe: Iterable[str]
) -> str | None:
"""If the user did not already set `--python-version`, return the major.minor version to
use."""
if self._python_version_configured:
return None
return interpreter_constraints.minimum_python_version(interpreter_universe)
@rule
async def setup_mypy_config(mypy: MyPy) -> MyPyConfigFile:
config_files = await Get(ConfigFiles, ConfigFilesRequest, mypy.config_request)
digest_contents = await Get(DigestContents, Digest, config_files.snapshot.digest)
python_version_configured = mypy.check_and_warn_if_python_version_configured(
digest_contents[0] if digest_contents else None
)
return MyPyConfigFile(config_files.snapshot.digest, python_version_configured)
# --------------------------------------------------------------------------------------
# First party plugins
# --------------------------------------------------------------------------------------
@dataclass(frozen=True)
class MyPyFirstPartyPlugins:
requirement_strings: FrozenOrderedSet[str]
sources_digest: Digest
source_roots: tuple[str, ...]
@rule("Prepare [mypy].source_plugins", level=LogLevel.DEBUG)
async def mypy_first_party_plugins(
mypy: MyPy,
) -> MyPyFirstPartyPlugins:
if not mypy.source_plugins:
return MyPyFirstPartyPlugins(FrozenOrderedSet(), EMPTY_DIGEST, ())
plugin_target_addresses = await Get(Addresses, UnparsedAddressInputs, mypy.source_plugins)
transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest(plugin_target_addresses)
)
requirements = PexRequirements.create_from_requirement_fields(
(
plugin_tgt[PythonRequirementsField]
for plugin_tgt in transitive_targets.closure
if plugin_tgt.has_field(PythonRequirementsField)
),
constraints_strings=(),
)
sources = await Get(PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure))
return MyPyFirstPartyPlugins(
requirement_strings=requirements.req_strings,
sources_digest=sources.source_files.snapshot.digest,
source_roots=sources.source_roots,
)
# --------------------------------------------------------------------------------------
# Interpreter constraints
# --------------------------------------------------------------------------------------
@rule_helper
async def _mypy_interpreter_constraints(
mypy: MyPy, python_setup: PythonSetup
) -> InterpreterConstraints:
constraints = mypy.interpreter_constraints
if mypy.options.is_default("interpreter_constraints"):
all_tgts = await Get(AllTargets, AllTargetsRequest())
all_transitive_targets = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest([tgt.address]))
for tgt in all_tgts
if MyPyFieldSet.is_applicable(tgt)
)
unique_constraints = {
InterpreterConstraints.create_from_targets(transitive_targets.closure, python_setup)
for transitive_targets in all_transitive_targets
}
code_constraints = InterpreterConstraints(itertools.chain.from_iterable(unique_constraints))
if code_constraints.requires_python38_or_newer(python_setup.interpreter_universe):
constraints = code_constraints
return constraints
# --------------------------------------------------------------------------------------
# Lockfile
# --------------------------------------------------------------------------------------
class MyPyLockfileSentinel(GenerateToolLockfileSentinel):
resolve_name = MyPy.options_scope
@rule(
desc="Determine MyPy interpreter constraints (for lockfile generation)",
level=LogLevel.DEBUG,
)
async def setup_mypy_lockfile(
_: MyPyLockfileSentinel,
first_party_plugins: MyPyFirstPartyPlugins,
mypy: MyPy,
python_setup: PythonSetup,
) -> GeneratePythonLockfile:
if not mypy.uses_custom_lockfile:
return GeneratePythonLockfile.from_tool(
mypy, use_pex=python_setup.generate_lockfiles_with_pex
)
constraints = await _mypy_interpreter_constraints(mypy, python_setup)
return GeneratePythonLockfile.from_tool(
mypy,
constraints,
extra_requirements=first_party_plugins.requirement_strings,
use_pex=python_setup.generate_lockfiles_with_pex,
)
# --------------------------------------------------------------------------------------
# Export
# --------------------------------------------------------------------------------------
class MyPyExportSentinel(ExportPythonToolSentinel):
pass
@rule(desc="Determine MyPy interpreter constraints (for `export` goal)", level=LogLevel.DEBUG)
async def mypy_export(
_: MyPyExportSentinel,
mypy: MyPy,
python_setup: PythonSetup,
first_party_plugins: MyPyFirstPartyPlugins,
) -> ExportPythonTool:
constraints = await _mypy_interpreter_constraints(mypy, python_setup)
return ExportPythonTool(
resolve_name=mypy.options_scope,
pex_request=mypy.to_pex_request(
interpreter_constraints=constraints,
extra_requirements=first_party_plugins.requirement_strings,
),
)
def rules():
return (
*collect_rules(),
*lockfile.rules(),
UnionRule(GenerateToolLockfileSentinel, MyPyLockfileSentinel),
UnionRule(ExportPythonToolSentinel, MyPyExportSentinel),
)
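# Illustrative pants.toml sketch (the option names come from the MyPy subsystem above; the values
# are hypothetical):
#     [mypy]
#     args = ["--disallow-any-expr"]
#     config = "build-support/mypy.ini"
#     extra_type_stubs = ["types-requests==2.25.9"]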
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import logging
from dataclasses import dataclass
from typing import Iterable
from pants.backend.python.goals import lockfile
from pants.backend.python.goals.export import ExportPythonTool, ExportPythonToolSentinel
from pants.backend.python.goals.lockfile import GeneratePythonLockfile
from pants.backend.python.subsystems.python_tool_base import PythonToolBase
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
ConsoleScript,
PythonRequirementsField,
PythonSourceField,
)
from pants.backend.python.typecheck.mypy.skip_field import SkipMyPyField
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex_requirements import PexRequirements
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.fs import EMPTY_DIGEST, Digest, DigestContents, FileContent
from pants.engine.rules import Get, MultiGet, collect_rules, rule, rule_helper
from pants.engine.target import (
AllTargets,
AllTargetsRequest,
FieldSet,
Target,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionRule
from pants.option.option_types import (
ArgsListOption,
BoolOption,
FileOption,
SkipOption,
StrListOption,
TargetListOption,
)
from pants.util.docutil import doc_url, git_url
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class MyPyFieldSet(FieldSet):
required_fields = (PythonSourceField,)
sources: PythonSourceField
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipMyPyField).value
# --------------------------------------------------------------------------------------
# Subsystem
# --------------------------------------------------------------------------------------
class MyPy(PythonToolBase):
options_scope = "mypy"
name = "MyPy"
help = "The MyPy Python type checker (http://mypy-lang.org/)."
default_version = "mypy==0.910"
default_main = ConsoleScript("mypy")
# See `mypy/rules.py`. We only use these default constraints in some situations.
register_interpreter_constraints = True
default_interpreter_constraints = ["CPython>=3.7,<4"]
register_lockfile = True
default_lockfile_resource = ("pants.backend.python.typecheck.mypy", "mypy.lock")
default_lockfile_path = "src/python/pants/backend/python/typecheck/mypy/mypy.lock"
default_lockfile_url = git_url(default_lockfile_path)
uses_requirements_from_source_plugins = True
skip = SkipOption("check")
args = ArgsListOption(example="--python-version 3.7 --disallow-any-expr")
config = FileOption(
"--config",
default=None,
advanced=True,
help=lambda cls: softwrap(
f"""
Path to a config file understood by MyPy
(https://mypy.readthedocs.io/en/stable/config_file.html).
Setting this option will disable `[{cls.options_scope}].config_discovery`. Use
this option if the config is located in a non-standard location.
"""
),
)
config_discovery = BoolOption(
"--config-discovery",
default=True,
advanced=True,
help=lambda cls: softwrap(
f"""
If true, Pants will include any relevant config files during runs
(`mypy.ini`, `.mypy.ini`, and `setup.cfg`).
Use `[{cls.options_scope}].config` instead if your config is in a non-standard location.
"""
),
)
_source_plugins = TargetListOption(
"--source-plugins",
advanced=True,
help=softwrap(
"""
An optional list of `python_sources` target addresses to load first-party plugins.
You must also set `plugins = path.to.module` in your `mypy.ini`, and
set the `[mypy].config` option in your `pants.toml`.
To instead load third-party plugins, set the option `[mypy].extra_requirements`
and set the `plugins` option in `mypy.ini`.
Tip: it's often helpful to define a dedicated 'resolve' via
`[python].resolves` for your MyPy plugins such as 'mypy-plugins'
so that the third-party requirements used by your plugin, like `mypy`, do not
mix with the rest of your project. Read that option's help message for more info
on resolves.
"""
),
)
extra_type_stubs = StrListOption(
"--extra-type-stubs",
advanced=True,
help=softwrap(
"""
Extra type stub requirements to install when running MyPy.
Normally, type stubs can be installed as typical requirements, such as putting
them in `requirements.txt` or using a `python_requirement` target.
Alternatively, you can use this option so that the dependencies are solely
used when running MyPy and are not runtime dependencies.
Expects a list of pip-style requirement strings, like
`['types-requests==2.25.9']`.
"""
),
)
@property
def config_request(self) -> ConfigFilesRequest:
# Refer to https://mypy.readthedocs.io/en/stable/config_file.html.
return ConfigFilesRequest(
specified=self.config,
specified_option_name=f"{self.options_scope}.config",
discovery=self.config_discovery,
check_existence=["mypy.ini", ".mypy.ini"],
check_content={"setup.cfg": b"[mypy", "pyproject.toml": b"[tool.mypy"},
)
@property
def source_plugins(self) -> UnparsedAddressInputs:
return UnparsedAddressInputs(self._source_plugins, owning_address=None)
def check_and_warn_if_python_version_configured(self, config: FileContent | None) -> bool:
"""Determine if we can dynamically set `--python-version` and warn if not."""
configured = []
if config and b"python_version" in config.content:
configured.append(
f"`python_version` in {config.path} (which is used because of either config "
"discovery or the `[mypy].config` option)"
)
if "--py2" in self.args:
configured.append("`--py2` in the `--mypy-args` option")
if any(arg.startswith("--python-version") for arg in self.args):
configured.append("`--python-version` in the `--mypy-args` option")
if configured:
formatted_configured = " and you set ".join(configured)
logger.warning(
f"You set {formatted_configured}. Normally, Pants would automatically set this "
"for you based on your code's interpreter constraints "
f"({doc_url('python-interpreter-compatibility')}). Instead, it will "
"use what you set.\n\n"
"(Automatically setting the option allows Pants to partition your targets by their "
"constraints, so that, for example, you can run MyPy on Python 2-only code and "
"Python 3-only code at the same time. This feature may no longer work.)"
)
return bool(configured)
# --------------------------------------------------------------------------------------
# Config files
# --------------------------------------------------------------------------------------
@dataclass(frozen=True)
class MyPyConfigFile:
digest: Digest
_python_version_configured: bool
def python_version_to_autoset(
self, interpreter_constraints: InterpreterConstraints, interpreter_universe: Iterable[str]
) -> str | None:
"""If the user did not already set `--python-version`, return the major.minor version to
use."""
if self._python_version_configured:
return None
return interpreter_constraints.minimum_python_version(interpreter_universe)
@rule
async def setup_mypy_config(mypy: MyPy) -> MyPyConfigFile:
config_files = await Get(ConfigFiles, ConfigFilesRequest, mypy.config_request)
digest_contents = await Get(DigestContents, Digest, config_files.snapshot.digest)
python_version_configured = mypy.check_and_warn_if_python_version_configured(
digest_contents[0] if digest_contents else None
)
return MyPyConfigFile(config_files.snapshot.digest, python_version_configured)
# --------------------------------------------------------------------------------------
# First party plugins
# --------------------------------------------------------------------------------------
@dataclass(frozen=True)
class MyPyFirstPartyPlugins:
requirement_strings: FrozenOrderedSet[str]
sources_digest: Digest
source_roots: tuple[str, ...]
@rule("Prepare [mypy].source_plugins", level=LogLevel.DEBUG)
async def mypy_first_party_plugins(
mypy: MyPy,
) -> MyPyFirstPartyPlugins:
if not mypy.source_plugins:
return MyPyFirstPartyPlugins(FrozenOrderedSet(), EMPTY_DIGEST, ())
plugin_target_addresses = await Get(Addresses, UnparsedAddressInputs, mypy.source_plugins)
transitive_targets = await Get(
TransitiveTargets, TransitiveTargetsRequest(plugin_target_addresses)
)
requirements = PexRequirements.create_from_requirement_fields(
(
plugin_tgt[PythonRequirementsField]
for plugin_tgt in transitive_targets.closure
if plugin_tgt.has_field(PythonRequirementsField)
),
constraints_strings=(),
)
sources = await Get(PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure))
return MyPyFirstPartyPlugins(
requirement_strings=requirements.req_strings,
sources_digest=sources.source_files.snapshot.digest,
source_roots=sources.source_roots,
)
# --------------------------------------------------------------------------------------
# Interpreter constraints
# --------------------------------------------------------------------------------------
@rule_helper
async def _mypy_interpreter_constraints(
mypy: MyPy, python_setup: PythonSetup
) -> InterpreterConstraints:
constraints = mypy.interpreter_constraints
if mypy.options.is_default("interpreter_constraints"):
all_tgts = await Get(AllTargets, AllTargetsRequest())
all_transitive_targets = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest([tgt.address]))
for tgt in all_tgts
if MyPyFieldSet.is_applicable(tgt)
)
unique_constraints = {
InterpreterConstraints.create_from_targets(transitive_targets.closure, python_setup)
for transitive_targets in all_transitive_targets
}
code_constraints = InterpreterConstraints(itertools.chain.from_iterable(unique_constraints))
if code_constraints.requires_python38_or_newer(python_setup.interpreter_universe):
constraints = code_constraints
return constraints
# --------------------------------------------------------------------------------------
# Lockfile
# --------------------------------------------------------------------------------------
class MyPyLockfileSentinel(GenerateToolLockfileSentinel):
resolve_name = MyPy.options_scope
@rule(
desc="Determine MyPy interpreter constraints (for lockfile generation)",
level=LogLevel.DEBUG,
)
async def setup_mypy_lockfile(
_: MyPyLockfileSentinel,
first_party_plugins: MyPyFirstPartyPlugins,
mypy: MyPy,
python_setup: PythonSetup,
) -> GeneratePythonLockfile:
if not mypy.uses_custom_lockfile:
return GeneratePythonLockfile.from_tool(
mypy, use_pex=python_setup.generate_lockfiles_with_pex
)
constraints = await _mypy_interpreter_constraints(mypy, python_setup)
return GeneratePythonLockfile.from_tool(
mypy,
constraints,
extra_requirements=first_party_plugins.requirement_strings,
use_pex=python_setup.generate_lockfiles_with_pex,
)
# --------------------------------------------------------------------------------------
# Export
# --------------------------------------------------------------------------------------
class MyPyExportSentinel(ExportPythonToolSentinel):
pass
@rule(desc="Determine MyPy interpreter constraints (for `export` goal)", level=LogLevel.DEBUG)
async def mypy_export(
_: MyPyExportSentinel,
mypy: MyPy,
python_setup: PythonSetup,
first_party_plugins: MyPyFirstPartyPlugins,
) -> ExportPythonTool:
constraints = await _mypy_interpreter_constraints(mypy, python_setup)
return ExportPythonTool(
resolve_name=mypy.options_scope,
pex_request=mypy.to_pex_request(
interpreter_constraints=constraints,
extra_requirements=first_party_plugins.requirement_strings,
),
)
def rules():
return (
*collect_rules(),
*lockfile.rules(),
UnionRule(GenerateToolLockfileSentinel, MyPyLockfileSentinel),
UnionRule(ExportPythonToolSentinel, MyPyExportSentinel),
)
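# Registration sketch (assumption: this module is wired up as a Pants backend;
# the package and module names below are hypothetical):
#
#     # pants-plugins/my_backend/register.py
#     from my_backend import mypy_rules
#
#     def rules():
#         return [*mypy_rules.rules()]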
|
import click
from . import cli
from .params import project
from meltano.core.db import project_engine
from meltano.core.project import Project
from meltano.core.config_service import ConfigService
from meltano.core.plugin.settings_service import PluginSettingsService
@cli.group(invoke_without_command=True)
@click.argument("plugin_name")
@click.option("--format", default="json")
@project(migrate=True)
@click.pass_context
def config(ctx, project, plugin_name, format):
config = ConfigService(project)
plugin = config.find_plugin(plugin_name)
_, Session = project_engine(project)
session = Session()
settings = PluginSettingsService(project)
ctx.obj["settings"] = settings
ctx.obj["plugin"] = plugin
ctx.obj["session"] = session
if ctx.invoked_subcommand is None:
if format == "json":
print(settings.as_config(session, plugin))
if format == "env":
for env, value in settings.as_env(session, plugin).items():
print(f"{env}={value}")
@config.command()
@click.argument("setting_name")
@click.argument("value")
@click.pass_context
def set(ctx, setting_name, value):
settings = ctx.obj["settings"]
plugin = ctx.obj["plugin"]
session = ctx.obj["session"]
settings.set(session, plugin, setting_name, value)
@config.command()
@click.argument("setting_name")
@click.pass_context
def unset(ctx, setting_name):
settings = ctx.obj["settings"]
plugin = ctx.obj["plugin"]
session = ctx.obj["session"]
settings.unset(session, plugin, setting_name)
@config.command()
@click.pass_context
def reset(ctx):
settings = ctx.obj["settings"]
plugin = ctx.obj["plugin"]
session = ctx.obj["session"]
for setting in settings.definitions(plugin):
settings.unset(session, plugin, setting.name)
@config.command()
@click.pass_context
def list(ctx):
settings = ctx.obj["settings"]
plugin = ctx.obj["plugin"]
plugin_def = settings.get_definition(plugin)
for setting_def in settings.definitions(plugin):
env_key = settings.setting_env(setting_def, plugin_def)
        description_marker = (
            f": {setting_def['description']}" if setting_def.get("description") else ""
        )
        click.secho(f"{setting_def['name']} [{env_key}]{description_marker}")
|
import click
from . import cli
from .params import project
from meltano.core.db import project_engine
from meltano.core.project import Project
from meltano.core.config_service import ConfigService
from meltano.core.plugin.settings_service import PluginSettingsService
@cli.group(invoke_without_command=True)
@click.argument("plugin_name")
@click.option("--format", default="json")
@project(migrate=True)
@click.pass_context
def config(ctx, project, plugin_name, format):
config = ConfigService(project)
plugin = config.find_plugin(plugin_name)
_, Session = project_engine(project)
session = Session()
settings = PluginSettingsService(project)
ctx.obj["settings"] = settings
ctx.obj["plugin"] = plugin
ctx.obj["session"] = session
if ctx.invoked_subcommand is None:
if format == "json":
print(settings.as_config(session, plugin))
if format == "env":
for env, value in settings.as_env(session, plugin).items():
print(f"{env}={value}")
@config.command()
@click.argument("setting_name")
@click.argument("value")
@click.pass_context
def set(ctx, setting_name, value):
settings = ctx.obj["settings"]
plugin = ctx.obj["plugin"]
session = ctx.obj["session"]
settings.set(session, plugin, setting_name, value)
@config.command()
@click.argument("setting_name")
@click.pass_context
def unset(ctx, setting_name):
settings = ctx.obj["settings"]
plugin = ctx.obj["plugin"]
session = ctx.obj["session"]
settings.unset(session, plugin, setting_name)
@config.command()
@click.pass_context
def reset(ctx):
settings = ctx.obj["settings"]
plugin = ctx.obj["plugin"]
session = ctx.obj["session"]
for setting in settings.definitions(plugin):
settings.unset(session, plugin, setting.name)
@config.command()
@click.pass_context
def list(ctx):
settings = ctx.obj["settings"]
plugin = ctx.obj["plugin"]
plugin_def = settings.get_definition(plugin)
for setting_def in settings.definitions(plugin):
env_key = settings.setting_env(setting_def, plugin_def)
description_marker = (
f": {setting_def['description']}" if setting_def.get("description") else ""
)
click.secho(f"{setting_def['name']} [{env_key}]{description_marker}")
|
import argparse
import os
import collections
from os import system
import re
from collections import defaultdict
parser = argparse.ArgumentParser()
parser.add_argument(
    "-f",
    "--folder",
    default=".",
    help="Specifies which folder to build the navigable bat file for",
)
parser.add_argument(
    "-n",
    "--name",
    default="jump",
    help="Specifies the name of the bat file; otherwise the shortcuts are added to jump.bat",
)
parser.add_argument(
    "-r",
    "--recursive",
    default=0,
    help="Specifies whether navigation should recurse into all subfolders",
)
parser.add_argument(
    "-j",
    "--injump",
    default=0,
    help="Specifies whether to nest the navigation inside the jump command",
)
parser.add_argument(
    "-c",
    "--code",
    default=0,
    help="Specifies whether to add an 'open in VS Code' option for the base folder",
)
parser.add_argument(
    "-s",
    "--start",
    default=0,
    help="Specifies whether to add an 'open in file manager' option for the base folder",
)
args = parser.parse_args()
__name__ = args.name
_name = f"C:\\nable-bin\{args.name}.bat" # shotcut batch file
_folder = args.folder
_r = args.recursive
_code = int(args.code)
_jump = args.injump
_start = int(args.start)
_fPath = os.path.abspath(_folder)
__help__ = f""" if "%{2 if args.name == "jump" else 1:d}" == "-h" (\n \necho Shortcut for the Folder are\necho {"":=<70}"""
__taken__ = defaultdict(bool) # <-- name is taken
__initial__ = defaultdict(int)
"""
./ds/hon/stat
py
scikit
./ds/tf/dl/relu
conv
/ml/lr
/mr
/stat/
# --> ds .honstat ds ..py ds ..scikit ..dl ..ml .tfstat
# --> ds ...relu
# so for conflict resolution we expand the dot
"""
def check():
if _jump:
if __name__ == "jump":
raise Exception(
"Warning: Jump Flag raised but no name specified!\n----------> -j 1\nYou must specifie name option as\n----------> -j 1 -n XXXX"
)
"""---------------------------Code generator---------------------------"""
"""________________folder extraction _______________"""
def __explore(r: int) -> list:
if r:
for root, _, _ in os.walk(_folder):
if not root.startswith(".\."):
yield f"{os.getcwd()}\{root.lstrip(".")}".replace("\\\\", "\\")
else:
for entry in os.scandir(_folder):
if not entry.is_file() and not entry.name.startswith("."):
yield f"{os.getcwd()}\{entry.name}"
"""________________folder processing_______________"""
def __process(fPath: str):
_rel = os.path.relpath(fPath)
_lvl = __gtLvl(_rel)
_ret = ""
if _lvl == ".":
_ret = __gt(fPath)
elif _lvl < "..." and _lvl > ".":
_ret = __gt(fPath, 0)
return _ret
"""________________code gen_______________"""
def __gt(fPath: str, lvl1=True):
_name = __gtName(fPath)
_rel = os.path.relpath(fPath)
_lvl = __gtLvl(_rel) if not lvl1 else ""
_ret = f""" if "%{__gtScope():d}" == "{_lvl}{_name}" (
cd /d "{fPath}"
{__code(__gtScope()+1,fPath)}
{__start(__gtScope()+1,fPath)}
)
"""
global __help__
__help__ += f"\necho {_lvl}{_name:-<50}{_rel}"
return _ret
def __gtScope() -> int:
return 2 if args.name == "jump" else 1
def __gtLvl(relPath: str) -> str:
_l = len(relPath.split("\\"))
_lvl = ""
for _ in range(_l):
_lvl += "."
return _lvl
"""---------------------------Options---------------------------"""
"""________________code_______________"""
def __gtCode(path: str) -> str:
_ret = f""" code "{os.path.abspath(path)}"
"""
return _ret
def __code(scope: int, path: str = _fPath, rflg: int = 0) -> str:
_ret = ""
if _code == 1:
_ret = f""" if "%{scope}" == "c" (
cd /d "{path}"
{__gtCode(path)}
)
"""
_ret = _ret if not rflg == 1 else ""
elif _code == 2:
_ret = __gtCode(path)
_ret = _ret if not rflg == 2 else ""
return _ret
"""________________start_______________"""
def __gtStart(path: str) -> str:
_ret = f""" start .
"""
return _ret
def __start(scope: int, path: str = _fPath, rflg=0) -> str:
_ret = ""
if _start == 1:
_ret = f""" if "%{scope}" == "s" (
cd /d "{path}"
{__gtStart(path)}
)
"""
_ret = _ret if not rflg == 1 else ""
elif _start == 2:
_ret = __gtStart(path)
_ret = _ret if not rflg == 2 else ""
return _ret
"""---------------------------Process name---------------------------"""
def __gtName(relPath: str) -> str:
global __taken__
_items = relPath.rsplit("\\", 1)
name = _items[1] if (len(_items) > 1) else relPath
_commons = defaultdict(str)
for k, v in {
"argument": "arg",
"git": "git",
"arguments": "args",
"basics": "bsc",
"utilities": "utils",
"fuctions": "funcs",
"database": "db",
"numpy": "np",
"io": "io",
"string": "str",
"function": "fucn",
"utility": "util",
"oop": "oop",
"networking": "net",
}.items():
_commons[k] = v
name = name.lstrip(".")
_ret = _commons[name.lower()]
_pat = r"^[A-Za-z0-9]|\s[A-Za-z]|[A-Z]|[0-9]{,2}$|_[a-zA-Z]"
_mtchs = re.findall(_pat, name)
_ret = (
"".join(_mtchs[:5]).replace(" ", "").replace("_", "").lower()
if _ret == ""
else _ret
)
""" ___Note:For Name collision___ """
if __taken__[_ret]:
__initial__[_ret] += 1
_ret = f"{_ret}{__initial__[_ret]}"
else:
__taken__[_ret] = True
return _ret
"""---------------------------Final draft---------------------------"""
"""________________basics root folder and option_______________"""
def __bsc_op(iflg: int = 0) -> str:
_ret = ""
_ret += f"""
cd /d "{_fPath}"
{__start(2, _fPath, 1)}
{__code(2, _fPath, 1)}
"""
_ret = (
f""" if "%2" == "" (
{_ret}
) """
if iflg
else f"""
if "%1" == "" (
{_ret}
)
"""
)
_ret = f"""
{_ret}
{__start(1, _fPath, 2)}
{__code(1, _fPath, 2)}
"""
return _ret
def __gtWrite(_data: str):
global __help__
_write = ""
if __name__ == "jump":
_write += f"""
\n\n
\rREM {'':-<30} {__gtName(_fPath)} {'':-<30}
if "%1" == "-{__gtName(_fPath) if not _jump else __name__ }" (
{__bsc_op(1)}
{_data}
{__help__}
)
"""
else:
_write += f"""
\r@echo off\n
{__bsc_op()}
{_data}
{__help__}
"""
return _write
"""---------------------------Execution code---------------------------"""
def __cached(data: str) -> None:
if os.path.exists(_name) and __name__ == "jump":
with open(_name, "a") as file:
file.write(data)
else:
with open(_name, "w") as file:
file.write(data)
def __main__():
check()
_data = ""
global __main__
global __help__
for pth in __explore(_r):
_data += __process(pth) + "\n"
__help__ += f"\necho {"":=<70} \n)"
__cached(__gtWrite(_data))
try:
__main__()
except Exception as e:
print(e)
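# Hypothetical invocation sketch (assumes this script is saved as make_jump.py and
# that C:\nable-bin is on PATH):
#
#     python make_jump.py -f C:\projects -n work -r 1 -c 1 -s 1
#
# This writes C:\nable-bin\work.bat with one shortcut argument per sub-folder plus
# "c" (open in VS Code) and "s" (open in Explorer) options.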
|
import argparse
import os
import collections
from os import system
import re
from collections import defaultdict
parser = argparse.ArgumentParser()
parser.add_argument(
    "-f",
    "--folder",
    default=".",
    help="Specifies which folder to build the navigable bat file for",
)
parser.add_argument(
    "-n",
    "--name",
    default="jump",
    help="Specifies the name of the bat file; otherwise the shortcuts are added to jump.bat",
)
parser.add_argument(
    "-r",
    "--recursive",
    default=0,
    help="Specifies whether navigation should recurse into all subfolders",
)
parser.add_argument(
    "-j",
    "--injump",
    default=0,
    help="Specifies whether to nest the navigation inside the jump command",
)
parser.add_argument(
    "-c",
    "--code",
    default=0,
    help="Specifies whether to add an 'open in VS Code' option for the base folder",
)
parser.add_argument(
    "-s",
    "--start",
    default=0,
    help="Specifies whether to add an 'open in file manager' option for the base folder",
)
args = parser.parse_args()
__name__ = args.name
_name = f"C:\\nable-bin\{args.name}.bat" # shotcut batch file
_folder = args.folder
_r = args.recursive
_code = int(args.code)
_jump = args.injump
_start = int(args.start)
_fPath = os.path.abspath(_folder)
__help__ = f""" if "%{2 if args.name == "jump" else 1:d}" == "-h" (\n \necho Shortcut for the Folder are\necho {"":=<70}"""
__taken__ = defaultdict(bool) # <-- name is taken
__initial__ = defaultdict(int)
"""
./ds/hon/stat
py
scikit
./ds/tf/dl/relu
conv
/ml/lr
/mr
/stat/
# --> ds .honstat ds ..py ds ..scikit ..dl ..ml .tfstat
# --> ds ...relu
# so for conflict resolution we expand the dot
"""
def check():
if _jump:
if __name__ == "jump":
raise Exception(
"Warning: Jump Flag raised but no name specified!\n----------> -j 1\nYou must specifie name option as\n----------> -j 1 -n XXXX"
)
"""---------------------------Code generator---------------------------"""
"""________________folder extraction _______________"""
def __explore(r: int) -> list:
if r:
for root, _, _ in os.walk(_folder):
if not root.startswith(".\."):
yield f"{os.getcwd()}\{root.lstrip('.')}".replace("\\\\", "\\")
else:
for entry in os.scandir(_folder):
if not entry.is_file() and not entry.name.startswith("."):
yield f"{os.getcwd()}\{entry.name}"
"""________________folder processing_______________"""
def __process(fPath: str):
_rel = os.path.relpath(fPath)
_lvl = __gtLvl(_rel)
_ret = ""
if _lvl == ".":
_ret = __gt(fPath)
elif _lvl < "..." and _lvl > ".":
_ret = __gt(fPath, 0)
return _ret
"""________________code gen_______________"""
def __gt(fPath: str, lvl1=True):
_name = __gtName(fPath)
_rel = os.path.relpath(fPath)
_lvl = __gtLvl(_rel) if not lvl1 else ""
_ret = f""" if "%{__gtScope():d}" == "{_lvl}{_name}" (
cd /d "{fPath}"
{__code(__gtScope()+1,fPath)}
{__start(__gtScope()+1,fPath)}
)
"""
global __help__
__help__ += f"\necho {_lvl}{_name:-<50}{_rel}"
return _ret
def __gtScope() -> int:
return 2 if args.name == "jump" else 1
def __gtLvl(relPath: str) -> str:
_l = len(relPath.split("\\"))
_lvl = ""
for _ in range(_l):
_lvl += "."
return _lvl
"""---------------------------Options---------------------------"""
"""________________code_______________"""
def __gtCode(path: str) -> str:
_ret = f""" code "{os.path.abspath(path)}"
"""
return _ret
def __code(scope: int, path: str = _fPath, rflg: int = 0) -> str:
_ret = ""
if _code == 1:
_ret = f""" if "%{scope}" == "c" (
cd /d "{path}"
{__gtCode(path)}
)
"""
_ret = _ret if not rflg == 1 else ""
elif _code == 2:
_ret = __gtCode(path)
_ret = _ret if not rflg == 2 else ""
return _ret
"""________________start_______________"""
def __gtStart(path: str) -> str:
_ret = f""" start .
"""
return _ret
def __start(scope: int, path: str = _fPath, rflg=0) -> str:
_ret = ""
if _start == 1:
_ret = f""" if "%{scope}" == "s" (
cd /d "{path}"
{__gtStart(path)}
)
"""
_ret = _ret if not rflg == 1 else ""
elif _start == 2:
_ret = __gtStart(path)
_ret = _ret if not rflg == 2 else ""
return _ret
"""---------------------------Process name---------------------------"""
def __gtName(relPath: str) -> str:
global __taken__
_items = relPath.rsplit("\\", 1)
name = _items[1] if (len(_items) > 1) else relPath
_commons = defaultdict(str)
for k, v in {
"argument": "arg",
"git": "git",
"arguments": "args",
"basics": "bsc",
"utilities": "utils",
"fuctions": "funcs",
"database": "db",
"numpy": "np",
"io": "io",
"string": "str",
"function": "fucn",
"utility": "util",
"oop": "oop",
"networking": "net",
}.items():
_commons[k] = v
name = name.lstrip(".")
_ret = _commons[name.lower()]
_pat = r"^[A-Za-z0-9]|\s[A-Za-z]|[A-Z]|[0-9]{,2}$|_[a-zA-Z]"
_mtchs = re.findall(_pat, name)
_ret = (
"".join(_mtchs[:5]).replace(" ", "").replace("_", "").lower()
if _ret == ""
else _ret
)
""" ___Note:For Name collision___ """
if __taken__[_ret]:
__initial__[_ret] += 1
_ret = f"{_ret}{__initial__[_ret]}"
else:
__taken__[_ret] = True
return _ret
"""---------------------------Final draft---------------------------"""
"""________________basics root folder and option_______________"""
def __bsc_op(iflg: int = 0) -> str:
_ret = ""
_ret += f"""
cd /d "{_fPath}"
{__start(2, _fPath, 1)}
{__code(2, _fPath, 1)}
"""
_ret = (
f""" if "%2" == "" (
{_ret}
) """
if iflg
else f"""
if "%1" == "" (
{_ret}
)
"""
)
_ret = f"""
{_ret}
{__start(1, _fPath, 2)}
{__code(1, _fPath, 2)}
"""
return _ret
def __gtWrite(_data: str):
global __help__
_write = ""
if __name__ == "jump":
_write += f"""
\n\n
\rREM {'':-<30} {__gtName(_fPath)} {'':-<30}
if "%1" == "-{__gtName(_fPath) if not _jump else __name__ }" (
{__bsc_op(1)}
{_data}
{__help__}
)
"""
else:
_write += f"""
\r@echo off\n
{__bsc_op()}
{_data}
{__help__}
"""
return _write
"""---------------------------Execution code---------------------------"""
def __cached(data: str) -> None:
if os.path.exists(_name) and __name__ == "jump":
with open(_name, "a") as file:
file.write(data)
else:
with open(_name, "w") as file:
file.write(data)
def __main__():
check()
_data = ""
global __main__
global __help__
for pth in __explore(_r):
_data += __process(pth) + "\n"
__help__ += f"\necho {'':=<70} \n)"
__cached(__gtWrite(_data))
try:
__main__()
except Exception as e:
print(e)
|
# -*- coding: utf-8 -*-
# Copyright © 2022, Neuroethology Lab Uni Tuebingen
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
import re
import os
import glob
import odml
import logging
import subprocess
import numpy as np
import nixio as nix
from .config import ConfigFile
from .traces import EventTrace, RawTrace
from .stimuli import StimuliDat
from .util import parse_value, odml2nix, only_number
from .stimdescription import parse_stimulus_description
from IPython import embed
class Converter(object):
def __init__(self, folder_name, output_name, force=False) -> None:
if not os.path.exists(folder_name):
logging.error(f"{folder_name} does not exist!")
raise ValueError("File not found error!")
self._folder = folder_name
self._output = output_name
self._event_traces = None
self._raw_traces = None
self._raw_data_arrays = {}
self._event_data_arrays = {}
self._stimuli_dat = None
self._force = force
self._nixfile = None
self._block = None
self._repro_tags = {}
self._stimulus_mtags = {}
self.preflight()
def preflight(self):
logging.debug(f"Pre-checking folder {self._folder}!")
self.check_output()
self.check_folder()
logging.debug("Pre-checking done.")
def check_output(self):
logging.debug(f"Checking output name: {self._output}!")
if os.path.exists(self._output):
logging.warn(f"Output file name {self._output} already exists!")
if self._force:
logging.warn(f"... force flag is set {self._force}, going to overwrite!")
else:
logging.error(f"Force flag is not set ({self._force}), abort!")
raise ValueError("Output file {self._output} already exists! If you want to overwrite it use the --force flag.")
logging.debug(f"... ok!")
return True
def unzip(self, tracename):
if os.path.exists(tracename):
logging.debug(f"\tunzip: {tracename}")
subprocess.check_call(["gunzip", tracename])
def find_traces(self):
event_traces = []
raw_traces = []
configuration = self.find_config_file()
for et in self.find_event_traces():
event_traces.append(EventTrace(et, configuration))
for rt in self.find_raw_traces():
raw_traces.append(RawTrace(rt, configuration))
return raw_traces, event_traces
def find_raw_traces(self):
logging.debug(f"Checking for raw traces!")
traces = sorted(glob.glob(os.path.join(self._folder, "trace-*.raw*")))
for rt in traces:
if rt.endswith(".gz") and rt.split(".gz")[0] not in traces:
self.unzip(os.path.split(rt)[-1])
traces = sorted(glob.glob(os.path.join(self._folder, "trace-*.raw")))
logging.debug(f"Found {len(traces)} raw traces. {[os.path.split(t)[-1] for t in traces]}")
return traces
def find_event_traces(self):
logging.debug("Discovering event traces!")
traces = sorted(glob.glob(os.path.join(self._folder, "*-events.dat")))
logging.debug(f"Found {len(traces)} event traces. {[os.path.split(t)[-1] for t in traces]}")
return traces
def find_config_file(self):
if not os.path.exists(os.path.join(self._folder, "relacs.cfg")):
logging.error("Found no info file!")
raise ValueError(f"No relacs.cfg file found in {self._folder}!")
configuration = ConfigFile(os.path.join(self._folder, "relacs.cfg"))
return configuration
def find_info(self):
filename = os.path.join(self._folder, "info.dat")
if not os.path.exists(filename):
logging.error("Found no info file!")
raise ValueError(f"No info file found in {self._folder}!")
return True
def read_info_file(self):
        def looks_like_oldstyle(filename):
            oldstyle = False  # default to new-style if no "# Recording" line is found
            with open(filename, 'r') as f:
                for l in f:
                    if "# Recording" in l:
                        oldstyle = not l.strip().endswith(":")
                        break
            return oldstyle
filename = os.path.join(self._folder, "info.dat")
oldstyle = looks_like_oldstyle(filename)
info = {}
logging.info("Reading info file....")
try:
with open(filename, 'r') as f:
lines = f.readlines()
except UnicodeDecodeError:
logging.debug("Replacing experimenter...")
command = r"sudo sed -i '/Experimenter/c\# Experimenter: Anna Stoeckl' %s" % filename
subprocess.check_call(command, shell=True)
with open(filename, 'r') as f:
lines = f.readlines()
for l in lines:
if not l.startswith("#"):
continue
l = l.strip("#").strip()
if len(l) == 0:
continue
if oldstyle:
if not ":" in l: # subsection
sec = {}
info[l[:-1] if l.endswith(":") else l] = sec
else:
parts = l.split(':')
sec[parts[0].strip()] = parts[1].strip('"').strip() if len(parts) > 1 else ""
else:
if l.endswith(":"): # subsection
sec = {}
info[l[:-1] if l.endswith(":") else l] = sec
else:
parts = l.split(': ')
sec[parts[0].strip()] = parts[1].strip('"').strip() if len(parts) > 1 else ""
return info
def read_channel_config(self):
logging.info("Reading channel configuration ...")
ids = [f"identifier{i}" for i in range(1, len(self._raw_traces)+1)]
units = [f"unit{i}" for i in range(1, len(self._raw_traces)+1)]
sampling_intervals = [f"sample interval{i}" for i in range(1, len(self._raw_traces)+1)]
sampling_rates = [f"sampling rate{i}" for i in range(1, len(self._raw_traces)+1)]
channel_config = {}
for i in range(1, len(self._raw_traces)+1):
channel_config[i] = {}
with open(os.path.join(self._folder, "stimuli.dat")) as f:
for line in f:
if "#" in line:
line = line[1:]
prop = line.strip().split(":")[0].strip()
value = line.strip().split(":")[-1].strip()
if prop in ids:
index = int(prop[-1])
channel_config[index]["identifier"] = value
if prop in units:
index = int(prop[-1])
channel_config[index]["unit"] = value
if prop in sampling_intervals:
index = int(prop[-1])
channel_config[index]["sampling interval"] = value
if prop in sampling_rates:
index = int(prop[-1])
channel_config[index]["sampling rates"] = value
if "analog output traces" in line: # end of channel configuration, we are done here
break
return channel_config
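    # Illustrative example of the stimuli.dat header lines read_channel_config()
    # expects (identifiers, units and rates below are made up):
    #
    #     #      identifier1: V-1
    #     #            unit1: mV
    #     # sample interval1: 0.05ms
    #     #   sampling rate1: 20000Hz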
def find_stimulus_info(self):
logging.debug("Scanning stimuli.dat file!")
if not os.path.exists(os.path.join(self._folder, "stimuli.dat")):
logging.error("Found no stimuli.dat file! Abort!")
raise ValueError("No stimuli.dat file found!")
def find_stimulus_descriptions(self):
logging.debug("Scanning stimulus-descriptions.dat!")
filename = os.path.join(self._folder, "stimulus-descriptions.dat")
if not os.path.exists(filename):
logging.warning("Stimulus descriptions file {filename} does not exist!")
return False
return True
def check_folder(self):
logging.debug("Checking folder structure: ...")
self._raw_traces, self._event_traces = self.find_traces()
self.find_info()
logging.debug("Found info file!")
self.find_stimulus_info()
logging.debug("Found stimulus information!")
stim_descriptions_found = self.find_stimulus_descriptions()
if stim_descriptions_found:
logging.debug("Found stimulus descriptions!")
else:
logging.debug("Did not find stimulus descriptions!")
return True
def convert_dataset_info(self, metadata, parent_section=None):
def split_list(value_str):
results = None
if len(value_str) == 0:
return " "
if "|" in value_str:
results = list(map(str.strip, value_str.split("|")))
elif value_str[0] == "[" and "]" in value_str:
results = list(map(str.strip, value_str[1:value_str.index("]")].split(', ')))
else:
results = value_str
return results
if parent_section is not None:
for k in metadata.keys():
if isinstance(metadata[k], dict):
sec = parent_section.create_section(k, k.lower())
self.convert_dataset_info(metadata[k], sec)
else: # is property
value, unit = parse_value(metadata[k])
if value is None:
continue
if isinstance(value, str):
value = split_list(value)
p = parent_section.create_property(k, value)
if unit is not None:
p.unit = unit
def open_nix_file(self):
info = self.read_info_file()
logging.info(f"Creating output file {self._output} ...")
self._nixfile = nix.File.open(self._output, nix.FileMode.Overwrite)
        # use splitext so only the ".nix" extension is removed (str.strip would drop matching characters)
        dataset_name = os.path.splitext(os.path.split(self._output)[-1])[0]
self._block = self._nixfile.create_block(dataset_name, "relacs.recording")
sec = self._nixfile.create_section(dataset_name, "relacs.recording")
self._block.metadata = sec
sec.create_property("relacs-nix version", 1.1)
self.convert_dataset_info(info, sec)
def convert_raw_traces(self, channel_config):
logging.info("Converting raw traces, this may take a little while...")
for rt in self._raw_traces:
logging.info(f"... trace {rt._trace_no}: {rt.name}")
data = np.fromfile(os.path.join(self._folder, rt.filename), dtype=np.float32)
da = self._block.create_data_array(rt.name, f"relacs.data.sampled.{rt.name}", dtype=nix.DataType.Float, data=data)
da.unit = channel_config[rt._trace_no]["unit"]
si = float(channel_config[rt._trace_no]["sampling interval"][:-2]) / 1000.
da.append_sampled_dimension(si, unit="s")
self._raw_data_arrays[rt] = da
def convert_event_traces(self):
def read_event_data(filename):
logging.info(f"... reading event times from file {filename}...")
times = []
with open(filename, 'r') as f:
for l in f:
if len(l.strip()) == 0 or "#" in l:
continue
times.append(float(l.strip().split()[0].strip()))
return np.array(times)
logging.info("Converting event traces...")
for et in self._event_traces:
logging.info(f"... trace {et.name}")
event_times = read_event_data(et._filename)
da = self._block.create_data_array(et.name, f"relacs.data.events.{et.name}", data=event_times)
da.unit = "s"
da.append_range_dimension_using_self()
da.definition = f"Events detected in {et.inputtrace}"
self._event_data_arrays[et] = da
def convert_stimuli(self):
def stimulus_descriptions(repro_name, reprorun, sampleinterval):
def skip_first_index(signals):
skip = True
for s in signals:
skip = skip and s.data[0].strip() == "-"
return skip
def find_active_signal(signals, stimulus_no):
for i, s in enumerate(signals):
if s.data[stimulus_no].strip() != "-":
return i
def parse_parameter(parameter_str):
props = []
if parameter_str.strip().startswith("\""):
parameter_str = parameter_str[1:-1]
parts = parameter_str.split(",")
for p in parts:
name = p.split(":")[0].strip()
value_str = p.split(":")[-1].strip()
value, unit = parse_value(value_str)
props.append(odml.Property(name=name, value=value, unit=unit))
return props
stimuli = []
stimulus_columns = reprorun.table["stimulus"]
signals = stimulus_columns.columns_by_name("signal")
skip_first = skip_first_index(signals)
index_col = reprorun.table.find_column(1)
abstimes = stimulus_columns.columns_by_name("time")[0]
delays = stimulus_columns.columns_by_name("delay")[0]
durations = stimulus_columns.columns_by_name("duration")
amplitudes = stimulus_columns.columns_by_name("amplitude")
            if len(amplitudes) == 0:  # fallback for very old (pre-2011) files
amplitudes = stimulus_columns.columns_by_name("%6.3f")
parameters = stimulus_columns.columns_by_name("parameter")
for i in range(0 if not skip_first else 1, len(index_col)):
start_time = index_col[i] * sampleinterval
active = find_active_signal(signals, i)
characteristics = odml.Section(f"{repro_name}_{i}")
characteristics.create_property("signal", signals[active].data[i])
p = characteristics.create_property("start_time", start_time)
p.unit = "s"
dur = float(durations[active].data[i]) / (1000 if durations[active].type_or_unit == "ms" else 1)
p = characteristics.create_property("duration", dur)
p.unit = "s"
p = characteristics.create_property("amplitude", float(amplitudes[active].data[i]))
p.unit = amplitudes[active].type_or_unit
d = float(delays.data[i]) / (1000 if delays.type_or_unit == "ms" else 1)
p = characteristics.create_property("delay", d)
p.unit = "s"
at = float(abstimes.data[i]) / (1000 if abstimes.type_or_unit == "ms" else 1)
p = characteristics.create_property("abs_time", at)
p.unit = "s"
characteristics.create_property("repro_tag_id", self._repro_tags[repro_name].id)
if len(parameters) > 0:
params = parse_parameter(parameters[active].data[i])
for p in params:
characteristics.append(p)
stimuli.append(characteristics)
return stimuli
def stimuli(sampleinterval):
stims = {}
counter = {}
stim_metadata = parse_stimulus_description(os.path.join(self._folder, "stimulus-descriptions.dat"))
for rr in self._stimuli_dat.repro_runs:
if rr is None or rr.name is None:
print(rr)
continue
if rr.name in counter:
counter[rr.name] += 1
else:
counter[rr.name] = 1
if not rr.valid:
continue
if "BaselineActivity" in rr.name:
continue # there are no stimulus presented during baseline
repro_name = f"{rr.name}_{counter[rr.name]}"
stims[repro_name] = stimulus_descriptions(repro_name, rr, sampleinterval)
return stims, stim_metadata
def store_stimuli(stims, stim_metadata):
def store_features(signal, features):
excluded_feats = ["start_time", "duration", "signal"]
fixed_feats = ["abs_time", "amplitude", "repro_tag_id"]
feats = {}
for i, feat in enumerate(features):
for p in feat:
if p.name in excluded_feats:
continue
if p.name not in feats:
if p.dtype == "string":
feats[p.name] = np.empty(len(features), dtype=object)
feats[p.name][i] = p.values[0]
else:
feats[p.name] = np.empty(len(features))
else:
feats[p.name][i] = p.values[0]
for key in feats.keys():
feat_name = f"{signal}_{key}"
feat_type = f"relacs.feature.{key if key in fixed_feats else "mutable"}"
mtag = self._stimulus_mtags[signal]
shape = (len(feats[key]), 1)
data = np.reshape(feats[key], shape)
dtype = nix.DataType.String if data.dtype == object else nix.DataType.Float
feature_da = self._block.create_data_array(feat_name, feat_type,
shape= shape, dtype=dtype,
data=data)
feature_da.append_set_dimension()
mtag.create_feature(feature_da, nix.LinkType.Indexed)
return None
unique_signals = []
signal_counts = {}
signal_starts = {}
signal_durations = {}
signal_features = {}
for repro_run in stims:
for stim in stims[repro_run]:
signal = stim.props["signal"].values[0]
if signal not in unique_signals:
unique_signals.append(signal)
signal_counts[signal] = 1
signal_starts[signal] = [stim.props["start_time"].values[0]]
signal_durations[signal] = [stim.props["duration"].values[0]]
signal_features[signal] = [stim]
else:
signal_starts[signal].append(stim.props["start_time"].values[0])
signal_durations[signal].append(stim.props["duration"].values[0])
signal_counts[signal] += 1
signal_features[signal].append(stim)
excluded_refs = ["restart", "recording", "stimulus"]
for signal in unique_signals:
positions = self._block.create_data_array(f"{signal}_onset_times", "relacs.stimulus.onset",
data=np.atleast_2d(signal_starts[signal]).T)
positions.append_set_dimension()
extents = self._block.create_data_array(f"{signal}_durations", "relacs.stimulus.duration",
data=np.atleast_2d(signal_durations[signal]).T)
extents.append_set_dimension()
mtag = self._block.create_multi_tag(signal, "relacs.stimulus.segment", positions=positions,
extents=extents)
self._stimulus_mtags[signal] = mtag
for et in self._event_data_arrays:
if et not in excluded_refs:
mtag.references.append(self._event_data_arrays[et])
for rt in self._raw_data_arrays:
mtag.references.append(self._raw_data_arrays[rt])
if stim_metadata is not None and signal in stim_metadata.sections:
metadata = stim_metadata[signal]
mtag.metadata = self._nixfile.create_section(mtag.name, "relacs.stimulus")
odml2nix(metadata, mtag.metadata)
store_features(signal, signal_features[signal])
return None
sampleinterval = self._stimuli_dat.input_settings.props["sample interval1"].values[0] /1000
stims, metadata = stimuli(sampleinterval)
store_stimuli(stims, metadata)
return
def convert_repro_runs(self):
def repro_times(reprorun, sampleinterval):
if reprorun.name is None:
return None, None
if not reprorun.valid:
return None, None
index_col = reprorun.table.find_column(1)
if len(index_col) == 0:
return None, None
stimulus_grp = reprorun.table["stimulus"]
signals = stimulus_grp.columns_by_name("signal")
is_init = np.any(np.array([s[0] for s in signals], dtype=object) == "init")
delay_cols = stimulus_grp.columns_by_name("delay")
delay = 0.0 if (len(delay_cols) == 0 or is_init) else delay_cols[0][0]
start_time = index_col[0] * sampleinterval - delay / 1000.
duration_cols = stimulus_grp.columns_by_name("duration")
duration = 0.0
if "BaselineActivity" in reprorun.name:
duration = 0.0
end_time = start_time
else:
for d in duration_cols:
dur = d[-1]
if isinstance(dur, (int, float)):
duration = dur / 1000
break
elif isinstance(dur, str) and only_number.search(dur) is not None:
duration = float(dur) / 1000
break
end_time = index_col[-1] * sampleinterval + duration
logging.debug(f"Repro {reprorun.name} from {start_time} to {end_time}s")
return start_time, end_time
def repro_runs():
repro_names = []
repro_starts = []
repro_ends = []
repro_durations = []
repro_metadata = []
sampleinterval = self._stimuli_dat.input_settings.props["sample interval1"].values[0] /1000
counter = {}
for i, rr in enumerate(self._stimuli_dat.repro_runs):
if rr.name in counter:
counter[rr.name] += 1
else:
counter[rr.name] = 1
if not rr.valid:
continue
start, end = repro_times(rr, sampleinterval)
if start is None:
logging.error(f"RePro run: {rr.name} has no start/stop entries! It is ignored!")
continue
repro_names.append(f"{rr.name}_{counter[rr.name]}")
repro_starts.append(start)
repro_durations.append(end - start)
repro_ends.append(end)
repro_metadata.append(rr.metadata)
            for i, (start, end, duration) in enumerate(zip(repro_starts, repro_ends, repro_durations)):
logging.debug(f"Duration {duration} for repro {repro_names[i]} and {i} < {len(repro_starts) - 1}")
if duration < sampleinterval and i < len(repro_starts) -1:
repro_durations[i] = repro_starts[i+1] - start
logging.debug(f"\t new duration: {repro_durations[i]}")
repro_ends[i] = repro_starts[i+1]
return repro_names, repro_metadata, repro_starts, repro_durations
def store_repro_runs(repro_names, repro_metadata, start_times, durations):
excluded_refs = ["restart", "recording", "stimulus"]
for name, metadata, start, duration in zip(repro_names, repro_metadata, start_times, durations):
logging.debug(f"... storing {name} which ran from {start} to {start + duration}.")
tag = self._block.create_tag(name, "relacs.repro_run", position=[start])
tag.extent = [duration]
for et in self._event_data_arrays:
if et not in excluded_refs:
tag.references.append(self._event_data_arrays[et])
for rt in self._raw_data_arrays:
tag.references.append(self._raw_data_arrays[rt])
tag.metadata = self._nixfile.create_section(name, "relacs.repro")
odml2nix(metadata, tag.metadata)
self._repro_tags[name] = tag
names, metadata, starts, durations = repro_runs()
logging.info("Converting RePro runs...")
store_repro_runs(names, metadata, starts, durations)
def convert(self):
logging.info(f"Converting dataset {self._folder} to nix file {self._output}!")
channel_config = self.read_channel_config()
self.open_nix_file()
self.convert_raw_traces(channel_config)
self.convert_event_traces()
self._stimuli_dat = StimuliDat(os.path.join(self._folder, "stimuli.dat"))
self.convert_repro_runs()
self.convert_stimuli()
self._nixfile.close()
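# Minimal usage sketch (folder and output names are made up):
#
#     converter = Converter("recordings/2022-03-01-ab", "2022-03-01-ab.nix", force=True)
#     converter.convert()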
|
# -*- coding: utf-8 -*-
# Copyright © 2022, Neuroethology Lab Uni Tuebingen
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
import re
import os
import glob
import odml
import logging
import subprocess
import numpy as np
import nixio as nix
from .config import ConfigFile
from .traces import EventTrace, RawTrace
from .stimuli import StimuliDat
from .util import parse_value, odml2nix, only_number
from .stimdescription import parse_stimulus_description
from IPython import embed
class Converter(object):
def __init__(self, folder_name, output_name, force=False) -> None:
if not os.path.exists(folder_name):
logging.error(f"{folder_name} does not exist!")
raise ValueError("File not found error!")
self._folder = folder_name
self._output = output_name
self._event_traces = None
self._raw_traces = None
self._raw_data_arrays = {}
self._event_data_arrays = {}
self._stimuli_dat = None
self._force = force
self._nixfile = None
self._block = None
self._repro_tags = {}
self._stimulus_mtags = {}
self.preflight()
def preflight(self):
logging.debug(f"Pre-checking folder {self._folder}!")
self.check_output()
self.check_folder()
logging.debug("Pre-checking done.")
def check_output(self):
logging.debug(f"Checking output name: {self._output}!")
if os.path.exists(self._output):
logging.warn(f"Output file name {self._output} already exists!")
if self._force:
logging.warn(f"... force flag is set {self._force}, going to overwrite!")
else:
logging.error(f"Force flag is not set ({self._force}), abort!")
raise ValueError("Output file {self._output} already exists! If you want to overwrite it use the --force flag.")
logging.debug(f"... ok!")
return True
def unzip(self, tracename):
if os.path.exists(tracename):
logging.debug(f"\tunzip: {tracename}")
subprocess.check_call(["gunzip", tracename])
def find_traces(self):
event_traces = []
raw_traces = []
configuration = self.find_config_file()
for et in self.find_event_traces():
event_traces.append(EventTrace(et, configuration))
for rt in self.find_raw_traces():
raw_traces.append(RawTrace(rt, configuration))
return raw_traces, event_traces
def find_raw_traces(self):
logging.debug(f"Checking for raw traces!")
traces = sorted(glob.glob(os.path.join(self._folder, "trace-*.raw*")))
for rt in traces:
if rt.endswith(".gz") and rt.split(".gz")[0] not in traces:
self.unzip(os.path.split(rt)[-1])
traces = sorted(glob.glob(os.path.join(self._folder, "trace-*.raw")))
logging.debug(f"Found {len(traces)} raw traces. {[os.path.split(t)[-1] for t in traces]}")
return traces
def find_event_traces(self):
logging.debug("Discovering event traces!")
traces = sorted(glob.glob(os.path.join(self._folder, "*-events.dat")))
logging.debug(f"Found {len(traces)} event traces. {[os.path.split(t)[-1] for t in traces]}")
return traces
def find_config_file(self):
if not os.path.exists(os.path.join(self._folder, "relacs.cfg")):
logging.error("Found no info file!")
raise ValueError(f"No relacs.cfg file found in {self._folder}!")
configuration = ConfigFile(os.path.join(self._folder, "relacs.cfg"))
return configuration
def find_info(self):
filename = os.path.join(self._folder, "info.dat")
if not os.path.exists(filename):
logging.error("Found no info file!")
raise ValueError(f"No info file found in {self._folder}!")
return True
def read_info_file(self):
        def looks_like_oldstyle(filename):
            oldstyle = False  # default to new-style if no "# Recording" line is found
            with open(filename, 'r') as f:
                for l in f:
                    if "# Recording" in l:
                        oldstyle = not l.strip().endswith(":")
                        break
            return oldstyle
filename = os.path.join(self._folder, "info.dat")
oldstyle = looks_like_oldstyle(filename)
info = {}
logging.info("Reading info file....")
try:
with open(filename, 'r') as f:
lines = f.readlines()
except UnicodeDecodeError:
logging.debug("Replacing experimenter...")
command = r"sudo sed -i '/Experimenter/c\# Experimenter: Anna Stoeckl' %s" % filename
subprocess.check_call(command, shell=True)
with open(filename, 'r') as f:
lines = f.readlines()
for l in lines:
if not l.startswith("#"):
continue
l = l.strip("#").strip()
if len(l) == 0:
continue
if oldstyle:
if not ":" in l: # subsection
sec = {}
info[l[:-1] if l.endswith(":") else l] = sec
else:
parts = l.split(':')
sec[parts[0].strip()] = parts[1].strip('"').strip() if len(parts) > 1 else ""
else:
if l.endswith(":"): # subsection
sec = {}
info[l[:-1] if l.endswith(":") else l] = sec
else:
parts = l.split(': ')
sec[parts[0].strip()] = parts[1].strip('"').strip() if len(parts) > 1 else ""
return info
def read_channel_config(self):
logging.info("Reading channel configuration ...")
ids = [f"identifier{i}" for i in range(1, len(self._raw_traces)+1)]
units = [f"unit{i}" for i in range(1, len(self._raw_traces)+1)]
sampling_intervals = [f"sample interval{i}" for i in range(1, len(self._raw_traces)+1)]
sampling_rates = [f"sampling rate{i}" for i in range(1, len(self._raw_traces)+1)]
channel_config = {}
for i in range(1, len(self._raw_traces)+1):
channel_config[i] = {}
with open(os.path.join(self._folder, "stimuli.dat")) as f:
for line in f:
if "#" in line:
line = line[1:]
prop = line.strip().split(":")[0].strip()
value = line.strip().split(":")[-1].strip()
if prop in ids:
index = int(prop[-1])
channel_config[index]["identifier"] = value
if prop in units:
index = int(prop[-1])
channel_config[index]["unit"] = value
if prop in sampling_intervals:
index = int(prop[-1])
channel_config[index]["sampling interval"] = value
if prop in sampling_rates:
index = int(prop[-1])
channel_config[index]["sampling rates"] = value
if "analog output traces" in line: # end of channel configuration, we are done here
break
return channel_config
def find_stimulus_info(self):
logging.debug("Scanning stimuli.dat file!")
if not os.path.exists(os.path.join(self._folder, "stimuli.dat")):
logging.error("Found no stimuli.dat file! Abort!")
raise ValueError("No stimuli.dat file found!")
def find_stimulus_descriptions(self):
logging.debug("Scanning stimulus-descriptions.dat!")
filename = os.path.join(self._folder, "stimulus-descriptions.dat")
if not os.path.exists(filename):
logging.warning("Stimulus descriptions file {filename} does not exist!")
return False
return True
def check_folder(self):
logging.debug("Checking folder structure: ...")
self._raw_traces, self._event_traces = self.find_traces()
self.find_info()
logging.debug("Found info file!")
self.find_stimulus_info()
logging.debug("Found stimulus information!")
stim_descriptions_found = self.find_stimulus_descriptions()
if stim_descriptions_found:
logging.debug("Found stimulus descriptions!")
else:
logging.debug("Did not find stimulus descriptions!")
return True
def convert_dataset_info(self, metadata, parent_section=None):
def split_list(value_str):
results = None
if len(value_str) == 0:
return " "
if "|" in value_str:
results = list(map(str.strip, value_str.split("|")))
elif value_str[0] == "[" and "]" in value_str:
results = list(map(str.strip, value_str[1:value_str.index("]")].split(', ')))
else:
results = value_str
return results
if parent_section is not None:
for k in metadata.keys():
if isinstance(metadata[k], dict):
sec = parent_section.create_section(k, k.lower())
self.convert_dataset_info(metadata[k], sec)
else: # is property
value, unit = parse_value(metadata[k])
if value is None:
continue
if isinstance(value, str):
value = split_list(value)
p = parent_section.create_property(k, value)
if unit is not None:
p.unit = unit
def open_nix_file(self):
info = self.read_info_file()
logging.info(f"Creating output file {self._output} ...")
self._nixfile = nix.File.open(self._output, nix.FileMode.Overwrite)
        # use splitext so only the ".nix" extension is removed (str.strip would drop matching characters)
        dataset_name = os.path.splitext(os.path.split(self._output)[-1])[0]
self._block = self._nixfile.create_block(dataset_name, "relacs.recording")
sec = self._nixfile.create_section(dataset_name, "relacs.recording")
self._block.metadata = sec
sec.create_property("relacs-nix version", 1.1)
self.convert_dataset_info(info, sec)
def convert_raw_traces(self, channel_config):
logging.info("Converting raw traces, this may take a little while...")
for rt in self._raw_traces:
logging.info(f"... trace {rt._trace_no}: {rt.name}")
data = np.fromfile(os.path.join(self._folder, rt.filename), dtype=np.float32)
da = self._block.create_data_array(rt.name, f"relacs.data.sampled.{rt.name}", dtype=nix.DataType.Float, data=data)
da.unit = channel_config[rt._trace_no]["unit"]
si = float(channel_config[rt._trace_no]["sampling interval"][:-2]) / 1000.
da.append_sampled_dimension(si, unit="s")
self._raw_data_arrays[rt] = da
def convert_event_traces(self):
def read_event_data(filename):
logging.info(f"... reading event times from file {filename}...")
times = []
with open(filename, 'r') as f:
for l in f:
if len(l.strip()) == 0 or "#" in l:
continue
times.append(float(l.strip().split()[0].strip()))
return np.array(times)
logging.info("Converting event traces...")
for et in self._event_traces:
logging.info(f"... trace {et.name}")
event_times = read_event_data(et._filename)
da = self._block.create_data_array(et.name, f"relacs.data.events.{et.name}", data=event_times)
da.unit = "s"
da.append_range_dimension_using_self()
da.definition = f"Events detected in {et.inputtrace}"
self._event_data_arrays[et] = da
def convert_stimuli(self):
def stimulus_descriptions(repro_name, reprorun, sampleinterval):
def skip_first_index(signals):
skip = True
for s in signals:
skip = skip and s.data[0].strip() == "-"
return skip
def find_active_signal(signals, stimulus_no):
for i, s in enumerate(signals):
if s.data[stimulus_no].strip() != "-":
return i
def parse_parameter(parameter_str):
props = []
if parameter_str.strip().startswith("\""):
parameter_str = parameter_str[1:-1]
parts = parameter_str.split(",")
for p in parts:
name = p.split(":")[0].strip()
value_str = p.split(":")[-1].strip()
value, unit = parse_value(value_str)
props.append(odml.Property(name=name, value=value, unit=unit))
return props
stimuli = []
stimulus_columns = reprorun.table["stimulus"]
signals = stimulus_columns.columns_by_name("signal")
skip_first = skip_first_index(signals)
index_col = reprorun.table.find_column(1)
abstimes = stimulus_columns.columns_by_name("time")[0]
delays = stimulus_columns.columns_by_name("delay")[0]
durations = stimulus_columns.columns_by_name("duration")
amplitudes = stimulus_columns.columns_by_name("amplitude")
            if len(amplitudes) == 0:  # fallback for very old (pre-2011) files
amplitudes = stimulus_columns.columns_by_name("%6.3f")
parameters = stimulus_columns.columns_by_name("parameter")
for i in range(0 if not skip_first else 1, len(index_col)):
start_time = index_col[i] * sampleinterval
active = find_active_signal(signals, i)
characteristics = odml.Section(f"{repro_name}_{i}")
characteristics.create_property("signal", signals[active].data[i])
p = characteristics.create_property("start_time", start_time)
p.unit = "s"
dur = float(durations[active].data[i]) / (1000 if durations[active].type_or_unit == "ms" else 1)
p = characteristics.create_property("duration", dur)
p.unit = "s"
p = characteristics.create_property("amplitude", float(amplitudes[active].data[i]))
p.unit = amplitudes[active].type_or_unit
d = float(delays.data[i]) / (1000 if delays.type_or_unit == "ms" else 1)
p = characteristics.create_property("delay", d)
p.unit = "s"
at = float(abstimes.data[i]) / (1000 if abstimes.type_or_unit == "ms" else 1)
p = characteristics.create_property("abs_time", at)
p.unit = "s"
characteristics.create_property("repro_tag_id", self._repro_tags[repro_name].id)
if len(parameters) > 0:
params = parse_parameter(parameters[active].data[i])
for p in params:
characteristics.append(p)
stimuli.append(characteristics)
return stimuli
def stimuli(sampleinterval):
stims = {}
counter = {}
stim_metadata = parse_stimulus_description(os.path.join(self._folder, "stimulus-descriptions.dat"))
for rr in self._stimuli_dat.repro_runs:
if rr is None or rr.name is None:
print(rr)
continue
if rr.name in counter:
counter[rr.name] += 1
else:
counter[rr.name] = 1
if not rr.valid:
continue
if "BaselineActivity" in rr.name:
continue # there are no stimulus presented during baseline
repro_name = f"{rr.name}_{counter[rr.name]}"
stims[repro_name] = stimulus_descriptions(repro_name, rr, sampleinterval)
return stims, stim_metadata
def store_stimuli(stims, stim_metadata):
def store_features(signal, features):
excluded_feats = ["start_time", "duration", "signal"]
fixed_feats = ["abs_time", "amplitude", "repro_tag_id"]
feats = {}
for i, feat in enumerate(features):
for p in feat:
if p.name in excluded_feats:
continue
if p.name not in feats:
if p.dtype == "string":
feats[p.name] = np.empty(len(features), dtype=object)
feats[p.name][i] = p.values[0]
else:
feats[p.name] = np.empty(len(features))
else:
feats[p.name][i] = p.values[0]
for key in feats.keys():
feat_name = f"{signal}_{key}"
feat_type = f"relacs.feature.{key if key in fixed_feats else 'mutable'}"
mtag = self._stimulus_mtags[signal]
shape = (len(feats[key]), 1)
data = np.reshape(feats[key], shape)
dtype = nix.DataType.String if data.dtype == object else nix.DataType.Float
feature_da = self._block.create_data_array(feat_name, feat_type,
shape= shape, dtype=dtype,
data=data)
feature_da.append_set_dimension()
mtag.create_feature(feature_da, nix.LinkType.Indexed)
return None
unique_signals = []
signal_counts = {}
signal_starts = {}
signal_durations = {}
signal_features = {}
for repro_run in stims:
for stim in stims[repro_run]:
signal = stim.props["signal"].values[0]
if signal not in unique_signals:
unique_signals.append(signal)
signal_counts[signal] = 1
signal_starts[signal] = [stim.props["start_time"].values[0]]
signal_durations[signal] = [stim.props["duration"].values[0]]
signal_features[signal] = [stim]
else:
signal_starts[signal].append(stim.props["start_time"].values[0])
signal_durations[signal].append(stim.props["duration"].values[0])
signal_counts[signal] += 1
signal_features[signal].append(stim)
excluded_refs = ["restart", "recording", "stimulus"]
for signal in unique_signals:
positions = self._block.create_data_array(f"{signal}_onset_times", "relacs.stimulus.onset",
data=np.atleast_2d(signal_starts[signal]).T)
positions.append_set_dimension()
extents = self._block.create_data_array(f"{signal}_durations", "relacs.stimulus.duration",
data=np.atleast_2d(signal_durations[signal]).T)
extents.append_set_dimension()
mtag = self._block.create_multi_tag(signal, "relacs.stimulus.segment", positions=positions,
extents=extents)
self._stimulus_mtags[signal] = mtag
for et in self._event_data_arrays:
if et not in excluded_refs:
mtag.references.append(self._event_data_arrays[et])
for rt in self._raw_data_arrays:
mtag.references.append(self._raw_data_arrays[rt])
if stim_metadata is not None and signal in stim_metadata.sections:
metadata = stim_metadata[signal]
mtag.metadata = self._nixfile.create_section(mtag.name, "relacs.stimulus")
odml2nix(metadata, mtag.metadata)
store_features(signal, signal_features[signal])
return None
sampleinterval = self._stimuli_dat.input_settings.props["sample interval1"].values[0] /1000
stims, metadata = stimuli(sampleinterval)
store_stimuli(stims, metadata)
return
def convert_repro_runs(self):
def repro_times(reprorun, sampleinterval):
if reprorun.name is None:
return None, None
if not reprorun.valid:
return None, None
index_col = reprorun.table.find_column(1)
if len(index_col) == 0:
return None, None
stimulus_grp = reprorun.table["stimulus"]
signals = stimulus_grp.columns_by_name("signal")
is_init = np.any(np.array([s[0] for s in signals], dtype=object) == "init")
delay_cols = stimulus_grp.columns_by_name("delay")
delay = 0.0 if (len(delay_cols) == 0 or is_init) else delay_cols[0][0]
start_time = index_col[0] * sampleinterval - delay / 1000.
duration_cols = stimulus_grp.columns_by_name("duration")
duration = 0.0
if "BaselineActivity" in reprorun.name:
duration = 0.0
end_time = start_time
else:
for d in duration_cols:
dur = d[-1]
if isinstance(dur, (int, float)):
duration = dur / 1000
break
elif isinstance(dur, str) and only_number.search(dur) is not None:
duration = float(dur) / 1000
break
end_time = index_col[-1] * sampleinterval + duration
logging.debug(f"Repro {reprorun.name} from {start_time} to {end_time}s")
return start_time, end_time
def repro_runs():
repro_names = []
repro_starts = []
repro_ends = []
repro_durations = []
repro_metadata = []
sampleinterval = self._stimuli_dat.input_settings.props["sample interval1"].values[0] /1000
counter = {}
for i, rr in enumerate(self._stimuli_dat.repro_runs):
if rr.name in counter:
counter[rr.name] += 1
else:
counter[rr.name] = 1
if not rr.valid:
continue
start, end = repro_times(rr, sampleinterval)
if start is None:
logging.error(f"RePro run: {rr.name} has no start/stop entries! It is ignored!")
continue
repro_names.append(f"{rr.name}_{counter[rr.name]}")
repro_starts.append(start)
repro_durations.append(end - start)
repro_ends.append(end)
repro_metadata.append(rr.metadata)
            for i, (start, end, duration) in enumerate(zip(repro_starts, repro_ends, repro_durations)):
logging.debug(f"Duration {duration} for repro {repro_names[i]} and {i} < {len(repro_starts) - 1}")
if duration < sampleinterval and i < len(repro_starts) -1:
repro_durations[i] = repro_starts[i+1] - start
logging.debug(f"\t new duration: {repro_durations[i]}")
repro_ends[i] = repro_starts[i+1]
return repro_names, repro_metadata, repro_starts, repro_durations
def store_repro_runs(repro_names, repro_metadata, start_times, durations):
excluded_refs = ["restart", "recording", "stimulus"]
for name, metadata, start, duration in zip(repro_names, repro_metadata, start_times, durations):
logging.debug(f"... storing {name} which ran from {start} to {start + duration}.")
tag = self._block.create_tag(name, "relacs.repro_run", position=[start])
tag.extent = [duration]
for et in self._event_data_arrays:
if et not in excluded_refs:
tag.references.append(self._event_data_arrays[et])
for rt in self._raw_data_arrays:
tag.references.append(self._raw_data_arrays[rt])
tag.metadata = self._nixfile.create_section(name, "relacs.repro")
odml2nix(metadata, tag.metadata)
self._repro_tags[name] = tag
names, metadata, starts, durations = repro_runs()
logging.info("Converting RePro runs...")
store_repro_runs(names, metadata, starts, durations)
def convert(self):
logging.info(f"Converting dataset {self._folder} to nix file {self._output}!")
channel_config = self.read_channel_config()
self.open_nix_file()
self.convert_raw_traces(channel_config)
self.convert_event_traces()
self._stimuli_dat = StimuliDat(os.path.join(self._folder, "stimuli.dat"))
self.convert_repro_runs()
self.convert_stimuli()
self._nixfile.close()
|
import bs4
import requests
from typing import Union, List
from .Infopage import info, InfoPage
class SearchResult:
def __init__(self, urls: dict):
self.names = tuple(urls.keys())
self.urls = urls
def __getitem__(self, item):
if type(item) == str:
return self.urls[item]
elif type(item) == int:
return self.names[item]
else:
raise TypeError
def __len__(self):
return len(self.names)
def __iter__(self):
self.__index = 0
return self
def __next__(self):
if self.__index < len(self.names):
index = self.__index
self.__index += 1
return self.names[index]
else:
raise StopIteration
def __str__(self):
return str(self.names)
def get(self, x: Union[int, str]) -> InfoPage:
if type(x) == int:
return info(self.urls[self.names[x]])
elif type(x) == str:
return info(self.urls[x])
def get_all(self, limit: int = 20) -> List[InfoPage]:
lst = []
try:
if limit > 20:
limit = 20
except TypeError:
limit = 20
for item in list(self.urls.keys())[:limit]:
print('Getting:', item)
lst.append(info(self.urls[item]))
return lst
def search(name: str, page: int = 1, style: str = None, year=None, eps: int = None, score: str = None,
match_all: bool = True, max_results: int = 20) -> Union[SearchResult, None]:
urls = {}
if max_results > 20:
print("Cannot have more than 20 Results!")
max_results = 20
filters_given = any([style, year, eps, score])
url = f"https://mydramalist.com/search?q={name.replace(" ", "+")}&page={page}"
base = requests.get(url)
soup = bs4.BeautifulSoup(base.text, 'lxml')
results_box = soup.find('div', class_='col-lg-8 col-md-8').find_all('div', class_='box')
for item in results_box:
# Get Title
try:
curr_title = item.find("h6").find('a').text
except AttributeError:
return None
# Get Category
curr_cateory = item.find('span', class_='text-muted')
        # Check if Category exists
if curr_cateory:
curr_cateory = curr_cateory.text
else:
continue
# Get URL
curr_url = item.find("h6").find('a')['href']
# Apply filters
if filters_given:
if match_all:
filter_check = 15 # Has to match all filters given
else:
                filter_check = 0  # Has to match at least one of the filters given
# In Binary from MSB [0] is style, [1] is year, [2] is eps, [3] is score
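            # Worked example (illustrative): with match_all=True the mask starts at
            # 0b1111 (15); a failed year check clears bit 2 via filter_check &= 0b1011,
            # so a title is only added if all four bits are still set afterwards.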
# Check for Score
curr_score = item.find('span', class_='score').text
if score:
if curr_score:
if score.endswith('+'):
if not float(curr_score) >= float(score.rstrip('+')):
filter_check &= 0b1110
else:
filter_check |= 0b0001
elif score.endswith('-'):
if not float(curr_score) <= float(score.rstrip('-')):
filter_check &= 0b1110
else:
filter_check |= 0b0001
else:
if not curr_score == score:
filter_check &= 0b1110
else:
filter_check |= 0b0001
else:
filter_check &= 0b1110
# Check for Episodes Filter
if eps:
if not ((curr_cateory.split(',')[-1]).startswith(f" {eps} episode")):
filter_check &= 0b1101
else:
filter_check |= 0b0010
# Check for Year Filter
if year:
if not curr_cateory.split(',')[0].split('-')[-1].strip() == str(year):
filter_check &= 0b1011
else:
filter_check |= 0b0100
# Check for Style Filter
if style:
if curr_cateory.find(style) == -1:
filter_check &= 0b0111
else:
filter_check |= 0b1000
# Add it to list if checks pass
elif match_all and filter_check == 15:
urls[curr_title] = curr_url
elif (not match_all) and filter_check != 0:
urls[curr_title] = curr_url
else: # Directly add if no filters are given
urls[curr_title] = curr_url
if len(urls) >= max_results:
break
if len(urls) > 0:
return SearchResult(urls)
else:
return None
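# A minimal usage sketch of search() and its filter bitmask; the query string
# and filter values below are illustrative assumptions. In filter_check, bit 3
# (MSB) tracks the style filter, bit 2 the year, bit 1 the episode count and
# bit 0 (LSB) the score. With match_all=True the mask starts at 0b1111 and a
# failed check clears its bit, so only results still equal to 15 are kept;
# with match_all=False any single set bit is enough to keep a result.
#
#     results = search("hospital", year=2020, score="8.0+", match_all=True)
#     if results is not None:
#         results[0]           # int index -> first matched title
#         results[results[0]]  # str index -> URL path for that title
#         results.get(0)       # InfoPage for the first match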
|
import bs4
import requests
from typing import Union, List
from .Infopage import info, InfoPage
class SearchResult:
def __init__(self, urls: dict):
self.names = tuple(urls.keys())
self.urls = urls
def __getitem__(self, item):
if type(item) == str:
return self.urls[item]
elif type(item) == int:
return self.names[item]
else:
raise TypeError
def __len__(self):
return len(self.names)
def __iter__(self):
self.__index = 0
return self
def __next__(self):
if self.__index < len(self.names):
index = self.__index
self.__index += 1
return self.names[index]
else:
raise StopIteration
def __str__(self):
return str(self.names)
def get(self, x: Union[int, str]) -> InfoPage:
if type(x) == int:
return info(self.urls[self.names[x]])
elif type(x) == str:
return info(self.urls[x])
def get_all(self, limit: int = 20) -> List[InfoPage]:
lst = []
try:
if limit > 20:
limit = 20
except TypeError:
limit = 20
for item in list(self.urls.keys())[:limit]:
print('Getting:', item)
lst.append(info(self.urls[item]))
return lst
def search(name: str, page: int = 1, style: str = None, year=None, eps: int = None, score: str = None,
match_all: bool = True, max_results: int = 20) -> Union[SearchResult, None]:
urls = {}
if max_results > 20:
print("Cannot have more than 20 Results!")
max_results = 20
filters_given = any([style, year, eps, score])
url = f"https://mydramalist.com/search?q={name.replace(' ', '+')}&page={page}"
base = requests.get(url)
soup = bs4.BeautifulSoup(base.text, 'lxml')
results_box = soup.find('div', class_='col-lg-8 col-md-8').find_all('div', class_='box')
for item in results_box:
# Get Title
try:
curr_title = item.find("h6").find('a').text
except AttributeError:
return None
# Get Category
curr_cateory = item.find('span', class_='text-muted')
# Check if Category Exists
if curr_cateory:
curr_cateory = curr_cateory.text
else:
continue
# Get URL
curr_url = item.find("h6").find('a')['href']
# Apply filters
if filters_given:
if match_all:
filter_check = 15 # Has to match all filters given
else:
filter_check = 0 # Has to match at least one of the filters given
# Bitmask, read from MSB to LSB: bit 3 is style, bit 2 is year, bit 1 is eps, bit 0 is score
# Check for Score
curr_score = item.find('span', class_='score').text
if score:
if curr_score:
if score.endswith('+'):
if not float(curr_score) >= float(score.rstrip('+')):
filter_check &= 0b1110
else:
filter_check |= 0b0001
elif score.endswith('-'):
if not float(curr_score) <= float(score.rstrip('-')):
filter_check &= 0b1110
else:
filter_check |= 0b0001
else:
if not curr_score == score:
filter_check &= 0b1110
else:
filter_check |= 0b0001
else:
filter_check &= 0b1110
# Check for Episodes Filter
if eps:
if not ((curr_cateory.split(',')[-1]).startswith(f" {eps} episode")):
filter_check &= 0b1101
else:
filter_check |= 0b0010
# Check for Year Filter
if year:
if not curr_cateory.split(',')[0].split('-')[-1].strip() == str(year):
filter_check &= 0b1011
else:
filter_check |= 0b0100
# Check for Style Filter
if style:
if curr_cateory.find(style) == -1:
filter_check &= 0b0111
else:
filter_check |= 0b1000
# Add it to list if checks pass
elif match_all and filter_check == 15:
urls[curr_title] = curr_url
elif (not match_all) and filter_check != 0:
urls[curr_title] = curr_url
else: # Directly add if no filters are given
urls[curr_title] = curr_url
if len(urls) >= max_results:
break
if len(urls) > 0:
return SearchResult(urls)
else:
return None
|
'''Contains a helper function for this project's filenames,
and an example band dictionary and list.'''
def dirname(measurement):
'''Returns directory of .asc file in CMIP5 dataset'''
_dirname_prefix = 'bcc_csm1_1_m_rcp8_5_2080s'
_dirname_postfix = '10min_r1i1p1_no_tile_asc'
_dirname_global = './data/CMIP5'
return f'{_dirname_global}/{_dirname_prefix}_{measurement}_{_dirname_postfix}'
TEMPLATE = '#'
BANDS = [
# `range`: parameters to substitute template with
# len(`range`) == 1 when no template substitution is required (a dummy string is used)
{
'path': f'{dirname('cons')}/cons_mths.asc',
'range': ['-999'],
'desc': 'consecutive_dry_months'
},
{
'path': f'{dirname('prec')}/prec_{TEMPLATE}.asc',
'range': [str(i) for i in range(1, 13)],
'desc': 'precipitation'
},
{
'path': f'{dirname('tmax')}/tmax_{TEMPLATE}.asc',
'range': [str(i) for i in range(1, 13)],
'desc': 'maximum_monthly_temperature'
},
{
'path': f'{dirname('tmean')}/tmean_{TEMPLATE}.asc',
'range': [str(i) for i in range(1, 13)],
'desc': 'mean_monthly_temperature'
},
{
'path': f'{dirname('tmin')}/tmin_{TEMPLATE}.asc',
'range': [str(i) for i in range(1, 13)],
'desc': 'min_monthly_temperature'
},
]
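# A sketch of how a band entry can be expanded into concrete .asc paths: every
# value in `range` is substituted for TEMPLATE ('#') in `path`. The helper name
# below is an assumption about how BANDS might be consumed elsewhere.
def expand_band_paths(band):
    '''Yield one concrete file path per entry in the band's `range`.'''
    for value in band['range']:
        yield band['path'].replace(TEMPLATE, value)
# For example, list(expand_band_paths(BANDS[1])) yields prec_1.asc through
# prec_12.asc inside the prec directory, while BANDS[0] contains no TEMPLATE,
# so its single dummy '-999' entry leaves the path unchanged.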
|
'''Contains a helper function for this project's filenames,
and an example band dictionary and list.'''
def dirname(measurement):
'''Returns directory of .asc file in CMIP5 dataset'''
_dirname_prefix = 'bcc_csm1_1_m_rcp8_5_2080s'
_dirname_postfix = '10min_r1i1p1_no_tile_asc'
_dirname_global = './data/CMIP5'
return f'{_dirname_global}/{_dirname_prefix}_{measurement}_{_dirname_postfix}'
TEMPLATE = '#'
BANDS = [
# `range`: parameters to substitute template with
# len(`range`) == 1 when no template substitution is required (a dummy string is used)
{
'path': f'{dirname("cons")}/cons_mths.asc',
'range': ['-999'],
'desc': 'consecutive_dry_months'
},
{
'path': f'{dirname("prec")}/prec_{TEMPLATE}.asc',
'range': [str(i) for i in range(1, 13)],
'desc': 'precipitation'
},
{
'path': f'{dirname("tmax")}/tmax_{TEMPLATE}.asc',
'range': [str(i) for i in range(1, 13)],
'desc': 'maximum_monthly_temperature'
},
{
'path': f'{dirname("tmean")}/tmean_{TEMPLATE}.asc',
'range': [str(i) for i in range(1, 13)],
'desc': 'mean_monthly_temperature'
},
{
'path': f'{dirname("tmin")}/tmin_{TEMPLATE}.asc',
'range': [str(i) for i in range(1, 13)],
'desc': 'min_monthly_temperature'
},
]
|
import datetime
from email.headerregistry import Address
from typing import Any, Dict, Iterable, List, Mapping, Optional, TypeVar, Union
from unittest import mock
import orjson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import (
change_user_is_active,
create_users,
do_change_can_create_users,
do_change_user_role,
do_create_user,
do_deactivate_user,
do_delete_user,
do_invite_users,
do_reactivate_user,
do_set_realm_property,
get_emails_from_user_ids,
get_recipient_info,
)
from zerver.lib.avatar import avatar_url, get_gravatar_url
from zerver.lib.create_user import copy_user_settings
from zerver.lib.events import do_events_register
from zerver.lib.exceptions import JsonableError
from zerver.lib.send_email import (
clear_scheduled_emails,
deliver_scheduled_emails,
send_future_email,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
cache_tries_captured,
get_subscription,
get_test_image_file,
queries_captured,
reset_emails_in_zulip_realm,
simulated_empty_cache,
tornado_redirected_to_list,
)
from zerver.lib.topic_mutes import add_topic_mute
from zerver.lib.upload import upload_avatar_image
from zerver.lib.users import Accounts, access_user_by_id, get_accounts_for_email, user_ids_to_users
from zerver.models import (
CustomProfileField,
InvalidFakeEmailDomain,
Message,
PreregistrationUser,
Realm,
RealmDomain,
Recipient,
ScheduledEmail,
Stream,
Subscription,
UserHotspot,
UserProfile,
check_valid_user_ids,
get_client,
get_fake_email_domain,
get_realm,
get_source_profile,
get_stream,
get_system_bot,
get_user,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
)
K = TypeVar("K")
V = TypeVar("V")
def find_dict(lst: Iterable[Dict[K, V]], k: K, v: V) -> Dict[K, V]:
for dct in lst:
if dct[k] == v:
return dct
raise AssertionError(f"Cannot find element in list where key {k} == {v}")
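# A small illustration of find_dict (the sample dicts are made up): it returns
# the first dict in lst whose value at key k equals v, e.g.
#     find_dict([{"email": "a@x.com"}, {"email": "b@x.com"}], "email", "b@x.com")
# returns {"email": "b@x.com"}, and raises AssertionError if nothing matches.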
class PermissionTest(ZulipTestCase):
def test_role_setters(self) -> None:
user_profile = self.example_user("hamlet")
user_profile.is_realm_admin = True
self.assertEqual(user_profile.is_realm_admin, True)
self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
user_profile.is_guest = False
self.assertEqual(user_profile.is_guest, False)
self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
user_profile.is_realm_admin = False
self.assertEqual(user_profile.is_realm_admin, False)
self.assertEqual(user_profile.role, UserProfile.ROLE_MEMBER)
user_profile.is_guest = True
self.assertEqual(user_profile.is_guest, True)
self.assertEqual(user_profile.role, UserProfile.ROLE_GUEST)
user_profile.is_realm_admin = False
self.assertEqual(user_profile.is_guest, True)
self.assertEqual(user_profile.role, UserProfile.ROLE_GUEST)
user_profile.is_guest = False
self.assertEqual(user_profile.is_guest, False)
self.assertEqual(user_profile.role, UserProfile.ROLE_MEMBER)
def test_get_admin_users(self) -> None:
user_profile = self.example_user("hamlet")
do_change_user_role(user_profile, UserProfile.ROLE_MEMBER, acting_user=None)
self.assertFalse(user_profile.is_realm_owner)
admin_users = user_profile.realm.get_human_admin_users()
self.assertFalse(user_profile in admin_users)
admin_users = user_profile.realm.get_admin_users_and_bots()
self.assertFalse(user_profile in admin_users)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
self.assertFalse(user_profile.is_realm_owner)
admin_users = user_profile.realm.get_human_admin_users()
self.assertTrue(user_profile in admin_users)
admin_users = user_profile.realm.get_admin_users_and_bots()
self.assertTrue(user_profile in admin_users)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_OWNER, acting_user=None)
self.assertTrue(user_profile.is_realm_owner)
admin_users = user_profile.realm.get_human_admin_users()
self.assertTrue(user_profile in admin_users)
admin_users = user_profile.realm.get_human_admin_users(include_realm_owners=False)
self.assertFalse(user_profile in admin_users)
admin_users = user_profile.realm.get_admin_users_and_bots()
self.assertTrue(user_profile in admin_users)
admin_users = user_profile.realm.get_admin_users_and_bots(include_realm_owners=False)
self.assertFalse(user_profile in admin_users)
def test_get_first_human_user(self) -> None:
realm = get_realm("zulip")
UserProfile.objects.filter(realm=realm).delete()
UserProfile.objects.create(
realm=realm, email="bot1@zulip.com", delivery_email="bot1@zulip.com", is_bot=True
)
first_human_user = UserProfile.objects.create(
realm=realm, email="user1@zulip.com", delivery_email="user1@zulip.com", is_bot=False
)
UserProfile.objects.create(
realm=realm, email="user2@zulip.com", delivery_email="user2@zulip.com", is_bot=False
)
UserProfile.objects.create(
realm=realm, email="bot2@zulip.com", delivery_email="bot2@zulip.com", is_bot=True
)
self.assertEqual(first_human_user, realm.get_first_human_user())
def test_updating_non_existent_user(self) -> None:
self.login("hamlet")
admin = self.example_user("hamlet")
do_change_user_role(admin, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
invalid_user_id = 1000
result = self.client_patch(f"/json/users/{invalid_user_id}", {})
self.assert_json_error(result, "No such user")
def test_owner_api(self) -> None:
self.login("iago")
desdemona = self.example_user("desdemona")
othello = self.example_user("othello")
iago = self.example_user("iago")
realm = iago.realm
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
result = self.client_get("/json/users")
self.assert_json_success(result)
members = result.json()["members"]
iago_dict = find_dict(members, "email", iago.email)
self.assertTrue(iago_dict["is_owner"])
othello_dict = find_dict(members, "email", othello.email)
self.assertFalse(othello_dict["is_owner"])
req = dict(role=UserProfile.ROLE_REALM_OWNER)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{othello.id}", req)
self.assert_json_success(result)
owner_users = realm.get_human_owner_users()
self.assertTrue(othello in owner_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], othello.id)
self.assertEqual(person["role"], UserProfile.ROLE_REALM_OWNER)
req = dict(role=UserProfile.ROLE_MEMBER)
events = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{othello.id}", req)
self.assert_json_success(result)
owner_users = realm.get_human_owner_users()
self.assertFalse(othello in owner_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], othello.id)
self.assertEqual(person["role"], UserProfile.ROLE_MEMBER)
# Cannot take away from last owner
self.login("desdemona")
req = dict(role=UserProfile.ROLE_MEMBER)
events = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{iago.id}", req)
self.assert_json_success(result)
owner_users = realm.get_human_owner_users()
self.assertFalse(iago in owner_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], iago.id)
self.assertEqual(person["role"], UserProfile.ROLE_MEMBER)
with tornado_redirected_to_list([]):
result = self.client_patch(f"/json/users/{desdemona.id}", req)
self.assert_json_error(
result, "The owner permission cannot be removed from the only organization owner."
)
do_change_user_role(iago, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
self.login("iago")
with tornado_redirected_to_list([]):
result = self.client_patch(f"/json/users/{desdemona.id}", req)
self.assert_json_error(result, "Must be an organization owner")
def test_admin_api(self) -> None:
self.login("desdemona")
hamlet = self.example_user("hamlet")
othello = self.example_user("othello")
desdemona = self.example_user("desdemona")
realm = hamlet.realm
# Make sure we see is_admin flag in /json/users
result = self.client_get("/json/users")
self.assert_json_success(result)
members = result.json()["members"]
desdemona_dict = find_dict(members, "email", desdemona.email)
self.assertTrue(desdemona_dict["is_admin"])
othello_dict = find_dict(members, "email", othello.email)
self.assertFalse(othello_dict["is_admin"])
# Giveth
req = dict(role=orjson.dumps(UserProfile.ROLE_REALM_ADMINISTRATOR).decode())
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{othello.id}", req)
self.assert_json_success(result)
admin_users = realm.get_human_admin_users()
self.assertTrue(othello in admin_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], othello.id)
self.assertEqual(person["role"], UserProfile.ROLE_REALM_ADMINISTRATOR)
# Taketh away
req = dict(role=orjson.dumps(UserProfile.ROLE_MEMBER).decode())
events = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{othello.id}", req)
self.assert_json_success(result)
admin_users = realm.get_human_admin_users()
self.assertFalse(othello in admin_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], othello.id)
self.assertEqual(person["role"], UserProfile.ROLE_MEMBER)
# Make sure only admins can patch other user's info.
self.login("othello")
result = self.client_patch(f"/json/users/{hamlet.id}", req)
self.assert_json_error(result, "Insufficient permission")
def test_admin_api_hide_emails(self) -> None:
reset_emails_in_zulip_realm()
user = self.example_user("hamlet")
admin = self.example_user("iago")
self.login_user(user)
# First, verify client_gravatar works normally
result = self.client_get("/json/users", {"client_gravatar": "true"})
self.assert_json_success(result)
members = result.json()["members"]
hamlet = find_dict(members, "user_id", user.id)
self.assertEqual(hamlet["email"], user.email)
self.assertIsNone(hamlet["avatar_url"])
self.assertNotIn("delivery_email", hamlet)
# Also verify the /events code path. This is a bit hacky, but
# we need to verify client_gravatar is not being overridden.
with mock.patch(
"zerver.lib.events.request_event_queue", return_value=None
) as mock_request_event_queue:
with self.assertRaises(JsonableError):
result = do_events_register(user, get_client("website"), client_gravatar=True)
self.assertEqual(mock_request_event_queue.call_args_list[0][0][3], True)
#############################################################
# Now, switch email address visibility, check client_gravatar
# is automatically disabled for the user.
do_set_realm_property(
user.realm,
"email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
acting_user=None,
)
result = self.client_get("/json/users", {"client_gravatar": "true"})
self.assert_json_success(result)
members = result.json()["members"]
hamlet = find_dict(members, "user_id", user.id)
self.assertEqual(hamlet["email"], f"user{user.id}@zulip.testserver")
# Note that the Gravatar URL should still be computed from the
# `delivery_email`; otherwise, we won't be able to serve the
# user's Gravatar.
self.assertEqual(hamlet["avatar_url"], get_gravatar_url(user.delivery_email, 1))
self.assertNotIn("delivery_email", hamlet)
# Also verify the /events code path. This is a bit hacky, but
# basically we want to verify client_gravatar is being
# overridden.
with mock.patch(
"zerver.lib.events.request_event_queue", return_value=None
) as mock_request_event_queue:
with self.assertRaises(JsonableError):
result = do_events_register(user, get_client("website"), client_gravatar=True)
self.assertEqual(mock_request_event_queue.call_args_list[0][0][3], False)
# client_gravatar is still turned off for admins. In theory,
# it doesn't need to be, but client-side changes would be
# required in apps like the mobile apps.
# delivery_email is sent for admins.
admin.refresh_from_db()
self.login_user(admin)
result = self.client_get("/json/users", {"client_gravatar": "true"})
self.assert_json_success(result)
members = result.json()["members"]
hamlet = find_dict(members, "user_id", user.id)
self.assertEqual(hamlet["email"], f"user{user.id}@zulip.testserver")
self.assertEqual(hamlet["avatar_url"], get_gravatar_url(user.email, 1))
self.assertEqual(hamlet["delivery_email"], self.example_email("hamlet"))
def test_user_cannot_promote_to_admin(self) -> None:
self.login("hamlet")
req = dict(role=orjson.dumps(UserProfile.ROLE_REALM_ADMINISTRATOR).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Insufficient permission")
def test_admin_user_can_change_full_name(self) -> None:
new_name = "new name"
self.login("iago")
hamlet = self.example_user("hamlet")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch(f"/json/users/{hamlet.id}", req)
self.assert_json_success(result)
hamlet = self.example_user("hamlet")
self.assertEqual(hamlet.full_name, new_name)
def test_non_admin_cannot_change_full_name(self) -> None:
self.login("hamlet")
req = dict(full_name=orjson.dumps("new name").decode())
result = self.client_patch("/json/users/{}".format(self.example_user("othello").id), req)
self.assert_json_error(result, "Insufficient permission")
def test_admin_cannot_set_long_full_name(self) -> None:
new_name = "a" * (UserProfile.MAX_NAME_LENGTH + 1)
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Name too long!")
def test_admin_cannot_set_short_full_name(self) -> None:
new_name = "a"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Name too short!")
def test_not_allowed_format(self) -> None:
# Name of format "Alice|999" breaks in Markdown
new_name = "iago|72"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid format!")
def test_allowed_format_complex(self) -> None:
# Adding characters after r'|\d+' doesn't break Markdown
new_name = "Hello- 12iago|72k"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_success(result)
def test_not_allowed_format_complex(self) -> None:
new_name = "Hello- 12iago|72"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid format!")
def test_admin_cannot_set_full_name_with_invalid_characters(self) -> None:
new_name = "Opheli*"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid characters in name!")
def test_access_user_by_id(self) -> None:
iago = self.example_user("iago")
# Must be a valid user ID in the realm
with self.assertRaises(JsonableError):
access_user_by_id(iago, 1234, for_admin=False)
with self.assertRaises(JsonableError):
access_user_by_id(iago, self.mit_user("sipbtest").id, for_admin=False)
# Can only access bot users if allow_bots is passed
bot = self.example_user("default_bot")
access_user_by_id(iago, bot.id, allow_bots=True, for_admin=True)
with self.assertRaises(JsonableError):
access_user_by_id(iago, bot.id, for_admin=True)
# Can only access deactivated users if allow_deactivated is passed
hamlet = self.example_user("hamlet")
do_deactivate_user(hamlet, acting_user=None)
with self.assertRaises(JsonableError):
access_user_by_id(iago, hamlet.id, for_admin=False)
with self.assertRaises(JsonableError):
access_user_by_id(iago, hamlet.id, for_admin=True)
access_user_by_id(iago, hamlet.id, allow_deactivated=True, for_admin=True)
# Non-admin user can't admin another user
with self.assertRaises(JsonableError):
access_user_by_id(
self.example_user("cordelia"), self.example_user("aaron").id, for_admin=True
)
# But does have read-only access to it.
access_user_by_id(
self.example_user("cordelia"), self.example_user("aaron").id, for_admin=False
)
def check_property_for_role(self, user_profile: UserProfile, role: int) -> bool:
if role == UserProfile.ROLE_REALM_ADMINISTRATOR:
return (
user_profile.is_realm_admin
and not user_profile.is_guest
and not user_profile.is_realm_owner
and not user_profile.is_moderator
)
elif role == UserProfile.ROLE_REALM_OWNER:
return (
user_profile.is_realm_owner
and user_profile.is_realm_admin
and not user_profile.is_moderator
and not user_profile.is_guest
)
elif role == UserProfile.ROLE_MODERATOR:
return (
user_profile.is_moderator
and not user_profile.is_realm_owner
and not user_profile.is_realm_admin
and not user_profile.is_guest
)
if role == UserProfile.ROLE_MEMBER:
return (
not user_profile.is_guest
and not user_profile.is_moderator
and not user_profile.is_realm_admin
and not user_profile.is_realm_owner
)
assert role == UserProfile.ROLE_GUEST
return (
user_profile.is_guest
and not user_profile.is_moderator
and not user_profile.is_realm_admin
and not user_profile.is_realm_owner
)
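# A quick reference for check_property_for_role above, derived from the checks
# it performs: ROLE_REALM_OWNER sets is_realm_owner and is_realm_admin only,
# ROLE_REALM_ADMINISTRATOR sets is_realm_admin only, ROLE_MODERATOR sets
# is_moderator only, ROLE_GUEST sets is_guest only, and ROLE_MEMBER sets none
# of the four flags.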
def check_user_role_change(
self,
user_email: str,
new_role: int,
) -> None:
self.login("desdemona")
user_profile = self.example_user(user_email)
old_role = user_profile.role
self.assertTrue(self.check_property_for_role(user_profile, old_role))
req = dict(role=orjson.dumps(new_role).decode())
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{user_profile.id}", req)
self.assert_json_success(result)
user_profile = self.example_user(user_email)
self.assertTrue(self.check_property_for_role(user_profile, new_role))
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], user_profile.id)
self.assertTrue(person["role"], new_role)
def test_change_regular_member_to_guest(self) -> None:
self.check_user_role_change("hamlet", UserProfile.ROLE_GUEST)
def test_change_guest_to_regular_member(self) -> None:
self.check_user_role_change("polonius", UserProfile.ROLE_MEMBER)
def test_change_admin_to_guest(self) -> None:
self.check_user_role_change("iago", UserProfile.ROLE_GUEST)
def test_change_guest_to_admin(self) -> None:
self.check_user_role_change("polonius", UserProfile.ROLE_REALM_ADMINISTRATOR)
def test_change_owner_to_guest(self) -> None:
self.login("desdemona")
iago = self.example_user("iago")
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
self.check_user_role_change("iago", UserProfile.ROLE_GUEST)
def test_change_guest_to_owner(self) -> None:
self.check_user_role_change("polonius", UserProfile.ROLE_REALM_OWNER)
def test_change_admin_to_owner(self) -> None:
self.check_user_role_change("iago", UserProfile.ROLE_REALM_OWNER)
def test_change_owner_to_admin(self) -> None:
self.login("desdemona")
iago = self.example_user("iago")
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
self.check_user_role_change("iago", UserProfile.ROLE_REALM_ADMINISTRATOR)
def test_change_owner_to_moderator(self) -> None:
iago = self.example_user("iago")
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
self.check_user_role_change("iago", UserProfile.ROLE_MODERATOR)
def test_change_moderator_to_owner(self) -> None:
self.check_user_role_change("shiva", UserProfile.ROLE_REALM_OWNER)
def test_change_admin_to_moderator(self) -> None:
self.check_user_role_change("iago", UserProfile.ROLE_MODERATOR)
def test_change_moderator_to_admin(self) -> None:
self.check_user_role_change("shiva", UserProfile.ROLE_REALM_ADMINISTRATOR)
def test_change_guest_to_moderator(self) -> None:
self.check_user_role_change("polonius", UserProfile.ROLE_MODERATOR)
def test_change_moderator_to_guest(self) -> None:
self.check_user_role_change("shiva", UserProfile.ROLE_GUEST)
def test_admin_user_can_change_profile_data(self) -> None:
realm = get_realm("zulip")
self.login("iago")
new_profile_data = []
cordelia = self.example_user("cordelia")
# Test for all type of data
fields = {
"Phone number": "short text data",
"Biography": "long text data",
"Favorite food": "short text data",
"Favorite editor": "vim",
"Birthday": "1909-03-05",
"Favorite website": "https://zulip.com",
"Mentor": [cordelia.id],
"GitHub": "timabbott",
}
for field_name in fields:
field = CustomProfileField.objects.get(name=field_name, realm=realm)
new_profile_data.append(
{
"id": field.id,
"value": fields[field_name],
}
)
result = self.client_patch(
f"/json/users/{cordelia.id}", {"profile_data": orjson.dumps(new_profile_data).decode()}
)
self.assert_json_success(result)
cordelia = self.example_user("cordelia")
for field_dict in cordelia.profile_data:
with self.subTest(field_name=field_dict["name"]):
self.assertEqual(field_dict["value"], fields[field_dict["name"]])
# Test admin user cannot set invalid profile data
invalid_fields = [
(
"Favorite editor",
"invalid choice",
"'invalid choice' is not a valid choice for 'Favorite editor'.",
),
("Birthday", "1909-34-55", "Birthday is not a date"),
("Favorite website", "not url", "Favorite website is not a URL"),
("Mentor", "not list of user ids", "User IDs is not a list"),
]
for field_name, field_value, error_msg in invalid_fields:
new_profile_data = []
field = CustomProfileField.objects.get(name=field_name, realm=realm)
new_profile_data.append(
{
"id": field.id,
"value": field_value,
}
)
result = self.client_patch(
f"/json/users/{cordelia.id}",
{"profile_data": orjson.dumps(new_profile_data).decode()},
)
self.assert_json_error(result, error_msg)
# non-existent field and no data
invalid_profile_data = [
{
"id": 9001,
"value": "",
}
]
result = self.client_patch(
f"/json/users/{cordelia.id}",
{"profile_data": orjson.dumps(invalid_profile_data).decode()},
)
self.assert_json_error(result, "Field id 9001 not found.")
# non-existent field and data
invalid_profile_data = [
{
"id": 9001,
"value": "some data",
}
]
result = self.client_patch(
f"/json/users/{cordelia.id}",
{"profile_data": orjson.dumps(invalid_profile_data).decode()},
)
self.assert_json_error(result, "Field id 9001 not found.")
# Test for clearing/resetting field values.
empty_profile_data = []
for field_name in fields:
field = CustomProfileField.objects.get(name=field_name, realm=realm)
value: Union[str, None, List[Any]] = ""
if field.field_type == CustomProfileField.USER:
value = []
empty_profile_data.append(
{
"id": field.id,
"value": value,
}
)
result = self.client_patch(
f"/json/users/{cordelia.id}",
{"profile_data": orjson.dumps(empty_profile_data).decode()},
)
self.assert_json_success(result)
for field_dict in cordelia.profile_data:
with self.subTest(field_name=field_dict["name"]):
self.assertEqual(field_dict["value"], None)
# Test adding some of the field values after removing all.
hamlet = self.example_user("hamlet")
new_fields = {
"Phone number": None,
"Biography": "A test user",
"Favorite food": None,
"Favorite editor": None,
"Birthday": None,
"Favorite website": "https://zulip.github.io",
"Mentor": [hamlet.id],
"GitHub": "timabbott",
}
new_profile_data = []
for field_name in fields:
field = CustomProfileField.objects.get(name=field_name, realm=realm)
value = None
if new_fields[field_name]:
value = new_fields[field_name]
new_profile_data.append(
{
"id": field.id,
"value": value,
}
)
result = self.client_patch(
f"/json/users/{cordelia.id}", {"profile_data": orjson.dumps(new_profile_data).decode()}
)
self.assert_json_success(result)
for field_dict in cordelia.profile_data:
with self.subTest(field_name=field_dict["name"]):
self.assertEqual(field_dict["value"], new_fields[str(field_dict["name"])])
def test_non_admin_user_cannot_change_profile_data(self) -> None:
self.login("cordelia")
hamlet = self.example_user("hamlet")
realm = get_realm("zulip")
new_profile_data = []
field = CustomProfileField.objects.get(name="Biography", realm=realm)
new_profile_data.append(
{
"id": field.id,
"value": "New hamlet Biography",
}
)
result = self.client_patch(
f"/json/users/{hamlet.id}", {"profile_data": orjson.dumps(new_profile_data).decode()}
)
self.assert_json_error(result, "Insufficient permission")
result = self.client_patch(
"/json/users/{}".format(self.example_user("cordelia").id),
{"profile_data": orjson.dumps(new_profile_data).decode()},
)
self.assert_json_error(result, "Insufficient permission")
class QueryCountTest(ZulipTestCase):
def test_create_user_with_multiple_streams(self) -> None:
# add_new_user_history needs messages to be current
Message.objects.all().update(date_sent=timezone_now())
ContentType.objects.clear_cache()
# This just focuses on making sure we don't do too many
# queries/cache tries or send too many events.
realm = get_realm("zulip")
self.make_stream("private_stream1", invite_only=True)
self.make_stream("private_stream2", invite_only=True)
stream_names = [
"Denmark",
"Scotland",
"Verona",
"private_stream1",
"private_stream2",
]
streams = [get_stream(stream_name, realm) for stream_name in stream_names]
do_invite_users(
user_profile=self.example_user("hamlet"),
invitee_emails=["fred@zulip.com"],
streams=streams,
)
prereg_user = PreregistrationUser.objects.get(email="fred@zulip.com")
events: List[Mapping[str, Any]] = []
with queries_captured() as queries:
with cache_tries_captured() as cache_tries:
with tornado_redirected_to_list(events):
fred = do_create_user(
email="fred@zulip.com",
password="password",
realm=realm,
full_name="Fred Flintstone",
prereg_user=prereg_user,
acting_user=None,
)
self.assert_length(queries, 70)
self.assert_length(cache_tries, 22)
self.assert_length(events, 7)
peer_add_events = [event for event in events if event["event"].get("op") == "peer_add"]
notifications = set()
for event in peer_add_events:
stream_ids = event["event"]["stream_ids"]
stream_names = sorted(Stream.objects.get(id=stream_id).name for stream_id in stream_ids)
self.assertTrue(event["event"]["user_ids"], {fred.id})
notifications.add(",".join(stream_names))
self.assertEqual(
notifications, {"Denmark,Scotland,Verona", "private_stream1", "private_stream2"}
)
class BulkCreateUserTest(ZulipTestCase):
def test_create_users(self) -> None:
realm = get_realm("zulip")
realm.email_address_visibility = Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS
realm.save()
name_list = [
("Fred Flintstone", "fred@zulip.com"),
("Lisa Simpson", "lisa@zulip.com"),
]
create_users(realm, name_list)
fred = get_user_by_delivery_email("fred@zulip.com", realm)
self.assertEqual(
fred.email,
f"user{fred.id}@zulip.testserver",
)
lisa = get_user_by_delivery_email("lisa@zulip.com", realm)
self.assertEqual(lisa.full_name, "Lisa Simpson")
self.assertEqual(lisa.is_bot, False)
self.assertEqual(lisa.bot_type, None)
realm.email_address_visibility = Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE
realm.save()
name_list = [
("Bono", "bono@zulip.com"),
("Cher", "cher@zulip.com"),
]
create_users(realm, name_list)
bono = get_user_by_delivery_email("bono@zulip.com", realm)
self.assertEqual(bono.email, "bono@zulip.com")
self.assertEqual(bono.delivery_email, "bono@zulip.com")
cher = get_user_by_delivery_email("cher@zulip.com", realm)
self.assertEqual(cher.full_name, "Cher")
class AdminCreateUserTest(ZulipTestCase):
def test_create_user_backend(self) -> None:
# This test should give us complete coverage on
# create_user_backend. It mostly exercises error
# conditions, and it also does a basic test of the success
# path.
admin = self.example_user("hamlet")
realm = admin.realm
self.login_user(admin)
do_change_user_role(admin, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
valid_params = dict(
email="romeo@zulip.net",
password="xxxx",
full_name="Romeo Montague",
)
self.assertEqual(admin.can_create_users, False)
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "User not authorized for this query")
do_change_can_create_users(admin, True)
# can_create_users is insufficient without being a realm administrator:
do_change_user_role(admin, UserProfile.ROLE_MEMBER, acting_user=None)
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "Must be an organization administrator")
do_change_user_role(admin, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
result = self.client_post("/json/users", {})
self.assert_json_error(result, "Missing 'email' argument")
result = self.client_post(
"/json/users",
dict(
email="romeo@not-zulip.com",
),
)
self.assert_json_error(result, "Missing 'password' argument")
result = self.client_post(
"/json/users",
dict(
email="romeo@not-zulip.com",
password="xxxx",
),
)
self.assert_json_error(result, "Missing 'full_name' argument")
# Test short_name gets properly ignored
result = self.client_post(
"/json/users",
dict(
email="romeo@zulip.com",
password="xxxx",
full_name="Romeo Montague",
short_name="DEPRECATED",
),
)
self.assert_json_success(result)
result = self.client_post(
"/json/users",
dict(
email="broken",
password="xxxx",
full_name="Romeo Montague",
),
)
self.assert_json_error(result, "Bad name or username")
do_set_realm_property(realm, "emails_restricted_to_domains", True, acting_user=None)
result = self.client_post(
"/json/users",
dict(
email="romeo@not-zulip.com",
password="xxxx",
full_name="Romeo Montague",
),
)
self.assert_json_error(
result, "Email 'romeo@not-zulip.com' not allowed in this organization"
)
RealmDomain.objects.create(realm=get_realm("zulip"), domain="zulip.net")
# Check can't use a bad password with zxcvbn enabled
with self.settings(PASSWORD_MIN_LENGTH=6, PASSWORD_MIN_GUESSES=1000):
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "The password is too weak.")
result = self.client_post("/json/users", valid_params)
self.assert_json_success(result)
# Romeo is a newly registered user
new_user = get_user_by_delivery_email("romeo@zulip.net", get_realm("zulip"))
result = orjson.loads(result.content)
self.assertEqual(new_user.full_name, "Romeo Montague")
self.assertEqual(new_user.id, result["user_id"])
# Make sure the recipient field is set correctly.
self.assertEqual(
new_user.recipient, Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user.id)
)
# we can't create the same user twice.
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "Email 'romeo@zulip.net' already in use")
# Don't allow user to sign up with disposable email.
realm.emails_restricted_to_domains = False
realm.disallow_disposable_email_addresses = True
realm.save()
valid_params["email"] = "abc@mailnator.com"
result = self.client_post("/json/users", valid_params)
self.assert_json_error(
result, "Disposable email addresses are not allowed in this organization"
)
# Don't allow creating a user with + in their email address when realm
# is restricted to a domain.
realm.emails_restricted_to_domains = True
realm.save()
valid_params["email"] = "iago+label@zulip.com"
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "Email addresses containing + are not allowed.")
# Users can be created with + in their email address when realm
# is not restricted to a domain.
realm.emails_restricted_to_domains = False
realm.save()
valid_params["email"] = "iago+label@zulip.com"
result = self.client_post("/json/users", valid_params)
self.assert_json_success(result)
class UserProfileTest(ZulipTestCase):
def test_get_emails_from_user_ids(self) -> None:
hamlet = self.example_user("hamlet")
othello = self.example_user("othello")
dct = get_emails_from_user_ids([hamlet.id, othello.id])
self.assertEqual(dct[hamlet.id], hamlet.email)
self.assertEqual(dct[othello.id], othello.email)
def test_valid_user_id(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
othello = self.example_user("othello")
bot = self.example_user("default_bot")
# Invalid user ID
invalid_uid: object = 1000
with self.assertRaisesRegex(ValidationError, r"User IDs is not a list"):
check_valid_user_ids(realm.id, invalid_uid)
with self.assertRaisesRegex(ValidationError, rf"Invalid user ID: {invalid_uid}"):
check_valid_user_ids(realm.id, [invalid_uid])
invalid_uid = "abc"
with self.assertRaisesRegex(ValidationError, r"User IDs\[0\] is not an integer"):
check_valid_user_ids(realm.id, [invalid_uid])
invalid_uid = str(othello.id)
with self.assertRaisesRegex(ValidationError, r"User IDs\[0\] is not an integer"):
check_valid_user_ids(realm.id, [invalid_uid])
# User is in different realm
with self.assertRaisesRegex(ValidationError, rf"Invalid user ID: {hamlet.id}"):
check_valid_user_ids(get_realm("zephyr").id, [hamlet.id])
# User is not active
change_user_is_active(hamlet, False)
with self.assertRaisesRegex(ValidationError, rf"User with ID {hamlet.id} is deactivated"):
check_valid_user_ids(realm.id, [hamlet.id])
check_valid_user_ids(realm.id, [hamlet.id], allow_deactivated=True)
# User is a bot
with self.assertRaisesRegex(ValidationError, rf"User with ID {bot.id} is a bot"):
check_valid_user_ids(realm.id, [bot.id])
# Successfully get a non-bot, active user belonging to your realm
check_valid_user_ids(realm.id, [othello.id])
def test_cache_invalidation(self) -> None:
hamlet = self.example_user("hamlet")
with mock.patch("zerver.lib.cache.delete_display_recipient_cache") as m:
hamlet.full_name = "Hamlet Junior"
hamlet.save(update_fields=["full_name"])
self.assertTrue(m.called)
with mock.patch("zerver.lib.cache.delete_display_recipient_cache") as m:
hamlet.long_term_idle = True
hamlet.save(update_fields=["long_term_idle"])
self.assertFalse(m.called)
def test_user_ids_to_users(self) -> None:
real_user_ids = [
self.example_user("hamlet").id,
self.example_user("cordelia").id,
]
self.assertEqual(user_ids_to_users([], get_realm("zulip")), [])
self.assertEqual(
{
user_profile.id
for user_profile in user_ids_to_users(real_user_ids, get_realm("zulip"))
},
set(real_user_ids),
)
with self.assertRaises(JsonableError):
user_ids_to_users([1234], get_realm("zephyr"))
with self.assertRaises(JsonableError):
user_ids_to_users(real_user_ids, get_realm("zephyr"))
def test_bulk_get_users(self) -> None:
from zerver.lib.users import bulk_get_users
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
webhook_bot = self.example_user("webhook_bot")
result = bulk_get_users(
[hamlet.email, cordelia.email],
get_realm("zulip"),
)
self.assertEqual(result[hamlet.email].email, hamlet.email)
self.assertEqual(result[cordelia.email].email, cordelia.email)
result = bulk_get_users(
[hamlet.email, cordelia.email, webhook_bot.email],
None,
base_query=UserProfile.objects.all(),
)
self.assertEqual(result[hamlet.email].email, hamlet.email)
self.assertEqual(result[cordelia.email].email, cordelia.email)
self.assertEqual(result[webhook_bot.email].email, webhook_bot.email)
def test_get_accounts_for_email(self) -> None:
reset_emails_in_zulip_realm()
def check_account_present_in_accounts(user: UserProfile, accounts: List[Accounts]) -> None:
for account in accounts:
realm = user.realm
if (
account["avatar"] == avatar_url(user)
and account["full_name"] == user.full_name
and account["realm_name"] == realm.name
and account["realm_id"] == realm.id
):
return
raise AssertionError("Account not found")
lear_realm = get_realm("lear")
cordelia_in_zulip = self.example_user("cordelia")
cordelia_in_lear = get_user_by_delivery_email("cordelia@zulip.com", lear_realm)
email = "cordelia@zulip.com"
accounts = get_accounts_for_email(email)
self.assert_length(accounts, 2)
check_account_present_in_accounts(cordelia_in_zulip, accounts)
check_account_present_in_accounts(cordelia_in_lear, accounts)
email = "CORDelia@zulip.com"
accounts = get_accounts_for_email(email)
self.assert_length(accounts, 2)
check_account_present_in_accounts(cordelia_in_zulip, accounts)
check_account_present_in_accounts(cordelia_in_lear, accounts)
email = "IAGO@ZULIP.COM"
accounts = get_accounts_for_email(email)
self.assert_length(accounts, 1)
check_account_present_in_accounts(self.example_user("iago"), accounts)
# We verify that get_accounts_for_email doesn't return deactivated users' accounts
user = self.example_user("hamlet")
do_deactivate_user(user, acting_user=None)
email = self.example_email("hamlet")
accounts = get_accounts_for_email(email)
with self.assertRaises(AssertionError):
check_account_present_in_accounts(user, accounts)
def test_get_source_profile(self) -> None:
reset_emails_in_zulip_realm()
zulip_realm_id = get_realm("zulip").id
iago = get_source_profile("iago@zulip.com", zulip_realm_id)
assert iago is not None
self.assertEqual(iago.email, "iago@zulip.com")
self.assertEqual(iago.realm, get_realm("zulip"))
iago = get_source_profile("IAGO@ZULIP.com", zulip_realm_id)
assert iago is not None
self.assertEqual(iago.email, "iago@zulip.com")
lear_realm_id = get_realm("lear").id
cordelia = get_source_profile("cordelia@zulip.com", lear_realm_id)
assert cordelia is not None
self.assertEqual(cordelia.email, "cordelia@zulip.com")
self.assertIsNone(get_source_profile("iagod@zulip.com", zulip_realm_id))
self.assertIsNone(get_source_profile("iago@zulip.com", 0))
self.assertIsNone(get_source_profile("iago@zulip.com", lear_realm_id))
def test_copy_user_settings(self) -> None:
iago = self.example_user("iago")
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
hamlet.color_scheme = UserProfile.COLOR_SCHEME_LIGHT
cordelia.default_language = "de"
cordelia.default_view = "all_messages"
cordelia.emojiset = "twitter"
cordelia.timezone = "America/Phoenix"
cordelia.color_scheme = UserProfile.COLOR_SCHEME_NIGHT
cordelia.enable_offline_email_notifications = False
cordelia.enable_stream_push_notifications = True
cordelia.enter_sends = False
cordelia.avatar_source = UserProfile.AVATAR_FROM_USER
cordelia.save()
# Upload cordelia's avatar
with get_test_image_file("img.png") as image_file:
upload_avatar_image(image_file, cordelia, cordelia)
UserHotspot.objects.filter(user=cordelia).delete()
UserHotspot.objects.filter(user=iago).delete()
hotspots_completed = ["intro_reply", "intro_streams", "intro_topics"]
for hotspot in hotspots_completed:
UserHotspot.objects.create(user=cordelia, hotspot=hotspot)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
copy_user_settings(cordelia, iago)
# Check that we didn't send any realm_user update events to
# users; this work is happening before the user account is
# created, so any changes will be reflected in the "add" event
# introducing the user to clients.
self.assertEqual(len(events), 0)
# We verify that cordelia and iago match, but hamlet has the defaults.
self.assertEqual(iago.full_name, "Cordelia, Lear's daughter")
self.assertEqual(cordelia.full_name, "Cordelia, Lear's daughter")
self.assertEqual(hamlet.full_name, "King Hamlet")
self.assertEqual(iago.default_language, "de")
self.assertEqual(cordelia.default_language, "de")
self.assertEqual(hamlet.default_language, "en")
self.assertEqual(iago.emojiset, "twitter")
self.assertEqual(cordelia.emojiset, "twitter")
self.assertEqual(hamlet.emojiset, "google-blob")
self.assertEqual(iago.timezone, "America/Phoenix")
self.assertEqual(cordelia.timezone, "America/Phoenix")
self.assertEqual(hamlet.timezone, "")
self.assertEqual(iago.color_scheme, UserProfile.COLOR_SCHEME_NIGHT)
self.assertEqual(cordelia.color_scheme, UserProfile.COLOR_SCHEME_NIGHT)
self.assertEqual(hamlet.color_scheme, UserProfile.COLOR_SCHEME_LIGHT)
self.assertEqual(iago.enable_offline_email_notifications, False)
self.assertEqual(cordelia.enable_offline_email_notifications, False)
self.assertEqual(hamlet.enable_offline_email_notifications, True)
self.assertEqual(iago.enable_stream_push_notifications, True)
self.assertEqual(cordelia.enable_stream_push_notifications, True)
self.assertEqual(hamlet.enable_stream_push_notifications, False)
self.assertEqual(iago.enter_sends, False)
self.assertEqual(cordelia.enter_sends, False)
self.assertEqual(hamlet.enter_sends, True)
hotspots = list(UserHotspot.objects.filter(user=iago).values_list("hotspot", flat=True))
self.assertEqual(hotspots, hotspots_completed)
def test_get_user_by_id_in_realm_including_cross_realm(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
othello = self.example_user("othello")
bot = get_system_bot(settings.WELCOME_BOT)
# Pass in the ID of a cross-realm bot and a valid realm
cross_realm_bot = get_user_by_id_in_realm_including_cross_realm(bot.id, realm)
self.assertEqual(cross_realm_bot.email, bot.email)
self.assertEqual(cross_realm_bot.id, bot.id)
# Pass in the ID of a cross-realm bot but with an invalid realm,
# note that the realm should be irrelevant here
cross_realm_bot = get_user_by_id_in_realm_including_cross_realm(bot.id, None)
self.assertEqual(cross_realm_bot.email, bot.email)
self.assertEqual(cross_realm_bot.id, bot.id)
# Pass in the ID of a non-cross-realm user with a realm
user_profile = get_user_by_id_in_realm_including_cross_realm(othello.id, realm)
self.assertEqual(user_profile.email, othello.email)
self.assertEqual(user_profile.id, othello.id)
# If the realm doesn't match, or if the ID is not that of a
# cross-realm bot, UserProfile.DoesNotExist is raised
with self.assertRaises(UserProfile.DoesNotExist):
get_user_by_id_in_realm_including_cross_realm(hamlet.id, None)
def test_get_user_subscription_status(self) -> None:
self.login("hamlet")
iago = self.example_user("iago")
stream = get_stream("Rome", iago.realm)
# Invalid user ID.
result = self.client_get(f"/json/users/25/subscriptions/{stream.id}")
self.assert_json_error(result, "No such user")
# Invalid stream ID.
result = self.client_get(f"/json/users/{iago.id}/subscriptions/25")
self.assert_json_error(result, "Invalid stream id")
result = orjson.loads(
self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}").content
)
self.assertFalse(result["is_subscribed"])
# Subscribe to the stream.
self.subscribe(iago, stream.name)
with queries_captured() as queries:
result = orjson.loads(
self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}").content
)
self.assert_length(queries, 6)
self.assertTrue(result["is_subscribed"])
# Logging in with a Guest user.
polonius = self.example_user("polonius")
self.login("polonius")
self.assertTrue(polonius.is_guest)
self.assertTrue(stream.is_web_public)
result = orjson.loads(
self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}").content
)
self.assertTrue(result["is_subscribed"])
class ActivateTest(ZulipTestCase):
def test_basics(self) -> None:
user = self.example_user("hamlet")
do_deactivate_user(user, acting_user=None)
self.assertFalse(user.is_active)
do_reactivate_user(user, acting_user=None)
self.assertTrue(user.is_active)
def test_subscriptions_is_user_active(self) -> None:
user = self.example_user("hamlet")
do_deactivate_user(user, acting_user=None)
self.assertFalse(user.is_active)
self.assertTrue(Subscription.objects.filter(user_profile=user).exists())
self.assertFalse(
Subscription.objects.filter(user_profile=user, is_user_active=True).exists()
)
do_reactivate_user(user, acting_user=None)
self.assertTrue(user.is_active)
self.assertTrue(Subscription.objects.filter(user_profile=user).exists())
self.assertFalse(
Subscription.objects.filter(user_profile=user, is_user_active=False).exists()
)
def test_api(self) -> None:
admin = self.example_user("othello")
do_change_user_role(admin, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
self.login("othello")
user = self.example_user("hamlet")
self.assertTrue(user.is_active)
result = self.client_delete(f"/json/users/{user.id}")
self.assert_json_success(result)
user = self.example_user("hamlet")
self.assertFalse(user.is_active)
result = self.client_post(f"/json/users/{user.id}/reactivate")
self.assert_json_success(result)
user = self.example_user("hamlet")
self.assertTrue(user.is_active)
def test_api_with_nonexistent_user(self) -> None:
self.login("iago")
# Organization administrator cannot deactivate organization owner.
result = self.client_delete(f'/json/users/{self.example_user("desdemona").id}')
self.assert_json_error(result, "Must be an organization owner")
iago = self.example_user("iago")
desdemona = self.example_user("desdemona")
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
# Cannot deactivate a user with the bot api
result = self.client_delete("/json/bots/{}".format(self.example_user("hamlet").id))
self.assert_json_error(result, "No such bot")
# Cannot deactivate a nonexistent user.
invalid_user_id = 1000
result = self.client_delete(f"/json/users/{invalid_user_id}")
self.assert_json_error(result, "No such user")
result = self.client_delete("/json/users/{}".format(self.example_user("webhook_bot").id))
self.assert_json_error(result, "No such user")
result = self.client_delete(f"/json/users/{desdemona.id}")
self.assert_json_success(result)
result = self.client_delete(f"/json/users/{iago.id}")
self.assert_json_error(result, "Cannot deactivate the only organization owner")
# Cannot reactivate a nonexistent user.
invalid_user_id = 1000
result = self.client_post(f"/json/users/{invalid_user_id}/reactivate")
self.assert_json_error(result, "No such user")
def test_api_with_insufficient_permissions(self) -> None:
non_admin = self.example_user("othello")
do_change_user_role(non_admin, UserProfile.ROLE_MEMBER, acting_user=None)
self.login("othello")
# Cannot deactivate a user with the users api
result = self.client_delete("/json/users/{}".format(self.example_user("hamlet").id))
self.assert_json_error(result, "Insufficient permission")
# Cannot reactivate a user
result = self.client_post(
"/json/users/{}/reactivate".format(self.example_user("hamlet").id)
)
self.assert_json_error(result, "Insufficient permission")
def test_clear_scheduled_jobs(self) -> None:
user = self.example_user("hamlet")
send_future_email(
"zerver/emails/followup_day1",
user.realm,
to_user_ids=[user.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
do_deactivate_user(user, acting_user=None)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_send_future_email_with_multiple_recipients(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=[hamlet.id, iago.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(
ScheduledEmail.objects.filter(users__in=[hamlet, iago]).distinct().count(), 1
)
email = ScheduledEmail.objects.all().first()
self.assertEqual(email.users.count(), 2)
def test_clear_scheduled_emails_with_multiple_user_ids(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=[hamlet.id, iago.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
clear_scheduled_emails([hamlet.id, iago.id])
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_clear_schedule_emails_with_one_user_id(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=[hamlet.id, iago.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
clear_scheduled_emails([hamlet.id])
self.assertEqual(ScheduledEmail.objects.count(), 1)
self.assertEqual(ScheduledEmail.objects.filter(users=hamlet).count(), 0)
self.assertEqual(ScheduledEmail.objects.filter(users=iago).count(), 1)
def test_deliver_scheduled_emails(self) -> None:
iago = self.example_user("iago")
hamlet = self.example_user("hamlet")
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=[hamlet.id, iago.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
email = ScheduledEmail.objects.all().first()
deliver_scheduled_emails(email)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
for message in outbox:
self.assertEqual(
set(message.to),
{
str(Address(display_name=hamlet.full_name, addr_spec=hamlet.delivery_email)),
str(Address(display_name=iago.full_name, addr_spec=iago.delivery_email)),
},
)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_deliver_scheduled_emails_no_addressees(self) -> None:
iago = self.example_user("iago")
hamlet = self.example_user("hamlet")
to_user_ids = [hamlet.id, iago.id]
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=to_user_ids,
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
email = ScheduledEmail.objects.all().first()
email.users.remove(*to_user_ids)
with self.assertLogs("zulip.send_email", level="INFO") as info_log:
deliver_scheduled_emails(email)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
self.assertEqual(ScheduledEmail.objects.count(), 1)
self.assertEqual(
info_log.output,
[
f"WARNING:zulip.send_email:ScheduledEmail id {email.id} has empty users and address attributes."
],
)
class RecipientInfoTest(ZulipTestCase):
def test_stream_recipient_info(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
# These tests were written with the old default for
# enable_online_push_notifications; that default is better for
# testing the full code path anyway.
hamlet.enable_online_push_notifications = False
cordelia.enable_online_push_notifications = False
othello.enable_online_push_notifications = False
hamlet.save()
cordelia.save()
othello.save()
realm = hamlet.realm
stream_name = "Test stream"
topic_name = "test topic"
for user in [hamlet, cordelia, othello]:
self.subscribe(user, stream_name)
stream = get_stream(stream_name, realm)
recipient = stream.recipient
stream_topic = StreamTopicTarget(
stream_id=stream.id,
topic_name=topic_name,
)
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=False,
)
all_user_ids = {hamlet.id, cordelia.id, othello.id}
expected_info = dict(
active_user_ids=all_user_ids,
push_notify_user_ids=set(),
stream_push_user_ids=set(),
stream_email_user_ids=set(),
wildcard_mention_user_ids=set(),
um_eligible_user_ids=all_user_ids,
long_term_idle_user_ids=set(),
default_bot_user_ids=set(),
service_bot_tuples=[],
)
self.assertEqual(info, expected_info)
cordelia.wildcard_mentions_notify = False
cordelia.save()
hamlet.enable_stream_push_notifications = True
hamlet.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=False,
)
self.assertEqual(info["stream_push_user_ids"], {hamlet.id})
self.assertEqual(info["wildcard_mention_user_ids"], set())
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=True,
)
self.assertEqual(info["wildcard_mention_user_ids"], {hamlet.id, othello.id})
sub = get_subscription(stream_name, hamlet)
sub.push_notifications = False
sub.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
)
self.assertEqual(info["stream_push_user_ids"], set())
hamlet.enable_stream_push_notifications = False
hamlet.save()
sub = get_subscription(stream_name, hamlet)
sub.push_notifications = True
sub.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
)
self.assertEqual(info["stream_push_user_ids"], {hamlet.id})
# Now mute Hamlet to omit him from stream_push_user_ids.
add_topic_mute(
user_profile=hamlet,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name=topic_name,
)
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=False,
)
self.assertEqual(info["stream_push_user_ids"], set())
self.assertEqual(info["wildcard_mention_user_ids"], set())
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=True,
)
self.assertEqual(info["stream_push_user_ids"], set())
# Since Hamlet has muted the stream and Cordelia has disabled
# wildcard notifications, it should just be Othello here.
self.assertEqual(info["wildcard_mention_user_ids"], {othello.id})
sub = get_subscription(stream_name, othello)
sub.wildcard_mentions_notify = False
sub.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=True,
)
self.assertEqual(info["stream_push_user_ids"], set())
# Verify that stream-level wildcard_mentions_notify=False works correctly.
self.assertEqual(info["wildcard_mention_user_ids"], set())
# Verify that True works as expected as well
sub = get_subscription(stream_name, othello)
sub.wildcard_mentions_notify = True
sub.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=True,
)
self.assertEqual(info["stream_push_user_ids"], set())
self.assertEqual(info["wildcard_mention_user_ids"], {othello.id})
# Add a service bot.
service_bot = do_create_user(
email="service-bot@zulip.com",
password="",
realm=realm,
full_name="",
bot_type=UserProfile.EMBEDDED_BOT,
acting_user=None,
)
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possibly_mentioned_user_ids={service_bot.id},
)
self.assertEqual(
info["service_bot_tuples"],
[
(service_bot.id, UserProfile.EMBEDDED_BOT),
],
)
# Add a normal bot.
normal_bot = do_create_user(
email="normal-bot@zulip.com",
password="",
realm=realm,
full_name="",
bot_type=UserProfile.DEFAULT_BOT,
acting_user=None,
)
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possibly_mentioned_user_ids={service_bot.id, normal_bot.id},
)
self.assertEqual(info["default_bot_user_ids"], {normal_bot.id})
def test_get_recipient_info_invalid_recipient_type(self) -> None:
hamlet = self.example_user("hamlet")
realm = hamlet.realm
stream = get_stream("Rome", realm)
stream_topic = StreamTopicTarget(
stream_id=stream.id,
topic_name="test topic",
)
        # Make sure get_recipient_info raises an error on invalid recipient types
with self.assertRaisesRegex(ValueError, "Bad recipient type"):
invalid_recipient = Recipient(type=999) # 999 is not a valid type
get_recipient_info(
recipient=invalid_recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
)
class BulkUsersTest(ZulipTestCase):
def test_client_gravatar_option(self) -> None:
reset_emails_in_zulip_realm()
self.login("cordelia")
hamlet = self.example_user("hamlet")
def get_hamlet_avatar(client_gravatar: bool) -> Optional[str]:
data = dict(client_gravatar=orjson.dumps(client_gravatar).decode())
result = self.client_get("/json/users", data)
self.assert_json_success(result)
rows = result.json()["members"]
hamlet_data = [row for row in rows if row["user_id"] == hamlet.id][0]
return hamlet_data["avatar_url"]
self.assertEqual(
get_hamlet_avatar(client_gravatar=True),
None,
)
"""
The main purpose of this test is to make sure we
return None for avatar_url when client_gravatar is
set to True. And we do a sanity check for when it's
False, but we leave it to other tests to validate
the specific URL.
"""
self.assertIn(
"gravatar.com",
get_hamlet_avatar(client_gravatar=False),
)
class GetProfileTest(ZulipTestCase):
def test_cache_behavior(self) -> None:
"""Tests whether fetching a user object the normal way, with
`get_user`, makes 1 cache query and 1 database query.
"""
realm = get_realm("zulip")
email = self.example_user("hamlet").email
with queries_captured() as queries:
with simulated_empty_cache() as cache_queries:
user_profile = get_user(email, realm)
self.assert_length(queries, 1)
self.assert_length(cache_queries, 1)
self.assertEqual(user_profile.email, email)
def test_get_user_profile(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
desdemona = self.example_user("desdemona")
shiva = self.example_user("shiva")
self.login("hamlet")
result = orjson.loads(self.client_get("/json/users/me").content)
self.assertEqual(result["email"], hamlet.email)
self.assertEqual(result["full_name"], "King Hamlet")
self.assertIn("user_id", result)
self.assertFalse(result["is_bot"])
self.assertFalse(result["is_admin"])
self.assertFalse(result["is_owner"])
self.assertFalse(result["is_guest"])
self.assertEqual(result["role"], UserProfile.ROLE_MEMBER)
self.assertFalse("delivery_email" in result)
self.login("iago")
result = orjson.loads(self.client_get("/json/users/me").content)
self.assertEqual(result["email"], iago.email)
self.assertEqual(result["full_name"], "Iago")
self.assertFalse(result["is_bot"])
self.assertTrue(result["is_admin"])
self.assertFalse(result["is_owner"])
self.assertFalse(result["is_guest"])
self.assertEqual(result["role"], UserProfile.ROLE_REALM_ADMINISTRATOR)
self.login("desdemona")
result = orjson.loads(self.client_get("/json/users/me").content)
self.assertEqual(result["email"], desdemona.email)
self.assertFalse(result["is_bot"])
self.assertTrue(result["is_admin"])
self.assertTrue(result["is_owner"])
self.assertFalse(result["is_guest"])
self.assertEqual(result["role"], UserProfile.ROLE_REALM_OWNER)
self.login("shiva")
result = orjson.loads(self.client_get("/json/users/me").content)
self.assertEqual(result["email"], shiva.email)
self.assertFalse(result["is_bot"])
self.assertFalse(result["is_admin"])
self.assertFalse(result["is_owner"])
self.assertFalse(result["is_guest"])
self.assertEqual(result["role"], UserProfile.ROLE_MODERATOR)
# Tests the GET ../users/{id} API endpoint.
user = self.example_user("hamlet")
result = orjson.loads(self.client_get(f"/json/users/{user.id}").content)
self.assertEqual(result["user"]["email"], user.email)
self.assertEqual(result["user"]["full_name"], user.full_name)
self.assertIn("user_id", result["user"])
self.assertNotIn("profile_data", result["user"])
self.assertFalse(result["user"]["is_bot"])
self.assertFalse(result["user"]["is_admin"])
self.assertFalse(result["user"]["is_owner"])
result = orjson.loads(
self.client_get(
f"/json/users/{user.id}", {"include_custom_profile_fields": "true"}
).content
)
self.assertIn("profile_data", result["user"])
result = self.client_get("/json/users/30")
self.assert_json_error(result, "No such user")
bot = self.example_user("default_bot")
result = orjson.loads(self.client_get(f"/json/users/{bot.id}").content)
self.assertEqual(result["user"]["email"], bot.email)
self.assertTrue(result["user"]["is_bot"])
def test_get_user_by_email(self) -> None:
user = self.example_user("hamlet")
self.login("hamlet")
result = orjson.loads(self.client_get(f"/json/users/{user.email}").content)
self.assertEqual(result["user"]["email"], user.email)
self.assertEqual(result["user"]["full_name"], user.full_name)
self.assertIn("user_id", result["user"])
self.assertNotIn("profile_data", result["user"])
self.assertFalse(result["user"]["is_bot"])
self.assertFalse(result["user"]["is_admin"])
self.assertFalse(result["user"]["is_owner"])
result = orjson.loads(
self.client_get(
f"/json/users/{user.email}", {"include_custom_profile_fields": "true"}
).content
)
self.assertIn("profile_data", result["user"])
result = self.client_get("/json/users/invalid")
self.assert_json_error(result, "No such user")
bot = self.example_user("default_bot")
result = orjson.loads(self.client_get(f"/json/users/{bot.email}").content)
self.assertEqual(result["user"]["email"], bot.email)
self.assertTrue(result["user"]["is_bot"])
def test_get_all_profiles_avatar_urls(self) -> None:
hamlet = self.example_user("hamlet")
result = self.api_get(hamlet, "/api/v1/users")
self.assert_json_success(result)
(my_user,) = [user for user in result.json()["members"] if user["email"] == hamlet.email]
self.assertEqual(
my_user["avatar_url"],
avatar_url(hamlet),
)
class DeleteUserTest(ZulipTestCase):
def test_do_delete_user(self) -> None:
realm = get_realm("zulip")
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
hamlet = self.example_user("hamlet")
hamlet_personal_recipient = hamlet.recipient
hamlet_user_id = hamlet.id
self.send_personal_message(cordelia, hamlet)
self.send_personal_message(hamlet, cordelia)
personal_message_ids_to_hamlet = Message.objects.filter(
recipient=hamlet_personal_recipient
).values_list("id", flat=True)
self.assertTrue(len(personal_message_ids_to_hamlet) > 0)
self.assertTrue(Message.objects.filter(sender=hamlet).exists())
huddle_message_ids_from_cordelia = [
self.send_huddle_message(cordelia, [hamlet, othello]) for i in range(3)
]
huddle_message_ids_from_hamlet = [
self.send_huddle_message(hamlet, [cordelia, othello]) for i in range(3)
]
huddle_with_hamlet_recipient_ids = list(
Subscription.objects.filter(
user_profile=hamlet, recipient__type=Recipient.HUDDLE
).values_list("recipient_id", flat=True)
)
self.assertTrue(len(huddle_with_hamlet_recipient_ids) > 0)
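        # Deleting Hamlet should replace his account with a mirror-dummy
        # placeholder user that keeps the same user ID.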
do_delete_user(hamlet)
replacement_dummy_user = UserProfile.objects.get(id=hamlet_user_id, realm=realm)
self.assertEqual(
replacement_dummy_user.delivery_email, f"deleteduser{hamlet_user_id}@{realm.uri}"
)
self.assertEqual(replacement_dummy_user.is_mirror_dummy, True)
self.assertEqual(Message.objects.filter(id__in=personal_message_ids_to_hamlet).count(), 0)
# Huddle messages from hamlet should have been deleted, but messages of other participants should
# be kept.
self.assertEqual(Message.objects.filter(id__in=huddle_message_ids_from_hamlet).count(), 0)
self.assertEqual(Message.objects.filter(id__in=huddle_message_ids_from_cordelia).count(), 3)
self.assertEqual(Message.objects.filter(sender_id=hamlet_user_id).count(), 0)
# Verify that the dummy user is subscribed to the deleted user's huddles, to keep huddle data
# in a correct state.
for recipient_id in huddle_with_hamlet_recipient_ids:
self.assertTrue(
Subscription.objects.filter(
user_profile=replacement_dummy_user, recipient_id=recipient_id
).exists()
)
class FakeEmailDomainTest(ZulipTestCase):
def test_get_fake_email_domain(self) -> None:
realm = get_realm("zulip")
self.assertEqual("zulip.testserver", get_fake_email_domain(realm))
with self.settings(EXTERNAL_HOST="example.com"):
self.assertEqual("zulip.example.com", get_fake_email_domain(realm))
@override_settings(FAKE_EMAIL_DOMAIN="fakedomain.com", REALM_HOSTS={"zulip": "127.0.0.1"})
def test_get_fake_email_domain_realm_host_is_ip_addr(self) -> None:
realm = get_realm("zulip")
self.assertEqual("fakedomain.com", get_fake_email_domain(realm))
@override_settings(FAKE_EMAIL_DOMAIN="invaliddomain", REALM_HOSTS={"zulip": "127.0.0.1"})
def test_invalid_fake_email_domain(self) -> None:
realm = get_realm("zulip")
with self.assertRaises(InvalidFakeEmailDomain):
get_fake_email_domain(realm)
@override_settings(FAKE_EMAIL_DOMAIN="127.0.0.1", REALM_HOSTS={"zulip": "127.0.0.1"})
def test_invalid_fake_email_domain_ip(self) -> None:
with self.assertRaises(InvalidFakeEmailDomain):
realm = get_realm("zulip")
get_fake_email_domain(realm)
|
import datetime
from email.headerregistry import Address
from typing import Any, Dict, Iterable, List, Mapping, Optional, TypeVar, Union
from unittest import mock
import orjson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import (
change_user_is_active,
create_users,
do_change_can_create_users,
do_change_user_role,
do_create_user,
do_deactivate_user,
do_delete_user,
do_invite_users,
do_reactivate_user,
do_set_realm_property,
get_emails_from_user_ids,
get_recipient_info,
)
from zerver.lib.avatar import avatar_url, get_gravatar_url
from zerver.lib.create_user import copy_user_settings
from zerver.lib.events import do_events_register
from zerver.lib.exceptions import JsonableError
from zerver.lib.send_email import (
clear_scheduled_emails,
deliver_scheduled_emails,
send_future_email,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
cache_tries_captured,
get_subscription,
get_test_image_file,
queries_captured,
reset_emails_in_zulip_realm,
simulated_empty_cache,
tornado_redirected_to_list,
)
from zerver.lib.topic_mutes import add_topic_mute
from zerver.lib.upload import upload_avatar_image
from zerver.lib.users import Accounts, access_user_by_id, get_accounts_for_email, user_ids_to_users
from zerver.models import (
CustomProfileField,
InvalidFakeEmailDomain,
Message,
PreregistrationUser,
Realm,
RealmDomain,
Recipient,
ScheduledEmail,
Stream,
Subscription,
UserHotspot,
UserProfile,
check_valid_user_ids,
get_client,
get_fake_email_domain,
get_realm,
get_source_profile,
get_stream,
get_system_bot,
get_user,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
)
K = TypeVar("K")
V = TypeVar("V")
def find_dict(lst: Iterable[Dict[K, V]], k: K, v: V) -> Dict[K, V]:
for dct in lst:
if dct[k] == v:
return dct
raise AssertionError(f"Cannot find element in list where key {k} == {v}")
class PermissionTest(ZulipTestCase):
def test_role_setters(self) -> None:
user_profile = self.example_user("hamlet")
user_profile.is_realm_admin = True
self.assertEqual(user_profile.is_realm_admin, True)
self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
user_profile.is_guest = False
self.assertEqual(user_profile.is_guest, False)
self.assertEqual(user_profile.role, UserProfile.ROLE_REALM_ADMINISTRATOR)
user_profile.is_realm_admin = False
self.assertEqual(user_profile.is_realm_admin, False)
self.assertEqual(user_profile.role, UserProfile.ROLE_MEMBER)
user_profile.is_guest = True
self.assertEqual(user_profile.is_guest, True)
self.assertEqual(user_profile.role, UserProfile.ROLE_GUEST)
user_profile.is_realm_admin = False
self.assertEqual(user_profile.is_guest, True)
self.assertEqual(user_profile.role, UserProfile.ROLE_GUEST)
user_profile.is_guest = False
self.assertEqual(user_profile.is_guest, False)
self.assertEqual(user_profile.role, UserProfile.ROLE_MEMBER)
def test_get_admin_users(self) -> None:
user_profile = self.example_user("hamlet")
do_change_user_role(user_profile, UserProfile.ROLE_MEMBER, acting_user=None)
self.assertFalse(user_profile.is_realm_owner)
admin_users = user_profile.realm.get_human_admin_users()
self.assertFalse(user_profile in admin_users)
admin_users = user_profile.realm.get_admin_users_and_bots()
self.assertFalse(user_profile in admin_users)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
self.assertFalse(user_profile.is_realm_owner)
admin_users = user_profile.realm.get_human_admin_users()
self.assertTrue(user_profile in admin_users)
admin_users = user_profile.realm.get_admin_users_and_bots()
self.assertTrue(user_profile in admin_users)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_OWNER, acting_user=None)
self.assertTrue(user_profile.is_realm_owner)
admin_users = user_profile.realm.get_human_admin_users()
self.assertTrue(user_profile in admin_users)
admin_users = user_profile.realm.get_human_admin_users(include_realm_owners=False)
self.assertFalse(user_profile in admin_users)
admin_users = user_profile.realm.get_admin_users_and_bots()
self.assertTrue(user_profile in admin_users)
admin_users = user_profile.realm.get_admin_users_and_bots(include_realm_owners=False)
self.assertFalse(user_profile in admin_users)
def test_get_first_human_user(self) -> None:
realm = get_realm("zulip")
UserProfile.objects.filter(realm=realm).delete()
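        # Create a mix of bots and humans; get_first_human_user() should
        # return the earliest-created human, ignoring bots.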
UserProfile.objects.create(
realm=realm, email="bot1@zulip.com", delivery_email="bot1@zulip.com", is_bot=True
)
first_human_user = UserProfile.objects.create(
realm=realm, email="user1@zulip.com", delivery_email="user1@zulip.com", is_bot=False
)
UserProfile.objects.create(
realm=realm, email="user2@zulip.com", delivery_email="user2@zulip.com", is_bot=False
)
UserProfile.objects.create(
realm=realm, email="bot2@zulip.com", delivery_email="bot2@zulip.com", is_bot=True
)
self.assertEqual(first_human_user, realm.get_first_human_user())
def test_updating_non_existent_user(self) -> None:
self.login("hamlet")
admin = self.example_user("hamlet")
do_change_user_role(admin, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
invalid_user_id = 1000
result = self.client_patch(f"/json/users/{invalid_user_id}", {})
self.assert_json_error(result, "No such user")
def test_owner_api(self) -> None:
self.login("iago")
desdemona = self.example_user("desdemona")
othello = self.example_user("othello")
iago = self.example_user("iago")
realm = iago.realm
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
result = self.client_get("/json/users")
self.assert_json_success(result)
members = result.json()["members"]
iago_dict = find_dict(members, "email", iago.email)
self.assertTrue(iago_dict["is_owner"])
othello_dict = find_dict(members, "email", othello.email)
self.assertFalse(othello_dict["is_owner"])
req = dict(role=UserProfile.ROLE_REALM_OWNER)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{othello.id}", req)
self.assert_json_success(result)
owner_users = realm.get_human_owner_users()
self.assertTrue(othello in owner_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], othello.id)
self.assertEqual(person["role"], UserProfile.ROLE_REALM_OWNER)
req = dict(role=UserProfile.ROLE_MEMBER)
events = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{othello.id}", req)
self.assert_json_success(result)
owner_users = realm.get_human_owner_users()
self.assertFalse(othello in owner_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], othello.id)
self.assertEqual(person["role"], UserProfile.ROLE_MEMBER)
        # Cannot take the owner role away from the last owner
self.login("desdemona")
req = dict(role=UserProfile.ROLE_MEMBER)
events = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{iago.id}", req)
self.assert_json_success(result)
owner_users = realm.get_human_owner_users()
self.assertFalse(iago in owner_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], iago.id)
self.assertEqual(person["role"], UserProfile.ROLE_MEMBER)
with tornado_redirected_to_list([]):
result = self.client_patch(f"/json/users/{desdemona.id}", req)
self.assert_json_error(
result, "The owner permission cannot be removed from the only organization owner."
)
do_change_user_role(iago, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
self.login("iago")
with tornado_redirected_to_list([]):
result = self.client_patch(f"/json/users/{desdemona.id}", req)
self.assert_json_error(result, "Must be an organization owner")
def test_admin_api(self) -> None:
self.login("desdemona")
hamlet = self.example_user("hamlet")
othello = self.example_user("othello")
desdemona = self.example_user("desdemona")
realm = hamlet.realm
# Make sure we see is_admin flag in /json/users
result = self.client_get("/json/users")
self.assert_json_success(result)
members = result.json()["members"]
desdemona_dict = find_dict(members, "email", desdemona.email)
self.assertTrue(desdemona_dict["is_admin"])
othello_dict = find_dict(members, "email", othello.email)
self.assertFalse(othello_dict["is_admin"])
# Giveth
req = dict(role=orjson.dumps(UserProfile.ROLE_REALM_ADMINISTRATOR).decode())
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{othello.id}", req)
self.assert_json_success(result)
admin_users = realm.get_human_admin_users()
self.assertTrue(othello in admin_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], othello.id)
self.assertEqual(person["role"], UserProfile.ROLE_REALM_ADMINISTRATOR)
# Taketh away
req = dict(role=orjson.dumps(UserProfile.ROLE_MEMBER).decode())
events = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{othello.id}", req)
self.assert_json_success(result)
admin_users = realm.get_human_admin_users()
self.assertFalse(othello in admin_users)
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], othello.id)
self.assertEqual(person["role"], UserProfile.ROLE_MEMBER)
        # Make sure only admins can patch other users' info.
self.login("othello")
result = self.client_patch(f"/json/users/{hamlet.id}", req)
self.assert_json_error(result, "Insufficient permission")
def test_admin_api_hide_emails(self) -> None:
reset_emails_in_zulip_realm()
user = self.example_user("hamlet")
admin = self.example_user("iago")
self.login_user(user)
# First, verify client_gravatar works normally
result = self.client_get("/json/users", {"client_gravatar": "true"})
self.assert_json_success(result)
members = result.json()["members"]
hamlet = find_dict(members, "user_id", user.id)
self.assertEqual(hamlet["email"], user.email)
self.assertIsNone(hamlet["avatar_url"])
self.assertNotIn("delivery_email", hamlet)
# Also verify the /events code path. This is a bit hacky, but
# we need to verify client_gravatar is not being overridden.
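        # The assertion below inspects the client_gravatar value passed
        # through to request_event_queue.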
with mock.patch(
"zerver.lib.events.request_event_queue", return_value=None
) as mock_request_event_queue:
with self.assertRaises(JsonableError):
result = do_events_register(user, get_client("website"), client_gravatar=True)
self.assertEqual(mock_request_event_queue.call_args_list[0][0][3], True)
#############################################################
        # Now, switch email address visibility and check that client_gravatar
        # is automatically disabled for the user.
do_set_realm_property(
user.realm,
"email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
acting_user=None,
)
result = self.client_get("/json/users", {"client_gravatar": "true"})
self.assert_json_success(result)
members = result.json()["members"]
hamlet = find_dict(members, "user_id", user.id)
self.assertEqual(hamlet["email"], f"user{user.id}@zulip.testserver")
# Note that the Gravatar URL should still be computed from the
# `delivery_email`; otherwise, we won't be able to serve the
# user's Gravatar.
self.assertEqual(hamlet["avatar_url"], get_gravatar_url(user.delivery_email, 1))
self.assertNotIn("delivery_email", hamlet)
# Also verify the /events code path. This is a bit hacky, but
# basically we want to verify client_gravatar is being
# overridden.
with mock.patch(
"zerver.lib.events.request_event_queue", return_value=None
) as mock_request_event_queue:
with self.assertRaises(JsonableError):
result = do_events_register(user, get_client("website"), client_gravatar=True)
self.assertEqual(mock_request_event_queue.call_args_list[0][0][3], False)
# client_gravatar is still turned off for admins. In theory,
# it doesn't need to be, but client-side changes would be
        # required in clients such as the mobile apps.
# delivery_email is sent for admins.
admin.refresh_from_db()
self.login_user(admin)
result = self.client_get("/json/users", {"client_gravatar": "true"})
self.assert_json_success(result)
members = result.json()["members"]
hamlet = find_dict(members, "user_id", user.id)
self.assertEqual(hamlet["email"], f"user{user.id}@zulip.testserver")
self.assertEqual(hamlet["avatar_url"], get_gravatar_url(user.email, 1))
self.assertEqual(hamlet["delivery_email"], self.example_email("hamlet"))
def test_user_cannot_promote_to_admin(self) -> None:
self.login("hamlet")
req = dict(role=orjson.dumps(UserProfile.ROLE_REALM_ADMINISTRATOR).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Insufficient permission")
def test_admin_user_can_change_full_name(self) -> None:
new_name = "new name"
self.login("iago")
hamlet = self.example_user("hamlet")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch(f"/json/users/{hamlet.id}", req)
self.assert_json_success(result)
hamlet = self.example_user("hamlet")
self.assertEqual(hamlet.full_name, new_name)
def test_non_admin_cannot_change_full_name(self) -> None:
self.login("hamlet")
req = dict(full_name=orjson.dumps("new name").decode())
result = self.client_patch("/json/users/{}".format(self.example_user("othello").id), req)
self.assert_json_error(result, "Insufficient permission")
def test_admin_cannot_set_long_full_name(self) -> None:
new_name = "a" * (UserProfile.MAX_NAME_LENGTH + 1)
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Name too long!")
def test_admin_cannot_set_short_full_name(self) -> None:
new_name = "a"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Name too short!")
def test_not_allowed_format(self) -> None:
# Name of format "Alice|999" breaks in Markdown
new_name = "iago|72"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid format!")
def test_allowed_format_complex(self) -> None:
        # Adding characters after r'|\d+' doesn't break Markdown
new_name = "Hello- 12iago|72k"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_success(result)
def test_not_allowed_format_complex(self) -> None:
new_name = "Hello- 12iago|72"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid format!")
def test_admin_cannot_set_full_name_with_invalid_characters(self) -> None:
new_name = "Opheli*"
self.login("iago")
req = dict(full_name=orjson.dumps(new_name).decode())
result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
self.assert_json_error(result, "Invalid characters in name!")
def test_access_user_by_id(self) -> None:
iago = self.example_user("iago")
# Must be a valid user ID in the realm
with self.assertRaises(JsonableError):
access_user_by_id(iago, 1234, for_admin=False)
with self.assertRaises(JsonableError):
access_user_by_id(iago, self.mit_user("sipbtest").id, for_admin=False)
# Can only access bot users if allow_bots is passed
bot = self.example_user("default_bot")
access_user_by_id(iago, bot.id, allow_bots=True, for_admin=True)
with self.assertRaises(JsonableError):
access_user_by_id(iago, bot.id, for_admin=True)
# Can only access deactivated users if allow_deactivated is passed
hamlet = self.example_user("hamlet")
do_deactivate_user(hamlet, acting_user=None)
with self.assertRaises(JsonableError):
access_user_by_id(iago, hamlet.id, for_admin=False)
with self.assertRaises(JsonableError):
access_user_by_id(iago, hamlet.id, for_admin=True)
access_user_by_id(iago, hamlet.id, allow_deactivated=True, for_admin=True)
        # A non-admin user can't administer another user
with self.assertRaises(JsonableError):
access_user_by_id(
self.example_user("cordelia"), self.example_user("aaron").id, for_admin=True
)
# But does have read-only access to it.
access_user_by_id(
self.example_user("cordelia"), self.example_user("aaron").id, for_admin=False
)
def check_property_for_role(self, user_profile: UserProfile, role: int) -> bool:
if role == UserProfile.ROLE_REALM_ADMINISTRATOR:
return (
user_profile.is_realm_admin
and not user_profile.is_guest
and not user_profile.is_realm_owner
and not user_profile.is_moderator
)
elif role == UserProfile.ROLE_REALM_OWNER:
return (
user_profile.is_realm_owner
and user_profile.is_realm_admin
and not user_profile.is_moderator
and not user_profile.is_guest
)
elif role == UserProfile.ROLE_MODERATOR:
return (
user_profile.is_moderator
and not user_profile.is_realm_owner
and not user_profile.is_realm_admin
and not user_profile.is_guest
)
if role == UserProfile.ROLE_MEMBER:
return (
not user_profile.is_guest
and not user_profile.is_moderator
and not user_profile.is_realm_admin
and not user_profile.is_realm_owner
)
assert role == UserProfile.ROLE_GUEST
return (
user_profile.is_guest
and not user_profile.is_moderator
and not user_profile.is_realm_admin
and not user_profile.is_realm_owner
)
def check_user_role_change(
self,
user_email: str,
new_role: int,
) -> None:
self.login("desdemona")
user_profile = self.example_user(user_email)
old_role = user_profile.role
self.assertTrue(self.check_property_for_role(user_profile, old_role))
req = dict(role=orjson.dumps(new_role).decode())
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch(f"/json/users/{user_profile.id}", req)
self.assert_json_success(result)
user_profile = self.example_user(user_email)
self.assertTrue(self.check_property_for_role(user_profile, new_role))
person = events[0]["event"]["person"]
self.assertEqual(person["user_id"], user_profile.id)
        self.assertEqual(person["role"], new_role)
def test_change_regular_member_to_guest(self) -> None:
self.check_user_role_change("hamlet", UserProfile.ROLE_GUEST)
def test_change_guest_to_regular_member(self) -> None:
self.check_user_role_change("polonius", UserProfile.ROLE_MEMBER)
def test_change_admin_to_guest(self) -> None:
self.check_user_role_change("iago", UserProfile.ROLE_GUEST)
def test_change_guest_to_admin(self) -> None:
self.check_user_role_change("polonius", UserProfile.ROLE_REALM_ADMINISTRATOR)
def test_change_owner_to_guest(self) -> None:
self.login("desdemona")
iago = self.example_user("iago")
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
self.check_user_role_change("iago", UserProfile.ROLE_GUEST)
def test_change_guest_to_owner(self) -> None:
self.check_user_role_change("polonius", UserProfile.ROLE_REALM_OWNER)
def test_change_admin_to_owner(self) -> None:
self.check_user_role_change("iago", UserProfile.ROLE_REALM_OWNER)
def test_change_owner_to_admin(self) -> None:
self.login("desdemona")
iago = self.example_user("iago")
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
self.check_user_role_change("iago", UserProfile.ROLE_REALM_ADMINISTRATOR)
def test_change_owner_to_moderator(self) -> None:
iago = self.example_user("iago")
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
self.check_user_role_change("iago", UserProfile.ROLE_MODERATOR)
def test_change_moderator_to_owner(self) -> None:
self.check_user_role_change("shiva", UserProfile.ROLE_REALM_OWNER)
def test_change_admin_to_moderator(self) -> None:
self.check_user_role_change("iago", UserProfile.ROLE_MODERATOR)
def test_change_moderator_to_admin(self) -> None:
self.check_user_role_change("shiva", UserProfile.ROLE_REALM_ADMINISTRATOR)
def test_change_guest_to_moderator(self) -> None:
self.check_user_role_change("polonius", UserProfile.ROLE_MODERATOR)
def test_change_moderator_to_guest(self) -> None:
self.check_user_role_change("shiva", UserProfile.ROLE_GUEST)
def test_admin_user_can_change_profile_data(self) -> None:
realm = get_realm("zulip")
self.login("iago")
new_profile_data = []
cordelia = self.example_user("cordelia")
        # Test for all types of data
fields = {
"Phone number": "short text data",
"Biography": "long text data",
"Favorite food": "short text data",
"Favorite editor": "vim",
"Birthday": "1909-03-05",
"Favorite website": "https://zulip.com",
"Mentor": [cordelia.id],
"GitHub": "timabbott",
}
for field_name in fields:
field = CustomProfileField.objects.get(name=field_name, realm=realm)
new_profile_data.append(
{
"id": field.id,
"value": fields[field_name],
}
)
result = self.client_patch(
f"/json/users/{cordelia.id}", {"profile_data": orjson.dumps(new_profile_data).decode()}
)
self.assert_json_success(result)
cordelia = self.example_user("cordelia")
for field_dict in cordelia.profile_data:
with self.subTest(field_name=field_dict["name"]):
self.assertEqual(field_dict["value"], fields[field_dict["name"]])
# Test admin user cannot set invalid profile data
invalid_fields = [
(
"Favorite editor",
"invalid choice",
"'invalid choice' is not a valid choice for 'Favorite editor'.",
),
("Birthday", "1909-34-55", "Birthday is not a date"),
("Favorite website", "not url", "Favorite website is not a URL"),
("Mentor", "not list of user ids", "User IDs is not a list"),
]
for field_name, field_value, error_msg in invalid_fields:
new_profile_data = []
field = CustomProfileField.objects.get(name=field_name, realm=realm)
new_profile_data.append(
{
"id": field.id,
"value": field_value,
}
)
result = self.client_patch(
f"/json/users/{cordelia.id}",
{"profile_data": orjson.dumps(new_profile_data).decode()},
)
self.assert_json_error(result, error_msg)
# non-existent field and no data
invalid_profile_data = [
{
"id": 9001,
"value": "",
}
]
result = self.client_patch(
f"/json/users/{cordelia.id}",
{"profile_data": orjson.dumps(invalid_profile_data).decode()},
)
self.assert_json_error(result, "Field id 9001 not found.")
# non-existent field and data
invalid_profile_data = [
{
"id": 9001,
"value": "some data",
}
]
result = self.client_patch(
f"/json/users/{cordelia.id}",
{"profile_data": orjson.dumps(invalid_profile_data).decode()},
)
self.assert_json_error(result, "Field id 9001 not found.")
# Test for clearing/resetting field values.
empty_profile_data = []
for field_name in fields:
field = CustomProfileField.objects.get(name=field_name, realm=realm)
value: Union[str, None, List[Any]] = ""
if field.field_type == CustomProfileField.USER:
value = []
empty_profile_data.append(
{
"id": field.id,
"value": value,
}
)
result = self.client_patch(
f"/json/users/{cordelia.id}",
{"profile_data": orjson.dumps(empty_profile_data).decode()},
)
self.assert_json_success(result)
for field_dict in cordelia.profile_data:
with self.subTest(field_name=field_dict["name"]):
self.assertEqual(field_dict["value"], None)
# Test adding some of the field values after removing all.
hamlet = self.example_user("hamlet")
new_fields = {
"Phone number": None,
"Biography": "A test user",
"Favorite food": None,
"Favorite editor": None,
"Birthday": None,
"Favorite website": "https://zulip.github.io",
"Mentor": [hamlet.id],
"GitHub": "timabbott",
}
new_profile_data = []
for field_name in fields:
field = CustomProfileField.objects.get(name=field_name, realm=realm)
value = None
if new_fields[field_name]:
value = new_fields[field_name]
new_profile_data.append(
{
"id": field.id,
"value": value,
}
)
result = self.client_patch(
f"/json/users/{cordelia.id}", {"profile_data": orjson.dumps(new_profile_data).decode()}
)
self.assert_json_success(result)
for field_dict in cordelia.profile_data:
with self.subTest(field_name=field_dict["name"]):
self.assertEqual(field_dict["value"], new_fields[str(field_dict["name"])])
def test_non_admin_user_cannot_change_profile_data(self) -> None:
self.login("cordelia")
hamlet = self.example_user("hamlet")
realm = get_realm("zulip")
new_profile_data = []
field = CustomProfileField.objects.get(name="Biography", realm=realm)
new_profile_data.append(
{
"id": field.id,
"value": "New hamlet Biography",
}
)
result = self.client_patch(
f"/json/users/{hamlet.id}", {"profile_data": orjson.dumps(new_profile_data).decode()}
)
self.assert_json_error(result, "Insufficient permission")
result = self.client_patch(
"/json/users/{}".format(self.example_user("cordelia").id),
{"profile_data": orjson.dumps(new_profile_data).decode()},
)
self.assert_json_error(result, "Insufficient permission")
class QueryCountTest(ZulipTestCase):
def test_create_user_with_multiple_streams(self) -> None:
# add_new_user_history needs messages to be current
Message.objects.all().update(date_sent=timezone_now())
ContentType.objects.clear_cache()
        # This just focuses on making sure we don't do too many
# queries/cache tries or send too many events.
realm = get_realm("zulip")
self.make_stream("private_stream1", invite_only=True)
self.make_stream("private_stream2", invite_only=True)
stream_names = [
"Denmark",
"Scotland",
"Verona",
"private_stream1",
"private_stream2",
]
streams = [get_stream(stream_name, realm) for stream_name in stream_names]
do_invite_users(
user_profile=self.example_user("hamlet"),
invitee_emails=["fred@zulip.com"],
streams=streams,
)
prereg_user = PreregistrationUser.objects.get(email="fred@zulip.com")
events: List[Mapping[str, Any]] = []
with queries_captured() as queries:
with cache_tries_captured() as cache_tries:
with tornado_redirected_to_list(events):
fred = do_create_user(
email="fred@zulip.com",
password="password",
realm=realm,
full_name="Fred Flintstone",
prereg_user=prereg_user,
acting_user=None,
)
self.assert_length(queries, 70)
self.assert_length(cache_tries, 22)
self.assert_length(events, 7)
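        # peer_add events are batched: the public streams share a single
        # event, while each private stream gets its own event.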
peer_add_events = [event for event in events if event["event"].get("op") == "peer_add"]
notifications = set()
for event in peer_add_events:
stream_ids = event["event"]["stream_ids"]
stream_names = sorted(Stream.objects.get(id=stream_id).name for stream_id in stream_ids)
            self.assertEqual(set(event["event"]["user_ids"]), {fred.id})
notifications.add(",".join(stream_names))
self.assertEqual(
notifications, {"Denmark,Scotland,Verona", "private_stream1", "private_stream2"}
)
class BulkCreateUserTest(ZulipTestCase):
def test_create_users(self) -> None:
realm = get_realm("zulip")
realm.email_address_visibility = Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS
realm.save()
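        # With EMAIL_ADDRESS_VISIBILITY_ADMINS, the public .email field is a
        # dummy address rather than the delivery email.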
name_list = [
("Fred Flintstone", "fred@zulip.com"),
("Lisa Simpson", "lisa@zulip.com"),
]
create_users(realm, name_list)
fred = get_user_by_delivery_email("fred@zulip.com", realm)
self.assertEqual(
fred.email,
f"user{fred.id}@zulip.testserver",
)
lisa = get_user_by_delivery_email("lisa@zulip.com", realm)
self.assertEqual(lisa.full_name, "Lisa Simpson")
self.assertEqual(lisa.is_bot, False)
self.assertEqual(lisa.bot_type, None)
realm.email_address_visibility = Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE
realm.save()
name_list = [
("Bono", "bono@zulip.com"),
("Cher", "cher@zulip.com"),
]
create_users(realm, name_list)
bono = get_user_by_delivery_email("bono@zulip.com", realm)
self.assertEqual(bono.email, "bono@zulip.com")
self.assertEqual(bono.delivery_email, "bono@zulip.com")
cher = get_user_by_delivery_email("cher@zulip.com", realm)
self.assertEqual(cher.full_name, "Cher")
class AdminCreateUserTest(ZulipTestCase):
def test_create_user_backend(self) -> None:
# This test should give us complete coverage on
# create_user_backend. It mostly exercises error
# conditions, and it also does a basic test of the success
# path.
admin = self.example_user("hamlet")
realm = admin.realm
self.login_user(admin)
do_change_user_role(admin, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
valid_params = dict(
email="romeo@zulip.net",
password="xxxx",
full_name="Romeo Montague",
)
self.assertEqual(admin.can_create_users, False)
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "User not authorized for this query")
do_change_can_create_users(admin, True)
# can_create_users is insufficient without being a realm administrator:
do_change_user_role(admin, UserProfile.ROLE_MEMBER, acting_user=None)
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "Must be an organization administrator")
do_change_user_role(admin, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
result = self.client_post("/json/users", {})
self.assert_json_error(result, "Missing 'email' argument")
result = self.client_post(
"/json/users",
dict(
email="romeo@not-zulip.com",
),
)
self.assert_json_error(result, "Missing 'password' argument")
result = self.client_post(
"/json/users",
dict(
email="romeo@not-zulip.com",
password="xxxx",
),
)
self.assert_json_error(result, "Missing 'full_name' argument")
# Test short_name gets properly ignored
result = self.client_post(
"/json/users",
dict(
email="romeo@zulip.com",
password="xxxx",
full_name="Romeo Montague",
short_name="DEPRECATED",
),
)
self.assert_json_success(result)
result = self.client_post(
"/json/users",
dict(
email="broken",
password="xxxx",
full_name="Romeo Montague",
),
)
self.assert_json_error(result, "Bad name or username")
do_set_realm_property(realm, "emails_restricted_to_domains", True, acting_user=None)
result = self.client_post(
"/json/users",
dict(
email="romeo@not-zulip.com",
password="xxxx",
full_name="Romeo Montague",
),
)
self.assert_json_error(
result, "Email 'romeo@not-zulip.com' not allowed in this organization"
)
RealmDomain.objects.create(realm=get_realm("zulip"), domain="zulip.net")
# Check can't use a bad password with zxcvbn enabled
with self.settings(PASSWORD_MIN_LENGTH=6, PASSWORD_MIN_GUESSES=1000):
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "The password is too weak.")
result = self.client_post("/json/users", valid_params)
self.assert_json_success(result)
# Romeo is a newly registered user
new_user = get_user_by_delivery_email("romeo@zulip.net", get_realm("zulip"))
result = orjson.loads(result.content)
self.assertEqual(new_user.full_name, "Romeo Montague")
self.assertEqual(new_user.id, result["user_id"])
# Make sure the recipient field is set correctly.
self.assertEqual(
new_user.recipient, Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user.id)
)
        # We can't create the same user twice.
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "Email 'romeo@zulip.net' already in use")
        # Don't allow creating a user with a disposable email address.
realm.emails_restricted_to_domains = False
realm.disallow_disposable_email_addresses = True
realm.save()
valid_params["email"] = "abc@mailnator.com"
result = self.client_post("/json/users", valid_params)
self.assert_json_error(
result, "Disposable email addresses are not allowed in this organization"
)
# Don't allow creating a user with + in their email address when realm
# is restricted to a domain.
realm.emails_restricted_to_domains = True
realm.save()
valid_params["email"] = "iago+label@zulip.com"
result = self.client_post("/json/users", valid_params)
self.assert_json_error(result, "Email addresses containing + are not allowed.")
# Users can be created with + in their email address when realm
# is not restricted to a domain.
realm.emails_restricted_to_domains = False
realm.save()
valid_params["email"] = "iago+label@zulip.com"
result = self.client_post("/json/users", valid_params)
self.assert_json_success(result)
class UserProfileTest(ZulipTestCase):
def test_get_emails_from_user_ids(self) -> None:
hamlet = self.example_user("hamlet")
othello = self.example_user("othello")
dct = get_emails_from_user_ids([hamlet.id, othello.id])
self.assertEqual(dct[hamlet.id], hamlet.email)
self.assertEqual(dct[othello.id], othello.email)
def test_valid_user_id(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
othello = self.example_user("othello")
bot = self.example_user("default_bot")
# Invalid user ID
invalid_uid: object = 1000
with self.assertRaisesRegex(ValidationError, r"User IDs is not a list"):
check_valid_user_ids(realm.id, invalid_uid)
with self.assertRaisesRegex(ValidationError, rf"Invalid user ID: {invalid_uid}"):
check_valid_user_ids(realm.id, [invalid_uid])
invalid_uid = "abc"
with self.assertRaisesRegex(ValidationError, r"User IDs\[0\] is not an integer"):
check_valid_user_ids(realm.id, [invalid_uid])
invalid_uid = str(othello.id)
with self.assertRaisesRegex(ValidationError, r"User IDs\[0\] is not an integer"):
check_valid_user_ids(realm.id, [invalid_uid])
# User is in different realm
with self.assertRaisesRegex(ValidationError, rf"Invalid user ID: {hamlet.id}"):
check_valid_user_ids(get_realm("zephyr").id, [hamlet.id])
# User is not active
change_user_is_active(hamlet, False)
with self.assertRaisesRegex(ValidationError, rf"User with ID {hamlet.id} is deactivated"):
check_valid_user_ids(realm.id, [hamlet.id])
check_valid_user_ids(realm.id, [hamlet.id], allow_deactivated=True)
# User is a bot
with self.assertRaisesRegex(ValidationError, rf"User with ID {bot.id} is a bot"):
check_valid_user_ids(realm.id, [bot.id])
        # Successfully get a non-bot, active user belonging to your realm
check_valid_user_ids(realm.id, [othello.id])
def test_cache_invalidation(self) -> None:
hamlet = self.example_user("hamlet")
with mock.patch("zerver.lib.cache.delete_display_recipient_cache") as m:
hamlet.full_name = "Hamlet Junior"
hamlet.save(update_fields=["full_name"])
self.assertTrue(m.called)
with mock.patch("zerver.lib.cache.delete_display_recipient_cache") as m:
hamlet.long_term_idle = True
hamlet.save(update_fields=["long_term_idle"])
self.assertFalse(m.called)
def test_user_ids_to_users(self) -> None:
real_user_ids = [
self.example_user("hamlet").id,
self.example_user("cordelia").id,
]
self.assertEqual(user_ids_to_users([], get_realm("zulip")), [])
self.assertEqual(
{
user_profile.id
for user_profile in user_ids_to_users(real_user_ids, get_realm("zulip"))
},
set(real_user_ids),
)
with self.assertRaises(JsonableError):
user_ids_to_users([1234], get_realm("zephyr"))
with self.assertRaises(JsonableError):
user_ids_to_users(real_user_ids, get_realm("zephyr"))
def test_bulk_get_users(self) -> None:
from zerver.lib.users import bulk_get_users
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
webhook_bot = self.example_user("webhook_bot")
result = bulk_get_users(
[hamlet.email, cordelia.email],
get_realm("zulip"),
)
self.assertEqual(result[hamlet.email].email, hamlet.email)
self.assertEqual(result[cordelia.email].email, cordelia.email)
result = bulk_get_users(
[hamlet.email, cordelia.email, webhook_bot.email],
None,
base_query=UserProfile.objects.all(),
)
self.assertEqual(result[hamlet.email].email, hamlet.email)
self.assertEqual(result[cordelia.email].email, cordelia.email)
self.assertEqual(result[webhook_bot.email].email, webhook_bot.email)
def test_get_accounts_for_email(self) -> None:
reset_emails_in_zulip_realm()
def check_account_present_in_accounts(user: UserProfile, accounts: List[Accounts]) -> None:
for account in accounts:
realm = user.realm
if (
account["avatar"] == avatar_url(user)
and account["full_name"] == user.full_name
and account["realm_name"] == realm.name
and account["realm_id"] == realm.id
):
return
raise AssertionError("Account not found")
lear_realm = get_realm("lear")
cordelia_in_zulip = self.example_user("cordelia")
cordelia_in_lear = get_user_by_delivery_email("cordelia@zulip.com", lear_realm)
email = "cordelia@zulip.com"
accounts = get_accounts_for_email(email)
self.assert_length(accounts, 2)
check_account_present_in_accounts(cordelia_in_zulip, accounts)
check_account_present_in_accounts(cordelia_in_lear, accounts)
email = "CORDelia@zulip.com"
accounts = get_accounts_for_email(email)
self.assert_length(accounts, 2)
check_account_present_in_accounts(cordelia_in_zulip, accounts)
check_account_present_in_accounts(cordelia_in_lear, accounts)
email = "IAGO@ZULIP.COM"
accounts = get_accounts_for_email(email)
self.assert_length(accounts, 1)
check_account_present_in_accounts(self.example_user("iago"), accounts)
        # We verify that get_accounts_for_email doesn't return deactivated users' accounts
user = self.example_user("hamlet")
do_deactivate_user(user, acting_user=None)
email = self.example_email("hamlet")
accounts = get_accounts_for_email(email)
with self.assertRaises(AssertionError):
check_account_present_in_accounts(user, accounts)
def test_get_source_profile(self) -> None:
reset_emails_in_zulip_realm()
zulip_realm_id = get_realm("zulip").id
iago = get_source_profile("iago@zulip.com", zulip_realm_id)
assert iago is not None
self.assertEqual(iago.email, "iago@zulip.com")
self.assertEqual(iago.realm, get_realm("zulip"))
iago = get_source_profile("IAGO@ZULIP.com", zulip_realm_id)
assert iago is not None
self.assertEqual(iago.email, "iago@zulip.com")
lear_realm_id = get_realm("lear").id
cordelia = get_source_profile("cordelia@zulip.com", lear_realm_id)
assert cordelia is not None
self.assertEqual(cordelia.email, "cordelia@zulip.com")
self.assertIsNone(get_source_profile("iagod@zulip.com", zulip_realm_id))
self.assertIsNone(get_source_profile("iago@zulip.com", 0))
self.assertIsNone(get_source_profile("iago@zulip.com", lear_realm_id))
def test_copy_user_settings(self) -> None:
iago = self.example_user("iago")
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
hamlet.color_scheme = UserProfile.COLOR_SCHEME_LIGHT
cordelia.default_language = "de"
cordelia.default_view = "all_messages"
cordelia.emojiset = "twitter"
cordelia.timezone = "America/Phoenix"
cordelia.color_scheme = UserProfile.COLOR_SCHEME_NIGHT
cordelia.enable_offline_email_notifications = False
cordelia.enable_stream_push_notifications = True
cordelia.enter_sends = False
cordelia.avatar_source = UserProfile.AVATAR_FROM_USER
cordelia.save()
# Upload cordelia's avatar
with get_test_image_file("img.png") as image_file:
upload_avatar_image(image_file, cordelia, cordelia)
UserHotspot.objects.filter(user=cordelia).delete()
UserHotspot.objects.filter(user=iago).delete()
hotspots_completed = ["intro_reply", "intro_streams", "intro_topics"]
for hotspot in hotspots_completed:
UserHotspot.objects.create(user=cordelia, hotspot=hotspot)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
copy_user_settings(cordelia, iago)
        # Check that we didn't send any realm_user update events to
# users; this work is happening before the user account is
# created, so any changes will be reflected in the "add" event
# introducing the user to clients.
self.assertEqual(len(events), 0)
# We verify that cordelia and iago match, but hamlet has the defaults.
self.assertEqual(iago.full_name, "Cordelia, Lear's daughter")
self.assertEqual(cordelia.full_name, "Cordelia, Lear's daughter")
self.assertEqual(hamlet.full_name, "King Hamlet")
self.assertEqual(iago.default_language, "de")
self.assertEqual(cordelia.default_language, "de")
self.assertEqual(hamlet.default_language, "en")
self.assertEqual(iago.emojiset, "twitter")
self.assertEqual(cordelia.emojiset, "twitter")
self.assertEqual(hamlet.emojiset, "google-blob")
self.assertEqual(iago.timezone, "America/Phoenix")
self.assertEqual(cordelia.timezone, "America/Phoenix")
self.assertEqual(hamlet.timezone, "")
self.assertEqual(iago.color_scheme, UserProfile.COLOR_SCHEME_NIGHT)
self.assertEqual(cordelia.color_scheme, UserProfile.COLOR_SCHEME_NIGHT)
self.assertEqual(hamlet.color_scheme, UserProfile.COLOR_SCHEME_LIGHT)
self.assertEqual(iago.enable_offline_email_notifications, False)
self.assertEqual(cordelia.enable_offline_email_notifications, False)
self.assertEqual(hamlet.enable_offline_email_notifications, True)
self.assertEqual(iago.enable_stream_push_notifications, True)
self.assertEqual(cordelia.enable_stream_push_notifications, True)
self.assertEqual(hamlet.enable_stream_push_notifications, False)
self.assertEqual(iago.enter_sends, False)
self.assertEqual(cordelia.enter_sends, False)
self.assertEqual(hamlet.enter_sends, True)
hotspots = list(UserHotspot.objects.filter(user=iago).values_list("hotspot", flat=True))
self.assertEqual(hotspots, hotspots_completed)
def test_get_user_by_id_in_realm_including_cross_realm(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
othello = self.example_user("othello")
bot = get_system_bot(settings.WELCOME_BOT)
# Pass in the ID of a cross-realm bot and a valid realm
cross_realm_bot = get_user_by_id_in_realm_including_cross_realm(bot.id, realm)
self.assertEqual(cross_realm_bot.email, bot.email)
self.assertEqual(cross_realm_bot.id, bot.id)
        # Pass in the ID of a cross-realm bot but with an invalid realm,
# note that the realm should be irrelevant here
cross_realm_bot = get_user_by_id_in_realm_including_cross_realm(bot.id, None)
self.assertEqual(cross_realm_bot.email, bot.email)
self.assertEqual(cross_realm_bot.id, bot.id)
# Pass in the ID of a non-cross-realm user with a realm
user_profile = get_user_by_id_in_realm_including_cross_realm(othello.id, realm)
self.assertEqual(user_profile.email, othello.email)
self.assertEqual(user_profile.id, othello.id)
# If the realm doesn't match, or if the ID is not that of a
# cross-realm bot, UserProfile.DoesNotExist is raised
with self.assertRaises(UserProfile.DoesNotExist):
get_user_by_id_in_realm_including_cross_realm(hamlet.id, None)
def test_get_user_subscription_status(self) -> None:
self.login("hamlet")
iago = self.example_user("iago")
stream = get_stream("Rome", iago.realm)
# Invalid user ID.
result = self.client_get(f"/json/users/25/subscriptions/{stream.id}")
self.assert_json_error(result, "No such user")
# Invalid stream ID.
result = self.client_get(f"/json/users/{iago.id}/subscriptions/25")
self.assert_json_error(result, "Invalid stream id")
result = orjson.loads(
self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}").content
)
self.assertFalse(result["is_subscribed"])
# Subscribe to the stream.
self.subscribe(iago, stream.name)
with queries_captured() as queries:
result = orjson.loads(
self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}").content
)
self.assert_length(queries, 6)
self.assertTrue(result["is_subscribed"])
# Logging in with a Guest user.
polonius = self.example_user("polonius")
self.login("polonius")
self.assertTrue(polonius.is_guest)
self.assertTrue(stream.is_web_public)
result = orjson.loads(
self.client_get(f"/json/users/{iago.id}/subscriptions/{stream.id}").content
)
self.assertTrue(result["is_subscribed"])
class ActivateTest(ZulipTestCase):
def test_basics(self) -> None:
user = self.example_user("hamlet")
do_deactivate_user(user, acting_user=None)
self.assertFalse(user.is_active)
do_reactivate_user(user, acting_user=None)
self.assertTrue(user.is_active)
def test_subscriptions_is_user_active(self) -> None:
user = self.example_user("hamlet")
do_deactivate_user(user, acting_user=None)
self.assertFalse(user.is_active)
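        # Deactivation keeps the Subscription rows but marks them
        # is_user_active=False; reactivation flips them back.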
self.assertTrue(Subscription.objects.filter(user_profile=user).exists())
self.assertFalse(
Subscription.objects.filter(user_profile=user, is_user_active=True).exists()
)
do_reactivate_user(user, acting_user=None)
self.assertTrue(user.is_active)
self.assertTrue(Subscription.objects.filter(user_profile=user).exists())
self.assertFalse(
Subscription.objects.filter(user_profile=user, is_user_active=False).exists()
)
def test_api(self) -> None:
admin = self.example_user("othello")
do_change_user_role(admin, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
self.login("othello")
user = self.example_user("hamlet")
self.assertTrue(user.is_active)
result = self.client_delete(f"/json/users/{user.id}")
self.assert_json_success(result)
user = self.example_user("hamlet")
self.assertFalse(user.is_active)
result = self.client_post(f"/json/users/{user.id}/reactivate")
self.assert_json_success(result)
user = self.example_user("hamlet")
self.assertTrue(user.is_active)
def test_api_with_nonexistent_user(self) -> None:
self.login("iago")
# Organization administrator cannot deactivate organization owner.
result = self.client_delete(f'/json/users/{self.example_user("desdemona").id}')
self.assert_json_error(result, "Must be an organization owner")
iago = self.example_user("iago")
desdemona = self.example_user("desdemona")
do_change_user_role(iago, UserProfile.ROLE_REALM_OWNER, acting_user=None)
        # Cannot deactivate a user with the bot API
result = self.client_delete("/json/bots/{}".format(self.example_user("hamlet").id))
self.assert_json_error(result, "No such bot")
# Cannot deactivate a nonexistent user.
invalid_user_id = 1000
result = self.client_delete(f"/json/users/{invalid_user_id}")
self.assert_json_error(result, "No such user")
result = self.client_delete("/json/users/{}".format(self.example_user("webhook_bot").id))
self.assert_json_error(result, "No such user")
result = self.client_delete(f"/json/users/{desdemona.id}")
self.assert_json_success(result)
result = self.client_delete(f"/json/users/{iago.id}")
self.assert_json_error(result, "Cannot deactivate the only organization owner")
# Cannot reactivate a nonexistent user.
invalid_user_id = 1000
result = self.client_post(f"/json/users/{invalid_user_id}/reactivate")
self.assert_json_error(result, "No such user")
def test_api_with_insufficient_permissions(self) -> None:
non_admin = self.example_user("othello")
do_change_user_role(non_admin, UserProfile.ROLE_MEMBER, acting_user=None)
self.login("othello")
# Cannot deactivate a user with the users api
result = self.client_delete("/json/users/{}".format(self.example_user("hamlet").id))
self.assert_json_error(result, "Insufficient permission")
# Cannot reactivate a user
result = self.client_post(
"/json/users/{}/reactivate".format(self.example_user("hamlet").id)
)
self.assert_json_error(result, "Insufficient permission")
def test_clear_scheduled_jobs(self) -> None:
user = self.example_user("hamlet")
send_future_email(
"zerver/emails/followup_day1",
user.realm,
to_user_ids=[user.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
do_deactivate_user(user, acting_user=None)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_send_future_email_with_multiple_recipients(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=[hamlet.id, iago.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(
ScheduledEmail.objects.filter(users__in=[hamlet, iago]).distinct().count(), 1
)
email = ScheduledEmail.objects.all().first()
self.assertEqual(email.users.count(), 2)
def test_clear_scheduled_emails_with_multiple_user_ids(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=[hamlet.id, iago.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
clear_scheduled_emails([hamlet.id, iago.id])
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_clear_schedule_emails_with_one_user_id(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=[hamlet.id, iago.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
clear_scheduled_emails([hamlet.id])
self.assertEqual(ScheduledEmail.objects.count(), 1)
self.assertEqual(ScheduledEmail.objects.filter(users=hamlet).count(), 0)
self.assertEqual(ScheduledEmail.objects.filter(users=iago).count(), 1)
def test_deliver_scheduled_emails(self) -> None:
iago = self.example_user("iago")
hamlet = self.example_user("hamlet")
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=[hamlet.id, iago.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
email = ScheduledEmail.objects.all().first()
deliver_scheduled_emails(email)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
for message in outbox:
self.assertEqual(
set(message.to),
{
str(Address(display_name=hamlet.full_name, addr_spec=hamlet.delivery_email)),
str(Address(display_name=iago.full_name, addr_spec=iago.delivery_email)),
},
)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_deliver_scheduled_emails_no_addressees(self) -> None:
iago = self.example_user("iago")
hamlet = self.example_user("hamlet")
to_user_ids = [hamlet.id, iago.id]
send_future_email(
"zerver/emails/followup_day1",
iago.realm,
to_user_ids=to_user_ids,
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
email = ScheduledEmail.objects.all().first()
email.users.remove(*to_user_ids)
with self.assertLogs("zulip.send_email", level="INFO") as info_log:
deliver_scheduled_emails(email)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
self.assertEqual(ScheduledEmail.objects.count(), 1)
self.assertEqual(
info_log.output,
[
f"WARNING:zulip.send_email:ScheduledEmail id {email.id} has empty users and address attributes."
],
)
class RecipientInfoTest(ZulipTestCase):
def test_stream_recipient_info(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
# These tests were written with the old default for
# enable_online_push_notifications; that default is better for
# testing the full code path anyway.
hamlet.enable_online_push_notifications = False
cordelia.enable_online_push_notifications = False
othello.enable_online_push_notifications = False
hamlet.save()
cordelia.save()
othello.save()
realm = hamlet.realm
stream_name = "Test stream"
topic_name = "test topic"
for user in [hamlet, cordelia, othello]:
self.subscribe(user, stream_name)
stream = get_stream(stream_name, realm)
recipient = stream.recipient
stream_topic = StreamTopicTarget(
stream_id=stream.id,
topic_name=topic_name,
)
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=False,
)
all_user_ids = {hamlet.id, cordelia.id, othello.id}
expected_info = dict(
active_user_ids=all_user_ids,
push_notify_user_ids=set(),
stream_push_user_ids=set(),
stream_email_user_ids=set(),
wildcard_mention_user_ids=set(),
um_eligible_user_ids=all_user_ids,
long_term_idle_user_ids=set(),
default_bot_user_ids=set(),
service_bot_tuples=[],
)
self.assertEqual(info, expected_info)
cordelia.wildcard_mentions_notify = False
cordelia.save()
hamlet.enable_stream_push_notifications = True
hamlet.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=False,
)
self.assertEqual(info["stream_push_user_ids"], {hamlet.id})
self.assertEqual(info["wildcard_mention_user_ids"], set())
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=True,
)
self.assertEqual(info["wildcard_mention_user_ids"], {hamlet.id, othello.id})
sub = get_subscription(stream_name, hamlet)
sub.push_notifications = False
sub.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
)
self.assertEqual(info["stream_push_user_ids"], set())
hamlet.enable_stream_push_notifications = False
hamlet.save()
sub = get_subscription(stream_name, hamlet)
sub.push_notifications = True
sub.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
)
self.assertEqual(info["stream_push_user_ids"], {hamlet.id})
# Now mute Hamlet to omit him from stream_push_user_ids.
add_topic_mute(
user_profile=hamlet,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name=topic_name,
)
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=False,
)
self.assertEqual(info["stream_push_user_ids"], set())
self.assertEqual(info["wildcard_mention_user_ids"], set())
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=True,
)
self.assertEqual(info["stream_push_user_ids"], set())
        # Since Hamlet has muted the topic and Cordelia has disabled
        # wildcard notifications, it should just be Othello here.
self.assertEqual(info["wildcard_mention_user_ids"], {othello.id})
sub = get_subscription(stream_name, othello)
sub.wildcard_mentions_notify = False
sub.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=True,
)
self.assertEqual(info["stream_push_user_ids"], set())
# Verify that stream-level wildcard_mentions_notify=False works correctly.
self.assertEqual(info["wildcard_mention_user_ids"], set())
# Verify that True works as expected as well
sub = get_subscription(stream_name, othello)
sub.wildcard_mentions_notify = True
sub.save()
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possible_wildcard_mention=True,
)
self.assertEqual(info["stream_push_user_ids"], set())
self.assertEqual(info["wildcard_mention_user_ids"], {othello.id})
# Add a service bot.
service_bot = do_create_user(
email="service-bot@zulip.com",
password="",
realm=realm,
full_name="",
bot_type=UserProfile.EMBEDDED_BOT,
acting_user=None,
)
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possibly_mentioned_user_ids={service_bot.id},
)
self.assertEqual(
info["service_bot_tuples"],
[
(service_bot.id, UserProfile.EMBEDDED_BOT),
],
)
# Add a normal bot.
normal_bot = do_create_user(
email="normal-bot@zulip.com",
password="",
realm=realm,
full_name="",
bot_type=UserProfile.DEFAULT_BOT,
acting_user=None,
)
info = get_recipient_info(
recipient=recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
possibly_mentioned_user_ids={service_bot.id, normal_bot.id},
)
self.assertEqual(info["default_bot_user_ids"], {normal_bot.id})
def test_get_recipient_info_invalid_recipient_type(self) -> None:
hamlet = self.example_user("hamlet")
realm = hamlet.realm
stream = get_stream("Rome", realm)
stream_topic = StreamTopicTarget(
stream_id=stream.id,
topic_name="test topic",
)
        # Make sure get_recipient_info raises a ValueError on invalid recipient types.
with self.assertRaisesRegex(ValueError, "Bad recipient type"):
invalid_recipient = Recipient(type=999) # 999 is not a valid type
get_recipient_info(
recipient=invalid_recipient,
sender_id=hamlet.id,
stream_topic=stream_topic,
)
class BulkUsersTest(ZulipTestCase):
def test_client_gravatar_option(self) -> None:
reset_emails_in_zulip_realm()
self.login("cordelia")
hamlet = self.example_user("hamlet")
def get_hamlet_avatar(client_gravatar: bool) -> Optional[str]:
data = dict(client_gravatar=orjson.dumps(client_gravatar).decode())
result = self.client_get("/json/users", data)
self.assert_json_success(result)
rows = result.json()["members"]
hamlet_data = [row for row in rows if row["user_id"] == hamlet.id][0]
return hamlet_data["avatar_url"]
self.assertEqual(
get_hamlet_avatar(client_gravatar=True),
None,
)
"""
The main purpose of this test is to make sure we
return None for avatar_url when client_gravatar is
set to True. And we do a sanity check for when it's
False, but we leave it to other tests to validate
the specific URL.
"""
self.assertIn(
"gravatar.com",
get_hamlet_avatar(client_gravatar=False),
)
class GetProfileTest(ZulipTestCase):
def test_cache_behavior(self) -> None:
"""Tests whether fetching a user object the normal way, with
`get_user`, makes 1 cache query and 1 database query.
"""
realm = get_realm("zulip")
email = self.example_user("hamlet").email
with queries_captured() as queries:
with simulated_empty_cache() as cache_queries:
user_profile = get_user(email, realm)
self.assert_length(queries, 1)
self.assert_length(cache_queries, 1)
self.assertEqual(user_profile.email, email)
def test_get_user_profile(self) -> None:
hamlet = self.example_user("hamlet")
iago = self.example_user("iago")
desdemona = self.example_user("desdemona")
shiva = self.example_user("shiva")
self.login("hamlet")
result = orjson.loads(self.client_get("/json/users/me").content)
self.assertEqual(result["email"], hamlet.email)
self.assertEqual(result["full_name"], "King Hamlet")
self.assertIn("user_id", result)
self.assertFalse(result["is_bot"])
self.assertFalse(result["is_admin"])
self.assertFalse(result["is_owner"])
self.assertFalse(result["is_guest"])
self.assertEqual(result["role"], UserProfile.ROLE_MEMBER)
self.assertFalse("delivery_email" in result)
self.login("iago")
result = orjson.loads(self.client_get("/json/users/me").content)
self.assertEqual(result["email"], iago.email)
self.assertEqual(result["full_name"], "Iago")
self.assertFalse(result["is_bot"])
self.assertTrue(result["is_admin"])
self.assertFalse(result["is_owner"])
self.assertFalse(result["is_guest"])
self.assertEqual(result["role"], UserProfile.ROLE_REALM_ADMINISTRATOR)
self.login("desdemona")
result = orjson.loads(self.client_get("/json/users/me").content)
self.assertEqual(result["email"], desdemona.email)
self.assertFalse(result["is_bot"])
self.assertTrue(result["is_admin"])
self.assertTrue(result["is_owner"])
self.assertFalse(result["is_guest"])
self.assertEqual(result["role"], UserProfile.ROLE_REALM_OWNER)
self.login("shiva")
result = orjson.loads(self.client_get("/json/users/me").content)
self.assertEqual(result["email"], shiva.email)
self.assertFalse(result["is_bot"])
self.assertFalse(result["is_admin"])
self.assertFalse(result["is_owner"])
self.assertFalse(result["is_guest"])
self.assertEqual(result["role"], UserProfile.ROLE_MODERATOR)
# Tests the GET ../users/{id} API endpoint.
user = self.example_user("hamlet")
result = orjson.loads(self.client_get(f"/json/users/{user.id}").content)
self.assertEqual(result["user"]["email"], user.email)
self.assertEqual(result["user"]["full_name"], user.full_name)
self.assertIn("user_id", result["user"])
self.assertNotIn("profile_data", result["user"])
self.assertFalse(result["user"]["is_bot"])
self.assertFalse(result["user"]["is_admin"])
self.assertFalse(result["user"]["is_owner"])
result = orjson.loads(
self.client_get(
f"/json/users/{user.id}", {"include_custom_profile_fields": "true"}
).content
)
self.assertIn("profile_data", result["user"])
result = self.client_get("/json/users/30")
self.assert_json_error(result, "No such user")
bot = self.example_user("default_bot")
result = orjson.loads(self.client_get(f"/json/users/{bot.id}").content)
self.assertEqual(result["user"]["email"], bot.email)
self.assertTrue(result["user"]["is_bot"])
def test_get_user_by_email(self) -> None:
user = self.example_user("hamlet")
self.login("hamlet")
result = orjson.loads(self.client_get(f"/json/users/{user.email}").content)
self.assertEqual(result["user"]["email"], user.email)
self.assertEqual(result["user"]["full_name"], user.full_name)
self.assertIn("user_id", result["user"])
self.assertNotIn("profile_data", result["user"])
self.assertFalse(result["user"]["is_bot"])
self.assertFalse(result["user"]["is_admin"])
self.assertFalse(result["user"]["is_owner"])
result = orjson.loads(
self.client_get(
f"/json/users/{user.email}", {"include_custom_profile_fields": "true"}
).content
)
self.assertIn("profile_data", result["user"])
result = self.client_get("/json/users/invalid")
self.assert_json_error(result, "No such user")
bot = self.example_user("default_bot")
result = orjson.loads(self.client_get(f"/json/users/{bot.email}").content)
self.assertEqual(result["user"]["email"], bot.email)
self.assertTrue(result["user"]["is_bot"])
def test_get_all_profiles_avatar_urls(self) -> None:
hamlet = self.example_user("hamlet")
result = self.api_get(hamlet, "/api/v1/users")
self.assert_json_success(result)
(my_user,) = [user for user in result.json()["members"] if user["email"] == hamlet.email]
self.assertEqual(
my_user["avatar_url"],
avatar_url(hamlet),
)
class DeleteUserTest(ZulipTestCase):
def test_do_delete_user(self) -> None:
realm = get_realm("zulip")
cordelia = self.example_user("cordelia")
othello = self.example_user("othello")
hamlet = self.example_user("hamlet")
hamlet_personal_recipient = hamlet.recipient
hamlet_user_id = hamlet.id
self.send_personal_message(cordelia, hamlet)
self.send_personal_message(hamlet, cordelia)
personal_message_ids_to_hamlet = Message.objects.filter(
recipient=hamlet_personal_recipient
).values_list("id", flat=True)
self.assertTrue(len(personal_message_ids_to_hamlet) > 0)
self.assertTrue(Message.objects.filter(sender=hamlet).exists())
huddle_message_ids_from_cordelia = [
self.send_huddle_message(cordelia, [hamlet, othello]) for i in range(3)
]
huddle_message_ids_from_hamlet = [
self.send_huddle_message(hamlet, [cordelia, othello]) for i in range(3)
]
huddle_with_hamlet_recipient_ids = list(
Subscription.objects.filter(
user_profile=hamlet, recipient__type=Recipient.HUDDLE
).values_list("recipient_id", flat=True)
)
self.assertTrue(len(huddle_with_hamlet_recipient_ids) > 0)
do_delete_user(hamlet)
replacement_dummy_user = UserProfile.objects.get(id=hamlet_user_id, realm=realm)
self.assertEqual(
replacement_dummy_user.delivery_email, f"deleteduser{hamlet_user_id}@{realm.uri}"
)
self.assertEqual(replacement_dummy_user.is_mirror_dummy, True)
self.assertEqual(Message.objects.filter(id__in=personal_message_ids_to_hamlet).count(), 0)
# Huddle messages from hamlet should have been deleted, but messages of other participants should
# be kept.
self.assertEqual(Message.objects.filter(id__in=huddle_message_ids_from_hamlet).count(), 0)
self.assertEqual(Message.objects.filter(id__in=huddle_message_ids_from_cordelia).count(), 3)
self.assertEqual(Message.objects.filter(sender_id=hamlet_user_id).count(), 0)
# Verify that the dummy user is subscribed to the deleted user's huddles, to keep huddle data
# in a correct state.
for recipient_id in huddle_with_hamlet_recipient_ids:
self.assertTrue(
Subscription.objects.filter(
user_profile=replacement_dummy_user, recipient_id=recipient_id
).exists()
)
class FakeEmailDomainTest(ZulipTestCase):
def test_get_fake_email_domain(self) -> None:
realm = get_realm("zulip")
self.assertEqual("zulip.testserver", get_fake_email_domain(realm))
with self.settings(EXTERNAL_HOST="example.com"):
self.assertEqual("zulip.example.com", get_fake_email_domain(realm))
@override_settings(FAKE_EMAIL_DOMAIN="fakedomain.com", REALM_HOSTS={"zulip": "127.0.0.1"})
def test_get_fake_email_domain_realm_host_is_ip_addr(self) -> None:
realm = get_realm("zulip")
self.assertEqual("fakedomain.com", get_fake_email_domain(realm))
@override_settings(FAKE_EMAIL_DOMAIN="invaliddomain", REALM_HOSTS={"zulip": "127.0.0.1"})
def test_invalid_fake_email_domain(self) -> None:
realm = get_realm("zulip")
with self.assertRaises(InvalidFakeEmailDomain):
get_fake_email_domain(realm)
@override_settings(FAKE_EMAIL_DOMAIN="127.0.0.1", REALM_HOSTS={"zulip": "127.0.0.1"})
def test_invalid_fake_email_domain_ip(self) -> None:
with self.assertRaises(InvalidFakeEmailDomain):
realm = get_realm("zulip")
get_fake_email_domain(realm)
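Taken together, the FakeEmailDomainTest cases above pin down a small contract: use the realm's own host when it forms a plausible email domain, fall back to the FAKE_EMAIL_DOMAIN setting otherwise, and raise InvalidFakeEmailDomain when neither candidate is usable (for example, a bare IP address or a name without a dot). The snippet below is only a minimal sketch consistent with those assertions, not the actual Zulip implementation; the names get_fake_email_domain_sketch, _looks_like_email_domain, and InvalidFakeEmailDomainSketch are hypothetical.
# Hypothetical sketch of the behavior the FakeEmailDomainTest cases assert;
# not the real zerver implementation.
import re as _re
class InvalidFakeEmailDomainSketch(Exception):
    pass
def _looks_like_email_domain(candidate: str) -> bool:
    # Very rough check: must contain a dot and must not be a bare IPv4 address.
    if _re.fullmatch(r"\d{1,3}(\.\d{1,3}){3}", candidate):
        return False
    return "." in candidate
def get_fake_email_domain_sketch(realm_host: str, fake_email_domain: str) -> str:
    if _looks_like_email_domain(realm_host):
        return realm_host
    if _looks_like_email_domain(fake_email_domain):
        return fake_email_domain
    raise InvalidFakeEmailDomainSketch(fake_email_domain)
# e.g. get_fake_email_domain_sketch("zulip.testserver", "testserver") == "zulip.testserver",
# while get_fake_email_domain_sketch("127.0.0.1", "127.0.0.1") raises InvalidFakeEmailDomainSketch.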
|
import subprocess
import argparse
import re
from datetime import datetime, timedelta
from time import sleep
import socket
import requests
import scapy.all as scapy
from concurrent.futures import ThreadPoolExecutor
MONITOR_INTERVAL = 60
DISCOVERY_INTERVAL = 300
parser = argparse.ArgumentParser(description="Host Monitor")
parser.add_argument('--poolsize', default=10, help='Size of the threadpool')
parser.add_argument('--quokka', default="localhost:5001", help='Hostname/IP and port of the quokka server')
args = parser.parse_args()
threadpool_size = int(args.poolsize)
quokka = args.quokka
def get_hosts():
global quokka
print("\n\n----> Retrieving hosts ...", end="")
try:
response = requests.get("http://"+quokka+"/hosts")
except requests.exceptions.ConnectionError as e:
print(f" !!! Exception trying to get hosts via REST API: {e}")
return {}
if response.status_code != 200:
print(f" !!! Failed to retrieve hosts from server: {response.reason}")
return {}
print(" Hosts successfully retrieved")
return response.json()
def discovery():
    # Discover hosts on the local network using scapy's arping().
    print(
        "\n\n----- Discovering hosts on network using arping() ---------------------"
)
    ans, unans = scapy.arping("192.168.254.0/24")  # NOTE: subnet is site-specific; adjust for the network being monitored
ans.summary()
for res in ans.res:
print(f"oooo> IP address discovered: {res[0].payload.pdst}")
ip_addr = res[1].payload.psrc
mac_addr = res[1].payload.hwsrc
try:
hostname = socket.gethostbyaddr(str(ip_addr))
except (socket.error, socket.gaierror):
            hostname = (str(ip_addr), [], [str(ip_addr)])  # fall back to the IP, mimicking gethostbyaddr()'s return shape
last_heard = str(datetime.now())[:-3]
host = {
"ip_address": ip_addr,
"mac_address": mac_addr,
"hostname": hostname[0],
"last_heard": last_heard,
"availability": True,
"response_time": 0
}
update_host(host)
def update_host(host):
    global quokka
    print(f"----> Updating host status via REST API: {host['hostname']}", end="")
    try:
        rsp = requests.put("http://"+quokka+"/hosts", params={"hostname": host["hostname"]}, json=host)
    except requests.exceptions.ConnectionError as e:
        print(f" !!! Exception trying to update host {host['hostname']} via REST API: {e}")
        return
    if rsp.status_code != 204:
        print(
            f"{str(datetime.now())[:-3]}: Error updating /hosts, response: {rsp.status_code}, {rsp.content}"
        )
        print(f" !!! Unsuccessful attempt to update host status via REST API: {host['hostname']}")
    else:
        print(f" Successfully updated host status via REST API: {host['hostname']}")
def get_response_time(ping_output):
    # Pull the first reported round-trip time (ms) out of the ping output and convert it to seconds.
    m = re.search(r"time=([0-9.]+)", ping_output)
    if m:
        return str(float(m.group(1)) / 1000)
    return 0
def ping_host(host):
    try:
        print(f"----> Pinging host: {host['hostname']}", end="")
        # Three pings, numeric output only, 0.5s between pings, 2s timeout per reply.
        ping_output = subprocess.check_output(
            ["ping", "-c3", "-n", "-i0.5", "-W2", host["ip_address"]]
        )
        host["availability"] = True
        host["response_time"] = get_response_time(str(ping_output))
        host["last_heard"] = str(datetime.now())[:-3]
        print(f" Host ping successful: {host['hostname']}")
    except subprocess.CalledProcessError:
        host["availability"] = False
        print(f" !!! Host ping failed: {host['hostname']}")
update_host(host)
def main():
global threadpool_size
last_discovery = datetime.now()-timedelta(days=1)
while True:
if (datetime.now() - last_discovery).total_seconds() > DISCOVERY_INTERVAL:
discovery()
last_discovery = datetime.now()
hosts = get_hosts()
with ThreadPoolExecutor(max_workers=threadpool_size) as executor:
executor.map(ping_host, hosts.values())
sleep(MONITOR_INTERVAL)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("\n\nExiting host-monitor")
exit()
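For context, the monitor above assumes a small REST service (the quokka server) that exposes GET /hosts, returning a JSON object keyed by hostname, and PUT /hosts?hostname=<name>, accepting the updated host record and replying 204. The stub below is a minimal sketch of such a service, assuming Flask and an in-memory dict; only the route, the hostname query parameter, the port, and the 204 status are taken from the calls above, everything else is illustrative.
# Hypothetical stand-in for the quokka /hosts API used by the monitor above;
# the real quokka server may differ in everything except the route and status codes.
from flask import Flask, request, jsonify
app = Flask(__name__)
host_table = {}  # hostname -> host record, as sent by update_host()
@app.route("/hosts", methods=["GET"])
def list_hosts():
    # get_hosts() in the monitor iterates over the returned object with .values().
    return jsonify(host_table)
@app.route("/hosts", methods=["PUT"])
def put_host():
    hostname = request.args.get("hostname")
    if not hostname:
        return "missing hostname parameter", 400
    host_table[hostname] = request.get_json()
    return "", 204  # the monitor treats any status other than 204 as a failure
if __name__ == "__main__":
    # Matches the monitor's default --quokka value of localhost:5001.
    app.run(port=5001)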