| edited_code | original_code |
|---|---|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import ip_config
import arp
import interface_po_dhcp_conf
import icmp
import igmp_po_intf_cfg
import interface_PO_ospf_conf
import pim_intf_po_cont
class ip(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/port-channel/ip. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The IP configurations for an interface.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__ip_config','__arp','__interface_po_dhcp_conf','__icmp','__igmp_po_intf_cfg','__interface_PO_ospf_conf','__pim_intf_po_cont',)
_yang_name = 'ip'
_rest_name = 'ip'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__arp = YANGDynClass(base=arp.arp, is_container='container', presence=False, yang_name="arp", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)
self.__igmp_po_intf_cfg = YANGDynClass(base=igmp_po_intf_cfg.igmp_po_intf_cfg, is_container='container', presence=False, yang_name="igmp-po-intf-cfg", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpPo', u'sort-priority': u'122'}}, namespace='urn:brocade.com:mgmt:brocade-igmp', defining_module='brocade-igmp', yang_type='container', is_config=True)
self.__interface_po_dhcp_conf = YANGDynClass(base=interface_po_dhcp_conf.interface_po_dhcp_conf, is_container='container', presence=False, yang_name="interface-po-dhcp-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
self.__pim_intf_po_cont = YANGDynClass(base=pim_intf_po_cont.pim_intf_po_cont, is_container='container', presence=False, yang_name="pim-intf-po-cont", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'PimPoIntfCallpoint', u'sort-priority': u'121'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)
self.__interface_PO_ospf_conf = YANGDynClass(base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
self.__ip_config = YANGDynClass(base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'intf-po-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)
self.__icmp = YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'117', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpPoIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'port-channel', u'ip']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Port-channel', u'ip']
def _get_ip_config(self):
"""
Getter method for ip_config, mapped from YANG variable /interface/port_channel/ip/ip_config (container)
"""
return self.__ip_config
def _set_ip_config(self, v, load=False):
"""
Setter method for ip_config, mapped from YANG variable /interface/port_channel/ip/ip_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_config() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'intf-po-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'intf-po-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)""",
})
self.__ip_config = t
if hasattr(self, '_set'):
self._set()
def _unset_ip_config(self):
self.__ip_config = YANGDynClass(base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'intf-po-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)
def _get_arp(self):
"""
Getter method for arp, mapped from YANG variable /interface/port_channel/ip/arp (container)
"""
return self.__arp
def _set_arp(self, v, load=False):
"""
Setter method for arp, mapped from YANG variable /interface/port_channel/ip/arp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_arp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_arp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=arp.arp, is_container='container', presence=False, yang_name="arp", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """arp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=arp.arp, is_container='container', presence=False, yang_name="arp", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)""",
})
self.__arp = t
if hasattr(self, '_set'):
self._set()
def _unset_arp(self):
self.__arp = YANGDynClass(base=arp.arp, is_container='container', presence=False, yang_name="arp", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)
def _get_interface_po_dhcp_conf(self):
"""
Getter method for interface_po_dhcp_conf, mapped from YANG variable /interface/port_channel/ip/interface_po_dhcp_conf (container)
"""
return self.__interface_po_dhcp_conf
def _set_interface_po_dhcp_conf(self, v, load=False):
"""
Setter method for interface_po_dhcp_conf, mapped from YANG variable /interface/port_channel/ip/interface_po_dhcp_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_po_dhcp_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_po_dhcp_conf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_po_dhcp_conf.interface_po_dhcp_conf, is_container='container', presence=False, yang_name="interface-po-dhcp-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_po_dhcp_conf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_po_dhcp_conf.interface_po_dhcp_conf, is_container='container', presence=False, yang_name="interface-po-dhcp-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u"tailf-common": {u"cli-drop-node-name": None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)""",
})
self.__interface_po_dhcp_conf = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_po_dhcp_conf(self):
self.__interface_po_dhcp_conf = YANGDynClass(base=interface_po_dhcp_conf.interface_po_dhcp_conf, is_container='container', presence=False, yang_name="interface-po-dhcp-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
def _get_icmp(self):
"""
Getter method for icmp, mapped from YANG variable /interface/port_channel/ip/icmp (container)
"""
return self.__icmp
def _set_icmp(self, v, load=False):
"""
Setter method for icmp, mapped from YANG variable /interface/port_channel/ip/icmp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_icmp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_icmp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'117', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpPoIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """icmp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'117', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpPoIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)""",
})
self.__icmp = t
if hasattr(self, '_set'):
self._set()
def _unset_icmp(self):
self.__icmp = YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'117', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpPoIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)
def _get_igmp_po_intf_cfg(self):
"""
Getter method for igmp_po_intf_cfg, mapped from YANG variable /interface/port_channel/ip/igmp_po_intf_cfg (container)
"""
return self.__igmp_po_intf_cfg
def _set_igmp_po_intf_cfg(self, v, load=False):
"""
Setter method for igmp_po_intf_cfg, mapped from YANG variable /interface/port_channel/ip/igmp_po_intf_cfg (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmp_po_intf_cfg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmp_po_intf_cfg() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=igmp_po_intf_cfg.igmp_po_intf_cfg, is_container='container', presence=False, yang_name="igmp-po-intf-cfg", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpPo', u'sort-priority': u'122'}}, namespace='urn:brocade.com:mgmt:brocade-igmp', defining_module='brocade-igmp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igmp_po_intf_cfg must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=igmp_po_intf_cfg.igmp_po_intf_cfg, is_container='container', presence=False, yang_name="igmp-po-intf-cfg", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpPo', u'sort-priority': u'122'}}, namespace='urn:brocade.com:mgmt:brocade-igmp', defining_module='brocade-igmp', yang_type='container', is_config=True)""",
})
self.__igmp_po_intf_cfg = t
if hasattr(self, '_set'):
self._set()
def _unset_igmp_po_intf_cfg(self):
self.__igmp_po_intf_cfg = YANGDynClass(base=igmp_po_intf_cfg.igmp_po_intf_cfg, is_container='container', presence=False, yang_name="igmp-po-intf-cfg", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpPo', u'sort-priority': u'122'}}, namespace='urn:brocade.com:mgmt:brocade-igmp', defining_module='brocade-igmp', yang_type='container', is_config=True)
def _get_interface_PO_ospf_conf(self):
"""
Getter method for interface_PO_ospf_conf, mapped from YANG variable /interface/port_channel/ip/interface_PO_ospf_conf (container)
"""
return self.__interface_PO_ospf_conf
def _set_interface_PO_ospf_conf(self, v, load=False):
"""
Setter method for interface_PO_ospf_conf, mapped from YANG variable /interface/port_channel/ip/interface_PO_ospf_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_PO_ospf_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_PO_ospf_conf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_PO_ospf_conf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u"tailf-common": {u"cli-drop-node-name": None, u"callpoint": u"OSPFPoInterfaceCallPoint"}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
})
self.__interface_PO_ospf_conf = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_PO_ospf_conf(self):
self.__interface_PO_ospf_conf = YANGDynClass(base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
def _get_pim_intf_po_cont(self):
"""
Getter method for pim_intf_po_cont, mapped from YANG variable /interface/port_channel/ip/pim_intf_po_cont (container)
"""
return self.__pim_intf_po_cont
def _set_pim_intf_po_cont(self, v, load=False):
"""
Setter method for pim_intf_po_cont, mapped from YANG variable /interface/port_channel/ip/pim_intf_po_cont (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_pim_intf_po_cont is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pim_intf_po_cont() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=pim_intf_po_cont.pim_intf_po_cont, is_container='container', presence=False, yang_name="pim-intf-po-cont", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'PimPoIntfCallpoint', u'sort-priority': u'121'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pim_intf_po_cont must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=pim_intf_po_cont.pim_intf_po_cont, is_container='container', presence=False, yang_name="pim-intf-po-cont", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'PimPoIntfCallpoint', u'sort-priority': u'121'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)""",
})
self.__pim_intf_po_cont = t
if hasattr(self, '_set'):
self._set()
def _unset_pim_intf_po_cont(self):
self.__pim_intf_po_cont = YANGDynClass(base=pim_intf_po_cont.pim_intf_po_cont, is_container='container', presence=False, yang_name="pim-intf-po-cont", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'PimPoIntfCallpoint', u'sort-priority': u'121'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)
ip_config = __builtin__.property(_get_ip_config, _set_ip_config)
arp = __builtin__.property(_get_arp, _set_arp)
interface_po_dhcp_conf = __builtin__.property(_get_interface_po_dhcp_conf, _set_interface_po_dhcp_conf)
icmp = __builtin__.property(_get_icmp, _set_icmp)
igmp_po_intf_cfg = __builtin__.property(_get_igmp_po_intf_cfg, _set_igmp_po_intf_cfg)
interface_PO_ospf_conf = __builtin__.property(_get_interface_PO_ospf_conf, _set_interface_PO_ospf_conf)
pim_intf_po_cont = __builtin__.property(_get_pim_intf_po_cont, _set_pim_intf_po_cont)
_pyangbind_elements = {'ip_config': ip_config, 'arp': arp, 'interface_po_dhcp_conf': interface_po_dhcp_conf, 'icmp': icmp, 'igmp_po_intf_cfg': igmp_po_intf_cfg, 'interface_PO_ospf_conf': interface_PO_ospf_conf, 'pim_intf_po_cont': pim_intf_po_cont, }
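# --- Usage sketch (not part of the generated bindings) ---
# A minimal, hypothetical example of how this auto-generated container is
# typically driven: each YANG child container is exposed as a property on
# the class, and values are written back through the generated _set_* methods.
# Instantiating the class stand-alone (no parent, no path helper) is assumed
# here purely for illustration.
if __name__ == "__main__":
    po_ip = ip()                  # stand-alone instance of the container
    arp_cfg = po_ip.arp           # child containers are exposed as properties
    po_ip._set_arp(arp_cfg)       # and repopulated through the generated setters
    print(po_ip._path())          # [u'interface', u'port-channel', u'ip']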
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import ip_config
import arp
import interface_po_dhcp_conf
import icmp
import igmp_po_intf_cfg
import interface_PO_ospf_conf
import pim_intf_po_cont
class ip(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/port-channel/ip. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The IP configurations for an interface.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__ip_config','__arp','__interface_po_dhcp_conf','__icmp','__igmp_po_intf_cfg','__interface_PO_ospf_conf','__pim_intf_po_cont',)
_yang_name = 'ip'
_rest_name = 'ip'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__arp = YANGDynClass(base=arp.arp, is_container='container', presence=False, yang_name="arp", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)
self.__igmp_po_intf_cfg = YANGDynClass(base=igmp_po_intf_cfg.igmp_po_intf_cfg, is_container='container', presence=False, yang_name="igmp-po-intf-cfg", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpPo', u'sort-priority': u'122'}}, namespace='urn:brocade.com:mgmt:brocade-igmp', defining_module='brocade-igmp', yang_type='container', is_config=True)
self.__interface_po_dhcp_conf = YANGDynClass(base=interface_po_dhcp_conf.interface_po_dhcp_conf, is_container='container', presence=False, yang_name="interface-po-dhcp-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
self.__pim_intf_po_cont = YANGDynClass(base=pim_intf_po_cont.pim_intf_po_cont, is_container='container', presence=False, yang_name="pim-intf-po-cont", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'PimPoIntfCallpoint', u'sort-priority': u'121'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)
self.__interface_PO_ospf_conf = YANGDynClass(base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
self.__ip_config = YANGDynClass(base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'intf-po-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)
self.__icmp = YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'117', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpPoIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'port-channel', u'ip']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Port-channel', u'ip']
def _get_ip_config(self):
"""
Getter method for ip_config, mapped from YANG variable /interface/port_channel/ip/ip_config (container)
"""
return self.__ip_config
def _set_ip_config(self, v, load=False):
"""
Setter method for ip_config, mapped from YANG variable /interface/port_channel/ip/ip_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_config() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'intf-po-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'intf-po-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)""",
})
self.__ip_config = t
if hasattr(self, '_set'):
self._set()
def _unset_ip_config(self):
self.__ip_config = YANGDynClass(base=ip_config.ip_config, is_container='container', presence=False, yang_name="ip-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'intf-po-ip-cfg-cp', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-ip-config', defining_module='brocade-ip-config', yang_type='container', is_config=True)
def _get_arp(self):
"""
Getter method for arp, mapped from YANG variable /interface/port_channel/ip/arp (container)
"""
return self.__arp
def _set_arp(self, v, load=False):
"""
Setter method for arp, mapped from YANG variable /interface/port_channel/ip/arp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_arp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_arp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=arp.arp, is_container='container', presence=False, yang_name="arp", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """arp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=arp.arp, is_container='container', presence=False, yang_name="arp", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)""",
})
self.__arp = t
if hasattr(self, '_set'):
self._set()
def _unset_arp(self):
self.__arp = YANGDynClass(base=arp.arp, is_container='container', presence=False, yang_name="arp", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ARP', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)
def _get_interface_po_dhcp_conf(self):
"""
Getter method for interface_po_dhcp_conf, mapped from YANG variable /interface/port_channel/ip/interface_po_dhcp_conf (container)
"""
return self.__interface_po_dhcp_conf
def _set_interface_po_dhcp_conf(self, v, load=False):
"""
Setter method for interface_po_dhcp_conf, mapped from YANG variable /interface/port_channel/ip/interface_po_dhcp_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_po_dhcp_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_po_dhcp_conf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_po_dhcp_conf.interface_po_dhcp_conf, is_container='container', presence=False, yang_name="interface-po-dhcp-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_po_dhcp_conf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_po_dhcp_conf.interface_po_dhcp_conf, is_container='container', presence=False, yang_name="interface-po-dhcp-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)""",
})
self.__interface_po_dhcp_conf = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_po_dhcp_conf(self):
self.__interface_po_dhcp_conf = YANGDynClass(base=interface_po_dhcp_conf.interface_po_dhcp_conf, is_container='container', presence=False, yang_name="interface-po-dhcp-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
def _get_icmp(self):
"""
Getter method for icmp, mapped from YANG variable /interface/port_channel/ip/icmp (container)
"""
return self.__icmp
def _set_icmp(self, v, load=False):
"""
Setter method for icmp, mapped from YANG variable /interface/port_channel/ip/icmp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_icmp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_icmp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'117', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpPoIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """icmp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'117', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpPoIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)""",
})
self.__icmp = t
if hasattr(self, '_set'):
self._set()
def _unset_icmp(self):
self.__icmp = YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Control Message Protocol(ICMP)', u'sort-priority': u'117', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'callpoint': u'IcmpPoIntfConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-icmp', defining_module='brocade-icmp', yang_type='container', is_config=True)
def _get_igmp_po_intf_cfg(self):
"""
Getter method for igmp_po_intf_cfg, mapped from YANG variable /interface/port_channel/ip/igmp_po_intf_cfg (container)
"""
return self.__igmp_po_intf_cfg
def _set_igmp_po_intf_cfg(self, v, load=False):
"""
Setter method for igmp_po_intf_cfg, mapped from YANG variable /interface/port_channel/ip/igmp_po_intf_cfg (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmp_po_intf_cfg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmp_po_intf_cfg() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=igmp_po_intf_cfg.igmp_po_intf_cfg, is_container='container', presence=False, yang_name="igmp-po-intf-cfg", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpPo', u'sort-priority': u'122'}}, namespace='urn:brocade.com:mgmt:brocade-igmp', defining_module='brocade-igmp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igmp_po_intf_cfg must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=igmp_po_intf_cfg.igmp_po_intf_cfg, is_container='container', presence=False, yang_name="igmp-po-intf-cfg", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpPo', u'sort-priority': u'122'}}, namespace='urn:brocade.com:mgmt:brocade-igmp', defining_module='brocade-igmp', yang_type='container', is_config=True)""",
})
self.__igmp_po_intf_cfg = t
if hasattr(self, '_set'):
self._set()
def _unset_igmp_po_intf_cfg(self):
self.__igmp_po_intf_cfg = YANGDynClass(base=igmp_po_intf_cfg.igmp_po_intf_cfg, is_container='container', presence=False, yang_name="igmp-po-intf-cfg", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'IgmpPo', u'sort-priority': u'122'}}, namespace='urn:brocade.com:mgmt:brocade-igmp', defining_module='brocade-igmp', yang_type='container', is_config=True)
def _get_interface_PO_ospf_conf(self):
"""
Getter method for interface_PO_ospf_conf, mapped from YANG variable /interface/port_channel/ip/interface_PO_ospf_conf (container)
"""
return self.__interface_PO_ospf_conf
def _set_interface_PO_ospf_conf(self, v, load=False):
"""
Setter method for interface_PO_ospf_conf, mapped from YANG variable /interface/port_channel/ip/interface_PO_ospf_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_PO_ospf_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_PO_ospf_conf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_PO_ospf_conf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
})
self.__interface_PO_ospf_conf = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_PO_ospf_conf(self):
self.__interface_PO_ospf_conf = YANGDynClass(base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
def _get_pim_intf_po_cont(self):
"""
Getter method for pim_intf_po_cont, mapped from YANG variable /interface/port_channel/ip/pim_intf_po_cont (container)
"""
return self.__pim_intf_po_cont
def _set_pim_intf_po_cont(self, v, load=False):
"""
Setter method for pim_intf_po_cont, mapped from YANG variable /interface/port_channel/ip/pim_intf_po_cont (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_pim_intf_po_cont is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pim_intf_po_cont() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=pim_intf_po_cont.pim_intf_po_cont, is_container='container', presence=False, yang_name="pim-intf-po-cont", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'PimPoIntfCallpoint', u'sort-priority': u'121'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pim_intf_po_cont must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=pim_intf_po_cont.pim_intf_po_cont, is_container='container', presence=False, yang_name="pim-intf-po-cont", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'PimPoIntfCallpoint', u'sort-priority': u'121'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)""",
})
self.__pim_intf_po_cont = t
if hasattr(self, '_set'):
self._set()
def _unset_pim_intf_po_cont(self):
self.__pim_intf_po_cont = YANGDynClass(base=pim_intf_po_cont.pim_intf_po_cont, is_container='container', presence=False, yang_name="pim-intf-po-cont", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'PimPoIntfCallpoint', u'sort-priority': u'121'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)
ip_config = __builtin__.property(_get_ip_config, _set_ip_config)
arp = __builtin__.property(_get_arp, _set_arp)
interface_po_dhcp_conf = __builtin__.property(_get_interface_po_dhcp_conf, _set_interface_po_dhcp_conf)
icmp = __builtin__.property(_get_icmp, _set_icmp)
igmp_po_intf_cfg = __builtin__.property(_get_igmp_po_intf_cfg, _set_igmp_po_intf_cfg)
interface_PO_ospf_conf = __builtin__.property(_get_interface_PO_ospf_conf, _set_interface_PO_ospf_conf)
pim_intf_po_cont = __builtin__.property(_get_pim_intf_po_cont, _set_pim_intf_po_cont)
_pyangbind_elements = {'ip_config': ip_config, 'arp': arp, 'interface_po_dhcp_conf': interface_po_dhcp_conf, 'icmp': icmp, 'igmp_po_intf_cfg': igmp_po_intf_cfg, 'interface_PO_ospf_conf': interface_PO_ospf_conf, 'pim_intf_po_cont': pim_intf_po_cont, }
|
# Copyright (c) 2020 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import pathlib
from buildbot.changes.gitpoller import GitPoller
from buildbot.plugins import schedulers, util, worker, reporters
sys.path.append(str(pathlib.Path(__file__).resolve().parents[2]))
import bb.master.config as config
import bb.utils
c = BuildmasterConfig = {}
# Add workers
c["workers"] = []
ALL_WORKERS_NAMES = []
for worker_ in config.WORKERS.values():
for w_name, prop in worker_.items():
ALL_WORKERS_NAMES.append(w_name)
c["workers"].append(worker.Worker(w_name, config.WORKER_PASS,
properties=prop,
# To disable parallel builds on one worker
max_builds=prop.get('max_builds') or 1))
# Basic config
c["protocols"] = {"pb": {"port": config.WORKER_PORT}}
c["buildbotNetUsageData"] = config.BUILDBOT_NET_USAGE_DATA
c["title"] = config.BUILDBOT_TITLE
c["titleURL"] = config.REPO_URL
c["buildbotURL"] = config.BUILDBOT_URL
def get_workers(worker_pool):
if worker_pool is None:
return ALL_WORKERS_NAMES
return list(config.WORKERS[worker_pool].keys())
# Create schedulers and builders for builds
c["builders"] = []
c["schedulers"] = [
schedulers.SingleBranchScheduler(name=config.TRIGGER,
change_filter=util.ChangeFilter(),
treeStableTimer=config.BUILDBOT_TREE_STABLE_TIMER,
builderNames=[config.TRIGGER])]
for builder_name, properties in config.FLOW.get_prepared_builders().items():
if properties.get('add_triggerable_sheduler', True):
c["schedulers"].append(schedulers.Triggerable(name=builder_name,
builderNames=[builder_name]))
c["builders"].append(util.BuilderConfig(name=builder_name,
workernames=get_workers(properties.get("worker")),
factory=properties['factory']))
class GitHubStatusPushFilter(reporters.GitHubStatusPush):
"""
This class extends the filtering options of reporters.GitHubStatusPush
"""
def filterBuilds(self, build):
# All builds have basic 'repository' property
repository = bb.utils.get_repository_name_by_url(build['properties']['repository'][0])
# Statuses for AUTO_UPDATED_REPOSITORIES are not sent, so as not to affect review requests
# in these repositories
if repository not in config.AUTO_UPDATED_REPOSITORIES:
if self.builders is not None:
return build['builder']['name'] in self.builders
return True
return False
# Push build statuses to GitHub
c["services"] = [
GitHubStatusPushFilter(token=config.GITHUB_TOKEN,
context=util.Interpolate("buildbot/%(prop:buildername)s"),
startDescription="Started",
endDescription="Done",
verbose=True)]
# Get changes
c["change_source"] = []
class MediasdkChangeChecker(bb.utils.ChangeChecker):
def pull_request_filter(self, pull_request, files):
return self.default_properties
CI_REPOSITORIES = [
{'name': config.MEDIASDK_REPO,
'organization': config.MEDIASDK_ORGANIZATION,
# All changes
'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)},
{'name': config.DRIVER_REPO,
'organization': config.INTEL_ORGANIZATION,
'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)},
{'name': config.PRODUCT_CONFIGS_REPO,
'organization': config.MEDIASDK_ORGANIZATION,
# Pull requests are accepted only from members of the Intel-Media-SDK organization
# This filter is needed for security, because product configs can be used to do almost anything
'change_filter': bb.utils.ChangeChecker(config.GITHUB_TOKEN)},
{'name': config.INFRASTRUCTURE_REPO,
'organization': config.MEDIASDK_ORGANIZATION,
# All changes
'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)}
]
for repo in CI_REPOSITORIES:
repo_url = f"https://github.com/{repo["organization"]}/{repo["name"]}.git"
c["change_source"].append(GitPoller(
repourl=repo_url,
# Dir for the output of the git ls-remote command
workdir=f"gitpoller-{repo['name']}",
# Poll master, release branches and open pull request branches
# Filters are applied in the following order:
# branches (discard all non-release branches)
# pull_request (add branches of open pull requests)
# *fetch branches*
# change_filter (check the changes)
branches=lambda branch: bb.utils.is_release_branch(branch),
pull_request_branches=bb.utils.get_open_pull_request_branches(repo['organization'],
repo['name'],
token=config.GITHUB_TOKEN),
change_filter=repo['change_filter'],
category="media",
pollInterval=config.POLL_INTERVAL,
pollAtLaunch=True))
for repo in config.AUTO_UPDATED_REPOSITORIES:
repo_url = f"https://github.com/{config.INTEL_ORGANIZATION}/{repo}.git"
c["change_source"].append(GitPoller(
repourl=repo_url,
workdir=f"gitpoller-{repo}",
branches=['master'],
category="auto_update",
change_filter=MediasdkChangeChecker(),
pollInterval=config.POLL_INTERVAL,
pollAtLaunch=True))
# Web Interface
c["www"] = dict(port=int(config.PORT),
plugins={"console_view": True,
"grid_view": True})
# Database
c["db"] = {"db_url": config.DATABASE_URL}
# Disable automatic merging of build requests (so that EACH commit is built)
c["collapseRequests"] = False
|
# Copyright (c) 2020 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import pathlib
from buildbot.changes.gitpoller import GitPoller
from buildbot.plugins import schedulers, util, worker, reporters
sys.path.append(str(pathlib.Path(__file__).resolve().parents[2]))
import bb.master.config as config
import bb.utils
c = BuildmasterConfig = {}
# Add workers
c["workers"] = []
ALL_WORKERS_NAMES = []
for worker_ in config.WORKERS.values():
for w_name, prop in worker_.items():
ALL_WORKERS_NAMES.append(w_name)
c["workers"].append(worker.Worker(w_name, config.WORKER_PASS,
properties=prop,
# To disable parallel builds on one worker
max_builds=prop.get('max_builds') or 1))
# Basic config
c["protocols"] = {"pb": {"port": config.WORKER_PORT}}
c["buildbotNetUsageData"] = config.BUILDBOT_NET_USAGE_DATA
c["title"] = config.BUILDBOT_TITLE
c["titleURL"] = config.REPO_URL
c["buildbotURL"] = config.BUILDBOT_URL
def get_workers(worker_pool):
if worker_pool is None:
return ALL_WORKERS_NAMES
return list(config.WORKERS[worker_pool].keys())
# Create schedulers and builders for builds
c["builders"] = []
c["schedulers"] = [
schedulers.SingleBranchScheduler(name=config.TRIGGER,
change_filter=util.ChangeFilter(),
treeStableTimer=config.BUILDBOT_TREE_STABLE_TIMER,
builderNames=[config.TRIGGER])]
for builder_name, properties in config.FLOW.get_prepared_builders().items():
if properties.get('add_triggerable_sheduler', True):
c["schedulers"].append(schedulers.Triggerable(name=builder_name,
builderNames=[builder_name]))
c["builders"].append(util.BuilderConfig(name=builder_name,
workernames=get_workers(properties.get("worker")),
factory=properties['factory']))
class GitHubStatusPushFilter(reporters.GitHubStatusPush):
"""
This class extends the filtering options of reporters.GitHubStatusPush
"""
def filterBuilds(self, build):
# All builds have basic 'repository' property
repository = bb.utils.get_repository_name_by_url(build['properties']['repository'][0])
# Statuses for AUTO_UPDATED_REPOSITORIES are not sent, so as not to affect review requests
# in these repositories
if repository not in config.AUTO_UPDATED_REPOSITORIES:
if self.builders is not None:
return build['builder']['name'] in self.builders
return True
return False
# Push build statuses to GitHub
c["services"] = [
GitHubStatusPushFilter(token=config.GITHUB_TOKEN,
context=util.Interpolate("buildbot/%(prop:buildername)s"),
startDescription="Started",
endDescription="Done",
verbose=True)]
# Get changes
c["change_source"] = []
class MediasdkChangeChecker(bb.utils.ChangeChecker):
def pull_request_filter(self, pull_request, files):
return self.default_properties
CI_REPOSITORIES = [
{'name': config.MEDIASDK_REPO,
'organization': config.MEDIASDK_ORGANIZATION,
# All changes
'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)},
{'name': config.DRIVER_REPO,
'organization': config.INTEL_ORGANIZATION,
'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)},
{'name': config.PRODUCT_CONFIGS_REPO,
'organization': config.MEDIASDK_ORGANIZATION,
     # Pull requests only from members of the Intel-Media-SDK organization
     # This filter is needed for security, because product configs can control everything
'change_filter': bb.utils.ChangeChecker(config.GITHUB_TOKEN)},
{'name': config.INFRASTRUCTURE_REPO,
'organization': config.MEDIASDK_ORGANIZATION,
# All changes
'change_filter': MediasdkChangeChecker(config.GITHUB_TOKEN)}
]
for repo in CI_REPOSITORIES:
repo_url = f"https://github.com/{repo['organization']}/{repo['name']}.git"
c["change_source"].append(GitPoller(
repourl=repo_url,
        # Working directory where the poller keeps its local git state
workdir=f"gitpoller-{repo['name']}",
# Poll master, release branches and open pull request branches
        # Filters are applied in the following order:
        # branches (discard all non-release branches)
        # pull_request (add the branches of open pull requests)
        # *fetch branches*
        # change_filter (check the changes)
branches=lambda branch: bb.utils.is_release_branch(branch),
pull_request_branches=bb.utils.get_open_pull_request_branches(repo['organization'],
repo['name'],
token=config.GITHUB_TOKEN),
change_filter=repo['change_filter'],
category="media",
pollInterval=config.POLL_INTERVAL,
pollAtLaunch=True))
for repo in config.AUTO_UPDATED_REPOSITORIES:
repo_url = f"https://github.com/{config.INTEL_ORGANIZATION}/{repo}.git"
c["change_source"].append(GitPoller(
repourl=repo_url,
workdir=f"gitpoller-{repo}",
branches=['master'],
category="auto_update",
change_filter=MediasdkChangeChecker(),
pollInterval=config.POLL_INTERVAL,
pollAtLaunch=True))
# Web Interface
c["www"] = dict(port=int(config.PORT),
plugins={"console_view": True,
"grid_view": True})
# Database
c["db"] = {"db_url": config.DATABASE_URL}
# Disable automatic merging of build requests (so that EACH commit is built)
c["collapseRequests"] = False
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import re
import warnings
from pathlib import Path
from transformers import is_flax_available, is_tf_available, is_torch_available
from transformers.file_utils import ENV_VARS_TRUE_VALUES
from transformers.models.auto import get_values
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_repo.py
PATH_TO_TRANSFORMERS = "src/transformers"
PATH_TO_TESTS = "tests"
PATH_TO_DOC = "docs/source"
# Update this list with models that are supposed to be private.
PRIVATE_MODELS = [
"DPRSpanPredictor",
"T5Stack",
"TFDPRSpanPredictor",
]
# Update this list for models that are not tested with a comment explaining the reason it should not be.
# Being in this list is an exception and should **not** be the rule.
IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [
# models to ignore for not tested
"BigBirdPegasusEncoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model.
"DetrEncoder", # Building part of bigger (tested) model.
"DetrDecoder", # Building part of bigger (tested) model.
"DetrDecoderWrapper", # Building part of bigger (tested) model.
"M2M100Encoder", # Building part of bigger (tested) model.
"M2M100Decoder", # Building part of bigger (tested) model.
"Speech2TextEncoder", # Building part of bigger (tested) model.
"Speech2TextDecoder", # Building part of bigger (tested) model.
"LEDEncoder", # Building part of bigger (tested) model.
"LEDDecoder", # Building part of bigger (tested) model.
"BartDecoderWrapper", # Building part of bigger (tested) model.
"BartEncoder", # Building part of bigger (tested) model.
"BertLMHeadModel", # Needs to be setup as decoder.
"BlenderbotSmallEncoder", # Building part of bigger (tested) model.
"BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model.
"BlenderbotEncoder", # Building part of bigger (tested) model.
"BlenderbotDecoderWrapper", # Building part of bigger (tested) model.
"MBartEncoder", # Building part of bigger (tested) model.
"MBartDecoderWrapper", # Building part of bigger (tested) model.
"MegatronBertLMHeadModel", # Building part of bigger (tested) model.
"MegatronBertEncoder", # Building part of bigger (tested) model.
"MegatronBertDecoder", # Building part of bigger (tested) model.
"MegatronBertDecoderWrapper", # Building part of bigger (tested) model.
"PegasusEncoder", # Building part of bigger (tested) model.
"PegasusDecoderWrapper", # Building part of bigger (tested) model.
"DPREncoder", # Building part of bigger (tested) model.
"ProphetNetDecoderWrapper", # Building part of bigger (tested) model.
"ReformerForMaskedLM", # Needs to be setup as decoder.
"TFDPREncoder", # Building part of bigger (tested) model.
"TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)
"TFRobertaForMultipleChoice", # TODO: fix
"SeparableConv1D", # Building part of bigger (tested) model.
]
# Update this list with test files that don't have a tester with an `all_model_classes` variable and which don't
# trigger the common tests.
TEST_FILES_WITH_NO_COMMON_TESTS = [
"test_modeling_camembert.py",
"test_modeling_flax_mt5.py",
"test_modeling_mbart.py",
"test_modeling_mt5.py",
"test_modeling_pegasus.py",
"test_modeling_tf_camembert.py",
"test_modeling_tf_mt5.py",
"test_modeling_tf_xlm_roberta.py",
"test_modeling_xlm_prophetnet.py",
"test_modeling_xlm_roberta.py",
]
# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and
# should **not** be the rule.
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [
# models to ignore for model xxx mapping
"BeitForMaskedImageModeling",
"CLIPTextModel",
"CLIPVisionModel",
"FlaxCLIPTextModel",
"FlaxCLIPVisionModel",
"FlaxWav2Vec2ForCTC",
"DetrForSegmentation",
"DPRReader",
"FlaubertForQuestionAnswering",
"GPT2DoubleHeadsModel",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"OpenAIGPTDoubleHeadsModel",
"RagModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
"TFDPRReader",
"TFGPT2DoubleHeadsModel",
"TFOpenAIGPTDoubleHeadsModel",
"TFRagModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
"Wav2Vec2ForCTC",
"HubertForCTC",
"XLMForQuestionAnswering",
"XLNetForQuestionAnswering",
"SeparableConv1D",
"VisualBertForRegionToPhraseAlignment",
"VisualBertForVisualReasoning",
"VisualBertForQuestionAnswering",
"VisualBertForMultipleChoice",
"TFWav2Vec2ForCTC",
"TFHubertForCTC",
]
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
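# Note: `spec.loader.load_module()` is deprecated in newer Python versions; a roughly equivalent,
# more modern pattern (shown only as a sketch, the original call above is left unchanged) is:
#   transformers = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(transformers)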
# If some modeling modules should be ignored for all checks, they should be added in the nested list
# _ignore_modules of this function.
def get_model_modules():
"""Get the model modules inside the transformers library."""
_ignore_modules = [
"modeling_auto",
"modeling_encoder_decoder",
"modeling_marian",
"modeling_mmbt",
"modeling_outputs",
"modeling_retribert",
"modeling_utils",
"modeling_flax_auto",
"modeling_flax_utils",
"modeling_transfo_xl_utilities",
"modeling_tf_auto",
"modeling_tf_outputs",
"modeling_tf_pytorch_utils",
"modeling_tf_utils",
"modeling_tf_transfo_xl_utilities",
]
modules = []
for model in dir(transformers.models):
# There are some magic dunder attributes in the dir, we ignore them
if not model.startswith("__"):
model_module = getattr(transformers.models, model)
for submodule in dir(model_module):
if submodule.startswith("modeling") and submodule not in _ignore_modules:
modeling_module = getattr(model_module, submodule)
if inspect.ismodule(modeling_module):
modules.append(modeling_module)
return modules
def get_models(module, include_pretrained=False):
"""Get the objects in module that are models."""
models = []
model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)
for attr_name in dir(module):
if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name):
continue
attr = getattr(module, attr_name)
if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:
models.append((attr_name, attr))
return models
def is_a_private_model(model):
"""Returns True if the model should not be in the main init."""
if model in PRIVATE_MODELS:
return True
# Wrapper, Encoder and Decoder are all privates
if model.endswith("Wrapper"):
return True
if model.endswith("Encoder"):
return True
if model.endswith("Decoder"):
return True
return False
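# Illustrative behaviour of is_a_private_model:
#   is_a_private_model("T5Stack")     -> True  (listed in PRIVATE_MODELS)
#   is_a_private_model("BartEncoder") -> True  (ends with "Encoder")
#   is_a_private_model("BertModel")   -> False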
def check_models_are_in_init():
"""Checks all models defined in the library are in the main init."""
models_not_in_init = []
dir_transformers = dir(transformers)
for module in get_model_modules():
models_not_in_init += [
model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers
]
# Remove private models
models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)]
if len(models_not_in_init) > 0:
raise Exception(f"The following models should be in the main init: {",".join(models_not_in_init)}.")
# If some test_modeling files should be ignored when checking models are all tested, they should be added in the
# nested list _ignore_files of this function.
def get_model_test_files():
"""Get the model test files."""
_ignore_files = [
"test_modeling_common",
"test_modeling_encoder_decoder",
"test_modeling_marian",
"test_modeling_tf_common",
]
test_files = []
for filename in os.listdir(PATH_TO_TESTS):
if (
os.path.isfile(f"{PATH_TO_TESTS}/{filename}")
and filename.startswith("test_modeling")
            and os.path.splitext(filename)[0] not in _ignore_files
):
test_files.append(filename)
return test_files
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class
# for the all_model_classes variable.
def find_tested_models(test_file):
"""Parse the content of test_file to detect what's in all_model_classes"""
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class
with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f:
content = f.read()
all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
# Check with one less parenthesis as well
all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)
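    # The two patterns above capture entries from lines such as (illustrative examples):
    #   all_model_classes = ((BertModel, BertForMaskedLM) if is_torch_available() else ())
    #   all_model_classes = (BertModel, BertForMaskedLM)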
if len(all_models) > 0:
model_tested = []
for entry in all_models:
for line in entry.split(","):
name = line.strip()
if len(name) > 0:
model_tested.append(name)
return model_tested
def check_models_are_tested(module, test_file):
"""Check models defined in module are tested in test_file."""
# XxxPreTrainedModel are not tested
defined_models = get_models(module)
tested_models = find_tested_models(test_file)
if tested_models is None:
if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:
return
return [
f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
+ "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
+ "`utils/check_repo.py`."
]
failures = []
for model_name, _ in defined_models:
if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
failures.append(
f"{model_name} is defined in {module.__name__} but is not tested in "
+ f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
+ "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
+ "in the file `utils/check_repo.py`."
)
return failures
def check_all_models_are_tested():
"""Check all models are properly tested."""
modules = get_model_modules()
test_files = get_model_test_files()
failures = []
for module in modules:
test_file = f"test_{module.__name__.split(".")[-1]}.py"
if test_file not in test_files:
failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.")
new_failures = check_models_are_tested(module, test_file)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def get_all_auto_configured_models():
"""Return the list of all models in at least one auto class."""
result = set() # To avoid duplicates we concatenate all model classes in a set.
if is_torch_available():
for attr_name in dir(transformers.models.auto.modeling_auto):
if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_auto, attr_name)))
if is_tf_available():
for attr_name in dir(transformers.models.auto.modeling_tf_auto):
if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_tf_auto, attr_name)))
if is_flax_available():
for attr_name in dir(transformers.models.auto.modeling_flax_auto):
if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_flax_auto, attr_name)))
return [cls for cls in result]
def ignore_unautoclassed(model_name):
"""Rules to determine if `name` should be in an auto class."""
# Special white list
if model_name in IGNORE_NON_AUTO_CONFIGURED:
return True
# Encoder and Decoder should be ignored
if "Encoder" in model_name or "Decoder" in model_name:
return True
return False
def check_models_are_auto_configured(module, all_auto_models):
"""Check models defined in module are each in an auto class."""
defined_models = get_models(module)
failures = []
for model_name, _ in defined_models:
if model_name not in all_auto_models and not ignore_unautoclassed(model_name):
failures.append(
f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. "
"If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file "
"`utils/check_repo.py`."
)
return failures
def check_all_models_are_auto_configured():
"""Check all models are each in an auto class."""
missing_backends = []
if not is_torch_available():
missing_backends.append("PyTorch")
if not is_tf_available():
missing_backends.append("TensorFlow")
if not is_flax_available():
missing_backends.append("Flax")
if len(missing_backends) > 0:
missing = ", ".join(missing_backends)
if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
raise Exception(
"Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the "
f"Transformers repo, the following are missing: {missing}."
)
else:
warnings.warn(
"Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the "
f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you "
"didn't make any change in one of those backends modeling files, you should probably execute the "
"command above to be on the safe side."
)
modules = get_model_modules()
all_auto_models = get_all_auto_configured_models()
failures = []
for module in modules:
new_failures = check_models_are_auto_configured(module, all_auto_models)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
_re_decorator = re.compile(r"^\s*@(\S+)\s+$")
def check_decorator_order(filename):
"""Check that in the test file `filename` the slow decorator is always last."""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
decorator_before = None
errors = []
for i, line in enumerate(lines):
search = _re_decorator.search(line)
if search is not None:
decorator_name = search.groups()[0]
if decorator_before is not None and decorator_name.startswith("parameterized"):
errors.append(i)
decorator_before = decorator_name
elif decorator_before is not None:
decorator_before = None
return errors
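# Illustrative ordering flagged by check_decorator_order: any decorator placed before a
# `parameterized` decorator is reported, e.g.
#   @slow
#   @parameterized.expand(...)
#   def test_something(self): ...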
def check_all_decorator_order():
"""Check that in all test files, the slow decorator is always last."""
errors = []
for fname in os.listdir(PATH_TO_TESTS):
if fname.endswith(".py"):
filename = os.path.join(PATH_TO_TESTS, fname)
new_errors = check_decorator_order(filename)
errors += [f"- {filename}, line {i}" for i in new_errors]
if len(errors) > 0:
msg = "\n".join(errors)
raise ValueError(
f"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\n{msg}"
)
def find_all_documented_objects():
"""Parse the content of all doc files to detect which classes and functions it documents"""
documented_obj = []
for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"):
with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
content = f.read()
raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content)
documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs]
return documented_obj
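# The regex in find_all_documented_objects picks up Sphinx directives of the form (illustrative):
#   .. autoclass:: transformers.BertModel
#   .. autofunction:: transformers.pipeline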
# One good reason for not being documented is to be deprecated. Put in this list deprecated objects.
DEPRECATED_OBJECTS = [
"AutoModelWithLMHead",
"BartPretrainedModel",
"DataCollator",
"DataCollatorForSOP",
"GlueDataset",
"GlueDataTrainingArguments",
"LineByLineTextDataset",
"LineByLineWithRefDataset",
"LineByLineWithSOPTextDataset",
"PretrainedBartModel",
"PretrainedFSMTModel",
"SingleSentenceClassificationProcessor",
"SquadDataTrainingArguments",
"SquadDataset",
"SquadExample",
"SquadFeatures",
"SquadV1Processor",
"SquadV2Processor",
"TFAutoModelWithLMHead",
"TFBartPretrainedModel",
"TextDataset",
"TextDatasetForNextSentencePrediction",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2Tokenizer",
"glue_compute_metrics",
"glue_convert_examples_to_features",
"glue_output_modes",
"glue_processors",
"glue_tasks_num_labels",
"squad_convert_examples_to_features",
"xnli_compute_metrics",
"xnli_output_modes",
"xnli_processors",
"xnli_tasks_num_labels",
]
# Exceptionally, some objects should not be documented after all rules passed.
# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!
UNDOCUMENTED_OBJECTS = [
"AddedToken", # This is a tokenizers class.
"BasicTokenizer", # Internal, should never have been in the main init.
"CharacterTokenizer", # Internal, should never have been in the main init.
"DPRPretrainedReader", # Like an Encoder.
"MecabTokenizer", # Internal, should never have been in the main init.
"ModelCard", # Internal type.
"SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer)
"TFDPRPretrainedReader", # Like an Encoder.
"TransfoXLCorpus", # Internal type.
"WordpieceTokenizer", # Internal, should never have been in the main init.
"absl", # External module
"add_end_docstrings", # Internal, should never have been in the main init.
"add_start_docstrings", # Internal, should never have been in the main init.
"cached_path", # Internal used for downloading models.
"convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights
"logger", # Internal logger
"logging", # External module
"requires_backends", # Internal function
]
# This list should be empty. Objects in it should get their own doc page.
SHOULD_HAVE_THEIR_OWN_PAGE = [
# Benchmarks
"PyTorchBenchmark",
"PyTorchBenchmarkArguments",
"TensorFlowBenchmark",
"TensorFlowBenchmarkArguments",
]
def ignore_undocumented(name):
"""Rules to determine if `name` should be undocumented."""
# NOT DOCUMENTED ON PURPOSE.
# Constants uppercase are not documented.
if name.isupper():
return True
# PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.
if (
name.endswith("PreTrainedModel")
or name.endswith("Decoder")
or name.endswith("Encoder")
or name.endswith("Layer")
or name.endswith("Embeddings")
or name.endswith("Attention")
):
return True
# Submodules are not documented.
if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(
os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py")
):
return True
# All load functions are not documented.
if name.startswith("load_tf") or name.startswith("load_pytorch"):
return True
# is_xxx_available functions are not documented.
if name.startswith("is_") and name.endswith("_available"):
return True
# Deprecated objects are not documented.
if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:
return True
# MMBT model does not really work.
if name.startswith("MMBT"):
return True
if name in SHOULD_HAVE_THEIR_OWN_PAGE:
return True
return False
def check_all_objects_are_documented():
"""Check all models are properly documented."""
documented_objs = find_all_documented_objects()
modules = transformers._modules
objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")]
undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)]
if len(undocumented_objs) > 0:
raise Exception(
"The following objects are in the public init so should be documented:\n - "
+ "\n - ".join(undocumented_objs)
)
def check_repo_quality():
"""Check all models are properly tested and documented."""
print("Checking all models are public.")
check_models_are_in_init()
print("Checking all models are properly tested.")
check_all_decorator_order()
check_all_models_are_tested()
print("Checking all objects are properly documented.")
check_all_objects_are_documented()
print("Checking all models are in at least one auto class.")
check_all_models_are_auto_configured()
if __name__ == "__main__":
check_repo_quality()
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import pickle
import tarfile
import tempfile
import youtokentome as yttm
from joblib import Parallel, delayed
from omegaconf import ListConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import create_spt_model
from nemo.collections.nlp.data.language_modeling.sentence_dataset import SentenceDataset
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import TranslationDataset
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer, get_tokenizer
from nemo.utils import logging
class MTDataPreproc:
""" Automatically trains tokenizers and preprocesses machine translation data based on the MTEncDecModelConfig.
For training NMT models with datasets larger than 5M sentence pairs,
it can be inefficient to train them without first creating a tarred dataset.
If the user wants to change the tokenizer, vocab size, or batch size, for example,
they must reprocess the data with the correct configuration.
With MTDataPreproc users can sweep through data configurations and the tarred dataset will
be automatically created according to the model configuration.
To train tokenizer model and create tarred dataset specify in configuration:
model.preproc_out_dir=/path/to/preproc_out
model.encoder_tokenizer.vocab_size=32000
model.decoder_tokenizer.vocab_size=32000
model.train_ds.use_tarred_dataset=True
model.train_ds.src_file_name=/path/to/src.txt
model.train_ds.tgt_file_name=/path/to/tgt.txt
model.train_ds.tokens_in_batch=16000
Once a dataset has been constructed based on this configuration, MTDataPreproc will not process it again.
If a previously trained tokenizer model or tarred dataset is found, MTDataPreproc will not preprocess the data.
    Note: tokenizer training is currently supported only for the 'yttm' and 'sentencepiece' libraries.
"""
def __init__(self, cfg: MTEncDecModelConfig, trainer: Trainer = None) -> None:
self._cfg = cfg
self.global_rank = 0
self.world_size = 1
if trainer is not None:
self.global_rank = (trainer.node_rank * trainer.num_gpus) + trainer.local_rank
self.world_size = trainer.num_nodes * trainer.num_gpus
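            # Example (illustrative numbers): with 2 nodes x 4 GPUs, ranks 0-3 live on node 0,
            # ranks 4-7 on node 1, and world_size is 8.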
if hasattr(cfg, 'train_ds'):
supported_tokenizers = ['yttm', 'huggingface', 'sentencepiece', 'megatron']
supported_train_tokenizers = ['yttm', 'sentencepiece']
if (
cfg.encoder_tokenizer.get('library') not in supported_tokenizers
or cfg.decoder_tokenizer.get('library') not in supported_tokenizers
):
raise NotImplementedError(f"Currently we only support {supported_tokenizers}.")
if cfg.get('shared_tokenizer') and cfg.encoder_tokenizer.get('library') != cfg.decoder_tokenizer.get(
'library'
):
raise ValueError("Shared tokenizers cannot be from different libraries.")
# Prepare tokenizers
if (
cfg.encoder_tokenizer.get('library') in supported_train_tokenizers
or cfg.decoder_tokenizer.get('library') in supported_train_tokenizers
):
# Train tokenizer models if using yttm or sentencepiece and they don't exist
if (
cfg.encoder_tokenizer.get('library') in supported_train_tokenizers
and cfg.encoder_tokenizer.get('tokenizer_model') is None
) or (
cfg.decoder_tokenizer.get('library') in supported_train_tokenizers
and cfg.decoder_tokenizer.get('tokenizer_model') is None
):
if cfg.get('preproc_out_dir') is None:
raise ValueError('Tokenizer model training required but cfg.preproc_out_dir is None.')
if cfg.train_ds.get('src_file_name') is None or cfg.train_ds.get('tgt_file_name') is None:
raise ValueError(
'src_file_name and tgt_file_name needed to train tokenizers but could not be found.'
)
src_fname = cfg.train_ds.get('src_file_name')
tgt_fname = cfg.train_ds.get('tgt_file_name')
src_language = cfg.get('src_language')
tgt_language = cfg.get('tgt_language')
spt_symbols = None
tempdir = tempfile.TemporaryDirectory()
if cfg.get('multilingual'):
spt_symbols = []
if isinstance(src_fname, ListConfig):
fnames = (" ").join(src_fname)
src_fname = os.path.join(tempdir.name, 'src.txt')
os.system('cat %s > %s' % (fnames, src_fname))
if isinstance(tgt_fname, ListConfig):
fnames = (" ").join(tgt_fname)
tgt_fname = os.path.join(tempdir.name, 'tgt.txt')
os.system('cat %s > %s' % (fnames, tgt_fname))
if isinstance(src_language, ListConfig):
for lng in src_language:
spt_symbols.append("<" + lng + ">")
if isinstance(tgt_language, ListConfig):
for lng in tgt_language:
spt_symbols.append("<" + lng + ">")
# train tokenizer model on training data
self.encoder_tokenizer_model, self.decoder_tokenizer_model = MTDataPreproc.train_tokenizers(
out_dir=cfg.get('preproc_out_dir'),
src_fname=src_fname,
tgt_fname=tgt_fname,
shared_tokenizer=cfg.get('shared_tokenizer'),
encoder_tokenizer_vocab_size=cfg.encoder_tokenizer.get('vocab_size'),
decoder_tokenizer_vocab_size=cfg.decoder_tokenizer.get('vocab_size'),
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
encoder_tokenizer_coverage=cfg.encoder_tokenizer.get('coverage', 0.999),
decoder_tokenizer_coverage=cfg.decoder_tokenizer.get('coverage', 0.999),
global_rank=self.global_rank,
encoder_training_sample_size=cfg.encoder_tokenizer.get('training_sample_size', -1),
decoder_training_sample_size=cfg.decoder_tokenizer.get('training_sample_size', -1),
encoder_special_tokens=OmegaConf.to_container(cfg.encoder_tokenizer.special_tokens)
if cfg.encoder_tokenizer.special_tokens
else None,
decoder_special_tokens=OmegaConf.to_container(cfg.decoder_tokenizer.special_tokens)
if cfg.decoder_tokenizer.special_tokens
else None,
spt_symbols=spt_symbols,
multilingual=cfg.get('multilingual', False),
)
# update config
self._cfg.encoder_tokenizer.tokenizer_model = self.encoder_tokenizer_model
self._cfg.decoder_tokenizer.tokenizer_model = self.decoder_tokenizer_model
tempdir.cleanup()
else:
self.encoder_tokenizer_model = cfg.encoder_tokenizer.get('tokenizer_model')
self.decoder_tokenizer_model = cfg.decoder_tokenizer.get('tokenizer_model')
self.encoder_tokenizer, self.decoder_tokenizer = self.get_enc_dec_tokenizers(
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
encoder_model_name=cfg.encoder.get('model_name'),
encoder_tokenizer_model=self.encoder_tokenizer_model,
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),
encoder_r2l=cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
decoder_model_name=cfg.decoder.get('model_name'),
decoder_tokenizer_model=self.decoder_tokenizer_model,
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),
decoder_r2l=cfg.decoder_tokenizer.get('r2l', False),
)
# If using tarred dataset for training, automatically create it if needed
if cfg.train_ds.get('use_tarred_dataset'):
if cfg.train_ds.get('tar_files') is None and cfg.train_ds.get('metadata_file') is None:
if cfg.get('preproc_out_dir') is None:
raise ValueError('Data preprocessing required but cfg.preproc_out_dir is None.')
if cfg.train_ds.get('src_file_name') is None or cfg.train_ds.get('tgt_file_name') is None:
raise ValueError(
'src_file_name and tgt_file_name needed to create tarred dataset but could not be found.'
)
# Preprocess data and cache for use during training
if self.global_rank == 0:
logging.info(
f"Creating tarred dataset for src: {cfg.train_ds.get("src_file_name")} and tgt: {cfg.train_ds.get("tgt_file_name")}"
)
if isinstance(cfg.train_ds.get('src_file_name'), str):
src_file_list = [cfg.train_ds.get('src_file_name')]
tgt_file_list = [cfg.train_ds.get('tgt_file_name')]
outdir_list = [cfg.get('preproc_out_dir')]
else:
src_file_list = cfg.train_ds.get('src_file_name')
tgt_file_list = cfg.train_ds.get('tgt_file_name')
if isinstance(cfg.get('src_language'), ListConfig):
langs = cfg.get('src_language')
elif isinstance(cfg.get('tgt_language'), ListConfig):
langs = cfg.get('tgt_language')
outdir_list = []
for lang in langs:
outdir_list.append(os.path.join(cfg.get('preproc_out_dir'), lang))
if len(src_file_list) != len(tgt_file_list) or len(src_file_list) != len(outdir_list):
raise ValueError(
"Number of source files, target files, and multilingual language pairs must be the same."
)
                    # TODO: have to get tokenizers inside .preprocess_parallel because they can't be pickled
metadata_file_list = []
for idx, src_file in enumerate(src_file_list):
self.train_tar_files, self.train_metadata_file = MTDataPreproc.preprocess_parallel_dataset(
clean=cfg.train_ds.clean,
src_fname=src_file,
tgt_fname=tgt_file_list[idx],
out_dir=outdir_list[idx],
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
encoder_model_name=cfg.encoder.get('model_name'),
encoder_tokenizer_model=self.encoder_tokenizer_model,
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),
encoder_tokenizer_r2l=cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
decoder_model_name=cfg.decoder.get('model_name'),
decoder_tokenizer_model=self.decoder_tokenizer_model,
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),
decoder_tokenizer_r2l=cfg.decoder_tokenizer.get('r2l', False),
max_seq_length=cfg.train_ds.get('max_seq_length', 512),
tokens_in_batch=cfg.train_ds.get('tokens_in_batch', 8192),
lines_per_dataset_fragment=cfg.train_ds.get('lines_per_dataset_fragment', 1000000),
num_batches_per_tarfile=cfg.train_ds.get('num_batches_per_tarfile', 1000),
min_seq_length=1,
global_rank=self.global_rank,
world_size=self.world_size,
n_jobs=cfg.train_ds.get('n_preproc_jobs', -2),
tar_file_prefix=cfg.train_ds.get('tar_file_prefix', 'parallel'),
)
metadata_file_list.append(self.train_metadata_file)
# update config
# self._cfg.train_ds.tar_files = self.tar_files_to_string(self.train_tar_files)
# self._cfg.train_ds.tar_files = self.train_tar_files
if isinstance(cfg.train_ds.get('metadata_file'), str):
self._cfg.train_ds.metadata_file = metadata_file_list[0]
else:
self._cfg.train_ds.metadata_file = metadata_file_list
logging.info(
f"Using tarred dataset created in folder(s) {outdir_list} and metadata created at {self._cfg.train_ds.metadata_file}"
)
elif cfg.train_ds.get('tar_files') is not None and cfg.train_ds.get('metadata_file') is None:
raise ValueError('A metadata file is required for tarred dataset but cfg.metadata_file is None.')
elif cfg.train_ds.get('tar_files') is None and cfg.train_ds.get('metadata_file') is not None:
if isinstance(cfg.train_ds.get('metadata_file'), str):
metadata_file_list = [cfg.train_ds.get('metadata_file')]
else:
metadata_file_list = cfg.train_ds.get('metadata_file')
for metadata_file in metadata_file_list:
with open(metadata_file) as metadata_reader:
metadata = json.load(metadata_reader)
if metadata['tar_files']:
logging.info(f"Using tarred dataset: {metadata["tar_files"]}")
else:
raise ValueError(f'tar_files not provided and metadata does not have tar files')
else:
self.train_tar_files = cfg.train_ds.get('tar_files')
self.train_metadata_file = cfg.train_ds.get('metadata_file')
logging.info(
f"Using tarred dataset from config at {self.train_tar_files} and metadata from {self.train_metadata_file}"
)
def tar_files_to_string(self, tar_files):
""" Tar files are generated in the following format: basename.number.tar
Where number is an integer from 1 to the number of tar files.
We convert this list to a string that can be used in the model config to specify
tarred datasets: basename_OP_1..num_tar_files_CL_.tar
Args:
tar_files (List[str]): List of tar files generated by preprocess_parallel_dataset
"""
num_tar_files = len(tar_files)
split_on_dot = tar_files[0].split('.')
basename = '.'.join(split_on_dot[0:-2])
tar_file_string = f'{basename}._OP_1..{num_tar_files}_CL_.tar'
return tar_file_string
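    # Worked example for tar_files_to_string (illustrative file names):
    #   input : ['parallel.batches.tokens.16000.1.tar', ..., 'parallel.batches.tokens.16000.8.tar']
    #   output: 'parallel.batches.tokens.16000._OP_1..8_CL_.tar'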
@staticmethod
def get_enc_dec_tokenizers(
encoder_tokenizer_name=None,
encoder_tokenizer_model=None,
encoder_bpe_dropout=0.0,
encoder_model_name=None,
encoder_r2l=False,
decoder_tokenizer_name=None,
decoder_tokenizer_model=None,
decoder_bpe_dropout=0.0,
decoder_model_name=None,
decoder_r2l=False,
):
# if encoder_tokenizer_name != 'yttm' or decoder_tokenizer_name != 'yttm':
# raise NotImplementedError(f"Currently we only support yttm tokenizer.")
if encoder_bpe_dropout is None:
encoder_bpe_dropout = 0.0
if decoder_bpe_dropout is None:
decoder_bpe_dropout = 0.0
encoder_tokenizer = get_nmt_tokenizer(
library=encoder_tokenizer_name,
model_name=encoder_model_name,
tokenizer_model=encoder_tokenizer_model,
bpe_dropout=encoder_bpe_dropout,
r2l=encoder_r2l,
)
decoder_tokenizer = get_nmt_tokenizer(
library=decoder_tokenizer_name,
model_name=decoder_model_name,
tokenizer_model=decoder_tokenizer_model,
bpe_dropout=decoder_bpe_dropout,
r2l=decoder_r2l,
)
return encoder_tokenizer, decoder_tokenizer
@staticmethod
def get_monolingual_tokenizer(
tokenizer_name=None, tokenizer_model=None, bpe_dropout=0.0,
):
if tokenizer_name != 'yttm':
raise NotImplementedError(f"Currently we only support yttm tokenizer.")
if bpe_dropout is None:
bpe_dropout = 0.0
tokenizer = get_tokenizer(
tokenizer_name=tokenizer_name, tokenizer_model=tokenizer_model, bpe_dropout=bpe_dropout,
)
return tokenizer
# TODO: add local or global rank 0 decorator
@staticmethod
def preprocess_parallel_dataset(
clean,
src_fname,
tgt_fname,
out_dir,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_tokenizer_r2l,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
max_seq_length,
min_seq_length,
tokens_in_batch,
lines_per_dataset_fragment,
num_batches_per_tarfile,
global_rank,
world_size,
n_jobs=-2,
tar_file_prefix='parallel',
):
"""Create tarred dataset from large paired translation data.
Args:
clean (str): Cleans source and target sentences to get rid of noisy data.
src_fname (str): path to source text data
tgt_fname (str): path to target text data
out_dir (str): path to write tarred dataset
encoder_tokenizer (Any): tokenizer for encoder
decoder_tokenizer (Any): tokenizer for decoder
max_seq_length (int): maximum sequence length
min_seq_length (int): minimum sequence length
tokens_in_batch (int): tokens per batch per GPU, effectively batch size
lines_per_dataset_fragment (int): number of lines to consider for bucketing and padding
num_batches_per_tarfile (int): number of batches (pickle files) within each tarfile
tar_file_prefix (str) : add string prefix to tar files
n_jobs (int): number of processes to use for data processing (-2 to use all but 2)
"""
os.makedirs(out_dir, exist_ok=True)
metadata_path = os.path.join(out_dir, f'metadata.tokens.{tokens_in_batch}.json')
if global_rank == 0:
tar_files_in_out_dir = glob.glob(f'{out_dir}/*.tar')
if tar_files_in_out_dir:
logging.info(
f'Tarred dataset detected: {tar_files_in_out_dir} and will be used. Remove if reprocessing.'
)
else:
filenames = [src_fname, tgt_fname]
# get number of lines so that we can create a partition of the lines of the text file
num_src_lines, num_tgt_lines = Parallel(n_jobs=2)(
delayed(MTDataPreproc._get_num_lines)(filename) for filename in filenames
)
logging.info(f'Found {num_src_lines} source lines and {num_tgt_lines} target lines.')
assert num_src_lines == num_tgt_lines, 'Number of source lines should equal number of target lines.'
# create a partition of lines that we can parallelize over
lines_partition = MTDataPreproc._get_lines_partition(num_src_lines, lines_per_dataset_fragment)
logging.info(f"Found {len(lines_partition)} fragments to parallelize over.")
# create tarfiles for each fragment in parallel
results_list = Parallel(n_jobs=n_jobs)(
delayed(MTDataPreproc._process_fragment)(
src_filename=src_fname,
tgt_filename=tgt_fname,
lines_indices=lines_indices,
out_dir=out_dir,
num_batches_per_tarfile=num_batches_per_tarfile,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
tokens_in_batch=tokens_in_batch,
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
fragment_index=fragment_index,
encoder_tokenizer_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_r2l=decoder_tokenizer_r2l,
)
for fragment_index, lines_indices in enumerate(lines_partition)
)
# compute total batches so far
total_batches = sum([batch_count for batch_count, _ in results_list])
# save batches from tar files containing the left over batches (if there's enough batches)
remainder_tar_file_ctr = 0
remainder_tar_file_path = os.path.join(
out_dir, f'remainder-batches.tokens.{tokens_in_batch}.tar_file_{remainder_tar_file_ctr}.tar'
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w')
batch_in_tar_ctr = 0
for _, tar_file_path in results_list:
tar_file_ptr = tarfile.open(tar_file_path, 'r')
for member in tar_file_ptr.getmembers():
remainder_tar_file_ptr.addfile(member, tar_file_ptr.extractfile(member.name))
batch_in_tar_ctr += 1
if batch_in_tar_ctr == num_batches_per_tarfile:
remainder_tar_file_ctr += 1
remainder_tar_file_ptr.close()
remainder_tar_file_path = os.path.join(
out_dir,
f'remainder-batches.tokens.{tokens_in_batch}.tar_file_{remainder_tar_file_ctr}.tar',
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w',)
batch_in_tar_ctr = 0
tar_file_ptr.close()
os.remove(tar_file_path)
# log the number of batches remaining as they will be discarded
num_batches_discarded = len(remainder_tar_file_ptr.getmembers())
total_batches -= num_batches_discarded
logging.info(
f'Number of batches discarded: {num_batches_discarded}, total batches kept: {total_batches}'
)
remainder_tar_file_ptr.close()
os.remove(remainder_tar_file_path)
# dump metadata to json
metadata = {}
metadata['num_batches'] = total_batches
# rename tar files so they can be more easily used with CLI and YAML
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
for index, path in enumerate(tar_file_paths):
os.rename(
path, os.path.join(out_dir, f'{tar_file_prefix}.batches.tokens.{tokens_in_batch}.{index}.tar')
)
# add tar files to manifest
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
metadata['tar_files'] = tar_file_paths
json.dump(metadata, open(metadata_path, 'w'))
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size, it is advisable to use at least 5M sentence pairs for tarred datasets. '
f'Decrease num_batches_per_tarfile or num_tokens_per_batch to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
return tar_file_paths, metadata_path
@staticmethod
def _get_num_lines(filename):
with open(filename) as f:
for i, l in enumerate(f):
pass
return i + 1
@staticmethod
def _get_lines_partition(num_lines, lines_per_dataset_fragment):
# create partition based on fragment size
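        # e.g. (illustrative) num_lines=25, lines_per_dataset_fragment=10 -> [[0, 10], [10, 20], [20, -1]]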
fragment_indices = []
for i in range(0, num_lines, lines_per_dataset_fragment):
fragment_indices.append([i, i + lines_per_dataset_fragment])
# modify last indices
last_indices = fragment_indices.pop()
last_indices[1] = -1
fragment_indices.append(last_indices)
# if fragment_indices[-1][1] >= num_lines:
# fragment_indices.pop()
return fragment_indices
@staticmethod
def _process_fragment(
src_filename,
tgt_filename,
lines_indices,
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tokens_in_batch,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_bpe_dropout,
encoder_model_name,
encoder_tokenizer_r2l,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
fragment_index,
):
start = lines_indices[0]
stop = lines_indices[1]
# write lines in partition to temporary files to be consumed by write_parallel_batches_to_tarfiles
tmp_f_src = tempfile.NamedTemporaryFile(delete=False, mode='w')
tmp_f_tgt = tempfile.NamedTemporaryFile(delete=False, mode='w')
with open(src_filename, 'r') as src_in, open(tgt_filename) as tgt_in:
for line_number, (src_line, tgt_line) in enumerate(zip(src_in, tgt_in)):
if start <= line_number and line_number < stop:
if src_line and tgt_line:
tmp_f_src.write(src_line)
tmp_f_tgt.write(tgt_line)
tmp_f_src.close()
tmp_f_tgt.close()
num_batches_from_fragment, remainder_tar_file_path = MTDataPreproc.write_parallel_batches_to_tarfiles(
out_dir=out_dir,
num_batches_per_tarfile=num_batches_per_tarfile,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
src_fname=tmp_f_src.name,
tgt_fname=tmp_f_tgt.name,
num_tokens=tokens_in_batch,
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
encoder_tokenizer_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
decoder_tokenizer_r2l=decoder_tokenizer_r2l,
fragment_index=fragment_index,
)
os.remove(tmp_f_src.name)
os.remove(tmp_f_tgt.name)
return num_batches_from_fragment, remainder_tar_file_path
@staticmethod
def preprocess_monolingual_dataset(
clean,
fname,
out_dir,
tokenizer,
max_seq_length,
min_seq_length,
tokens_in_batch,
lines_per_dataset_fragment,
num_batches_per_tarfile,
pkl_file_prefix,
global_rank,
world_size,
):
"""Create tarred dataset from a large monolingual corpus.
Args:
clean (str): Cleans sentences to get rid of very long or short sentences.
fname (str): Path to source text data
out_dir (str): Path to write tarred dataset
tokenizer (Any): Path to tokenizer model
max_seq_length (int): maximum sequence length
min_seq_length (int): minimum sequence length
tokens_in_batch (int): tokens per batch per GPU, effectively batch size
lines_per_dataset_fragment (int): number of lines to consider for bucketing and padding
num_batches_per_tarfile (int): number of batches (pickle files) within each tarfile
global_rank (int): if set to zero, data will be processed on this node
world_size (int): total number of processes being run (for training only, set to 1 when preproc only)
"""
os.makedirs(out_dir, exist_ok=True)
tar_file_ctr = 1
num_files_in_tar = 0
num_lines = 0
shard_num = 0
global_batch_ctr = 0
tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w')
tar_file_ptr = tarfile.open(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, tokens_in_batch, 1)), 'w'
)
metadata_path = os.path.join(out_dir, f'metadata.tokens.{tokens_in_batch}.json')
with open(fname, 'r') as f:
for line in f:
tmp_f.write(line)
num_lines += 1
if num_lines == lines_per_dataset_fragment:
tmp_f.close()
(
tar_file_ptr,
global_batch_ctr,
num_files_in_tar,
tar_file_ctr,
) = MTDataPreproc.write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tmp_f.name,
tokens_in_batch,
tokenizer,
num_files_in_tar=num_files_in_tar,
tar_file_ptr=tar_file_ptr,
tar_file_ctr=tar_file_ctr,
global_batch_ctr=global_batch_ctr,
pkl_file_prefix=pkl_file_prefix,
)
num_lines = 0
shard_num += 1
os.remove(tmp_f.name)
tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w')
tmp_f.close()
(
tar_file_ptr,
global_batch_ctr,
num_files_in_tar,
tar_file_ctr,
) = MTDataPreproc.write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tmp_f.name,
tokens_in_batch,
tokenizer,
num_files_in_tar=num_files_in_tar,
tar_file_ptr=tar_file_ptr,
tar_file_ctr=tar_file_ctr,
global_batch_ctr=global_batch_ctr,
pkl_file_prefix=pkl_file_prefix,
)
tar_file_ptr.close()
os.remove(tmp_f.name)
if num_files_in_tar != num_batches_per_tarfile:
os.remove(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, tokens_in_batch, tar_file_ctr))
)
global_batch_ctr -= num_files_in_tar
print('Dropping %d batches because of overflow' % (num_files_in_tar))
json.dump({'num_batches': global_batch_ctr}, open(os.path.join(out_dir, 'metadata.json'), 'w'))
tar_file_paths = glob.glob(f'{out_dir}/{pkl_file_prefix}-batches.tokens.{tokens_in_batch}.*.tar')
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size, it is advisable to use at least 5M sentence pairs for tarred datasets. '
f'Decrease num_batches_per_tarfile or num_tokens_per_batch to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
return tar_file_paths, metadata_path
@staticmethod
def train_tokenizers(
out_dir,
src_fname,
tgt_fname,
shared_tokenizer,
encoder_tokenizer_name,
encoder_tokenizer_vocab_size,
encoder_tokenizer_coverage,
decoder_tokenizer_name,
decoder_tokenizer_vocab_size,
decoder_tokenizer_coverage,
global_rank,
encoder_training_sample_size=-1,
decoder_training_sample_size=-1,
encoder_special_tokens=None,
decoder_special_tokens=None,
spt_symbols=None,
multilingual=False,
):
encoder_tokenizer_model = None
decoder_tokenizer_model = None
os.makedirs(out_dir, exist_ok=True)
supported_train_tokenizers = ['yttm', 'sentencepiece']
if encoder_special_tokens:
if isinstance(encoder_special_tokens, dict):
encoder_special_tokens = list(encoder_special_tokens.values())
print(encoder_special_tokens)
if decoder_special_tokens:
if isinstance(decoder_special_tokens, dict):
decoder_special_tokens = list(decoder_special_tokens.values())
if multilingual and encoder_tokenizer_name != 'sentencepiece':
raise NotImplementedError(
f"Currently we only support training setencepiece tokenizer for multilingual model."
)
if shared_tokenizer:
if (
encoder_tokenizer_name not in supported_train_tokenizers
or decoder_tokenizer_name not in supported_train_tokenizers
):
raise NotImplementedError(
f"Currently we only support tokenizers in {supported_train_tokenizers} for shared tokenizer."
)
encoder_tokenizer_model = os.path.join(
out_dir, 'shared_tokenizer.%d.BPE.model' % (encoder_tokenizer_vocab_size)
)
decoder_tokenizer_model = encoder_tokenizer_model
if global_rank == 0:
if os.path.isfile(encoder_tokenizer_model):
logging.info(
f'Shared tokenizer model {encoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Shared tokenizer model {encoder_tokenizer_model} not found. Training tokenizer model.'
)
with tempfile.TemporaryDirectory() as tmp:
concat_data_path = os.path.join(tmp, 'concat_dataset.txt')
os.system('cat %s %s > %s' % (src_fname, tgt_fname, concat_data_path))
if encoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=concat_data_path,
vocab_size=encoder_tokenizer_vocab_size,
model=os.path.join(out_dir, encoder_tokenizer_model),
coverage=encoder_tokenizer_coverage,
n_threads=-1,
)
else:
create_spt_model(
data_file=concat_data_path,
vocab_size=encoder_tokenizer_vocab_size,
sample_size=encoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=encoder_tokenizer_coverage,
output_dir=out_dir,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=encoder_special_tokens,
)
os.rename(
os.path.join(out_dir, 'tokenizer.model'),
os.path.join(out_dir, encoder_tokenizer_model),
)
else:
if encoder_tokenizer_name in supported_train_tokenizers:
encoder_tokenizer_model = os.path.join(
out_dir, 'tokenizer.encoder.%d.BPE.model' % (encoder_tokenizer_vocab_size)
)
if global_rank == 0:
if os.path.isfile(encoder_tokenizer_model):
logging.info(
f'Encoder tokenizer model {encoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Encoder tokenizer model {encoder_tokenizer_model} not found. Training tokenizer model.'
)
if encoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=src_fname,
vocab_size=encoder_tokenizer_vocab_size,
model=encoder_tokenizer_model,
coverage=encoder_tokenizer_coverage,
n_threads=-1,
)
else:
dir_name = os.path.dirname(encoder_tokenizer_model)
create_spt_model(
data_file=src_fname,
vocab_size=encoder_tokenizer_vocab_size,
sample_size=encoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=encoder_tokenizer_coverage,
output_dir=dir_name,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=encoder_special_tokens,
)
os.rename(os.path.join(dir_name, 'tokenizer.model'), os.path.join(encoder_tokenizer_model))
if decoder_tokenizer_name in supported_train_tokenizers:
decoder_tokenizer_model = os.path.join(
out_dir, 'tokenizer.decoder.%d.BPE.model' % (decoder_tokenizer_vocab_size)
)
if global_rank == 0:
if os.path.isfile(decoder_tokenizer_model):
logging.info(
f'Decoder tokenizer model {decoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Decoder tokenizer model {decoder_tokenizer_model} not found. Training tokenizer model.'
)
if decoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=tgt_fname,
vocab_size=decoder_tokenizer_vocab_size,
model=decoder_tokenizer_model,
coverage=decoder_tokenizer_coverage,
n_threads=-1,
)
else:
dir_name = os.path.dirname(decoder_tokenizer_model)
create_spt_model(
data_file=tgt_fname,
vocab_size=decoder_tokenizer_vocab_size,
sample_size=decoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=decoder_tokenizer_coverage,
output_dir=dir_name,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=decoder_special_tokens,
)
os.rename(os.path.join(dir_name, 'tokenizer.model'), os.path.join(decoder_tokenizer_model))
return encoder_tokenizer_model, decoder_tokenizer_model
@staticmethod
def write_parallel_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
src_fname,
tgt_fname,
num_tokens,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_tokenizer_r2l,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
fragment_index,
):
"""
Writes current fragment of the overall parallel corpus to tarfiles by:
        (1) Creating minibatches using a TranslationDataset object.
(2) Writing each minibatch to a pickle file.
(3) Adding pickle files to a tarfile until it reaches num_batches_per_tarfile.
"""
dataset = TranslationDataset(
dataset_src=src_fname,
dataset_tgt=tgt_fname,
tokens_in_batch=num_tokens,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
max_seq_length_diff=max_seq_length,
max_seq_length_ratio=max_seq_length,
cache_ids=False,
cache_data_per_node=False,
use_cache=False,
)
encoder_tokenizer, decoder_tokenizer = MTDataPreproc.get_enc_dec_tokenizers(
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
encoder_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
decoder_r2l=decoder_tokenizer_r2l,
)
dataset.batchify(encoder_tokenizer, decoder_tokenizer)
tar_file_ctr = 0
tar_file_path = os.path.join(
out_dir, 'fragment-%s-batches.tokens.%d.%d.tar' % (fragment_index, num_tokens, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w')
total_batch_ctr = 0
batch_ctr = 0
for _, batch in dataset.batches.items():
total_batch_ctr += 1
batch_ctr += 1
pickle.dump(
batch,
open(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)), 'wb'),
)
tar_file_ptr.add(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)))
os.remove(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)))
if batch_ctr == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_path = os.path.join(
out_dir, 'fragment-%s-batches.tokens.%d.%d.tar' % (fragment_index, num_tokens, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w',)
batch_ctr = 0
# return tar files paths that have batches remaining
remainder_tar_file_path = tar_file_ptr.name
tar_file_ptr.close()
return total_batch_ctr, remainder_tar_file_path
@staticmethod
def write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
fname,
num_tokens,
tokenizer,
num_files_in_tar,
tar_file_ptr,
tar_file_ctr,
global_batch_ctr,
pkl_file_prefix,
):
"""
        Writes the current fragment of the overall monolingual corpus to tarfiles by:
        (1) Creating minibatches using a SentenceDataset object.
(2) Writing each minibatch to a pickle file.
(3) Adding pickle files to a tarfile until it reaches num_batches_per_tarfile.
"""
dataset = SentenceDataset(
tokenizer=tokenizer,
dataset=fname,
tokens_in_batch=num_tokens,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
cache_ids=False,
)
for batch in dataset.batches:
global_batch_ctr += 1
batch = {'src': batch}
pickle.dump(
batch, open(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)), 'wb')
)
if num_files_in_tar == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_ptr = tarfile.open(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, num_tokens, tar_file_ctr)),
'w',
)
num_files_in_tar = 0
tar_file_ptr.add(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)))
num_files_in_tar += 1
os.remove(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)))
return tar_file_ptr, global_batch_ctr, num_files_in_tar, tar_file_ctr
@property
def cfg(self):
return self._cfg
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import pickle
import tarfile
import tempfile
import youtokentome as yttm
from joblib import Parallel, delayed
from omegaconf import ListConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import create_spt_model
from nemo.collections.nlp.data.language_modeling.sentence_dataset import SentenceDataset
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import TranslationDataset
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer, get_tokenizer
from nemo.utils import logging
class MTDataPreproc:
""" Automatically trains tokenizers and preprocesses machine translation data based on the MTEncDecModelConfig.
For training NMT models with datasets larger than 5M sentence pairs,
it can be inefficient to train them without first creating a tarred dataset.
If the user wants to change the tokenizer, vocab size, or batch size, for example,
they must reprocess the data with the correct configuration.
With MTDataPreproc users can sweep through data configurations and the tarred dataset will
be automatically created according to the model configuration.
To train tokenizer model and create tarred dataset specify in configuration:
model.preproc_out_dir=/path/to/preproc_out
model.encoder_tokenizer.vocab_size=32000
model.decoder_tokenizer.vocab_size=32000
model.train_ds.use_tarred_dataset=True
model.train_ds.src_file_name=/path/to/src.txt
model.train_ds.tgt_file_name=/path/to/tgt.txt
model.train_ds.tokens_in_batch=16000
Once a dataset has been constructed based on this configuration, MTDataPreproc will not process it again.
If a previously trained tokenizer model or tarred dataset is found, MTDataPreproc will not preprocess the data.
    Note: the supported tokenizer libraries are 'yttm', 'huggingface', 'sentencepiece', and 'megatron';
    only 'yttm' and 'sentencepiece' tokenizers can be trained automatically from the data.
"""
def __init__(self, cfg: MTEncDecModelConfig, trainer: Trainer = None) -> None:
self._cfg = cfg
self.global_rank = 0
self.world_size = 1
if trainer is not None:
self.global_rank = (trainer.node_rank * trainer.num_gpus) + trainer.local_rank
self.world_size = trainer.num_nodes * trainer.num_gpus
if hasattr(cfg, 'train_ds'):
supported_tokenizers = ['yttm', 'huggingface', 'sentencepiece', 'megatron']
supported_train_tokenizers = ['yttm', 'sentencepiece']
if (
cfg.encoder_tokenizer.get('library') not in supported_tokenizers
or cfg.decoder_tokenizer.get('library') not in supported_tokenizers
):
raise NotImplementedError(f"Currently we only support {supported_tokenizers}.")
if cfg.get('shared_tokenizer') and cfg.encoder_tokenizer.get('library') != cfg.decoder_tokenizer.get(
'library'
):
raise ValueError("Shared tokenizers cannot be from different libraries.")
# Prepare tokenizers
if (
cfg.encoder_tokenizer.get('library') in supported_train_tokenizers
or cfg.decoder_tokenizer.get('library') in supported_train_tokenizers
):
# Train tokenizer models if using yttm or sentencepiece and they don't exist
if (
cfg.encoder_tokenizer.get('library') in supported_train_tokenizers
and cfg.encoder_tokenizer.get('tokenizer_model') is None
) or (
cfg.decoder_tokenizer.get('library') in supported_train_tokenizers
and cfg.decoder_tokenizer.get('tokenizer_model') is None
):
if cfg.get('preproc_out_dir') is None:
raise ValueError('Tokenizer model training required but cfg.preproc_out_dir is None.')
if cfg.train_ds.get('src_file_name') is None or cfg.train_ds.get('tgt_file_name') is None:
raise ValueError(
'src_file_name and tgt_file_name needed to train tokenizers but could not be found.'
)
src_fname = cfg.train_ds.get('src_file_name')
tgt_fname = cfg.train_ds.get('tgt_file_name')
src_language = cfg.get('src_language')
tgt_language = cfg.get('tgt_language')
spt_symbols = None
tempdir = tempfile.TemporaryDirectory()
if cfg.get('multilingual'):
spt_symbols = []
if isinstance(src_fname, ListConfig):
fnames = (" ").join(src_fname)
src_fname = os.path.join(tempdir.name, 'src.txt')
os.system('cat %s > %s' % (fnames, src_fname))
if isinstance(tgt_fname, ListConfig):
fnames = (" ").join(tgt_fname)
tgt_fname = os.path.join(tempdir.name, 'tgt.txt')
os.system('cat %s > %s' % (fnames, tgt_fname))
if isinstance(src_language, ListConfig):
for lng in src_language:
spt_symbols.append("<" + lng + ">")
if isinstance(tgt_language, ListConfig):
for lng in tgt_language:
spt_symbols.append("<" + lng + ">")
# train tokenizer model on training data
self.encoder_tokenizer_model, self.decoder_tokenizer_model = MTDataPreproc.train_tokenizers(
out_dir=cfg.get('preproc_out_dir'),
src_fname=src_fname,
tgt_fname=tgt_fname,
shared_tokenizer=cfg.get('shared_tokenizer'),
encoder_tokenizer_vocab_size=cfg.encoder_tokenizer.get('vocab_size'),
decoder_tokenizer_vocab_size=cfg.decoder_tokenizer.get('vocab_size'),
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
encoder_tokenizer_coverage=cfg.encoder_tokenizer.get('coverage', 0.999),
decoder_tokenizer_coverage=cfg.decoder_tokenizer.get('coverage', 0.999),
global_rank=self.global_rank,
encoder_training_sample_size=cfg.encoder_tokenizer.get('training_sample_size', -1),
decoder_training_sample_size=cfg.decoder_tokenizer.get('training_sample_size', -1),
encoder_special_tokens=OmegaConf.to_container(cfg.encoder_tokenizer.special_tokens)
if cfg.encoder_tokenizer.special_tokens
else None,
decoder_special_tokens=OmegaConf.to_container(cfg.decoder_tokenizer.special_tokens)
if cfg.decoder_tokenizer.special_tokens
else None,
spt_symbols=spt_symbols,
multilingual=cfg.get('multilingual', False),
)
# update config
self._cfg.encoder_tokenizer.tokenizer_model = self.encoder_tokenizer_model
self._cfg.decoder_tokenizer.tokenizer_model = self.decoder_tokenizer_model
tempdir.cleanup()
else:
self.encoder_tokenizer_model = cfg.encoder_tokenizer.get('tokenizer_model')
self.decoder_tokenizer_model = cfg.decoder_tokenizer.get('tokenizer_model')
self.encoder_tokenizer, self.decoder_tokenizer = self.get_enc_dec_tokenizers(
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
encoder_model_name=cfg.encoder.get('model_name'),
encoder_tokenizer_model=self.encoder_tokenizer_model,
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),
encoder_r2l=cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
decoder_model_name=cfg.decoder.get('model_name'),
decoder_tokenizer_model=self.decoder_tokenizer_model,
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),
decoder_r2l=cfg.decoder_tokenizer.get('r2l', False),
)
# If using tarred dataset for training, automatically create it if needed
if cfg.train_ds.get('use_tarred_dataset'):
if cfg.train_ds.get('tar_files') is None and cfg.train_ds.get('metadata_file') is None:
if cfg.get('preproc_out_dir') is None:
raise ValueError('Data preprocessing required but cfg.preproc_out_dir is None.')
if cfg.train_ds.get('src_file_name') is None or cfg.train_ds.get('tgt_file_name') is None:
raise ValueError(
'src_file_name and tgt_file_name needed to create tarred dataset but could not be found.'
)
# Preprocess data and cache for use during training
if self.global_rank == 0:
logging.info(
f"Creating tarred dataset for src: {cfg.train_ds.get('src_file_name')} and tgt: {cfg.train_ds.get('tgt_file_name')}"
)
if isinstance(cfg.train_ds.get('src_file_name'), str):
src_file_list = [cfg.train_ds.get('src_file_name')]
tgt_file_list = [cfg.train_ds.get('tgt_file_name')]
outdir_list = [cfg.get('preproc_out_dir')]
else:
src_file_list = cfg.train_ds.get('src_file_name')
tgt_file_list = cfg.train_ds.get('tgt_file_name')
if isinstance(cfg.get('src_language'), ListConfig):
langs = cfg.get('src_language')
elif isinstance(cfg.get('tgt_language'), ListConfig):
langs = cfg.get('tgt_language')
outdir_list = []
for lang in langs:
outdir_list.append(os.path.join(cfg.get('preproc_out_dir'), lang))
if len(src_file_list) != len(tgt_file_list) or len(src_file_list) != len(outdir_list):
raise ValueError(
"Number of source files, target files, and multilingual language pairs must be the same."
)
                    # TODO: have to get tokenizers inside .preprocess_parallel because they can't be pickled
metadata_file_list = []
for idx, src_file in enumerate(src_file_list):
self.train_tar_files, self.train_metadata_file = MTDataPreproc.preprocess_parallel_dataset(
clean=cfg.train_ds.clean,
src_fname=src_file,
tgt_fname=tgt_file_list[idx],
out_dir=outdir_list[idx],
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
encoder_model_name=cfg.encoder.get('model_name'),
encoder_tokenizer_model=self.encoder_tokenizer_model,
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),
encoder_tokenizer_r2l=cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
decoder_model_name=cfg.decoder.get('model_name'),
decoder_tokenizer_model=self.decoder_tokenizer_model,
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),
decoder_tokenizer_r2l=cfg.decoder_tokenizer.get('r2l', False),
max_seq_length=cfg.train_ds.get('max_seq_length', 512),
tokens_in_batch=cfg.train_ds.get('tokens_in_batch', 8192),
lines_per_dataset_fragment=cfg.train_ds.get('lines_per_dataset_fragment', 1000000),
num_batches_per_tarfile=cfg.train_ds.get('num_batches_per_tarfile', 1000),
min_seq_length=1,
global_rank=self.global_rank,
world_size=self.world_size,
n_jobs=cfg.train_ds.get('n_preproc_jobs', -2),
tar_file_prefix=cfg.train_ds.get('tar_file_prefix', 'parallel'),
)
metadata_file_list.append(self.train_metadata_file)
# update config
# self._cfg.train_ds.tar_files = self.tar_files_to_string(self.train_tar_files)
# self._cfg.train_ds.tar_files = self.train_tar_files
if isinstance(cfg.train_ds.get('metadata_file'), str):
self._cfg.train_ds.metadata_file = metadata_file_list[0]
else:
self._cfg.train_ds.metadata_file = metadata_file_list
logging.info(
f"Using tarred dataset created in folder(s) {outdir_list} and metadata created at {self._cfg.train_ds.metadata_file}"
)
elif cfg.train_ds.get('tar_files') is not None and cfg.train_ds.get('metadata_file') is None:
raise ValueError('A metadata file is required for tarred dataset but cfg.metadata_file is None.')
elif cfg.train_ds.get('tar_files') is None and cfg.train_ds.get('metadata_file') is not None:
if isinstance(cfg.train_ds.get('metadata_file'), str):
metadata_file_list = [cfg.train_ds.get('metadata_file')]
else:
metadata_file_list = cfg.train_ds.get('metadata_file')
for metadata_file in metadata_file_list:
with open(metadata_file) as metadata_reader:
metadata = json.load(metadata_reader)
if metadata['tar_files']:
logging.info(f"Using tarred dataset: {metadata['tar_files']}")
else:
raise ValueError(f'tar_files not provided and metadata does not have tar files')
else:
self.train_tar_files = cfg.train_ds.get('tar_files')
self.train_metadata_file = cfg.train_ds.get('metadata_file')
logging.info(
f"Using tarred dataset from config at {self.train_tar_files} and metadata from {self.train_metadata_file}"
)
def tar_files_to_string(self, tar_files):
""" Tar files are generated in the following format: basename.number.tar
Where number is an integer from 1 to the number of tar files.
We convert this list to a string that can be used in the model config to specify
tarred datasets: basename_OP_1..num_tar_files_CL_.tar
Args:
tar_files (List[str]): List of tar files generated by preprocess_parallel_dataset
"""
num_tar_files = len(tar_files)
split_on_dot = tar_files[0].split('.')
basename = '.'.join(split_on_dot[0:-2])
tar_file_string = f'{basename}._OP_1..{num_tar_files}_CL_.tar'
return tar_file_string
@staticmethod
def get_enc_dec_tokenizers(
encoder_tokenizer_name=None,
encoder_tokenizer_model=None,
encoder_bpe_dropout=0.0,
encoder_model_name=None,
encoder_r2l=False,
decoder_tokenizer_name=None,
decoder_tokenizer_model=None,
decoder_bpe_dropout=0.0,
decoder_model_name=None,
decoder_r2l=False,
):
# if encoder_tokenizer_name != 'yttm' or decoder_tokenizer_name != 'yttm':
# raise NotImplementedError(f"Currently we only support yttm tokenizer.")
if encoder_bpe_dropout is None:
encoder_bpe_dropout = 0.0
if decoder_bpe_dropout is None:
decoder_bpe_dropout = 0.0
encoder_tokenizer = get_nmt_tokenizer(
library=encoder_tokenizer_name,
model_name=encoder_model_name,
tokenizer_model=encoder_tokenizer_model,
bpe_dropout=encoder_bpe_dropout,
r2l=encoder_r2l,
)
decoder_tokenizer = get_nmt_tokenizer(
library=decoder_tokenizer_name,
model_name=decoder_model_name,
tokenizer_model=decoder_tokenizer_model,
bpe_dropout=decoder_bpe_dropout,
r2l=decoder_r2l,
)
return encoder_tokenizer, decoder_tokenizer
@staticmethod
def get_monolingual_tokenizer(
tokenizer_name=None, tokenizer_model=None, bpe_dropout=0.0,
):
if tokenizer_name != 'yttm':
raise NotImplementedError(f"Currently we only support yttm tokenizer.")
if bpe_dropout is None:
bpe_dropout = 0.0
tokenizer = get_tokenizer(
tokenizer_name=tokenizer_name, tokenizer_model=tokenizer_model, bpe_dropout=bpe_dropout,
)
return tokenizer
# TODO: add local or global rank 0 decorator
@staticmethod
def preprocess_parallel_dataset(
clean,
src_fname,
tgt_fname,
out_dir,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_tokenizer_r2l,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
max_seq_length,
min_seq_length,
tokens_in_batch,
lines_per_dataset_fragment,
num_batches_per_tarfile,
global_rank,
world_size,
n_jobs=-2,
tar_file_prefix='parallel',
):
"""Create tarred dataset from large paired translation data.
Args:
clean (str): Cleans source and target sentences to get rid of noisy data.
src_fname (str): path to source text data
tgt_fname (str): path to target text data
out_dir (str): path to write tarred dataset
encoder_tokenizer (Any): tokenizer for encoder
decoder_tokenizer (Any): tokenizer for decoder
max_seq_length (int): maximum sequence length
min_seq_length (int): minimum sequence length
tokens_in_batch (int): tokens per batch per GPU, effectively batch size
lines_per_dataset_fragment (int): number of lines to consider for bucketing and padding
num_batches_per_tarfile (int): number of batches (pickle files) within each tarfile
tar_file_prefix (str) : add string prefix to tar files
n_jobs (int): number of processes to use for data processing (-2 to use all but 2)
"""
os.makedirs(out_dir, exist_ok=True)
metadata_path = os.path.join(out_dir, f'metadata.tokens.{tokens_in_batch}.json')
if global_rank == 0:
tar_files_in_out_dir = glob.glob(f'{out_dir}/*.tar')
if tar_files_in_out_dir:
logging.info(
f'Tarred dataset detected: {tar_files_in_out_dir} and will be used. Remove if reprocessing.'
)
else:
filenames = [src_fname, tgt_fname]
# get number of lines so that we can create a partition of the lines of the text file
num_src_lines, num_tgt_lines = Parallel(n_jobs=2)(
delayed(MTDataPreproc._get_num_lines)(filename) for filename in filenames
)
logging.info(f'Found {num_src_lines} source lines and {num_tgt_lines} target lines.')
assert num_src_lines == num_tgt_lines, 'Number of source lines should equal number of target lines.'
# create a partition of lines that we can parallelize over
lines_partition = MTDataPreproc._get_lines_partition(num_src_lines, lines_per_dataset_fragment)
logging.info(f"Found {len(lines_partition)} fragments to parallelize over.")
# create tarfiles for each fragment in parallel
results_list = Parallel(n_jobs=n_jobs)(
delayed(MTDataPreproc._process_fragment)(
src_filename=src_fname,
tgt_filename=tgt_fname,
lines_indices=lines_indices,
out_dir=out_dir,
num_batches_per_tarfile=num_batches_per_tarfile,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
tokens_in_batch=tokens_in_batch,
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
fragment_index=fragment_index,
encoder_tokenizer_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_r2l=decoder_tokenizer_r2l,
)
for fragment_index, lines_indices in enumerate(lines_partition)
)
# compute total batches so far
total_batches = sum([batch_count for batch_count, _ in results_list])
# save batches from tar files containing the left over batches (if there's enough batches)
remainder_tar_file_ctr = 0
remainder_tar_file_path = os.path.join(
out_dir, f'remainder-batches.tokens.{tokens_in_batch}.tar_file_{remainder_tar_file_ctr}.tar'
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w')
batch_in_tar_ctr = 0
for _, tar_file_path in results_list:
tar_file_ptr = tarfile.open(tar_file_path, 'r')
for member in tar_file_ptr.getmembers():
remainder_tar_file_ptr.addfile(member, tar_file_ptr.extractfile(member.name))
batch_in_tar_ctr += 1
if batch_in_tar_ctr == num_batches_per_tarfile:
remainder_tar_file_ctr += 1
remainder_tar_file_ptr.close()
remainder_tar_file_path = os.path.join(
out_dir,
f'remainder-batches.tokens.{tokens_in_batch}.tar_file_{remainder_tar_file_ctr}.tar',
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w',)
batch_in_tar_ctr = 0
tar_file_ptr.close()
os.remove(tar_file_path)
# log the number of batches remaining as they will be discarded
num_batches_discarded = len(remainder_tar_file_ptr.getmembers())
total_batches -= num_batches_discarded
logging.info(
f'Number of batches discarded: {num_batches_discarded}, total batches kept: {total_batches}'
)
remainder_tar_file_ptr.close()
os.remove(remainder_tar_file_path)
# dump metadata to json
metadata = {}
metadata['num_batches'] = total_batches
# rename tar files so they can be more easily used with CLI and YAML
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
for index, path in enumerate(tar_file_paths):
os.rename(
path, os.path.join(out_dir, f'{tar_file_prefix}.batches.tokens.{tokens_in_batch}.{index}.tar')
)
# add tar files to manifest
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
metadata['tar_files'] = tar_file_paths
json.dump(metadata, open(metadata_path, 'w'))
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size, it is advisable to use at least 5M sentence pairs for tarred datasets. '
f'Decrease num_batches_per_tarfile or num_tokens_per_batch to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
return tar_file_paths, metadata_path
@staticmethod
def _get_num_lines(filename):
with open(filename) as f:
for i, l in enumerate(f):
pass
return i + 1
@staticmethod
def _get_lines_partition(num_lines, lines_per_dataset_fragment):
# create partition based on fragment size
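        # e.g. (illustrative) num_lines=25, lines_per_dataset_fragment=10 -> [[0, 10], [10, 20], [20, -1]]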
fragment_indices = []
for i in range(0, num_lines, lines_per_dataset_fragment):
fragment_indices.append([i, i + lines_per_dataset_fragment])
# modify last indices
last_indices = fragment_indices.pop()
last_indices[1] = -1
fragment_indices.append(last_indices)
# if fragment_indices[-1][1] >= num_lines:
# fragment_indices.pop()
return fragment_indices
@staticmethod
def _process_fragment(
src_filename,
tgt_filename,
lines_indices,
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tokens_in_batch,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_bpe_dropout,
encoder_model_name,
encoder_tokenizer_r2l,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
fragment_index,
):
start = lines_indices[0]
stop = lines_indices[1]
# write lines in partition to temporary files to be consumed by write_parallel_batches_to_tarfiles
tmp_f_src = tempfile.NamedTemporaryFile(delete=False, mode='w')
tmp_f_tgt = tempfile.NamedTemporaryFile(delete=False, mode='w')
with open(src_filename, 'r') as src_in, open(tgt_filename) as tgt_in:
for line_number, (src_line, tgt_line) in enumerate(zip(src_in, tgt_in)):
if start <= line_number and line_number < stop:
if src_line and tgt_line:
tmp_f_src.write(src_line)
tmp_f_tgt.write(tgt_line)
tmp_f_src.close()
tmp_f_tgt.close()
num_batches_from_fragment, remainder_tar_file_path = MTDataPreproc.write_parallel_batches_to_tarfiles(
out_dir=out_dir,
num_batches_per_tarfile=num_batches_per_tarfile,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
src_fname=tmp_f_src.name,
tgt_fname=tmp_f_tgt.name,
num_tokens=tokens_in_batch,
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
encoder_tokenizer_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
decoder_tokenizer_r2l=decoder_tokenizer_r2l,
fragment_index=fragment_index,
)
os.remove(tmp_f_src.name)
os.remove(tmp_f_tgt.name)
return num_batches_from_fragment, remainder_tar_file_path
@staticmethod
def preprocess_monolingual_dataset(
clean,
fname,
out_dir,
tokenizer,
max_seq_length,
min_seq_length,
tokens_in_batch,
lines_per_dataset_fragment,
num_batches_per_tarfile,
pkl_file_prefix,
global_rank,
world_size,
):
"""Create tarred dataset from a large monolingual corpus.
Args:
clean (str): Cleans sentences to get rid of very long or short sentences.
fname (str): Path to source text data
out_dir (str): Path to write tarred dataset
tokenizer (Any): Path to tokenizer model
max_seq_length (int): maximum sequence length
min_seq_length (int): minimum sequence length
tokens_in_batch (int): tokens per batch per GPU, effectively batch size
lines_per_dataset_fragment (int): number of lines to consider for bucketing and padding
num_batches_per_tarfile (int): number of batches (pickle files) within each tarfile
global_rank (int): if set to zero, data will be processed on this node
world_size (int): total number of processes being run (for training only, set to 1 when preproc only)
"""
os.makedirs(out_dir, exist_ok=True)
tar_file_ctr = 1
num_files_in_tar = 0
num_lines = 0
shard_num = 0
global_batch_ctr = 0
tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w')
tar_file_ptr = tarfile.open(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, tokens_in_batch, 1)), 'w'
)
metadata_path = os.path.join(out_dir, f'metadata.tokens.{tokens_in_batch}.json')
with open(fname, 'r') as f:
for line in f:
tmp_f.write(line)
num_lines += 1
if num_lines == lines_per_dataset_fragment:
tmp_f.close()
(
tar_file_ptr,
global_batch_ctr,
num_files_in_tar,
tar_file_ctr,
) = MTDataPreproc.write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tmp_f.name,
tokens_in_batch,
tokenizer,
num_files_in_tar=num_files_in_tar,
tar_file_ptr=tar_file_ptr,
tar_file_ctr=tar_file_ctr,
global_batch_ctr=global_batch_ctr,
pkl_file_prefix=pkl_file_prefix,
)
num_lines = 0
shard_num += 1
os.remove(tmp_f.name)
tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w')
tmp_f.close()
(
tar_file_ptr,
global_batch_ctr,
num_files_in_tar,
tar_file_ctr,
) = MTDataPreproc.write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tmp_f.name,
tokens_in_batch,
tokenizer,
num_files_in_tar=num_files_in_tar,
tar_file_ptr=tar_file_ptr,
tar_file_ctr=tar_file_ctr,
global_batch_ctr=global_batch_ctr,
pkl_file_prefix=pkl_file_prefix,
)
tar_file_ptr.close()
os.remove(tmp_f.name)
if num_files_in_tar != num_batches_per_tarfile:
os.remove(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, tokens_in_batch, tar_file_ctr))
)
global_batch_ctr -= num_files_in_tar
print('Dropping %d batches because of overflow' % (num_files_in_tar))
json.dump({'num_batches': global_batch_ctr}, open(os.path.join(out_dir, 'metadata.json'), 'w'))
tar_file_paths = glob.glob(f'{out_dir}/{pkl_file_prefix}-batches.tokens.{tokens_in_batch}.*.tar')
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size, it is advisable to use at least 5M sentence pairs for tarred datasets. '
f'Decrease num_batches_per_tarfile or num_tokens_per_batch to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
return tar_file_paths, metadata_path
@staticmethod
def train_tokenizers(
out_dir,
src_fname,
tgt_fname,
shared_tokenizer,
encoder_tokenizer_name,
encoder_tokenizer_vocab_size,
encoder_tokenizer_coverage,
decoder_tokenizer_name,
decoder_tokenizer_vocab_size,
decoder_tokenizer_coverage,
global_rank,
encoder_training_sample_size=-1,
decoder_training_sample_size=-1,
encoder_special_tokens=None,
decoder_special_tokens=None,
spt_symbols=None,
multilingual=False,
):
encoder_tokenizer_model = None
decoder_tokenizer_model = None
os.makedirs(out_dir, exist_ok=True)
supported_train_tokenizers = ['yttm', 'sentencepiece']
if encoder_special_tokens:
if isinstance(encoder_special_tokens, dict):
encoder_special_tokens = list(encoder_special_tokens.values())
print(encoder_special_tokens)
if decoder_special_tokens:
if isinstance(decoder_special_tokens, dict):
decoder_special_tokens = list(decoder_special_tokens.values())
if multilingual and encoder_tokenizer_name != 'sentencepiece':
raise NotImplementedError(
f"Currently we only support training setencepiece tokenizer for multilingual model."
)
if shared_tokenizer:
if (
encoder_tokenizer_name not in supported_train_tokenizers
or decoder_tokenizer_name not in supported_train_tokenizers
):
raise NotImplementedError(
f"Currently we only support tokenizers in {supported_train_tokenizers} for shared tokenizer."
)
encoder_tokenizer_model = os.path.join(
out_dir, 'shared_tokenizer.%d.BPE.model' % (encoder_tokenizer_vocab_size)
)
decoder_tokenizer_model = encoder_tokenizer_model
if global_rank == 0:
if os.path.isfile(encoder_tokenizer_model):
logging.info(
f'Shared tokenizer model {encoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Shared tokenizer model {encoder_tokenizer_model} not found. Training tokenizer model.'
)
with tempfile.TemporaryDirectory() as tmp:
concat_data_path = os.path.join(tmp, 'concat_dataset.txt')
os.system('cat %s %s > %s' % (src_fname, tgt_fname, concat_data_path))
if encoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=concat_data_path,
vocab_size=encoder_tokenizer_vocab_size,
model=os.path.join(out_dir, encoder_tokenizer_model),
coverage=encoder_tokenizer_coverage,
n_threads=-1,
)
else:
create_spt_model(
data_file=concat_data_path,
vocab_size=encoder_tokenizer_vocab_size,
sample_size=encoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=encoder_tokenizer_coverage,
output_dir=out_dir,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=encoder_special_tokens,
)
os.rename(
os.path.join(out_dir, 'tokenizer.model'),
os.path.join(out_dir, encoder_tokenizer_model),
)
else:
if encoder_tokenizer_name in supported_train_tokenizers:
encoder_tokenizer_model = os.path.join(
out_dir, 'tokenizer.encoder.%d.BPE.model' % (encoder_tokenizer_vocab_size)
)
if global_rank == 0:
if os.path.isfile(encoder_tokenizer_model):
logging.info(
f'Encoder tokenizer model {encoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Encoder tokenizer model {encoder_tokenizer_model} not found. Training tokenizer model.'
)
if encoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=src_fname,
vocab_size=encoder_tokenizer_vocab_size,
model=encoder_tokenizer_model,
coverage=encoder_tokenizer_coverage,
n_threads=-1,
)
else:
dir_name = os.path.dirname(encoder_tokenizer_model)
create_spt_model(
data_file=src_fname,
vocab_size=encoder_tokenizer_vocab_size,
sample_size=encoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=encoder_tokenizer_coverage,
output_dir=dir_name,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=encoder_special_tokens,
)
os.rename(os.path.join(dir_name, 'tokenizer.model'), os.path.join(encoder_tokenizer_model))
if decoder_tokenizer_name in supported_train_tokenizers:
decoder_tokenizer_model = os.path.join(
out_dir, 'tokenizer.decoder.%d.BPE.model' % (decoder_tokenizer_vocab_size)
)
if global_rank == 0:
if os.path.isfile(decoder_tokenizer_model):
logging.info(
f'Decoder tokenizer model {decoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Decoder tokenizer model {decoder_tokenizer_model} not found. Training tokenizer model.'
)
if decoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=tgt_fname,
vocab_size=decoder_tokenizer_vocab_size,
model=decoder_tokenizer_model,
coverage=decoder_tokenizer_coverage,
n_threads=-1,
)
else:
dir_name = os.path.dirname(decoder_tokenizer_model)
create_spt_model(
data_file=tgt_fname,
vocab_size=decoder_tokenizer_vocab_size,
sample_size=decoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=decoder_tokenizer_coverage,
output_dir=dir_name,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=decoder_special_tokens,
)
os.rename(os.path.join(dir_name, 'tokenizer.model'), os.path.join(decoder_tokenizer_model))
return encoder_tokenizer_model, decoder_tokenizer_model
@staticmethod
def write_parallel_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
src_fname,
tgt_fname,
num_tokens,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_tokenizer_r2l,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
fragment_index,
):
"""
Writes current fragment of the overall parallel corpus to tarfiles by:
        (1) Creating minibatches using a TranslationDataset object.
(2) Writing each minibatch to a pickle file.
(3) Adding pickle files to a tarfile until it reaches num_batches_per_tarfile.
"""
dataset = TranslationDataset(
dataset_src=src_fname,
dataset_tgt=tgt_fname,
tokens_in_batch=num_tokens,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
max_seq_length_diff=max_seq_length,
max_seq_length_ratio=max_seq_length,
cache_ids=False,
cache_data_per_node=False,
use_cache=False,
)
encoder_tokenizer, decoder_tokenizer = MTDataPreproc.get_enc_dec_tokenizers(
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
encoder_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
decoder_r2l=decoder_tokenizer_r2l,
)
dataset.batchify(encoder_tokenizer, decoder_tokenizer)
tar_file_ctr = 0
tar_file_path = os.path.join(
out_dir, 'fragment-%s-batches.tokens.%d.%d.tar' % (fragment_index, num_tokens, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w')
total_batch_ctr = 0
batch_ctr = 0
for _, batch in dataset.batches.items():
total_batch_ctr += 1
batch_ctr += 1
pickle.dump(
batch,
open(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)), 'wb'),
)
tar_file_ptr.add(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)))
os.remove(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)))
if batch_ctr == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_path = os.path.join(
out_dir, 'fragment-%s-batches.tokens.%d.%d.tar' % (fragment_index, num_tokens, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w',)
batch_ctr = 0
# return tar files paths that have batches remaining
remainder_tar_file_path = tar_file_ptr.name
tar_file_ptr.close()
return total_batch_ctr, remainder_tar_file_path
@staticmethod
def write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
fname,
num_tokens,
tokenizer,
num_files_in_tar,
tar_file_ptr,
tar_file_ctr,
global_batch_ctr,
pkl_file_prefix,
):
"""
        Writes the current fragment of the overall monolingual corpus to tarfiles by:
        (1) Creating minibatches using a SentenceDataset object.
(2) Writing each minibatch to a pickle file.
(3) Adding pickle files to a tarfile until it reaches num_batches_per_tarfile.
"""
dataset = SentenceDataset(
tokenizer=tokenizer,
dataset=fname,
tokens_in_batch=num_tokens,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
cache_ids=False,
)
for batch in dataset.batches:
global_batch_ctr += 1
batch = {'src': batch}
pickle.dump(
batch, open(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)), 'wb')
)
if num_files_in_tar == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_ptr = tarfile.open(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, num_tokens, tar_file_ctr)),
'w',
)
num_files_in_tar = 0
tar_file_ptr.add(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)))
num_files_in_tar += 1
os.remove(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)))
return tar_file_ptr, global_batch_ctr, num_files_in_tar, tar_file_ctr
@property
def cfg(self):
return self._cfg
|
r"""
vanilla pseudo-labeling implementation
"""
from collections import defaultdict
from alr.utils import timeop, manual_seed
from alr.data.datasets import Dataset
from alr.data import UnlabelledDataset
from alr.training import VanillaPLTrainer
from alr.training.samplers import RandomFixedLengthSampler
from alr import MCDropout
import pickle
import numpy as np
import torch
import torch.utils.data as torchdata
from torch.nn import functional as F
from pathlib import Path
if __name__ == "__main__":
manual_seed(42)
kwargs = dict(num_workers=4, pin_memory=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
sizes = np.arange(20, 260, 10)
N = len(sizes)
# validation dataset size
VAL_SIZE = 5_000
# according to the paper:
BATCH_SIZE = 32
UNLABELLED_BATCH_SIZE = 256
    # prolong each epoch to have at least this many points (see RandomFixedLengthSampler)
MIN_TRAIN_SIZE = 12_500
    # well, early stopping should kick in before then.
EPOCHS = 200
REPEATS = 6
# paths
pl_metrics = Path("pl_metrics")
metrics = Path("metrics")
saved_models = Path("saved_models")
metrics.mkdir()
saved_models.mkdir()
log_every = 2
accs = defaultdict(list)
for r in range(1, REPEATS + 1):
for i, n in enumerate(sizes, 1):
train, test = Dataset.MNIST.get()
train, pool = torchdata.random_split(train, (n, len(train) - n))
pool, val = torchdata.random_split(pool, (len(pool) - VAL_SIZE, VAL_SIZE))
pool = UnlabelledDataset(pool, debug=True)
model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
print(f"=== Iteration {i} of {N} ({i/N:.2%}) ===")
print(f"\ttrain: {len(train)}; pool: {len(pool)}; test: {len(test)}")
if (i - 1) % log_every == 0 and r == 1:
pl_log = str(pl_metrics / f"dsize_{n}")
else:
pl_log = None
trainer = VanillaPLTrainer(
model,
labelled_loss=F.nll_loss,
unlabelled_loss=F.nll_loss,
optimiser="Adam",
patience=3,
reload_best=True,
track_pl_metrics=pl_log,
device=device,
)
train_loader = torchdata.DataLoader(
train,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
train, length=MIN_TRAIN_SIZE, shuffle=True
),
**kwargs,
)
pool_loader = torchdata.DataLoader(
pool,
batch_size=UNLABELLED_BATCH_SIZE,
shuffle=True,
**kwargs,
)
val_loader = torchdata.DataLoader(
val,
batch_size=1024,
shuffle=False,
**kwargs,
)
test_loader = torchdata.DataLoader(
test,
batch_size=1024,
shuffle=False,
**kwargs,
)
with timeop() as t:
history = trainer.fit(
train_loader,
pool_loader,
val_loader,
epochs=EPOCHS,
)
test_metrics = trainer.evaluate(test_loader)
accs[n].append(test_metrics["acc"])
print(
f"\t[train] loss, acc: ({history["stage2"]["train_loss"][-1]}, {history["stage2"]["train_acc"][-1]})\n"
f"\t[test] loss, acc: ({test_metrics["loss"]}, {test_metrics["acc"]})\n"
f"\ttime: {t}"
)
if pl_log:
torch.save(
model.state_dict(),
saved_models / f"repeat_{r}_dsize_{n}_weights.pth",
)
payload = {
"history": history,
"test_metrics": test_metrics,
}
with open(metrics / f"repeat_{r}_dsize_{n}_metrics.pkl", "wb") as fp:
pickle.dump(payload, fp)
with open("accs.pkl", "wb") as fp:
pickle.dump(accs, fp)
|
r"""
vanilla pseudo-labeling implementation
"""
from collections import defaultdict
from alr.utils import timeop, manual_seed
from alr.data.datasets import Dataset
from alr.data import UnlabelledDataset
from alr.training import VanillaPLTrainer
from alr.training.samplers import RandomFixedLengthSampler
from alr import MCDropout
import pickle
import numpy as np
import torch
import torch.utils.data as torchdata
from torch.nn import functional as F
from pathlib import Path
if __name__ == "__main__":
manual_seed(42)
kwargs = dict(num_workers=4, pin_memory=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
sizes = np.arange(20, 260, 10)
N = len(sizes)
# validation dataset size
VAL_SIZE = 5_000
# according to the paper:
BATCH_SIZE = 32
UNLABELLED_BATCH_SIZE = 256
# at least prolong the epoch to have this many points (see RandomFixedLengthSampler)
MIN_TRAIN_SIZE = 12_500
    # well, early stopping should kick in before then.
EPOCHS = 200
REPEATS = 6
# paths
pl_metrics = Path("pl_metrics")
metrics = Path("metrics")
saved_models = Path("saved_models")
metrics.mkdir()
saved_models.mkdir()
log_every = 2
accs = defaultdict(list)
for r in range(1, REPEATS + 1):
for i, n in enumerate(sizes, 1):
train, test = Dataset.MNIST.get()
train, pool = torchdata.random_split(train, (n, len(train) - n))
pool, val = torchdata.random_split(pool, (len(pool) - VAL_SIZE, VAL_SIZE))
pool = UnlabelledDataset(pool, debug=True)
model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
print(f"=== Iteration {i} of {N} ({i/N:.2%}) ===")
print(f"\ttrain: {len(train)}; pool: {len(pool)}; test: {len(test)}")
if (i - 1) % log_every == 0 and r == 1:
pl_log = str(pl_metrics / f"dsize_{n}")
else:
pl_log = None
trainer = VanillaPLTrainer(
model,
labelled_loss=F.nll_loss,
unlabelled_loss=F.nll_loss,
optimiser="Adam",
patience=3,
reload_best=True,
track_pl_metrics=pl_log,
device=device,
)
train_loader = torchdata.DataLoader(
train,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
train, length=MIN_TRAIN_SIZE, shuffle=True
),
**kwargs,
)
pool_loader = torchdata.DataLoader(
pool,
batch_size=UNLABELLED_BATCH_SIZE,
shuffle=True,
**kwargs,
)
val_loader = torchdata.DataLoader(
val,
batch_size=1024,
shuffle=False,
**kwargs,
)
test_loader = torchdata.DataLoader(
test,
batch_size=1024,
shuffle=False,
**kwargs,
)
with timeop() as t:
history = trainer.fit(
train_loader,
pool_loader,
val_loader,
epochs=EPOCHS,
)
test_metrics = trainer.evaluate(test_loader)
accs[n].append(test_metrics["acc"])
print(
f"\t[train] loss, acc: ({history['stage2']['train_loss'][-1]}, {history['stage2']['train_acc'][-1]})\n"
f"\t[test] loss, acc: ({test_metrics['loss']}, {test_metrics['acc']})\n"
f"\ttime: {t}"
)
if pl_log:
torch.save(
model.state_dict(),
saved_models / f"repeat_{r}_dsize_{n}_weights.pth",
)
payload = {
"history": history,
"test_metrics": test_metrics,
}
with open(metrics / f"repeat_{r}_dsize_{n}_metrics.pkl", "wb") as fp:
pickle.dump(payload, fp)
with open("accs.pkl", "wb") as fp:
pickle.dump(accs, fp)
|
import json
class OrderException(Exception):
pass
def first_step(event, context):
print(event)
if event.get('orderId') is None:
raise OrderException('No orderId was provided!')
if event['orderId'] != 'abc123':
raise OrderException(f'No record found for recordId: {event['orderId']}')
items = [
{
'item': 'item1',
'price': 3.5,
'quantity': 1
},
{
'item': 'item2',
'price': 2.0,
'quantity': 6
}
]
response = {
'statusCode': 200,
'body': json.dumps(items)
}
return response
def second_step(event, context):
print(event)
items = json.loads(event['items']['body'])
print(items)
total = 0
for item in items:
total += item['price'] * item['quantity']
response = {
'statusCode': 200,
'body': f'Order total is:{total}'
}
return response
|
import json
class OrderException(Exception):
pass
def first_step(event, context):
print(event)
if event.get('orderId') is None:
raise OrderException('No orderId was provided!')
if event['orderId'] != 'abc123':
raise OrderException(f'No record found for recordId: {event["orderId"]}')
items = [
{
'item': 'item1',
'price': 3.5,
'quantity': 1
},
{
'item': 'item2',
'price': 2.0,
'quantity': 6
}
]
response = {
'statusCode': 200,
'body': json.dumps(items)
}
return response
def second_step(event, context):
print(event)
items = json.loads(event['items']['body'])
print(items)
total = 0
for item in items:
total += item['price'] * item['quantity']
response = {
'statusCode': 200,
'body': f'Order total is:{total}'
}
return response
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05-orchestrator.ipynb (unless otherwise specified).
__all__ = ['retry_request', 'if_possible_parse_local_datetime', 'SP_and_date_request', 'handle_capping',
'date_range_request', 'year_request', 'construct_year_month_pairs', 'year_and_month_request',
'clean_year_week', 'construct_year_week_pairs', 'year_and_week_request', 'non_temporal_request',
'query_orchestrator']
# Cell
import pandas as pd
from tqdm import tqdm
from warnings import warn
from requests.models import Response
from . import utils, raw
# Cell
def retry_request(raw, method, kwargs, n_attempts=3):
attempts = 0
success = False
    while attempts < n_attempts and not success:
try:
r = getattr(raw, method)(**kwargs)
utils.check_status(r)
success = True
except Exception as e:
attempts += 1
if attempts == n_attempts:
raise e
return r
def if_possible_parse_local_datetime(df):
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod', 'initialForecastPublishingPeriodCommencingTime', 'latestForecastPublishingPeriodCommencingTime', 'outTurnPublishingPeriodCommencingTime']
dt_cols = [col for col in df.columns if 'date' in col.lower() or col in dt_cols_with_period_in_name]
sp_cols = [col for col in df.columns if 'period' in col.lower() and col not in dt_cols_with_period_in_name]
if len(dt_cols)==1 and len(sp_cols)==1:
df = utils.parse_local_datetime(df, dt_col=dt_cols[0], SP_col=sp_cols[0])
return df
def SP_and_date_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
df_dates_SPs = utils.dt_rng_to_SPs(start_date, end_date)
date_SP_tuples = list(df_dates_SPs.reset_index().itertuples(index=False, name=None))[:-1]
for datetime, query_date, SP in tqdm(date_SP_tuples, desc=stream, total=len(date_SP_tuples)):
kwargs.update({
kwargs_map['date']: datetime.strftime('%Y-%m-%d'),
kwargs_map['SP']: SP,
})
missing_kwargs = list(set(func_params) - set(['SP', 'date'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {", ".join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_SP = utils.parse_xml_response(r)
df = pd.concat([df, df_SP])
df = utils.expand_cols(df)
df = if_possible_parse_local_datetime(df)
return df
# Cell
def handle_capping(
r: Response,
df: pd.DataFrame,
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
end_date: str,
request_type: str,
**kwargs
):
capping_applied = utils.check_capping(r)
    assert capping_applied is not None, 'No information on whether or not capping limits had been breached could be found in the response metadata'
    if capping_applied: # only subset of date range returned
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod']
dt_cols = [col for col in df.columns if ('date' in col.lower() or col in dt_cols_with_period_in_name) and ('end' not in col.lower())]
if len(dt_cols) == 1:
start_date = pd.to_datetime(df[dt_cols[0]]).max().strftime('%Y-%m-%d')
if 'start_time' in kwargs.keys():
kwargs['start_time'] = '00:00'
if pd.to_datetime(start_date) >= pd.to_datetime(end_date):
                warn(f'The `end_date` ({end_date}) was earlier than `start_date` ({start_date})\nThe `start_date` will be set one day earlier than the `end_date`.')
start_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
warn(f'Response was capped, request is rerunning for missing data from {start_date}')
df_rerun = date_range_request(
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
start_date=start_date,
end_date=end_date,
request_type=request_type,
**kwargs
)
df = pd.concat([df, df_rerun])
df = df.drop_duplicates()
else:
warn(f'Response was capped: a new `start_date` to continue requesting could not be determined automatically, please handle manually for `{method}`')
return df
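# Note on the flow above (descriptive): when the response metadata reports that the capping limit was
# hit, handle_capping advances `start_date` to the latest date already returned and calls
# date_range_request again, whose response is in turn passed back through handle_capping -- the two
# functions therefore recurse until a response comes back uncapped, de-duplicating the concatenated
# frames at each step.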
def date_range_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
request_type: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
for kwarg in ['start_time', 'end_time']:
if kwarg not in kwargs_map.keys():
kwargs_map[kwarg] = kwarg
kwargs[kwargs_map['start_date']], kwargs[kwargs_map['start_time']] = pd.to_datetime(start_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
kwargs[kwargs_map['end_date']], kwargs[kwargs_map['end_time']] = pd.to_datetime(end_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
if 'SP' in kwargs_map.keys():
kwargs[kwargs_map['SP']] = '*'
func_params.remove('SP')
func_params += [kwargs_map['SP']]
missing_kwargs = list(set(func_params) - set(['start_date', 'end_date', 'start_time', 'end_time'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {", ".join(missing_kwargs)}"
if request_type == 'date_range':
kwargs.pop(kwargs_map['start_time'])
kwargs.pop(kwargs_map['end_time'])
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df = utils.parse_xml_response(r)
df = if_possible_parse_local_datetime(df)
# Handling capping
df = handle_capping(
r,
df,
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
end_date=end_date,
request_type=request_type,
**kwargs
)
return df
# Cell
def year_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
start_year = int(pd.to_datetime(start_date).strftime('%Y'))
end_year = int(pd.to_datetime(end_date).strftime('%Y'))
for year in tqdm(range(start_year, end_year+1), desc=stream):
kwargs.update({kwargs_map['year']: year})
missing_kwargs = list(set(func_params) - set(['year'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {", ".join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def construct_year_month_pairs(start_date, end_date):
dt_rng = pd.date_range(start_date, end_date, freq='M')
if len(dt_rng) == 0:
year_month_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %b').split(' '))]
else:
year_month_pairs = [tuple(dt.strftime('%Y %b').split(' ')) for dt in dt_rng]
    year_month_pairs = [(int(year), month.upper()) for year, month in year_month_pairs]
return year_month_pairs
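# Example of the mapping above (hedged, illustrative dates): construct_year_month_pairs('2020-01-15', '2020-03-20')
# returns [(2020, 'JAN'), (2020, 'FEB')] -- pd.date_range(..., freq='M') yields the month-end stamps that
# fall inside the range, and each one is rendered as an upper-cased month abbreviation for the API.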
def year_and_month_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
year_month_pairs = construct_year_month_pairs(start_date, end_date)
for year, month in tqdm(year_month_pairs, desc=stream):
kwargs.update({
kwargs_map['year']: year,
kwargs_map['month']: month
})
missing_kwargs = list(set(func_params) - set(['year', 'month'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {", ".join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def clean_year_week(year, week):
    year = int(year)
    if week == '00':
        year = year - 1
        week = 52
    else:
        week = int(week)  # e.g. '07' -> 7, '10' -> 10
    return year, week
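# Examples of the mapping above (the '%Y %W' strings come from construct_year_week_pairs below):
#   clean_year_week('2021', '00') -> (2020, 52)   # week 00 is folded back into the previous year
#   clean_year_week('2021', '07') -> (2021, 7)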
def construct_year_week_pairs(start_date, end_date):
dt_rng = pd.date_range(start_date, end_date, freq='W')
if len(dt_rng) == 0:
year_week_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %W').split(' '))]
else:
year_week_pairs = [tuple(dt.strftime('%Y %W').split(' ')) for dt in dt_rng]
year_week_pairs = [clean_year_week(year, week) for year, week in year_week_pairs]
return year_week_pairs
def year_and_week_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
year_week_pairs = construct_year_week_pairs(start_date, end_date)
for year, week in tqdm(year_week_pairs, desc=stream):
kwargs.update({
kwargs_map['year']: year,
kwargs_map['week']: week
})
missing_kwargs = list(set(func_params) - set(['year', 'week'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {", ".join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def non_temporal_request(
method: str,
api_key: str,
n_attempts: int=3,
**kwargs
):
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df = utils.parse_xml_response(r)
df = if_possible_parse_local_datetime(df)
return df
# Cell
def query_orchestrator(
method: str,
api_key: str,
request_type: str,
kwargs_map: dict=None,
func_params: list=None,
start_date: str=None,
end_date: str=None,
n_attempts: int=3,
**kwargs
):
if request_type not in ['non_temporal']:
kwargs.update({
'kwargs_map': kwargs_map,
'func_params': func_params,
'start_date': start_date,
'end_date': end_date,
})
if request_type in ['date_range', 'date_time_range']:
kwargs.update({
'request_type': request_type,
})
request_type_to_func = {
'SP_and_date': SP_and_date_request,
'date_range': date_range_request,
'date_time_range': date_range_request,
'year': year_request,
'year_and_month': year_and_month_request,
'year_and_week': year_and_week_request,
'non_temporal': non_temporal_request
}
assert request_type in request_type_to_func.keys(), f"{request_type} must be one of: {", ".join(request_type_to_func.keys())}"
request_func = request_type_to_func[request_type]
df = request_func(
method=method,
api_key=api_key,
n_attempts=n_attempts,
**kwargs
)
df = df.reset_index(drop=True)
return df
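# Hedged usage sketch (the method name, kwargs_map and func_params below are illustrative -- they have
# to match whatever the wrapped `raw` client actually exposes):
#
#   df = query_orchestrator(
#       method='get_B1770',                      # hypothetical raw-client method
#       api_key='YOUR-API-KEY',
#       request_type='SP_and_date',
#       kwargs_map={'date': 'SettlementDate', 'SP': 'Period'},
#       func_params=['APIKey', 'date', 'SP', 'ServiceType'],
#       start_date='2020-01-01',
#       end_date='2020-01-02',
#   )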
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05-orchestrator.ipynb (unless otherwise specified).
__all__ = ['retry_request', 'if_possible_parse_local_datetime', 'SP_and_date_request', 'handle_capping',
'date_range_request', 'year_request', 'construct_year_month_pairs', 'year_and_month_request',
'clean_year_week', 'construct_year_week_pairs', 'year_and_week_request', 'non_temporal_request',
'query_orchestrator']
# Cell
import pandas as pd
from tqdm import tqdm
from warnings import warn
from requests.models import Response
from . import utils, raw
# Cell
def retry_request(raw, method, kwargs, n_attempts=3):
attempts = 0
success = False
    while attempts < n_attempts and not success:
try:
r = getattr(raw, method)(**kwargs)
utils.check_status(r)
success = True
except Exception as e:
attempts += 1
if attempts == n_attempts:
raise e
return r
def if_possible_parse_local_datetime(df):
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod', 'initialForecastPublishingPeriodCommencingTime', 'latestForecastPublishingPeriodCommencingTime', 'outTurnPublishingPeriodCommencingTime']
dt_cols = [col for col in df.columns if 'date' in col.lower() or col in dt_cols_with_period_in_name]
sp_cols = [col for col in df.columns if 'period' in col.lower() and col not in dt_cols_with_period_in_name]
if len(dt_cols)==1 and len(sp_cols)==1:
df = utils.parse_local_datetime(df, dt_col=dt_cols[0], SP_col=sp_cols[0])
return df
def SP_and_date_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
df_dates_SPs = utils.dt_rng_to_SPs(start_date, end_date)
date_SP_tuples = list(df_dates_SPs.reset_index().itertuples(index=False, name=None))[:-1]
for datetime, query_date, SP in tqdm(date_SP_tuples, desc=stream, total=len(date_SP_tuples)):
kwargs.update({
kwargs_map['date']: datetime.strftime('%Y-%m-%d'),
kwargs_map['SP']: SP,
})
missing_kwargs = list(set(func_params) - set(['SP', 'date'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_SP = utils.parse_xml_response(r)
df = pd.concat([df, df_SP])
df = utils.expand_cols(df)
df = if_possible_parse_local_datetime(df)
return df
# Cell
def handle_capping(
r: Response,
df: pd.DataFrame,
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
end_date: str,
request_type: str,
**kwargs
):
capping_applied = utils.check_capping(r)
    assert capping_applied is not None, 'No information on whether or not capping limits had been breached could be found in the response metadata'
    if capping_applied: # only subset of date range returned
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod']
dt_cols = [col for col in df.columns if ('date' in col.lower() or col in dt_cols_with_period_in_name) and ('end' not in col.lower())]
if len(dt_cols) == 1:
start_date = pd.to_datetime(df[dt_cols[0]]).max().strftime('%Y-%m-%d')
if 'start_time' in kwargs.keys():
kwargs['start_time'] = '00:00'
if pd.to_datetime(start_date) >= pd.to_datetime(end_date):
                warn(f'The `end_date` ({end_date}) was earlier than `start_date` ({start_date})\nThe `start_date` will be set one day earlier than the `end_date`.')
start_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
warn(f'Response was capped, request is rerunning for missing data from {start_date}')
df_rerun = date_range_request(
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
start_date=start_date,
end_date=end_date,
request_type=request_type,
**kwargs
)
df = pd.concat([df, df_rerun])
df = df.drop_duplicates()
else:
warn(f'Response was capped: a new `start_date` to continue requesting could not be determined automatically, please handle manually for `{method}`')
return df
def date_range_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
request_type: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
for kwarg in ['start_time', 'end_time']:
if kwarg not in kwargs_map.keys():
kwargs_map[kwarg] = kwarg
kwargs[kwargs_map['start_date']], kwargs[kwargs_map['start_time']] = pd.to_datetime(start_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
kwargs[kwargs_map['end_date']], kwargs[kwargs_map['end_time']] = pd.to_datetime(end_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
if 'SP' in kwargs_map.keys():
kwargs[kwargs_map['SP']] = '*'
func_params.remove('SP')
func_params += [kwargs_map['SP']]
missing_kwargs = list(set(func_params) - set(['start_date', 'end_date', 'start_time', 'end_time'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
if request_type == 'date_range':
kwargs.pop(kwargs_map['start_time'])
kwargs.pop(kwargs_map['end_time'])
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df = utils.parse_xml_response(r)
df = if_possible_parse_local_datetime(df)
# Handling capping
df = handle_capping(
r,
df,
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
end_date=end_date,
request_type=request_type,
**kwargs
)
return df
# Cell
def year_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
start_year = int(pd.to_datetime(start_date).strftime('%Y'))
end_year = int(pd.to_datetime(end_date).strftime('%Y'))
for year in tqdm(range(start_year, end_year+1), desc=stream):
kwargs.update({kwargs_map['year']: year})
missing_kwargs = list(set(func_params) - set(['year'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def construct_year_month_pairs(start_date, end_date):
dt_rng = pd.date_range(start_date, end_date, freq='M')
if len(dt_rng) == 0:
year_month_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %b').split(' '))]
else:
year_month_pairs = [tuple(dt.strftime('%Y %b').split(' ')) for dt in dt_rng]
    year_month_pairs = [(int(year), month.upper()) for year, month in year_month_pairs]
return year_month_pairs
def year_and_month_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
year_month_pairs = construct_year_month_pairs(start_date, end_date)
for year, month in tqdm(year_month_pairs, desc=stream):
kwargs.update({
kwargs_map['year']: year,
kwargs_map['month']: month
})
missing_kwargs = list(set(func_params) - set(['year', 'month'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def clean_year_week(year, week):
    year = int(year)
    if week == '00':
        year = year - 1
        week = 52
    else:
        week = int(week)  # e.g. '07' -> 7, '10' -> 10
    return year, week
def construct_year_week_pairs(start_date, end_date):
dt_rng = pd.date_range(start_date, end_date, freq='W')
if len(dt_rng) == 0:
year_week_pairs = [tuple(pd.to_datetime(start_date).strftime('%Y %W').split(' '))]
else:
year_week_pairs = [tuple(dt.strftime('%Y %W').split(' ')) for dt in dt_rng]
year_week_pairs = [clean_year_week(year, week) for year, week in year_week_pairs]
return year_week_pairs
def year_and_week_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
year_week_pairs = construct_year_week_pairs(start_date, end_date)
for year, week in tqdm(year_week_pairs, desc=stream):
kwargs.update({
kwargs_map['year']: year,
kwargs_map['week']: week
})
missing_kwargs = list(set(func_params) - set(['year', 'week'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_year = utils.parse_xml_response(r)
df = pd.concat([df, df_year])
df = if_possible_parse_local_datetime(df)
return df
# Cell
def non_temporal_request(
method: str,
api_key: str,
n_attempts: int=3,
**kwargs
):
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df = utils.parse_xml_response(r)
df = if_possible_parse_local_datetime(df)
return df
# Cell
def query_orchestrator(
method: str,
api_key: str,
request_type: str,
kwargs_map: dict=None,
func_params: list=None,
start_date: str=None,
end_date: str=None,
n_attempts: int=3,
**kwargs
):
if request_type not in ['non_temporal']:
kwargs.update({
'kwargs_map': kwargs_map,
'func_params': func_params,
'start_date': start_date,
'end_date': end_date,
})
if request_type in ['date_range', 'date_time_range']:
kwargs.update({
'request_type': request_type,
})
request_type_to_func = {
'SP_and_date': SP_and_date_request,
'date_range': date_range_request,
'date_time_range': date_range_request,
'year': year_request,
'year_and_month': year_and_month_request,
'year_and_week': year_and_week_request,
'non_temporal': non_temporal_request
}
assert request_type in request_type_to_func.keys(), f"{request_type} must be one of: {', '.join(request_type_to_func.keys())}"
request_func = request_type_to_func[request_type]
df = request_func(
method=method,
api_key=api_key,
n_attempts=n_attempts,
**kwargs
)
df = df.reset_index(drop=True)
return df
|
import argparse
import datetime
import os
import traceback
import kornia
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from tqdm.autonotebook import tqdm
import models
from datasets import LowLightDataset, LowLightFDataset
from models import PSNR, SSIM, CosineLR
from tools import SingleSummaryWriter
from tools import saver, mutils
def get_args():
parser = argparse.ArgumentParser('Breaking Downing the Darkness')
parser.add_argument('--num_gpus', type=int, default=1, help='number of gpus being used')
parser.add_argument('--num_workers', type=int, default=12, help='num_workers of dataloader')
parser.add_argument('--batch_size', type=int, default=1, help='The number of images per batch among all devices')
parser.add_argument('-m1', '--model1', type=str, default='INet',
help='Model1 Name')
parser.add_argument('-m2', '--model2', type=str, default='NSNet',
help='Model2 Name')
parser.add_argument('-m3', '--model3', type=str, default='NSNet',
help='Model3 Name')
parser.add_argument('-m1w', '--model1_weight', type=str, default=None,
help='Model Name')
parser.add_argument('-m2w', '--model2_weight', type=str, default=None,
help='Model Name')
parser.add_argument('--comment', type=str, default='default',
help='Project comment')
parser.add_argument('--graph', action='store_true')
parser.add_argument('--no_sche', action='store_true')
parser.add_argument('--sampling', action='store_true')
parser.add_argument('--slope', type=float, default=2.)
parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--optim', type=str, default='adam', help='select optimizer for training, '
                        'suggest using \'adamw\' until the'
                        ' very final stage then switch to \'sgd\'')
parser.add_argument('--num_epochs', type=int, default=500)
    parser.add_argument('--val_interval', type=int, default=1, help='Number of epochs between validation phases')
parser.add_argument('--save_interval', type=int, default=500, help='Number of steps between saving')
parser.add_argument('--data_path', type=str, default='./data/LOL',
help='the root folder of dataset')
parser.add_argument('--log_path', type=str, default='logs/')
parser.add_argument('--saved_path', type=str, default='logs/')
args = parser.parse_args()
return args
class ModelNSNet(nn.Module):
def __init__(self, model1, model2, model3):
super().__init__()
self.texture_loss = models.SSIML1Loss(channels=1)
self.model_ianet = model1(in_channels=1, out_channels=1)
self.model_nsnet = model2(in_channels=2, out_channels=1)
self.model_fusenet = model3(in_channels=3, out_channels=1)
assert opt.model1_weight is not None
self.load_weight(self.model_ianet, opt.model1_weight)
self.load_weight(self.model_nsnet, opt.model2_weight)
self.model_ianet.eval()
self.model_nsnet.eval()
self.eps = 1e-2
def load_weight(self, model, weight_pth):
state_dict = torch.load(weight_pth)
ret = model.load_state_dict(state_dict, strict=True)
print(ret)
def noise_syn(self, illumi, strength):
return torch.exp(-illumi) * strength
def forward(self, image, image_gt, training=True):
texture_nss = []
with torch.no_grad():
if training:
image = image.squeeze(0)
image_gt = image_gt.repeat(8, 1, 1, 1)
texture_in, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image), 1, dim=1)
texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image_gt), 1, dim=1)
texture_in_down = F.interpolate(texture_in, scale_factor=0.5, mode='bicubic', align_corners=True)
illumi = self.model_ianet(texture_in_down)
illumi = F.interpolate(illumi, scale_factor=2, mode='bicubic', align_corners=True)
noisy_gt = texture_in / torch.clamp_min(illumi, self.eps)
for strength in [0, 0.05, 0.1]:
illumi = torch.clamp(illumi, 0., 1.)
attention = self.noise_syn(illumi, strength=strength)
texture_res = self.model_nsnet(torch.cat([noisy_gt, attention], dim=1))
texture_ns = noisy_gt + texture_res
texture_nss.append(texture_ns)
texture_nss = torch.cat(texture_nss, dim=1).detach()
texture_fuse = self.model_fusenet(texture_nss)
restor_loss = self.texture_loss(texture_fuse, texture_gt)
psnr = PSNR(texture_fuse, texture_gt)
ssim = SSIM(texture_fuse, texture_gt).item()
return noisy_gt, texture_nss, texture_fuse, texture_res, illumi, restor_loss, psnr, ssim
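# Summary of the frozen two-stage front end used in ModelNSNet.forward above (descriptive, based only
# on this file): the Y channel of the low-light input is downsampled and fed to model_ianet to predict
# an illumination map; dividing the Y channel by the clamped illumination gives an over-amplified,
# noisy texture estimate; model_nsnet then denoises that estimate three times under synthetic
# noise-attention maps exp(-illumi) * strength for strength in {0, 0.05, 0.1}; only model_fusenet,
# which fuses the three candidates into the final texture, is trained here, against the ground-truth
# Y channel with an SSIM + L1 loss.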
def train(opt):
if torch.cuda.is_available():
torch.cuda.manual_seed(42)
else:
torch.manual_seed(42)
timestamp = mutils.get_formatted_time()
opt.saved_path = opt.saved_path + f'/{opt.comment}/{timestamp}'
opt.log_path = opt.log_path + f'/{opt.comment}/{timestamp}/tensorboard/'
os.makedirs(opt.log_path, exist_ok=True)
os.makedirs(opt.saved_path, exist_ok=True)
training_params = {'batch_size': opt.batch_size,
'shuffle': True,
'drop_last': True,
'num_workers': opt.num_workers}
val_params = {'batch_size': 1,
'shuffle': False,
'drop_last': True,
'num_workers': opt.num_workers}
training_set = LowLightFDataset(os.path.join(opt.data_path, 'train'), image_split='images_aug')
training_generator = DataLoader(training_set, **training_params)
val_set = LowLightDataset(os.path.join(opt.data_path, 'eval'))
val_generator = DataLoader(val_set, **val_params)
model1 = getattr(models, opt.model1)
model2 = getattr(models, opt.model2)
model3 = getattr(models, opt.model3)
writer = SingleSummaryWriter(opt.log_path + f'/{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}/')
model = ModelNSNet(model1, model2, model3)
print(model)
if opt.num_gpus > 0:
model = model.cuda()
if opt.num_gpus > 1:
model = nn.DataParallel(model)
if opt.optim == 'adam':
optimizer = torch.optim.Adam(model.model_fusenet.parameters(), opt.lr)
else:
optimizer = torch.optim.SGD(model.model_fusenet.parameters(), opt.lr, momentum=0.9, nesterov=True)
scheduler = CosineLR(optimizer, opt.lr, opt.num_epochs)
epoch = 0
step = 0
model.model_fusenet.train()
num_iter_per_epoch = len(training_generator)
try:
for epoch in range(opt.num_epochs):
last_epoch = step // num_iter_per_epoch
if epoch < last_epoch:
continue
epoch_loss = []
progress_bar = tqdm(training_generator)
saver.base_url = os.path.join(opt.saved_path, 'results', '%03d' % epoch)
if not opt.sampling:
for iter, (data, target, name) in enumerate(progress_bar):
if iter < step - last_epoch * num_iter_per_epoch:
progress_bar.update()
continue
try:
if opt.num_gpus == 1:
data = data.cuda()
target = target.cuda()
optimizer.zero_grad()
noisy_gt, texture_nss, texture_fuse, texture_res, \
illumi, restor_loss, psnr, ssim = model(data, target, training=True)
loss = restor_loss
loss.backward()
optimizer.step()
epoch_loss.append(float(loss))
progress_bar.set_description(
'Step: {}. Epoch: {}/{}. Iteration: {}/{}. restor_loss: {:.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(
step, epoch, opt.num_epochs, iter + 1, num_iter_per_epoch, restor_loss.item(), psnr,
ssim))
writer.add_scalar('Loss/train', loss, step)
writer.add_scalar('PSNR/train', psnr, step)
writer.add_scalar('SSIM/train', ssim, step)
# log learning_rate
current_lr = optimizer.param_groups[0]['lr']
writer.add_scalar('learning_rate', current_lr, step)
step += 1
except Exception as e:
print('[Error]', traceback.format_exc())
print(e)
continue
if not opt.no_sche:
scheduler.step()
if epoch % opt.val_interval == 0:
model.model_fusenet.eval()
loss_ls = []
psnrs = []
ssims = []
for iter, (data, target, name) in enumerate(val_generator):
with torch.no_grad():
if opt.num_gpus == 1:
data = data.cuda()
target = target.cuda()
noisy_gt, texture_nss, texture_fuse, texture_res, \
illumi, restor_loss, psnr, ssim = model(data, target, training=False)
texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(target), 1, dim=1)
saver.save_image(noisy_gt, name=os.path.splitext(name[0])[0] + '_in')
saver.save_image(texture_nss.transpose(0, 1), name=os.path.splitext(name[0])[0] + '_ns')
saver.save_image(texture_fuse, name=os.path.splitext(name[0])[0] + '_fuse')
saver.save_image(texture_res, name=os.path.splitext(name[0])[0] + '_res')
saver.save_image(illumi, name=os.path.splitext(name[0])[0] + '_ill')
saver.save_image(target, name=os.path.splitext(name[0])[0] + '_gt')
loss = restor_loss
loss_ls.append(loss.item())
psnrs.append(psnr)
ssims.append(ssim)
loss = np.mean(np.array(loss_ls))
psnr = np.mean(np.array(psnrs))
ssim = np.mean(np.array(ssims))
print(
'Val. Epoch: {}/{}. Loss: {:1.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(
epoch, opt.num_epochs, loss, psnr, ssim))
writer.add_scalar('Loss/val', loss, step)
writer.add_scalar('PSNR/val', psnr, step)
writer.add_scalar('SSIM/val', ssim, step)
save_checkpoint(model, f'{opt.model3}_{'%03d' % epoch}_{psnr}_{ssim}_{step}.pth')
model.model_fusenet.train()
if opt.sampling:
exit(0)
except KeyboardInterrupt:
save_checkpoint(model, f'{opt.model3}_{epoch}_{step}_keyboardInterrupt.pth')
writer.close()
writer.close()
def save_checkpoint(model, name):
if isinstance(model, nn.DataParallel):
torch.save(model.module.model_fusenet.state_dict(), os.path.join(opt.saved_path, name))
else:
        torch.save(model.model_fusenet.state_dict(), os.path.join(opt.saved_path, name))
if __name__ == '__main__':
opt = get_args()
train(opt)
|
import argparse
import datetime
import os
import traceback
import kornia
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from tqdm.autonotebook import tqdm
import models
from datasets import LowLightDataset, LowLightFDataset
from models import PSNR, SSIM, CosineLR
from tools import SingleSummaryWriter
from tools import saver, mutils
def get_args():
parser = argparse.ArgumentParser('Breaking Downing the Darkness')
parser.add_argument('--num_gpus', type=int, default=1, help='number of gpus being used')
parser.add_argument('--num_workers', type=int, default=12, help='num_workers of dataloader')
parser.add_argument('--batch_size', type=int, default=1, help='The number of images per batch among all devices')
parser.add_argument('-m1', '--model1', type=str, default='INet',
help='Model1 Name')
parser.add_argument('-m2', '--model2', type=str, default='NSNet',
help='Model2 Name')
parser.add_argument('-m3', '--model3', type=str, default='NSNet',
help='Model3 Name')
parser.add_argument('-m1w', '--model1_weight', type=str, default=None,
help='Model Name')
parser.add_argument('-m2w', '--model2_weight', type=str, default=None,
help='Model Name')
parser.add_argument('--comment', type=str, default='default',
help='Project comment')
parser.add_argument('--graph', action='store_true')
parser.add_argument('--no_sche', action='store_true')
parser.add_argument('--sampling', action='store_true')
parser.add_argument('--slope', type=float, default=2.)
parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--optim', type=str, default='adam', help='select optimizer for training, '
                        'suggest using \'adamw\' until the'
                        ' very final stage then switch to \'sgd\'')
parser.add_argument('--num_epochs', type=int, default=500)
    parser.add_argument('--val_interval', type=int, default=1, help='Number of epochs between validation phases')
parser.add_argument('--save_interval', type=int, default=500, help='Number of steps between saving')
parser.add_argument('--data_path', type=str, default='./data/LOL',
help='the root folder of dataset')
parser.add_argument('--log_path', type=str, default='logs/')
parser.add_argument('--saved_path', type=str, default='logs/')
args = parser.parse_args()
return args
class ModelNSNet(nn.Module):
def __init__(self, model1, model2, model3):
super().__init__()
self.texture_loss = models.SSIML1Loss(channels=1)
self.model_ianet = model1(in_channels=1, out_channels=1)
self.model_nsnet = model2(in_channels=2, out_channels=1)
self.model_fusenet = model3(in_channels=3, out_channels=1)
assert opt.model1_weight is not None
self.load_weight(self.model_ianet, opt.model1_weight)
self.load_weight(self.model_nsnet, opt.model2_weight)
self.model_ianet.eval()
self.model_nsnet.eval()
self.eps = 1e-2
def load_weight(self, model, weight_pth):
state_dict = torch.load(weight_pth)
ret = model.load_state_dict(state_dict, strict=True)
print(ret)
def noise_syn(self, illumi, strength):
return torch.exp(-illumi) * strength
def forward(self, image, image_gt, training=True):
texture_nss = []
with torch.no_grad():
if training:
image = image.squeeze(0)
image_gt = image_gt.repeat(8, 1, 1, 1)
texture_in, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image), 1, dim=1)
texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(image_gt), 1, dim=1)
texture_in_down = F.interpolate(texture_in, scale_factor=0.5, mode='bicubic', align_corners=True)
illumi = self.model_ianet(texture_in_down)
illumi = F.interpolate(illumi, scale_factor=2, mode='bicubic', align_corners=True)
noisy_gt = texture_in / torch.clamp_min(illumi, self.eps)
for strength in [0, 0.05, 0.1]:
illumi = torch.clamp(illumi, 0., 1.)
attention = self.noise_syn(illumi, strength=strength)
texture_res = self.model_nsnet(torch.cat([noisy_gt, attention], dim=1))
texture_ns = noisy_gt + texture_res
texture_nss.append(texture_ns)
texture_nss = torch.cat(texture_nss, dim=1).detach()
texture_fuse = self.model_fusenet(texture_nss)
restor_loss = self.texture_loss(texture_fuse, texture_gt)
psnr = PSNR(texture_fuse, texture_gt)
ssim = SSIM(texture_fuse, texture_gt).item()
return noisy_gt, texture_nss, texture_fuse, texture_res, illumi, restor_loss, psnr, ssim
def train(opt):
if torch.cuda.is_available():
torch.cuda.manual_seed(42)
else:
torch.manual_seed(42)
timestamp = mutils.get_formatted_time()
opt.saved_path = opt.saved_path + f'/{opt.comment}/{timestamp}'
opt.log_path = opt.log_path + f'/{opt.comment}/{timestamp}/tensorboard/'
os.makedirs(opt.log_path, exist_ok=True)
os.makedirs(opt.saved_path, exist_ok=True)
training_params = {'batch_size': opt.batch_size,
'shuffle': True,
'drop_last': True,
'num_workers': opt.num_workers}
val_params = {'batch_size': 1,
'shuffle': False,
'drop_last': True,
'num_workers': opt.num_workers}
training_set = LowLightFDataset(os.path.join(opt.data_path, 'train'), image_split='images_aug')
training_generator = DataLoader(training_set, **training_params)
val_set = LowLightDataset(os.path.join(opt.data_path, 'eval'))
val_generator = DataLoader(val_set, **val_params)
model1 = getattr(models, opt.model1)
model2 = getattr(models, opt.model2)
model3 = getattr(models, opt.model3)
writer = SingleSummaryWriter(opt.log_path + f'/{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}/')
model = ModelNSNet(model1, model2, model3)
print(model)
if opt.num_gpus > 0:
model = model.cuda()
if opt.num_gpus > 1:
model = nn.DataParallel(model)
if opt.optim == 'adam':
optimizer = torch.optim.Adam(model.model_fusenet.parameters(), opt.lr)
else:
optimizer = torch.optim.SGD(model.model_fusenet.parameters(), opt.lr, momentum=0.9, nesterov=True)
scheduler = CosineLR(optimizer, opt.lr, opt.num_epochs)
epoch = 0
step = 0
model.model_fusenet.train()
num_iter_per_epoch = len(training_generator)
try:
for epoch in range(opt.num_epochs):
last_epoch = step // num_iter_per_epoch
if epoch < last_epoch:
continue
epoch_loss = []
progress_bar = tqdm(training_generator)
saver.base_url = os.path.join(opt.saved_path, 'results', '%03d' % epoch)
if not opt.sampling:
for iter, (data, target, name) in enumerate(progress_bar):
if iter < step - last_epoch * num_iter_per_epoch:
progress_bar.update()
continue
try:
if opt.num_gpus == 1:
data = data.cuda()
target = target.cuda()
optimizer.zero_grad()
noisy_gt, texture_nss, texture_fuse, texture_res, \
illumi, restor_loss, psnr, ssim = model(data, target, training=True)
loss = restor_loss
loss.backward()
optimizer.step()
epoch_loss.append(float(loss))
progress_bar.set_description(
'Step: {}. Epoch: {}/{}. Iteration: {}/{}. restor_loss: {:.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(
step, epoch, opt.num_epochs, iter + 1, num_iter_per_epoch, restor_loss.item(), psnr,
ssim))
writer.add_scalar('Loss/train', loss, step)
writer.add_scalar('PSNR/train', psnr, step)
writer.add_scalar('SSIM/train', ssim, step)
# log learning_rate
current_lr = optimizer.param_groups[0]['lr']
writer.add_scalar('learning_rate', current_lr, step)
step += 1
except Exception as e:
print('[Error]', traceback.format_exc())
print(e)
continue
if not opt.no_sche:
scheduler.step()
if epoch % opt.val_interval == 0:
model.model_fusenet.eval()
loss_ls = []
psnrs = []
ssims = []
for iter, (data, target, name) in enumerate(val_generator):
with torch.no_grad():
if opt.num_gpus == 1:
data = data.cuda()
target = target.cuda()
noisy_gt, texture_nss, texture_fuse, texture_res, \
illumi, restor_loss, psnr, ssim = model(data, target, training=False)
texture_gt, _, _ = torch.split(kornia.color.rgb_to_ycbcr(target), 1, dim=1)
saver.save_image(noisy_gt, name=os.path.splitext(name[0])[0] + '_in')
saver.save_image(texture_nss.transpose(0, 1), name=os.path.splitext(name[0])[0] + '_ns')
saver.save_image(texture_fuse, name=os.path.splitext(name[0])[0] + '_fuse')
saver.save_image(texture_res, name=os.path.splitext(name[0])[0] + '_res')
saver.save_image(illumi, name=os.path.splitext(name[0])[0] + '_ill')
saver.save_image(target, name=os.path.splitext(name[0])[0] + '_gt')
loss = restor_loss
loss_ls.append(loss.item())
psnrs.append(psnr)
ssims.append(ssim)
loss = np.mean(np.array(loss_ls))
psnr = np.mean(np.array(psnrs))
ssim = np.mean(np.array(ssims))
print(
'Val. Epoch: {}/{}. Loss: {:1.5f}, psnr: {:.5f}, ssim: {:.5f}'.format(
epoch, opt.num_epochs, loss, psnr, ssim))
writer.add_scalar('Loss/val', loss, step)
writer.add_scalar('PSNR/val', psnr, step)
writer.add_scalar('SSIM/val', ssim, step)
save_checkpoint(model, f'{opt.model3}_{"%03d" % epoch}_{psnr}_{ssim}_{step}.pth')
model.model_fusenet.train()
if opt.sampling:
exit(0)
except KeyboardInterrupt:
save_checkpoint(model, f'{opt.model3}_{epoch}_{step}_keyboardInterrupt.pth')
writer.close()
writer.close()
def save_checkpoint(model, name):
if isinstance(model, nn.DataParallel):
torch.save(model.module.model_fusenet.state_dict(), os.path.join(opt.saved_path, name))
else:
        torch.save(model.model_fusenet.state_dict(), os.path.join(opt.saved_path, name))
if __name__ == '__main__':
opt = get_args()
train(opt)
|
"""
Downloading images scraped from https://substance3d.adobe.com/assets/allassets
and saved in a local SQLite file
"""
import os
import time
import sys
import platform
from os import path
import requests # to get image from the web
import shutil # to save it locally
from rich import pretty
from rich.console import Console
from rich.traceback import install
from rich.progress import track
from common_database_access import CommonDatabaseAccess
import f_icon
from pathlib import Path
console = Console()
pretty.install()
install() # this is for tracing project activity
global_data = {"version": "Beta 1.2 (22.01.2022)\n"}
def clear_console():
"""Clears console view"""
command = "clear"
if os.name in ("nt", "dos"): # If Machine is running on Windows, use cls
command = "cls"
os.system(command)
def download_image(url, file_path):
if not path.exists(file_path):
r = requests.get(url, stream=True)
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
r.raw.decode_content = True
# Open a local file with wb ( write binary ) permission.
with open(file_path, "wb") as f:
shutil.copyfileobj(r.raw, f)
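# Note (descriptive): download_image skips files that already exist on disk and streams the response
# body straight to disk via shutil.copyfileobj; it does not check r.status_code, so a failed request
# is written out as-is. check_for_download below handles the "refresh" case by renaming the existing
# file with append_date() before downloading again.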
def append_date(filename):
"""adds date to the end of the filename
:param str filename: filename
:return:
"""
p = Path(filename)
return "{0}_{2}{1}".format(
Path.joinpath(p.parent, p.stem), p.suffix, time.strftime("%Y%m%d-%H%M%S")
)
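# Example (hedged, illustrative timestamp): append_date("reports/FileTransferReport.txt")
# -> "reports/FileTransferReport_20220131-141502.txt"; this is how the report writers further down
# produce unique filenames instead of overwriting earlier reports.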
def check_for_download(url, file_path, need_to_refresh):
# console.print(url)
if url:
if os.path.exists(file_path) and need_to_refresh:
os.rename(file_path, append_date(file_path))
download_image(url, file_path)
def convert_to_nice_name(filename) -> str:
"""
Replaces _ with spaces in filename
:param str filename: filename to convert
:return:
"""
return filename.replace("_", " ")
def convert_to_ugly_name(filename) -> str:
"""
Replaces space with _ in filename
:param str filename: filename to convert
:return:
"""
return filename.replace(" ", "_")
def create_folder_for_type(database, asset_types):
# 1. create _source folder for files to move to their location
if not os.path.exists(
global_data["local_path"] + os.sep + global_data["source_path"]
):
os.makedirs(global_data["local_path"] + os.sep + global_data["source_path"])
# 2. Now creating rest of the folders
console.print("Creating folders ...")
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
os.makedirs(global_data["local_path"] + os.sep + a["name"])
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
os.makedirs(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
)
console.print(f"{a["name"]} - {c["name"]}")
for asset in track(assets, description="Assets.", total=len(assets)):
if not os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
os.makedirs(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
)
input("Press any enter to close...")
def create_folders(database):
menu_title = " Select asset type to create folder"
count = 1
menu_items = []
all_asset_types = database.get_all_asset_types()
for asset_type in all_asset_types:
menu_items.append(f"[{count}] {asset_type["name"]}")
count = count + 1
menu_items.append(f"[{count}] All")
count = count + 1
menu_items.append(f"[{count}] Return")
menu_exit = False
while not menu_exit:
# cls()
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if 1 <= menu_sel < count - 1: # Specific asset type
# categories = database.get_all_categories_by_asset_type_id(
# all_asset_types[menu_sel - 1]["id"]
# )
create_folder_for_type(database, [all_asset_types[menu_sel - 1]])
elif menu_sel == count - 1: # all asset types
# categories = database.get_all_categories_by_id(14)
# categories = database.get_all_categories()
create_folder_for_type(database, all_asset_types)
elif menu_sel == count: # Quit
menu_exit = True
def download_all_images(database):
console.print("Downloading images ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a["name"]} - {c["name"]}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
# console.print(asset)
check_for_download(
asset["preview_image"],
local_path + "Preview.png",
asset["have_preview_image_changed"],
)
check_for_download(
asset["details_image"],
local_path + "Details.png",
asset["have_details_image_changed"],
)
check_for_download(
asset["variant_1_image"],
local_path + "Variant1.png",
asset["have_variant_1_image_changed"],
)
check_for_download(
asset["variant_2_image"],
local_path + "Variant2.png",
asset["have_variant_2_image_changed"],
)
check_for_download(
asset["variant_3_image"],
local_path + "Variant3.png",
asset["have_variant_3_image_changed"],
)
database.set_asset_art_as_updated(asset["id"])
input("Press any enter to close...")
def make_all_icons(database, ignore_created=True):
console.print("Creating folder icons ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a["name"]} - {c["name"]}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
# console.print(asset)
if platform.system() == "Windows":
if os.path.exists(local_path + "Preview.png") and (
not os.path.exists(local_path + "Preview.ico")
or ignore_created
):
f_icon.create_icon(local_path + "Preview.png")
else:
if os.path.exists(local_path + "Preview.png"):
f_icon.create_icon(local_path + "Preview.png")
input("Press any enter to close...")
def transfer_all_local_files(database):
console.print("Placing files in corresponding folders ...")
files = os.listdir(global_data["local_path"] + os.sep + global_data["source_path"])
asset_types = database.get_all_asset_types()
placement_log = {"moved": [], "existing": [], "missing": [], "existing_full": []}
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a["name"]} - {c["name"]}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
                    for f in files:  # going over all files in the _source folder that we know from the start
                        if os.path.exists( # check that the file is still in _source; it may already have been moved
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f
):
                            if not os.path.exists( # check whether this file already exists at the destination
                                global_data["local_path"] # if it is, then we have a double
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
):
if (
f.lower().endswith(".jpg")
and convert_to_nice_name(f.lower()).find(
asset["name"].lower()
)
>= 0
):
# if it is jpeg, then extra case. We check if asset name is inside file name
os.rename(
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f,
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f,
)
placement_log["moved"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
elif not f.lower().endswith(
".jpg"
): # if this is not a jpg, then we check name
# without extension to match with asset name
file_details = os.path.splitext(f)
if (
convert_to_nice_name(file_details[0].lower())
== asset["name"].lower()
):
os.rename(
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f,
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f,
)
placement_log["moved"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
else: # we had a double name, so mark it as double
placement_log["existing_full"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
placement_log["existing"].append(f)
# generating report
files = os.listdir(global_data["local_path"] + os.sep + global_data["source_path"])
placement_log["missing"] = list(set(files) - set(placement_log["existing"]))
file = open(
append_date(global_data["local_path"] + os.sep + "FileTransferReport.txt"),
"w",
encoding="utf-8",
)
file.write(f'Moved files({len(placement_log['moved'])}): \n')
file.write("\n")
for f in placement_log["moved"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Existed files({len(placement_log['existing_full'])}): \n')
file.write("\n")
for f in placement_log["existing_full"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Missing locations for files({len(placement_log['missing'])}): \n')
file.write("\n")
for f in placement_log["missing"]:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def generate_detail_report(database):
console.print("Generating detail report ...")
asset_types = database.get_all_asset_types()
placement_log = {"have": [], "missing": [], "need": []}
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a["name"]} - {c["name"]}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
count = 0
have = 0
missing = ""
if asset["format_sbsar"]:
count = count + 1
if asset["have_format_sbsar"]:
have = have + 1
else:
missing = missing + "sbsar "
changed_record = True
if asset["format_sbs"]:
count = count + 1
if asset["have_format_sbs"]:
have = have + 1
else:
missing = missing + "sbs "
if asset["format_exr"]:
count = count + 1
if asset["have_format_exr"]:
have = have + 1
else:
missing = missing + "exr "
if asset["format_fbx"]:
count = count + 1
if asset["have_format_fbx"]:
have = have + 1
else:
missing = missing + "fbx "
if asset["format_glb"]:
count = count + 1
if asset["have_format_glb"]:
have = have + 1
else:
missing = missing + "glb "
if asset["format_mdl"]:
count = count + 1
if asset["have_format_mdl"]:
have = have + 1
else:
missing = missing + "mdl "
if count == have:
placement_log["have"].append(
a["name"] + " > " + c["name"] + " > " + asset["name"]
)
elif count != have and have > 0:
placement_log["missing"].append(
a["name"]
+ " > "
+ c["name"]
+ " > "
+ asset["name"]
+ " : missing formats "
+ missing
)
else:
placement_log["need"].append(
a["name"] + " > " + c["name"] + " > " + asset["name"]
)
file = open(
append_date(global_data["local_path"] + os.sep + "AssetDetailsCountReport.txt"),
"w",
encoding="utf-8",
)
    file.write(f'Have assets({len(placement_log["have"])}): \n')
file.write("\n")
for f in placement_log["have"]:
file.write(f + "\n")
file.write("\n")
    file.write(f'Missing assets({len(placement_log["missing"])}): \n')
file.write("\n")
for f in placement_log["missing"]:
file.write(f + "\n")
file.write("\n")
    file.write(f'Needed assets({len(placement_log["need"])}): \n')
file.write("\n")
for f in placement_log["need"]:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def generate_folder_report(database):
console.print("Generating folder report ...")
asset_types = database.get_all_asset_types()
placement_log = []
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
# if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
# continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
# if not os.path.exists(
# global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
# ):
# continue
console.print(f"{a["name"]} - {c["name"]}")
have = 0
missing = 0
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
have = have + 1
else:
missing = missing + 1
placement_log.append(f"{a["name"]} - {c["name"]} (Have {have}; Missing {missing})")
file = open(
append_date(global_data["local_path"] + os.sep + "AssetFolderCountReport.txt"),
"w",
encoding="utf-8",
)
for f in placement_log:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def mark_database_with_my_files(database):
console.print("Checking local files for the database ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a["name"]} - {c["name"]}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
all_files = []
for lp, currentDirectory, files in os.walk(local_path):
all_files.extend(files)
asset["have_format_sbsar"] = False
asset["have_format_sbs"] = False
asset["have_format_exr"] = False
asset["have_format_fbx"] = False
asset["have_format_glb"] = False
asset["have_format_mdl"] = False
for file in all_files:
if file.lower().endswith(".sbsar") and asset["format_sbsar"]:
asset["have_format_sbsar"] = True
if file.lower().endswith(".sbs") and asset["format_sbs"]:
asset["have_format_sbs"] = True
if file.lower().endswith(".exr") and asset["format_exr"]:
asset["have_format_exr"] = True
if file.lower().endswith(".fbx") and asset["format_fbx"]:
asset["have_format_fbx"] = True
if file.lower().endswith(".glb") and asset["format_glb"]:
asset["have_format_glb"] = True
if file.lower().endswith(".mdl") and asset["format_mdl"]:
asset["have_format_mdl"] = True
database.update_asset(asset)
input("Press any enter to close...")
def fancy_list_generation(database):
console.print("Generating request list ...")
fancy_requests = []
if os.path.exists(global_data["local_path"] + os.sep + "Requests.txt"):
with open(global_data["local_path"] + os.sep + "Requests.txt") as f:
base_requests = f.read().splitlines()
for base_r in track(
base_requests, description="Requests.", total=len(base_requests)
):
asset = database.get_asset_by_name(base_r)
if len(asset) > 0:
asset_format = ""
if asset[0]["format_sbsar"]:
asset_format = asset_format + "sbsar "
if asset[0]["format_sbs"]:
asset_format = asset_format + "sbs "
if asset[0]["format_exr"]:
asset_format = asset_format + "exr "
if asset[0]["format_fbx"]:
asset_format = asset_format + "cbx "
if asset[0]["format_glb"]:
asset_format = asset_format + "glb "
if asset[0]["format_mdl"]:
asset_format = asset_format + "mdl "
fancy_requests.append(
asset[0]["name"]
+ " - "
+ asset_format.strip()
+ " - "
+ asset[0]["url"]
)
if len(fancy_requests) > 0:
file = open(
append_date(global_data["local_path"] + os.sep + "Result.txt"),
"w",
encoding="utf-8",
)
for f in fancy_requests:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def move_folders_to_new_category(database):
"""
    Checks whether the asset folder exists at its category location; if it does not, looks in
    every category for the asset and relocates it to the proper location
:param CommonDatabaseAccess database: reference to the database
"""
console.print("Generating report ...")
asset_types = database.get_all_asset_types()
all_categories = database.get_all_categories()
log = []
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
# if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
# continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
# if not os.path.exists(
# global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
# ):
# continue
console.print(f"{a["name"]} - {c["name"]}")
for asset in track(assets, description="Assets.", total=len(assets)):
expected_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
)
if not os.path.exists(expected_path):
# we did not find our asset in the right place, so we check everywhere
found = False
for a1 in asset_types:
for c1 in all_categories:
checked_path = (
global_data["local_path"]
+ os.sep
+ a1["name"]
+ os.sep
+ c1["name"]
+ os.sep
+ asset["name"]
)
if checked_path != expected_path and os.path.exists(
checked_path
):
log.append(checked_path + " >> " + expected_path)
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
os.makedirs(global_data["local_path"] + os.sep + a["name"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
os.makedirs(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
)
os.rename(checked_path, expected_path)
found = True
break
if found:
break
console.print("Moved Assets - " + str(len(log)))
console.print()
console.print("All Done !!!")
if len(log) > 0:
file = open(
append_date(
global_data["local_path"] + os.sep + "AssetCategoryChangeLog.txt"
),
"w",
encoding="utf-8",
)
for f in log:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def main_menu(database):
"""
Draw main menu
:param CommonDatabaseAccess database: reference to the database
:return:
"""
menu_title = " Select action"
menu_items = [
"[1] Create folders.",
"[2] Download all images.",
"[3] Make all icons. Where Preview.ico do not exist.",
"[4] Make all icons, but ignore where Preview.ico exists.",
"[5] Transfer all local files from _source folder to appropriate folders.",
"[6] Mark database with my files. (Do this before Generating report).",
"[7] Generate all folder report. (Do this after Marking database with my files).",
"[8] Generate existing folder report. (Do this after Marking database with my files).",
"[9] Fancy list generation. (Convert simple material list to list with format and links, looks for Requests.txt).",
"[10] Move folders if Category changed.",
"[11] Quit.",
]
menu_exit = False
while not menu_exit:
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if menu_sel == 1: # Create folders
create_folders(database)
if menu_sel == 2: # Download all images
download_all_images(database)
if menu_sel == 3: # Make all icons
make_all_icons(database, False)
if menu_sel == 4: # Make all icons
make_all_icons(database)
if menu_sel == 5: # Transfer all local files
transfer_all_local_files(database)
if menu_sel == 6: # Mark database with my files
mark_database_with_my_files(database)
if menu_sel == 7: # Generate folder report
generate_folder_report(database)
if menu_sel == 8: # Generate detail report
generate_detail_report(database)
if menu_sel == 9: # Fancy list generation
fancy_list_generation(database)
if menu_sel == 10: # Move folders to new category
move_folders_to_new_category(database)
if menu_sel == 11: # Quit
menu_exit = True
def main():
"""
    Checks the location of the database file, then opens the main menu
:return:
"""
menu_title = " Select database file"
menu_items = []
menu_items_count = 0
menu_items_references = []
local_path = os.path.dirname(sys.argv[0])
global_data["local_path"] = local_path
global_data["source_path"] = "_source"
files = os.listdir(local_path)
for f in files:
file_details = os.path.splitext(f)
if os.path.isfile(local_path + os.sep + f) and file_details[1] == ".db":
menu_items.append(f"[{menu_items_count + 1}] {f}")
menu_items_count = menu_items_count + 1
menu_items_references.append(f)
if menu_items_count == 0:
clear_console()
console.print("Database files not found next to the application files.")
input("Press any enter to close...")
elif menu_items_count == 1:
database = CommonDatabaseAccess(
db_path=local_path + os.sep + menu_items_references[0], force=False
)
main_menu(database)
else:
menu_exit = False
while not menu_exit:
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if 0 < menu_sel <= len(menu_items_references): # Initial scan
database = CommonDatabaseAccess(
db_path=local_path
+ os.sep
+ menu_items_references[menu_sel - 1],
force=False,
)
main_menu(database)
menu_exit = True
if __name__ == "__main__":
main()
|
"""
Downloading images scraped from https://substance3d.adobe.com/assets/allassets
and saved in local SQLite file
"""
import os
import time
import sys
import platform
from os import path
import requests # to get image from the web
import shutil # to save it locally
from rich import pretty
from rich.console import Console
from rich.traceback import install
from rich.progress import track
from common_database_access import CommonDatabaseAccess
import f_icon
from pathlib import Path
console = Console()
pretty.install()
install() # this is for tracing project activity
global_data = {"version": "Beta 1.2 (22.01.2022)\n"}
def clear_console():
"""Clears console view"""
command = "clear"
if os.name in ("nt", "dos"): # If Machine is running on Windows, use cls
command = "cls"
os.system(command)
def download_image(url, file_path):
if not path.exists(file_path):
r = requests.get(url, stream=True)
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
r.raw.decode_content = True
# Open a local file with wb ( write binary ) permission.
with open(file_path, "wb") as f:
shutil.copyfileobj(r.raw, f)
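# Hedged usage sketch (hypothetical URL and target path, not from the original
# script); the path.exists guard above means an existing file is never re-downloaded:
#   download_image("https://example.com/previews/asset.png",
#                  global_data["local_path"] + os.sep + "Preview.png")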
def append_date(filename):
"""adds date to the end of the filename
:param str filename: filename
:return:
"""
p = Path(filename)
return "{0}_{2}{1}".format(
Path.joinpath(p.parent, p.stem), p.suffix, time.strftime("%Y%m%d-%H%M%S")
)
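# Illustrative example (hypothetical path and timestamp): the strftime pattern
# above places the date stamp between the stem and the extension, e.g.
#   append_date("C:/assets/FileTransferReport.txt")
#   -> "C:/assets/FileTransferReport_20220122-153045.txt"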
def check_for_download(url, file_path, need_to_refresh):
# console.print(url)
if url:
if os.path.exists(file_path) and need_to_refresh:
os.rename(file_path, append_date(file_path))
download_image(url, file_path)
def convert_to_nice_name(filename) -> str:
"""
Replaces _ with spaces in filename
:param str filename: filename to convert
:return:
"""
return filename.replace("_", " ")
def convert_to_ugly_name(filename) -> str:
"""
Replaces space with _ in filename
:param str filename: filename to convert
:return:
"""
return filename.replace(" ", "_")
def create_folder_for_type(database, asset_types):
# 1. create _source folder for files to move to their location
if not os.path.exists(
global_data["local_path"] + os.sep + global_data["source_path"]
):
os.makedirs(global_data["local_path"] + os.sep + global_data["source_path"])
# 2. Now creating rest of the folders
console.print("Creating folders ...")
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
os.makedirs(global_data["local_path"] + os.sep + a["name"])
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
os.makedirs(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
)
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if not os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
os.makedirs(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
)
input("Press any enter to close...")
def create_folders(database):
menu_title = " Select asset type to create folder"
count = 1
menu_items = []
all_asset_types = database.get_all_asset_types()
for asset_type in all_asset_types:
menu_items.append(f"[{count}] {asset_type['name']}")
count = count + 1
menu_items.append(f"[{count}] All")
count = count + 1
menu_items.append(f"[{count}] Return")
menu_exit = False
while not menu_exit:
# cls()
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if 1 <= menu_sel < count - 1: # Specific asset type
# categories = database.get_all_categories_by_asset_type_id(
# all_asset_types[menu_sel - 1]["id"]
# )
create_folder_for_type(database, [all_asset_types[menu_sel - 1]])
elif menu_sel == count - 1: # all asset types
# categories = database.get_all_categories_by_id(14)
# categories = database.get_all_categories()
create_folder_for_type(database, all_asset_types)
elif menu_sel == count: # Quit
menu_exit = True
def download_all_images(database):
console.print("Downloading images ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
# console.print(asset)
check_for_download(
asset["preview_image"],
local_path + "Preview.png",
asset["have_preview_image_changed"],
)
check_for_download(
asset["details_image"],
local_path + "Details.png",
asset["have_details_image_changed"],
)
check_for_download(
asset["variant_1_image"],
local_path + "Variant1.png",
asset["have_variant_1_image_changed"],
)
check_for_download(
asset["variant_2_image"],
local_path + "Variant2.png",
asset["have_variant_2_image_changed"],
)
check_for_download(
asset["variant_3_image"],
local_path + "Variant3.png",
asset["have_variant_3_image_changed"],
)
database.set_asset_art_as_updated(asset["id"])
input("Press any enter to close...")
def make_all_icons(database, ignore_created=True):
console.print("Creating folder icons ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
# console.print(asset)
if platform.system() == "Windows":
if os.path.exists(local_path + "Preview.png") and (
not os.path.exists(local_path + "Preview.ico")
or ignore_created
):
f_icon.create_icon(local_path + "Preview.png")
else:
if os.path.exists(local_path + "Preview.png"):
f_icon.create_icon(local_path + "Preview.png")
input("Press any enter to close...")
def transfer_all_local_files(database):
console.print("Placing files in corresponding folders ...")
files = os.listdir(global_data["local_path"] + os.sep + global_data["source_path"])
asset_types = database.get_all_asset_types()
placement_log = {"moved": [], "existing": [], "missing": [], "existing_full": []}
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
for (
f
) in (
files
): # going over all files in the _source folder that we know from the start
if os.path.exists( # checking, that file is still there. can be moved already
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f
):
if not os.path.exists( # checking, that this file already exists at destination.
global_data[
"local_path"
] # if it is, then we have a double
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
):
if (
f.lower().endswith(".jpg")
and convert_to_nice_name(f.lower()).find(
asset["name"].lower()
)
>= 0
):
# if it is jpeg, then extra case. We check if asset name is inside file name
os.rename(
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f,
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f,
)
placement_log["moved"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
elif not f.lower().endswith(
".jpg"
): # if this is not a jpg, then we check name
# without extension to match with asset name
file_details = os.path.splitext(f)
if (
convert_to_nice_name(file_details[0].lower())
== asset["name"].lower()
):
os.rename(
global_data["local_path"]
+ os.sep
+ global_data["source_path"]
+ os.sep
+ f,
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f,
)
placement_log["moved"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
else: # we had a double name, so mark it as double
placement_log["existing_full"].append(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
+ f
)
placement_log["existing"].append(f)
# generating report
files = os.listdir(global_data["local_path"] + os.sep + global_data["source_path"])
placement_log["missing"] = list(set(files) - set(placement_log["existing"]))
file = open(
append_date(global_data["local_path"] + os.sep + "FileTransferReport.txt"),
"w",
encoding="utf-8",
)
file.write(f'Moved files({len(placement_log["moved"])}): \n')
file.write("\n")
for f in placement_log["moved"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Existed files({len(placement_log["existing_full"])}): \n')
file.write("\n")
for f in placement_log["existing_full"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Missing locations for files({len(placement_log["missing"])}): \n')
file.write("\n")
for f in placement_log["missing"]:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def generate_detail_report(database):
console.print("Generating detail report ...")
asset_types = database.get_all_asset_types()
placement_log = {"have": [], "missing": [], "need": []}
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
count = 0
have = 0
missing = ""
if asset["format_sbsar"]:
count = count + 1
if asset["have_format_sbsar"]:
have = have + 1
else:
missing = missing + "sbsar "
changed_record = True
if asset["format_sbs"]:
count = count + 1
if asset["have_format_sbs"]:
have = have + 1
else:
missing = missing + "sbs "
if asset["format_exr"]:
count = count + 1
if asset["have_format_exr"]:
have = have + 1
else:
missing = missing + "exr "
if asset["format_fbx"]:
count = count + 1
if asset["have_format_fbx"]:
have = have + 1
else:
missing = missing + "fbx "
if asset["format_glb"]:
count = count + 1
if asset["have_format_glb"]:
have = have + 1
else:
missing = missing + "glb "
if asset["format_mdl"]:
count = count + 1
if asset["have_format_mdl"]:
have = have + 1
else:
missing = missing + "mdl "
if count == have:
placement_log["have"].append(
a["name"] + " > " + c["name"] + " > " + asset["name"]
)
elif count != have and have > 0:
placement_log["missing"].append(
a["name"]
+ " > "
+ c["name"]
+ " > "
+ asset["name"]
+ " : missing formats "
+ missing
)
else:
placement_log["need"].append(
a["name"] + " > " + c["name"] + " > " + asset["name"]
)
file = open(
append_date(global_data["local_path"] + os.sep + "AssetDetailsCountReport.txt"),
"w",
encoding="utf-8",
)
file.write(f'Have assets({len(placement_log["have"])}): \n')
file.write("\n")
for f in placement_log["have"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Missing assets({len(placement_log["missing"])}): \n')
file.write("\n")
for f in placement_log["missing"]:
file.write(f + "\n")
file.write("\n")
file.write(f'Needed assets({len(placement_log["need"])}): \n')
file.write("\n")
for f in placement_log["need"]:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def generate_folder_report(database):
console.print("Generating folder report ...")
asset_types = database.get_all_asset_types()
placement_log = []
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
# if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
# continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
# if not os.path.exists(
# global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
# ):
# continue
console.print(f"{a['name']} - {c['name']}")
have = 0
missing = 0
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
have = have + 1
else:
missing = missing + 1
placement_log.append(f"{a['name']} - {c['name']} (Have {have}; Missing {missing})")
file = open(
append_date(global_data["local_path"] + os.sep + "AssetFolderCountReport.txt"),
"w",
encoding="utf-8",
)
for f in placement_log:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def mark_database_with_my_files(database):
console.print("Checking local files for the database ...")
asset_types = database.get_all_asset_types()
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
if os.path.exists(
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
):
local_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
+ os.sep
)
all_files = []
for lp, currentDirectory, files in os.walk(local_path):
all_files.extend(files)
asset["have_format_sbsar"] = False
asset["have_format_sbs"] = False
asset["have_format_exr"] = False
asset["have_format_fbx"] = False
asset["have_format_glb"] = False
asset["have_format_mdl"] = False
for file in all_files:
if file.lower().endswith(".sbsar") and asset["format_sbsar"]:
asset["have_format_sbsar"] = True
if file.lower().endswith(".sbs") and asset["format_sbs"]:
asset["have_format_sbs"] = True
if file.lower().endswith(".exr") and asset["format_exr"]:
asset["have_format_exr"] = True
if file.lower().endswith(".fbx") and asset["format_fbx"]:
asset["have_format_fbx"] = True
if file.lower().endswith(".glb") and asset["format_glb"]:
asset["have_format_glb"] = True
if file.lower().endswith(".mdl") and asset["format_mdl"]:
asset["have_format_mdl"] = True
database.update_asset(asset)
input("Press any enter to close...")
def fancy_list_generation(database):
console.print("Generating request list ...")
fancy_requests = []
if os.path.exists(global_data["local_path"] + os.sep + "Requests.txt"):
with open(global_data["local_path"] + os.sep + "Requests.txt") as f:
base_requests = f.read().splitlines()
for base_r in track(
base_requests, description="Requests.", total=len(base_requests)
):
asset = database.get_asset_by_name(base_r)
if len(asset) > 0:
asset_format = ""
if asset[0]["format_sbsar"]:
asset_format = asset_format + "sbsar "
if asset[0]["format_sbs"]:
asset_format = asset_format + "sbs "
if asset[0]["format_exr"]:
asset_format = asset_format + "exr "
if asset[0]["format_fbx"]:
asset_format = asset_format + "cbx "
if asset[0]["format_glb"]:
asset_format = asset_format + "glb "
if asset[0]["format_mdl"]:
asset_format = asset_format + "mdl "
fancy_requests.append(
asset[0]["name"]
+ " - "
+ asset_format.strip()
+ " - "
+ asset[0]["url"]
)
if len(fancy_requests) > 0:
file = open(
append_date(global_data["local_path"] + os.sep + "Result.txt"),
"w",
encoding="utf-8",
)
for f in fancy_requests:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def move_folders_to_new_category(database):
"""
    Checks whether the asset folder exists at its category location; if it does not, looks in
    every category for the asset and relocates it to the proper location
:param CommonDatabaseAccess database: reference to the database
"""
console.print("Generating report ...")
asset_types = database.get_all_asset_types()
all_categories = database.get_all_categories()
log = []
for a in asset_types: # track(asset_types, description="Types."):
categories = database.get_all_categories_by_asset_type_id(a["id"])
# if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
# continue
for c in categories: # track(categories, description="Categories."):
assets = database.get_all_assets_by_category(c["id"])
# if not os.path.exists(
# global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
# ):
# continue
console.print(f"{a['name']} - {c['name']}")
for asset in track(assets, description="Assets.", total=len(assets)):
expected_path = (
global_data["local_path"]
+ os.sep
+ a["name"]
+ os.sep
+ c["name"]
+ os.sep
+ asset["name"]
)
if not os.path.exists(expected_path):
# we did not find our asset in the right place, so we check everywhere
found = False
for a1 in asset_types:
for c1 in all_categories:
checked_path = (
global_data["local_path"]
+ os.sep
+ a1["name"]
+ os.sep
+ c1["name"]
+ os.sep
+ asset["name"]
)
if checked_path != expected_path and os.path.exists(
checked_path
):
log.append(checked_path + " >> " + expected_path)
if not os.path.exists(global_data["local_path"] + os.sep + a["name"]):
os.makedirs(global_data["local_path"] + os.sep + a["name"])
if not os.path.exists(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
):
os.makedirs(
global_data["local_path"] + os.sep + a["name"] + os.sep + c["name"]
)
os.rename(checked_path, expected_path)
found = True
break
if found:
break
console.print("Moved Assets - " + str(len(log)))
console.print()
console.print("All Done !!!")
if len(log) > 0:
file = open(
append_date(
global_data["local_path"] + os.sep + "AssetCategoryChangeLog.txt"
),
"w",
encoding="utf-8",
)
for f in log:
file.write(f + "\n")
file.close()
input("Press any enter to close...")
def main_menu(database):
"""
Draw main menu
:param CommonDatabaseAccess database: reference to the database
:return:
"""
menu_title = " Select action"
menu_items = [
"[1] Create folders.",
"[2] Download all images.",
"[3] Make all icons. Where Preview.ico do not exist.",
"[4] Make all icons, but ignore where Preview.ico exists.",
"[5] Transfer all local files from _source folder to appropriate folders.",
"[6] Mark database with my files. (Do this before Generating report).",
"[7] Generate all folder report. (Do this after Marking database with my files).",
"[8] Generate existing folder report. (Do this after Marking database with my files).",
"[9] Fancy list generation. (Convert simple material list to list with format and links, looks for Requests.txt).",
"[10] Move folders if Category changed.",
"[11] Quit.",
]
menu_exit = False
while not menu_exit:
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if menu_sel == 1: # Create folders
create_folders(database)
if menu_sel == 2: # Download all images
download_all_images(database)
if menu_sel == 3: # Make all icons
make_all_icons(database, False)
if menu_sel == 4: # Make all icons
make_all_icons(database)
if menu_sel == 5: # Transfer all local files
transfer_all_local_files(database)
if menu_sel == 6: # Mark database with my files
mark_database_with_my_files(database)
if menu_sel == 7: # Generate folder report
generate_folder_report(database)
if menu_sel == 8: # Generate detail report
generate_detail_report(database)
if menu_sel == 9: # Fancy list generation
fancy_list_generation(database)
if menu_sel == 10: # Move folders to new category
move_folders_to_new_category(database)
if menu_sel == 11: # Quit
menu_exit = True
def main():
"""
    Checks the location of the database file, then opens the main menu
:return:
"""
menu_title = " Select database file"
menu_items = []
menu_items_count = 0
menu_items_references = []
local_path = os.path.dirname(sys.argv[0])
global_data["local_path"] = local_path
global_data["source_path"] = "_source"
files = os.listdir(local_path)
for f in files:
file_details = os.path.splitext(f)
if os.path.isfile(local_path + os.sep + f) and file_details[1] == ".db":
menu_items.append(f"[{menu_items_count + 1}] {f}")
menu_items_count = menu_items_count + 1
menu_items_references.append(f)
if menu_items_count == 0:
clear_console()
console.print("Database files not found next to the application files.")
input("Press any enter to close...")
elif menu_items_count == 1:
database = CommonDatabaseAccess(
db_path=local_path + os.sep + menu_items_references[0], force=False
)
main_menu(database)
else:
menu_exit = False
while not menu_exit:
clear_console()
console.print("version " + global_data["version"])
console.print(menu_title + "")
for m_i in menu_items:
console.print(m_i + "")
console.print("")
user_input = input("Enter a number: ")
if user_input.isnumeric():
menu_sel = int(user_input)
if 0 < menu_sel <= len(menu_items_references): # Initial scan
database = CommonDatabaseAccess(
db_path=local_path
+ os.sep
+ menu_items_references[menu_sel - 1],
force=False,
)
main_menu(database)
menu_exit = True
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 22:33:48 2019
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
from sklearn.metrics import roc_auc_score
import utils , utils_cat
utils.start(__file__)
#==============================================================================
SEED = np.random.randint(9999)
print('SEED:', SEED)
DROP = [
# 'f002_EngineVersion', 'f002_AvSigVersion', 'f002_AppVersion',
#
# 'f003_AvSigVersion', 'f003_OsBuildLab', 'f003_Census_OSVersion',
# 'f003_date_min', 'f003_date_max'
]
NFOLD = 5
LOOP = 1
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.05,
'max_depth': -1,
'num_leaves': 2**6 -1,
'max_bin': 127,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.9,
'subsample': 0.7,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
}
NROUND = 500
ESR = 50
VERBOSE_EVAL = 25
TRAIN_TH = 0.6
VALID_TH = 0.8
outpath_tr = '../data/train_f005_1.f'
outpath_te = '../data/test_f005_1.f'
# =============================================================================
# load
# =============================================================================
files_tr = sorted(glob('../data/f005/train_f005*.f'))[20:40]
[print(i,f) for i,f in enumerate(files_tr)]
X_train = pd.concat([
pd.read_feather(f).sample(frac=0.5, random_state=SEED) for f in tqdm(files_tr, mininterval=60)
], axis=1)
y_train = utils.load_target().sample(frac=0.5, random_state=SEED)['HasDetections']
if len(DROP)>0:
X_train.drop(DROP, axis=1, inplace=True)
#adv = pd.read_csv('../data/oof_802_adv.py.csv').iloc[:8921483].oof
#adv_th = adv.quantile(VALID_TH)
#
#X_valid = X_train[adv>adv.quantile(VALID_TH)]
#y_valid = y_train[adv>adv.quantile(VALID_TH)]
#
#X_train = X_train[adv<=adv.quantile(TRAIN_TH)]
#y_train = y_train[adv<=adv.quantile(TRAIN_TH)]
if X_train.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
print('no dup :) ')
print(f'X_train.shape {X_train.shape}')
#print(f'X_valid.shape {X_valid.shape}')
gc.collect()
CAT = list( set(X_train.columns)&set(utils_cat.ALL))
print(f'CAT: {CAT}')
# =============================================================================
# hold out
# =============================================================================
dtrain = lgb.Dataset(X_train, y_train.values,
categorical_feature=CAT,
free_raw_data=False)
#dvalid = lgb.Dataset(X_valid, y_valid.values,
# categorical_feature=CAT,
# free_raw_data=False)
gc.collect()
model = lgb.train(params=param, train_set=dtrain, num_boost_round=NROUND,
# valid_sets=[dtrain, dvalid],
# valid_names=['train','valid'],
# feval=ex.eval_auc,
categorical_feature=CAT,
# early_stopping_rounds=ESR,
verbose_eval=VERBOSE_EVAL)
imp = ex.getImp(model)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
# =============================================================================
#
# =============================================================================
imp = pd.read_csv('LOG/imp_005-2_agg_each_lgb_1.py.csv')
COL = imp.head(30).feature.tolist()
X_train = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr, mininterval=60)
], axis=1)[COL]
X_train.to_feather(outpath_tr)
files_te = sorted(glob('../data/f005/test_f005*.f'))[20:40]
X_test = pd.concat([
pd.read_feather(f) for f in tqdm(files_te, mininterval=60)
], axis=1)[COL]
X_test.to_feather(outpath_te)
#==============================================================================
utils.end(__file__)
#utils.stop_instance()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 22:33:48 2019
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
from sklearn.metrics import roc_auc_score
import utils , utils_cat
utils.start(__file__)
#==============================================================================
SEED = np.random.randint(9999)
print('SEED:', SEED)
DROP = [
# 'f002_EngineVersion', 'f002_AvSigVersion', 'f002_AppVersion',
#
# 'f003_AvSigVersion', 'f003_OsBuildLab', 'f003_Census_OSVersion',
# 'f003_date_min', 'f003_date_max'
]
NFOLD = 5
LOOP = 1
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.05,
'max_depth': -1,
'num_leaves': 2**6 -1,
'max_bin': 127,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.9,
'subsample': 0.7,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
}
NROUND = 500
ESR = 50
VERBOSE_EVAL = 25
TRAIN_TH = 0.6
VALID_TH = 0.8
outpath_tr = '../data/train_f005_1.f'
outpath_te = '../data/test_f005_1.f'
# =============================================================================
# load
# =============================================================================
files_tr = sorted(glob('../data/f005/train_f005*.f'))[20:40]
[print(i,f) for i,f in enumerate(files_tr)]
X_train = pd.concat([
pd.read_feather(f).sample(frac=0.5, random_state=SEED) for f in tqdm(files_tr, mininterval=60)
], axis=1)
y_train = utils.load_target().sample(frac=0.5, random_state=SEED)['HasDetections']
if len(DROP)>0:
X_train.drop(DROP, axis=1, inplace=True)
#adv = pd.read_csv('../data/oof_802_adv.py.csv').iloc[:8921483].oof
#adv_th = adv.quantile(VALID_TH)
#
#X_valid = X_train[adv>adv.quantile(VALID_TH)]
#y_valid = y_train[adv>adv.quantile(VALID_TH)]
#
#X_train = X_train[adv<=adv.quantile(TRAIN_TH)]
#y_train = y_train[adv<=adv.quantile(TRAIN_TH)]
if X_train.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
print('no dup :) ')
print(f'X_train.shape {X_train.shape}')
#print(f'X_valid.shape {X_valid.shape}')
gc.collect()
CAT = list( set(X_train.columns)&set(utils_cat.ALL))
print(f'CAT: {CAT}')
# =============================================================================
# hold out
# =============================================================================
dtrain = lgb.Dataset(X_train, y_train.values,
categorical_feature=CAT,
free_raw_data=False)
#dvalid = lgb.Dataset(X_valid, y_valid.values,
# categorical_feature=CAT,
# free_raw_data=False)
gc.collect()
model = lgb.train(params=param, train_set=dtrain, num_boost_round=NROUND,
# valid_sets=[dtrain, dvalid],
# valid_names=['train','valid'],
# feval=ex.eval_auc,
categorical_feature=CAT,
# early_stopping_rounds=ESR,
verbose_eval=VERBOSE_EVAL)
imp = ex.getImp(model)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
# =============================================================================
#
# =============================================================================
imp = pd.read_csv('LOG/imp_005-2_agg_each_lgb_1.py.csv')
COL = imp.head(30).feature.tolist()
X_train = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr, mininterval=60)
], axis=1)[COL]
X_train.to_feather(outpath_tr)
files_te = sorted(glob('../data/f005/test_f005*.f'))[20:40]
X_test = pd.concat([
pd.read_feather(f) for f in tqdm(files_te, mininterval=60)
], axis=1)[COL]
X_test.to_feather(outpath_te)
#==============================================================================
utils.end(__file__)
#utils.stop_instance()
|
import os
import re
import ctypes
import zlib
import functools
from urllib.parse import urlparse
from collections import namedtuple
from copy import deepcopy
from datafaucet import metadata
from datafaucet.paths import rootdir
from datafaucet._utils import merge, to_ordered_dict
from datafaucet.yaml import YamlDict
from datafaucet.download import download
import datafaucet.logging as log
Urn = namedtuple('Urn', ['scheme', 'user', 'password', 'host', 'port', 'path', 'params', 'query', 'fragment'])
def filter_empty(lst):
return [x for x in lst if x != '' and x is not None]
def tsplit(s, sep, shift='left'):
s = s.split(sep)
if shift=='left':
return (s[0],s[1]) if len(s)>1 else ('', s[0])
else: # right
return (s[0],s[1]) if len(s)>1 else (s[0], '')
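# A small sketch of the shift behaviour (hypothetical inputs): when the
# separator is missing, shift='left' keeps the whole string in the second
# slot, shift='right' keeps it in the first one.
#   tsplit("user:pass", ":", "right")  -> ("user", "pass")
#   tsplit("user", ":", "right")       -> ("user", "")
#   tsplit("hostonly", ":")            -> ("", "hostonly")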
def urnparse(s):
scheme, url = tsplit(s, '//')
path_only = ((not scheme) or
scheme.startswith('jdbc:sqlite') or
scheme.startswith('s3a'))
url = urlparse(url) if path_only else urlparse('scheme://'+url)
path = url.path
query = url.query
scheme = filter_empty(scheme.split(':'))
auth, netloc = tsplit(url.netloc, '@')
user, password = tsplit(auth, ':', 'right')
host, port = tsplit(netloc,':', 'right')
# parsing oracle thin urn for user, password
oracle_thin_scheme = len(scheme)==4 and ':'.join(scheme[0:3])=='jdbc:oracle:thin'
if oracle_thin_scheme and scheme[-1][-1]=='@':
o_user, o_password = tsplit(scheme[-1].rstrip('@'), '/', 'right')
user = o_user or user
password = o_password or password
# parsing oracle params
if oracle_thin_scheme:
path, *params = path.split(',')
query = query + '&'.join(filter_empty(params))
# parsing mssql params
jdbc_mssql_scheme = len(scheme)==2 and ':'.join(scheme[0:2])=='jdbc:sqlserver'
if jdbc_mssql_scheme:
netloc, *params = netloc.split(';')
host, port = tsplit(netloc,':', 'right')
query = query + '&'.join(filter_empty(params))
params = filter_empty(query.split('&'))
params = [tuple(p.split('=')) for p in params]
urn = Urn(scheme,user, password, host, port, path, params, query, url.fragment)
return urn
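# Illustrative example (hypothetical URL): a JDBC URL is decomposed into
# scheme parts, credentials, host/port and path:
#   urnparse("jdbc:mysql://usr:pwd@db.local:3306/sales")
#   -> Urn(scheme=['jdbc', 'mysql'], user='usr', password='pwd',
#          host='db.local', port='3306', path='/sales',
#          params=[], query='', fragment='')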
def path_to_jdbc(md, provider=False):
database = md['database']
table = md['table']
path = md['path'] or ''
if md['format']!='jdbc':
return database, table, path
e = filter_empty(path.split('/'))
if len(e)==0:
pass
elif len(e)==1:
if provider:
database = e[0] or None
path = None
else:
table = e[0] or None
path = None
else:
database = e[0] or None
table = e[1] or None
path = None
return database, table, path
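# Hedged example (hypothetical metadata): for a jdbc resource whose path still
# carries "<database>/<table>", the two segments are split out and the path is
# cleared:
#   path_to_jdbc({'format': 'jdbc', 'database': None, 'table': None,
#                 'path': 'sales/orders'})
#   -> ('sales', 'orders', None)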
def get_default_md():
f = [
'service',
'format',
'version',
'host',
'port',
'driver',
'database',
'schema',
'table',
'user',
'password',
'path',
'options',
'provider'
]
return dict(zip(f, [None for _ in range(len(f))]))
def metadata_overrides(md, host=None, service=None, port=None, user=None, password=None,
driver=None, database=None, schema=None, table=None, format=None,
version=None, hostname=None, username=None, **options):
d = {}
d['path'] = md.get('url') or md.get('path')
d['provider'] = md.get('provider')
d['host'] = host or hostname or md['host'] or md.get('hostname')
d['port'] = port or md['port']
d['service'] = service or md['service']
d['format'] = format or md['format']
d['version'] = version or md['version']
d['user'] = user or username or md['user'] or md.get('username')
d['password'] = password or md['password']
d['database'] = database or md['database']
d['schema'] = schema or md['schema']
d['table'] = table or md['table']
d['driver'] = driver or md['driver']
d['options'] = merge(md['options'], options)
if database or table:
d['path'] = None
return d
def resource_from_dict(d):
md = get_default_md()
d['path'] = d.get('path') or d.get('url')
for k in md.keys():
md[k] = d.get(k)
return md
def resource_from_urn(urn):
md = get_default_md()
query = get_sql_query(urn.path)
if query:
md['table'] = query
md['format'] = 'jdbc'
return md
params = dict(urn.params)
if urn.scheme and urn.scheme[0]=='jdbc':
service, format = urn.scheme[1], urn.scheme[0]
else:
service = urn.scheme[0] if urn.scheme else ''
format = get_format({'format':None, 'service': service, 'path': urn.path})
compression = get_compression(urn.path)
if compression:
params['compression'] = compression
md['service'] = service
md['format'] = format
md['host'] = urn.host
md['port'] = urn.port
md['path'] = urn.path
md['user'] = urn.user
md['password'] = urn.password
md['options'] = params
for k,v in md.items():
if not v:
md[k] = None
return md
def get_sql_query(s):
# if SQL query is detected,
# wrap the resource path as a temp table
sql_query = s
sql_query = sql_query.replace('\n', ' ')
sql_query = sql_query.replace('\t', ' ')
sql_query = sql_query.replace('\r', ' ')
sql_query = ' '.join(sql_query.split())
sql_query = sql_query.rstrip(' ')
sql_query = sql_query.rstrip(';')
sql_query = sql_query.lower()
    # simple sql test: check for from or where prefixed with a space
# indicates multiple words
if any([x in sql_query for x in [' from ', ' where ']]):
return sql_query
else:
return None
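# Illustrative behaviour (hypothetical strings): only text containing ' from '
# or ' where ' is treated as a SQL query, anything else returns None:
#   get_sql_query('SELECT id FROM events;') -> 'select id from events'
#   get_sql_query('data/events.csv')        -> None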
def to_resource(url_alias=None, *args, **kwargs):
md = None
# if a dict, create from dictionary
if isinstance(url_alias, dict):
md = resource_from_dict(url_alias)
# if a string, and a metadata profile is loaded, check for aliases
if metadata.profile():
if not md and url_alias in metadata.profile().get('resources', {}).keys():
md = metadata.profile()['resources'][url_alias]
if not md and url_alias in metadata.profile().get('providers', {}).keys():
md = metadata.profile()['providers'][url_alias]
# if nothing found yet, interpret as a urn/path
if not md and url_alias:
md = resource_from_urn(urnparse(url_alias))
# empty default
if not md:
md = get_default_md()
# sanitize path if it's a url or a query
if md['path']:
url_md = resource_from_urn(urnparse(md['path']))
md = merge(url_md, md)
md['path'] = url_md['path']
# override using kwargs
md = metadata_overrides(md, **kwargs)
if 'hostname' in md:
del md['hostname']
if 'username' in md:
del md['username']
return md
def get_compression(path):
if not path:
return None
_, ext = os.path.splitext(path)
d = {
'.lz': 'lz',
'.lzo': 'lzo',
'.gz': 'gzip',
'.bz2': 'bzip2',
}
return d.get(ext)
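# Quick sketch (hypothetical paths): the codec is inferred from the file
# extension, unknown extensions map to None:
#   get_compression('data/events.csv.gz') -> 'gzip'
#   get_compression('data/events.csv')    -> None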
def get_format(md):
if md['format']:
return md['format']
# get the provider format
if md['service'] in ['sqlite', 'mysql', 'postgres', 'mssql', 'oracle']:
return 'jdbc'
if md['service'] in ['mongodb']:
return 'mongo'
if md['service'] in ['elastic']:
return 'json'
# check if path is a query
query = get_sql_query(md['path'])
if query:
return 'jdbc'
# extract the format from file extension
    # '.gz', '.bz2', '.zip', '.snappy', '.deflate'
path, ext = os.path.splitext(md['path'])
if get_compression(md['path']):
_, ext = os.path.splitext(path)
if ext and ext[0]=='.':
ext = ext[1:]
# default is None
return ext or None
def get_driver(service):
drivers = {
'sqlite': 'org.sqlite.JDBC',
'mysql': 'com.mysql.cj.jdbc.Driver',
'postgres': 'org.postgresql.Driver',
'mssql': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
'oracle': 'oracle.jdbc.driver.OracleDriver',
'clickhouse': 'ru.yandex.clickhouse.ClickHouseDriver'
}
return drivers.get(service)
def get_port(service):
ports = {
'http':80,
'https':443,
'hdfs': 8020,
'mysql': 3306,
'postgres': 5432,
'mssql': 1433,
'mongodb': 27017,
'oracle': 1521,
'clickhouse':8123,
'elastic': 9200,
's3a':9000
}
return ports.get(service)
def get_version(service):
versions = {
'hdfs': '3.1.1',
'sqlite': '3.25.2',
'mysql': '8.0.12',
'postgres': '42.2.5',
'mssql': '6.4.0.jre8',
'mongodb': '2.4.1',
'oracle': '12.2.0.1',
'clickhouse':'0.1.54',
's3a':'3.1.1'
}
return versions.get(service)
def get_url(md):
service = md['service']
path = md['path']
host_port = f"{md["host"]}:{md["port"]}" if md['port'] else md['host']
if service in ['local', 'file']:
url = path
elif service == 'sqlite':
url = f"jdbc:sqlite:{path}"
elif service == 'hdfs':
url = f"hdfs://{host_port}{md["path"]}"
elif service in ['http', 'https']:
url = f"{service}://{host_port}{md["path"]}"
elif service in ['minio', 's3a']:
url = f"s3a://{md["path"]}"
elif service == 'mysql':
url = f"jdbc:mysql://{host_port}/{md["database"]}"
elif service == 'postgres':
url = f"jdbc:postgresql://{host_port}/{md["database"]}"
elif service == 'clickhouse':
url = f"jdbc:clickhouse://{host_port}/{md["database"]}"
elif service == 'mssql':
url = f"jdbc:sqlserver://{host_port};databaseName={md["database"]}"
elif service == 'oracle':
url = f"jdbc:oracle:thin:@//{host_port}/{md["database"]}"
elif service == 'elastic':
url = f"http://{host_port}/{md["database"]}"
elif service == 'mongodb':
url = f"mongodb://{md["user"]}:{md["password"]}@{host_port}/{md["path"]}"
return url
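# Illustrative example (hypothetical metadata, only the fields used above):
#   get_url({'service': 'postgres', 'path': None, 'host': 'db.local',
#            'port': 5432, 'database': 'sales'})
#   -> 'jdbc:postgresql://db.local:5432/sales'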
def process_metadata(md):
    # update format from the service / path
md['format'] = get_format(md)
# if no service, at this point use file
md['service'] = md['service'] or 'file'
# standardize some service names
services = {
'minio': 's3a',
'local': 'file'
}
md['service'] = services.get(md['service'], md['service'])
# if no host, use localhost
md['host'] = md['host'] or '127.0.0.1'
# if local file system and rel path, prepend rootdir
if md['service'] in ['file', 'sqlite'] and not os.path.isabs(md['path']):
md['path'] = os.path.join(rootdir(), md['path'])
# if service is s3a, remove leading '/'
if md['service'] == 's3a' and md['path']:
md['path'] = md['path'].lstrip('/')
# if service is mongodb, use '.' instead of '/'
if md['service'] == 'mongodb' and md['path']:
md['path'] = md['path'].replace('/', '.')
# generate database, table from path
if md['format']=='jdbc':
md['database'], md['table'], md['path'] = path_to_jdbc(md)
# set driver
md['driver'] = md['driver'] or get_driver(md['service'])
    # if no table is given, fall back to a query that returns no rows
md['table'] = md['table'] or 'SELECT 0 as result where 1 = 0'
# if schema is not yet defined,
# take the default for each service
default_schemas = {
'mysql': md['database'],
'mssql': 'dbo',
'postgres': 'public',
'clickhouse': 'default',
'oracle': md['user']
}
md['schema'] = md['schema'] or default_schemas.get(md['service'])
query = get_sql_query(md['table'])
if query and not query.endswith('as _query'):
md['table'] = '( {} ) as _query'.format(query)
md['version'] = md['version'] or get_version(md['service'])
md['port'] = md['port'] or get_port(md['service'])
md['port'] = int(md['port']) if md['port'] else None
md['url'] = get_url(md)
if not isinstance(md['options'], dict):
md['options'] = {}
compression = get_compression(md['path'])
if md['format']!='jdbc' and compression:
md['options']['compression'] = compression
h_list = []
for k in ['url', 'format', 'table', 'database']:
v = zlib.crc32(md[k].encode()) if md[k] else 0
h_list.append(v)
md['hash'] = functools.reduce(lambda a,b : a^b, h_list)
md['hash'] = hex(ctypes.c_size_t(md['hash']).value)
return md
def assemble_metadata(md):
keys = [
'hash',
'url',
'service',
'version',
'format',
'host'
]
if md['service'] != 'file':
keys.append('port')
if md['service'] == 's3a' or md['format'] == 'jdbc':
keys.extend([
'user',
'password'])
if md['format'] == 'jdbc':
keys.extend([
'driver',
'database',
'schema',
'table'])
keys.append('options')
return YamlDict(to_ordered_dict(md, keys))
def Resource(path_or_alias_or_url=None, provider_path_or_alias_or_url=None,
host=None, service=None, port=None, user=None, password=None,
driver=None, database=None, schema=None, table=None, format=None,
version=None, hostname=None, username=None, **options):
prov = provider_path_or_alias_or_url
path = path_or_alias_or_url
# get the resource, by alias metadata or by url
rmd = to_resource(path, host=host, service=service, port=port,
user=user, password=password, driver=driver, database=database,
schema=schema, table=table, format=format, version=version,
hostname=hostname, username=username, **options)
# get the provider by reference from the resource, if available
prov = prov or rmd.get('provider')
# get the provider, by alias metadata or by url
pmd = to_resource(prov)
# check if the provider is a jdbc connection, if so set it
pmd['database'], pmd['table'], pmd['path'] = path_to_jdbc(pmd, True)
# merge provider and resource metadata
md = merge(pmd,rmd)
# concatenate paths, if no table is defined
if md['table']:
md['path'] = None
else:
md['path'] = os.path.join(pmd['path'] or '', rmd['path'] or '')
#process metadata
md = process_metadata(md)
#todo: verify resource
# check format and other minimum requirements are met
# assemble output
md = assemble_metadata(md)
return md
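# A hedged usage sketch (hypothetical URL and aliases; actual resolution
# depends on the loaded metadata profile): both calls below would be expected
# to yield a metadata dict with url, format, driver, database, table, etc.
#   Resource('postgres://usr:pwd@db.local:5432/sales/orders')
#   Resource('orders', 'my_postgres_provider')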
def get_local(md):
if md['service'].startswith('http'):
md['path'] = download(md['url'], md['format'])
md['service'] = 'file'
md['url'] = None
return Resource(md)
else:
return md
|
import os
import re
import ctypes
import zlib
import functools
from urllib.parse import urlparse
from collections import namedtuple
from copy import deepcopy
from datafaucet import metadata
from datafaucet.paths import rootdir
from datafaucet._utils import merge, to_ordered_dict
from datafaucet.yaml import YamlDict
from datafaucet.download import download
import datafaucet.logging as log
Urn = namedtuple('Urn', ['scheme', 'user', 'password', 'host', 'port', 'path', 'params', 'query', 'fragment'])
def filter_empty(lst):
return [x for x in lst if x != '' and x is not None]
def tsplit(s, sep, shift='left'):
s = s.split(sep)
if shift=='left':
return (s[0],s[1]) if len(s)>1 else ('', s[0])
else: # right
return (s[0],s[1]) if len(s)>1 else (s[0], '')
def urnparse(s):
scheme, url = tsplit(s, '//')
path_only = ((not scheme) or
scheme.startswith('jdbc:sqlite') or
scheme.startswith('s3a'))
url = urlparse(url) if path_only else urlparse('scheme://'+url)
path = url.path
query = url.query
scheme = filter_empty(scheme.split(':'))
auth, netloc = tsplit(url.netloc, '@')
user, password = tsplit(auth, ':', 'right')
host, port = tsplit(netloc,':', 'right')
# parsing oracle thin urn for user, password
oracle_thin_scheme = len(scheme)==4 and ':'.join(scheme[0:3])=='jdbc:oracle:thin'
if oracle_thin_scheme and scheme[-1][-1]=='@':
o_user, o_password = tsplit(scheme[-1].rstrip('@'), '/', 'right')
user = o_user or user
password = o_password or password
# parsing oracle params
if oracle_thin_scheme:
path, *params = path.split(',')
query = query + '&'.join(filter_empty(params))
# parsing mssql params
jdbc_mssql_scheme = len(scheme)==2 and ':'.join(scheme[0:2])=='jdbc:sqlserver'
if jdbc_mssql_scheme:
netloc, *params = netloc.split(';')
host, port = tsplit(netloc,':', 'right')
query = query + '&'.join(filter_empty(params))
params = filter_empty(query.split('&'))
params = [tuple(p.split('=')) for p in params]
urn = Urn(scheme,user, password, host, port, path, params, query, url.fragment)
return urn
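# Illustrative sketch (not part of the original module; the connection values
# below are hypothetical): urnparse splits a jdbc URL into its scheme parts,
# credentials, host/port and path.
def _example_urnparse():
    urn = urnparse('jdbc:postgresql://scott:tiger@db.example.com:5432/sales')
    # urn.scheme == ['jdbc', 'postgresql']
    # urn.user == 'scott', urn.password == 'tiger'
    # urn.host == 'db.example.com', urn.port == '5432'
    # urn.path == '/sales'
    return urn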
def path_to_jdbc(md, provider=False):
database = md['database']
table = md['table']
path = md['path'] or ''
if md['format']!='jdbc':
return database, table, path
e = filter_empty(path.split('/'))
if len(e)==0:
pass
elif len(e)==1:
if provider:
database = e[0] or None
path = None
else:
table = e[0] or None
path = None
else:
database = e[0] or None
table = e[1] or None
path = None
return database, table, path
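# Illustrative sketch (hypothetical values, not from the original source): for a
# jdbc resource whose path has two segments, path_to_jdbc reads the first as the
# database and the second as the table.
def _example_path_to_jdbc():
    md = {'format': 'jdbc', 'database': None, 'table': None, 'path': '/sales/orders'}
    database, table, path = path_to_jdbc(md)
    # database == 'sales', table == 'orders', path is None
    return database, table, path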
def get_default_md():
f = [
'service',
'format',
'version',
'host',
'port',
'driver',
'database',
'schema',
'table',
'user',
'password',
'path',
'options',
'provider'
]
return dict(zip(f, [None for _ in range(len(f))]))
def metadata_overrides(md, host=None, service=None, port=None, user=None, password=None,
driver=None, database=None, schema=None, table=None, format=None,
version=None, hostname=None, username=None, **options):
d = {}
d['path'] = md.get('url') or md.get('path')
d['provider'] = md.get('provider')
d['host'] = host or hostname or md['host'] or md.get('hostname')
d['port'] = port or md['port']
d['service'] = service or md['service']
d['format'] = format or md['format']
d['version'] = version or md['version']
d['user'] = user or username or md['user'] or md.get('username')
d['password'] = password or md['password']
d['database'] = database or md['database']
d['schema'] = schema or md['schema']
d['table'] = table or md['table']
d['driver'] = driver or md['driver']
d['options'] = merge(md['options'], options)
if database or table:
d['path'] = None
return d
def resource_from_dict(d):
md = get_default_md()
d['path'] = d.get('path') or d.get('url')
for k in md.keys():
md[k] = d.get(k)
return md
def resource_from_urn(urn):
md = get_default_md()
query = get_sql_query(urn.path)
if query:
md['table'] = query
md['format'] = 'jdbc'
return md
params = dict(urn.params)
if urn.scheme and urn.scheme[0]=='jdbc':
service, format = urn.scheme[1], urn.scheme[0]
else:
service = urn.scheme[0] if urn.scheme else ''
format = get_format({'format':None, 'service': service, 'path': urn.path})
compression = get_compression(urn.path)
if compression:
params['compression'] = compression
md['service'] = service
md['format'] = format
md['host'] = urn.host
md['port'] = urn.port
md['path'] = urn.path
md['user'] = urn.user
md['password'] = urn.password
md['options'] = params
for k,v in md.items():
if not v:
md[k] = None
return md
def get_sql_query(s):
# if SQL query is detected,
# wrap the resource path as a temp table
sql_query = s
sql_query = sql_query.replace('\n', ' ')
sql_query = sql_query.replace('\t', ' ')
sql_query = sql_query.replace('\r', ' ')
sql_query = ' '.join(sql_query.split())
sql_query = sql_query.rstrip(' ')
sql_query = sql_query.rstrip(';')
sql_query = sql_query.lower()
    # simple sql test: check for 'from' or 'where' prefixed with a space
# indicates multiple words
if any([x in sql_query for x in [' from ', ' where ']]):
return sql_query
else:
return None
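# Illustrative check (an assumption added for clarity, not in the original
# module): a string is treated as SQL only when it contains a multi-word
# ' from ' or ' where ' clause; bare table names or file paths return None.
def _example_get_sql_query():
    assert get_sql_query('SELECT id, amount FROM orders WHERE amount > 0;') is not None
    assert get_sql_query('orders') is None
    assert get_sql_query('/data/sales.csv') is None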
def to_resource(url_alias=None, *args, **kwargs):
md = None
# if a dict, create from dictionary
if isinstance(url_alias, dict):
md = resource_from_dict(url_alias)
# if a string, and a metadata profile is loaded, check for aliases
if metadata.profile():
if not md and url_alias in metadata.profile().get('resources', {}).keys():
md = metadata.profile()['resources'][url_alias]
if not md and url_alias in metadata.profile().get('providers', {}).keys():
md = metadata.profile()['providers'][url_alias]
# if nothing found yet, interpret as a urn/path
if not md and url_alias:
md = resource_from_urn(urnparse(url_alias))
# empty default
if not md:
md = get_default_md()
# sanitize path if it's a url or a query
if md['path']:
url_md = resource_from_urn(urnparse(md['path']))
md = merge(url_md, md)
md['path'] = url_md['path']
# override using kwargs
md = metadata_overrides(md, **kwargs)
if 'hostname' in md:
del md['hostname']
if 'username' in md:
del md['username']
return md
def get_compression(path):
if not path:
return None
_, ext = os.path.splitext(path)
d = {
'.lz': 'lz',
'.lzo': 'lzo',
'.gz': 'gzip',
'.bz2': 'bzip2',
}
return d.get(ext)
def get_format(md):
if md['format']:
return md['format']
# get the provider format
if md['service'] in ['sqlite', 'mysql', 'postgres', 'mssql', 'oracle']:
return 'jdbc'
if md['service'] in ['mongodb']:
return 'mongo'
if md['service'] in ['elastic']:
return 'json'
# check if path is a query
query = get_sql_query(md['path'])
if query:
return 'jdbc'
# extract the format from file extension
    # '.gz', '.bz2', '.zip', '.snappy', '.deflate'
path, ext = os.path.splitext(md['path'])
if get_compression(md['path']):
_, ext = os.path.splitext(path)
if ext and ext[0]=='.':
ext = ext[1:]
# default is None
return ext or None
def get_driver(service):
drivers = {
'sqlite': 'org.sqlite.JDBC',
'mysql': 'com.mysql.cj.jdbc.Driver',
'postgres': 'org.postgresql.Driver',
'mssql': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
'oracle': 'oracle.jdbc.driver.OracleDriver',
'clickhouse': 'ru.yandex.clickhouse.ClickHouseDriver'
}
return drivers.get(service)
def get_port(service):
ports = {
'http':80,
'https':443,
'hdfs': 8020,
'mysql': 3306,
'postgres': 5432,
'mssql': 1433,
'mongodb': 27017,
'oracle': 1521,
'clickhouse':8123,
'elastic': 9200,
's3a':9000
}
return ports.get(service)
def get_version(service):
versions = {
'hdfs': '3.1.1',
'sqlite': '3.25.2',
'mysql': '8.0.12',
'postgres': '42.2.5',
'mssql': '6.4.0.jre8',
'mongodb': '2.4.1',
'oracle': '12.2.0.1',
'clickhouse':'0.1.54',
's3a':'3.1.1'
}
return versions.get(service)
def get_url(md):
service = md['service']
path = md['path']
host_port = f"{md['host']}:{md['port']}" if md['port'] else md['host']
if service in ['local', 'file']:
url = path
elif service == 'sqlite':
url = f"jdbc:sqlite:{path}"
elif service == 'hdfs':
url = f"hdfs://{host_port}{md['path']}"
elif service in ['http', 'https']:
url = f"{service}://{host_port}{md['path']}"
elif service in ['minio', 's3a']:
url = f"s3a://{md['path']}"
elif service == 'mysql':
url = f"jdbc:mysql://{host_port}/{md['database']}"
elif service == 'postgres':
url = f"jdbc:postgresql://{host_port}/{md['database']}"
elif service == 'clickhouse':
url = f"jdbc:clickhouse://{host_port}/{md['database']}"
elif service == 'mssql':
url = f"jdbc:sqlserver://{host_port};databaseName={md['database']}"
elif service == 'oracle':
url = f"jdbc:oracle:thin:@//{host_port}/{md['database']}"
elif service == 'elastic':
url = f"http://{host_port}/{md['database']}"
elif service == 'mongodb':
url = f"mongodb://{md['user']}:{md['password']}@{host_port}/{md['path']}"
return url
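# Minimal sketch (hypothetical metadata, not part of the original module) of the
# connection string get_url composes for a jdbc service.
def _example_get_url():
    md = {'service': 'postgres', 'host': 'db.example.com', 'port': 5432,
          'path': None, 'database': 'sales'}
    # returns 'jdbc:postgresql://db.example.com:5432/sales'
    return get_url(md)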
def process_metadata(md):
    # update format, deriving it from service / path extension when not set
md['format'] = get_format(md)
# if no service, at this point use file
md['service'] = md['service'] or 'file'
# standardize some service names
services = {
'minio': 's3a',
'local': 'file'
}
md['service'] = services.get(md['service'], md['service'])
# if no host, use localhost
md['host'] = md['host'] or '127.0.0.1'
# if local file system and rel path, prepend rootdir
if md['service'] in ['file', 'sqlite'] and not os.path.isabs(md['path']):
md['path'] = os.path.join(rootdir(), md['path'])
# if service is s3a, remove leading '/'
if md['service'] == 's3a' and md['path']:
md['path'] = md['path'].lstrip('/')
# if service is mongodb, use '.' instead of '/'
if md['service'] == 'mongodb' and md['path']:
md['path'] = md['path'].replace('/', '.')
# generate database, table from path
if md['format']=='jdbc':
md['database'], md['table'], md['path'] = path_to_jdbc(md)
# set driver
md['driver'] = md['driver'] or get_driver(md['service'])
# if not table, provide no result query
md['table'] = md['table'] or 'SELECT 0 as result where 1 = 0'
# if schema is not yet defined,
# take the default for each service
default_schemas = {
'mysql': md['database'],
'mssql': 'dbo',
'postgres': 'public',
'clickhouse': 'default',
'oracle': md['user']
}
md['schema'] = md['schema'] or default_schemas.get(md['service'])
query = get_sql_query(md['table'])
if query and not query.endswith('as _query'):
md['table'] = '( {} ) as _query'.format(query)
md['version'] = md['version'] or get_version(md['service'])
md['port'] = md['port'] or get_port(md['service'])
md['port'] = int(md['port']) if md['port'] else None
md['url'] = get_url(md)
if not isinstance(md['options'], dict):
md['options'] = {}
compression = get_compression(md['path'])
if md['format']!='jdbc' and compression:
md['options']['compression'] = compression
h_list = []
for k in ['url', 'format', 'table', 'database']:
v = zlib.crc32(md[k].encode()) if md[k] else 0
h_list.append(v)
md['hash'] = functools.reduce(lambda a,b : a^b, h_list)
md['hash'] = hex(ctypes.c_size_t(md['hash']).value)
return md
def assemble_metadata(md):
keys = [
'hash',
'url',
'service',
'version',
'format',
'host'
]
if md['service'] != 'file':
keys.append('port')
if md['service'] == 's3a' or md['format'] == 'jdbc':
keys.extend([
'user',
'password'])
if md['format'] == 'jdbc':
keys.extend([
'driver',
'database',
'schema',
'table'])
keys.append('options')
return YamlDict(to_ordered_dict(md, keys))
def Resource(path_or_alias_or_url=None, provider_path_or_alias_or_url=None,
host=None, service=None, port=None, user=None, password=None,
driver=None, database=None, schema=None, table=None, format=None,
version=None, hostname=None, username=None, **options):
prov = provider_path_or_alias_or_url
path = path_or_alias_or_url
# get the resource, by alias metadata or by url
rmd = to_resource(path, host=host, service=service, port=port,
user=user, password=password, driver=driver, database=database,
schema=schema, table=table, format=format, version=version,
hostname=hostname, username=username, **options)
# get the provider by reference from the resource, if available
prov = prov or rmd.get('provider')
# get the provider, by alias metadata or by url
pmd = to_resource(prov)
# check if the provider is a jdbc connection, if so set it
pmd['database'], pmd['table'], pmd['path'] = path_to_jdbc(pmd, True)
# merge provider and resource metadata
md = merge(pmd,rmd)
# concatenate paths, if no table is defined
if md['table']:
md['path'] = None
else:
md['path'] = os.path.join(pmd['path'] or '', rmd['path'] or '')
#process metadata
md = process_metadata(md)
#todo: verify resource
# check format and other minimum requirements are met
# assemble output
md = assemble_metadata(md)
return md
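# Hedged usage sketch (paths, URLs and the table name are hypothetical, not
# taken from the original project): Resource() accepts a path, URL or alias
# plus keyword overrides and returns the assembled metadata as a YamlDict.
def _example_resource():
    # local file resource, resolved relative to the project root directory
    csv_md = Resource('data/events.csv')
    # jdbc resource built from an explicit URL, with a keyword override
    pg_md = Resource('jdbc:postgresql://db.example.com:5432/sales', table='orders')
    return csv_md, pg_md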
def get_local(md):
if md['service'].startswith('http'):
md['path'] = download(md['url'], md['format'])
md['service'] = 'file'
md['url'] = None
return Resource(md)
else:
return md
|
import logging
import zmq
import sys
import time
import uptime
import pickle
from datetime import datetime
from os import path
try:
from gps_config import (init, GPS_TOPIC)
except ImportError:
raise Exception('failed to import init method')
sys.exit(-1)
def gen_gps_message():
return [
time.time(),
uptime.uptime(),
{
"class": "TPV",
"device": "/dev/ttyACM0",
"mode": 1,
"timestamp" : time.time(),
"leapseconds": 18,
"lat" : 8.66645,
"lon" : 53.5555,
"alt" : 6.5546,
}
]
def main():
config = init()
connect_to = f'{config['ipc_protocol']}:{config['ipc_port']}'
logging.debug(f'binding to {connect_to} for zeroMQ IPC')
ctx = zmq.Context()
zmq_socket = ctx.socket(zmq.PUB)
try:
zmq_socket.connect(connect_to)
except Exception as e:
logging.fatal('failed to connect to zeroMQ socket for IPC')
sys.exit(-1)
logging.debug(f'connected to zeroMQ IPC socket')
logging.debug(f'entering endless loop')
try:
while True:
# Do stuff
data = gen_gps_message()
if config['print']: print(f'gps: {data}')
zmq_socket.send_multipart(
[
GPS_TOPIC,
pickle.dumps(
data
)
]
)
time.sleep(1)
# zmq_socket.send_pyobj(data)
except StopIteration:
logging.fatal("GPSD has terminated")
except KeyboardInterrupt:
logging.info('goodbye')
sys.exit(0)
if __name__ == '__main__':
main()
|
import logging
import zmq
import sys
import time
import uptime
import pickle
from datetime import datetime
from os import path
try:
from gps_config import (init, GPS_TOPIC)
except ImportError:
raise Exception('failed to import init method')
sys.exit(-1)
def gen_gps_message():
return [
time.time(),
uptime.uptime(),
{
"class": "TPV",
"device": "/dev/ttyACM0",
"mode": 1,
"timestamp" : time.time(),
"leapseconds": 18,
"lat" : 8.66645,
"lon" : 53.5555,
"alt" : 6.5546,
}
]
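# Hedged counterpart sketch (not part of the original script): a consumer bound
# to the same IPC endpoint would subscribe to GPS_TOPIC and unpickle the second
# frame of the multipart message published by main() below.
def _example_subscriber(endpoint):
    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    sub.bind(endpoint)
    sub.setsockopt(zmq.SUBSCRIBE, GPS_TOPIC)
    topic, payload = sub.recv_multipart()
    return pickle.loads(payload)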
def main():
config = init()
connect_to = f'{config["ipc_protocol"]}:{config["ipc_port"]}'
logging.debug(f'binding to {connect_to} for zeroMQ IPC')
ctx = zmq.Context()
zmq_socket = ctx.socket(zmq.PUB)
try:
zmq_socket.connect(connect_to)
except Exception as e:
logging.fatal('failed to connect to zeroMQ socket for IPC')
sys.exit(-1)
logging.debug(f'connected to zeroMQ IPC socket')
logging.debug(f'entering endless loop')
try:
while True:
# Do stuff
data = gen_gps_message()
if config['print']: print(f'gps: {data}')
zmq_socket.send_multipart(
[
GPS_TOPIC,
pickle.dumps(
data
)
]
)
time.sleep(1)
# zmq_socket.send_pyobj(data)
except StopIteration:
logging.fatal("GPSD has terminated")
except KeyboardInterrupt:
logging.info('goodbye')
sys.exit(0)
if __name__ == '__main__':
main()
|
from logging import getLogger
import slack
logger = getLogger(__name__)
class ChannelListNotLoadedError(RuntimeError):
pass
class ChannelNotFoundError(RuntimeError):
pass
class FileNotUploadedError(RuntimeError):
pass
class SlackAPI(object):
def __init__(self, token, channel: str, to_user: str) -> None:
self._client = slack.WebClient(token=token)
self._channel_id = self._get_channel_id(channel)
self._to_user = to_user if to_user == '' or to_user.startswith('@') else '@' + to_user
def _get_channels(self, channels=[], cursor=None):
params = {}
if cursor:
params['cursor'] = cursor
response = self._client.api_call('channels.list', http_verb="GET", params=params)
if not response['ok']:
raise ChannelListNotLoadedError(f'Error while loading channels. The error reason is "{response['error']}".')
channels += response.get('channels', [])
if not channels:
raise ChannelListNotLoadedError('Channel list is empty.')
if response['response_metadata']['next_cursor']:
return self._get_channels(channels, response['response_metadata']['next_cursor'])
else:
return channels
def _get_channel_id(self, channel_name):
for channel in self._get_channels():
if channel['name'] == channel_name:
return channel['id']
raise ChannelNotFoundError(f'Channel {channel_name} is not found in public channels.')
def send_snippet(self, comment, title, content):
request_body = dict(
channels=self._channel_id,
initial_comment=f'<{self._to_user}> {comment}' if self._to_user else comment,
content=content,
title=title)
response = self._client.api_call('files.upload', data=request_body)
if not response['ok']:
raise FileNotUploadedError(f'Error while uploading file. The error reason is "{response['error']}".')
|
from logging import getLogger
import slack
logger = getLogger(__name__)
class ChannelListNotLoadedError(RuntimeError):
pass
class ChannelNotFoundError(RuntimeError):
pass
class FileNotUploadedError(RuntimeError):
pass
class SlackAPI(object):
def __init__(self, token, channel: str, to_user: str) -> None:
self._client = slack.WebClient(token=token)
self._channel_id = self._get_channel_id(channel)
self._to_user = to_user if to_user == '' or to_user.startswith('@') else '@' + to_user
def _get_channels(self, channels=[], cursor=None):
params = {}
if cursor:
params['cursor'] = cursor
response = self._client.api_call('channels.list', http_verb="GET", params=params)
if not response['ok']:
raise ChannelListNotLoadedError(f'Error while loading channels. The error reason is "{response["error"]}".')
channels += response.get('channels', [])
if not channels:
raise ChannelListNotLoadedError('Channel list is empty.')
if response['response_metadata']['next_cursor']:
return self._get_channels(channels, response['response_metadata']['next_cursor'])
else:
return channels
def _get_channel_id(self, channel_name):
for channel in self._get_channels():
if channel['name'] == channel_name:
return channel['id']
raise ChannelNotFoundError(f'Channel {channel_name} is not found in public channels.')
def send_snippet(self, comment, title, content):
request_body = dict(
channels=self._channel_id,
initial_comment=f'<{self._to_user}> {comment}' if self._to_user else comment,
content=content,
title=title)
response = self._client.api_call('files.upload', data=request_body)
if not response['ok']:
raise FileNotUploadedError(f'Error while uploading file. The error reason is "{response["error"]}".')
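# Minimal usage sketch (token, channel and user below are placeholders, not
# from the original module): upload a text snippet to a public channel and
# mention a user in the initial comment.
def _example_send_snippet():
    api = SlackAPI(token='xoxb-...', channel='general', to_user='alice')
    api.send_snippet(comment='nightly report', title='report.txt', content='all green')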
|
import logging
import sys
import yfinance
import pandas as pd
import yfinance as yf
import os
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, Dict, List
from finrl.config import TimeRange, setup_utils_configuration
from finrl.data.converter import convert_ohlcv_format, convert_trades_format
from finrl.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data)
from finrl.exceptions import OperationalException
from finrl.exchange import timeframe_to_minutes
from finrl.resolvers import ExchangeResolver
from finrl.state import RunMode
logger = logging.getLogger(__name__)
def start_download_cryptodata(args: Dict[str, Any]) -> None:
"""
Parameters:
ARGS_DOWNLOAD_DATA = {'config': ['config.json'], 'datadir': None,
'user_data_dir': None, 'pairs': None, 'pairs_file': None,
'days': 160, 'timerange': None,
'download_trades': False, 'exchange': 'binance',
'timeframes': ['1d'], 'erase': False,
'dataformat_ohlcv': None, 'dataformat_trades': None}
Returns:
Json files in user_data/data/exchange/*.json
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
if 'pairs' not in config:
raise OperationalException(
"Downloading data requires a list of pairs. "
"Please check the documentation on how to configure this.")
logger.info(f"About to download pairs: {config["pairs"]}, "
f"intervals: {config["timeframes"]} to {config["datadir"]}")
pairs_not_available: List[str] = []
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
# Manual validations of relevant settings
exchange.validate_pairs(config['pairs'])
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
try:
if config.get('download_trades'):
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=config['pairs'], datadir=config['datadir'],
timerange=timerange, erase=bool(config.get('erase')),
data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=config['pairs'], timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=config['pairs'], timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format=config['dataformat_ohlcv'])
except KeyboardInterrupt:
sys.exit("Interrupt received, aborting ...")
finally:
if pairs_not_available:
logger.info(f"Pairs [{",".join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")
def start_download_stockdata(args: Dict[str, Any]) -> None:
    """Fetch data from the Yahoo Finance API.
    Parameters
    ----------
    ticker_list, timerange
    Returns
    -------
    Json files in user_data/data/yahoo/*.json
    """
args["exchange"] = "yahoo"
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
config["datadir"] = "user_data/data/yahoo"
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
start = datetime.fromtimestamp(timerange.startts).strftime("%Y-%m-%d")
end = datetime.now().strftime("%Y-%m-%d")
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
start = datetime.fromtimestamp(timerange.startts).strftime("%Y-%m-%d")
end = datetime.fromtimestamp(timerange.stopts).strftime("%Y-%m-%d")
try:
data_df = pd.DataFrame()
for tic in config['ticker_list']:
temp_df = yf.download(tic, start=start, end=end)
temp_df.columns = [
"open",
"high",
"low",
"close",
"adjcp",
"volume",
]
temp_df["close"] = temp_df["adjcp"]
temp_df = temp_df.drop(["adjcp"], axis=1)
temp_df.to_json(f'{os.getcwd()}/{config['datadir']}/{tic}.json')
except KeyboardInterrupt:
sys.exit("Interrupt received, aborting ...")
def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:
"""
Convert data from one format to another
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if ohlcv:
convert_ohlcv_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
else:
convert_trades_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
def start_list_data(args: Dict[str, Any]) -> None:
"""
List available backtest data
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
from tabulate import tabulate
from freqtrade.data.history.idatahandler import get_datahandler
dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])
paircombs = dhc.ohlcv_get_available_data(config['datadir'])
if args['pairs']:
paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]
print(f"Found {len(paircombs)} pair / timeframe combinations.")
groupedpair = defaultdict(list)
for pair, timeframe in sorted(paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]))):
groupedpair[pair].append(timeframe)
if groupedpair:
print(tabulate([(pair, ', '.join(timeframes)) for pair, timeframes in groupedpair.items()],
headers=("Pair", "Timeframe"),
tablefmt='psql', stralign='right'))
|
import logging
import sys
import yfinance
import pandas as pd
import yfinance as yf
import os
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, Dict, List
from finrl.config import TimeRange, setup_utils_configuration
from finrl.data.converter import convert_ohlcv_format, convert_trades_format
from finrl.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data)
from finrl.exceptions import OperationalException
from finrl.exchange import timeframe_to_minutes
from finrl.resolvers import ExchangeResolver
from finrl.state import RunMode
logger = logging.getLogger(__name__)
def start_download_cryptodata(args: Dict[str, Any]) -> None:
"""
Parameters:
ARGS_DOWNLOAD_DATA = {'config': ['config.json'], 'datadir': None,
'user_data_dir': None, 'pairs': None, 'pairs_file': None,
'days': 160, 'timerange': None,
'download_trades': False, 'exchange': 'binance',
'timeframes': ['1d'], 'erase': False,
'dataformat_ohlcv': None, 'dataformat_trades': None}
Returns:
Json files in user_data/data/exchange/*.json
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
if 'pairs' not in config:
raise OperationalException(
"Downloading data requires a list of pairs. "
"Please check the documentation on how to configure this.")
logger.info(f"About to download pairs: {config['pairs']}, "
f"intervals: {config['timeframes']} to {config['datadir']}")
pairs_not_available: List[str] = []
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
# Manual validations of relevant settings
exchange.validate_pairs(config['pairs'])
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
try:
if config.get('download_trades'):
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=config['pairs'], datadir=config['datadir'],
timerange=timerange, erase=bool(config.get('erase')),
data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=config['pairs'], timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=config['pairs'], timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format=config['dataformat_ohlcv'])
except KeyboardInterrupt:
sys.exit("Interrupt received, aborting ...")
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")
def start_download_stockdata(args: Dict[str, Any]) -> None:
    """Fetch data from the Yahoo Finance API.
    Parameters
    ----------
    ticker_list, timerange
    Returns
    -------
    Json files in user_data/data/yahoo/*.json
    """
args["exchange"] = "yahoo"
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
config["datadir"] = "user_data/data/yahoo"
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
start = datetime.fromtimestamp(timerange.startts).strftime("%Y-%m-%d")
end = datetime.now().strftime("%Y-%m-%d")
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
start = datetime.fromtimestamp(timerange.startts).strftime("%Y-%m-%d")
end = datetime.fromtimestamp(timerange.stopts).strftime("%Y-%m-%d")
try:
data_df = pd.DataFrame()
for tic in config['ticker_list']:
temp_df = yf.download(tic, start=start, end=end)
temp_df.columns = [
"open",
"high",
"low",
"close",
"adjcp",
"volume",
]
temp_df["close"] = temp_df["adjcp"]
temp_df = temp_df.drop(["adjcp"], axis=1)
temp_df.to_json(f'{os.getcwd()}/{config["datadir"]}/{tic}.json')
except KeyboardInterrupt:
sys.exit("Interrupt received, aborting ...")
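# Illustrative call (hypothetical values; the argument structure mirrors the
# crypto variant above, with 'ticker_list' driving the per-ticker Yahoo download):
def _example_download_stockdata():
    args = {'config': ['config.json'], 'days': 160,
            'ticker_list': ['AAPL', 'MSFT'], 'exchange': 'yahoo'}
    start_download_stockdata(args)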
def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:
"""
Convert data from one format to another
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if ohlcv:
convert_ohlcv_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
else:
convert_trades_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
def start_list_data(args: Dict[str, Any]) -> None:
"""
List available backtest data
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
from tabulate import tabulate
from freqtrade.data.history.idatahandler import get_datahandler
dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])
paircombs = dhc.ohlcv_get_available_data(config['datadir'])
if args['pairs']:
paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]
print(f"Found {len(paircombs)} pair / timeframe combinations.")
groupedpair = defaultdict(list)
for pair, timeframe in sorted(paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]))):
groupedpair[pair].append(timeframe)
if groupedpair:
print(tabulate([(pair, ', '.join(timeframes)) for pair, timeframes in groupedpair.items()],
headers=("Pair", "Timeframe"),
tablefmt='psql', stralign='right'))
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import datetime
import json
import os
import re
import requests
import time
from bs4 import BeautifulSoup
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS = 'ALL:@SECLEVEL=1'
def try_write(path, text):
paths = path.split("/")
sub_path = ""
for i in paths[:-1]:
sub_path += f"{i}/"
if not os.path.exists(sub_path):
os.mkdir(sub_path)
with open(path, "w", -1, "utf-8") as f:
f.write(text)
print(f"Saved successfully: {path}\n on `try_write`.")
return True
def try_archive(url, proxies, headers):
print("Sleep 10 seconds\n on `try_archive`.")
try_archive_time = 10
while try_archive_time:
time.sleep(110 - 10 * try_archive_time)
try:
archive = requests.get(f"https://web.archive.org/save/{url}", proxies=proxies, headers=headers, timeout=80)
if not archive.ok:
print(f"Archived failed: {url}\n {archive.status_code} {archive.reason}\n on `try_archive`.")
try_archive_time -= 1
if try_archive_time == 0:
return False
else:
continue
except Exception as e:
print(f"Error: {e}\n on `try_archive`.")
try_archive_time -= 1
if try_archive_time == 0:
return False
else:
continue
print(f"Archived successfully: {url}\n {archive.status_code} {archive.reason}\n on `try_archive`.")
return True
def parse_num(text):
if not text:
return 0
try:
parse = re.search(r"([\d,万億兆\.]+)", text).groups()[0]
parse = re.sub(r"[,万億兆]", "", parse)
return json.loads(parse)
except Exception as e:
print(f"Error: {e}\n on `parse_num`.")
return 0
def parse_date(date):
return tuple((int(i) if i else None) for i in re.search(r"^(?:(\d+)年)?(\d+)月(\d+)日[~〜~](?:(\d+)年)?(\d+)月(\d+)日$", date).groups())
def sub_name(date):
y1, m1, d1, y2, m2, d2 = date
return f"{y1 or y2:04d}/{m1:02d}/{y1 or y2:04d}-{m1:02d}-{d1:02d}__{y2 or y1:04d}-{m2:02d}-{d2:02d}"
def download_html(url, proxies={}, headers={}):
download = requests.get(url, proxies=proxies, headers=headers)
text = download.content.decode("utf-8")
parser = BeautifulSoup(text, "html.parser")
try:
date = parser.find(class_="heading__sub-text-body").get_text()
re.search(r"^(\d+)年(\d+)月(\d+)日~(\d+)年(\d+)月(\d+)日$", date).groups()
print(f"Downloaded and parsed successfully: {url}\n on `download_html`.")
except Exception as e:
try:
date = parser.find(class_="article-body__contents").get_text()
re.search(r"(?:(\d+)年)?(\d+)月(\d+)日[~〜~](?:(\d+)年)?(\d+)月(\d+)日", date).groups()
print(f"Downloaded and parsed successfully: {url}\n on `download_html`.")
except Exception as e:
print(f"Downloaded successfully: {url}\n on `download_html`.")
print(f"Error: {e}\n on `download_html` when parsing date.")
try_write(f"Html_Temp/{datetime.datetime.now().strftime(f"%Y%m%d%H%M%S%f")}.html", text)
try_archive(url, proxies, headers)
return text
def json_to_markdown(data):
if "software" in data:
software = data["software"]
else:
software = []
if "hardware" in data:
hardware = data["hardware"]
else:
hardware = []
y1, m1, d1, y2, m2, d2 = data["date"]
now = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).isoformat(timespec = "seconds")
markdown_content = f"""---
from: {y1 or y2:04d}-{m1:02d}-{d1:02d}
to: {y2 or y1:04d}-{m2:02d}-{d2:02d}
top_30: {len(software) > 20}
top_10: {bool(hardware)}
last_modified_at: {now}
---
# Famitsu Sales: {y1 or y2:04d}-{m1:02d}-{d1:02d} ~ {y2 or y1:04d}-{m2:02d}-{d2:02d}
"""
if software:
markdown_content += """## Software
| Rank | Platform | Title | Publisher | Sales | Total | Rate | New |
| -: | -- | -- | -- | -: | -: | -: | -- |
"""
for soft in software:
markdown_content += "| {rank} | {platform} | {title} | {publisher} | {num_past:,} | {num_total:,} | {sales_meter} | {new} |\n".format(new = soft["is_new"] and "**New**" or "", **soft)
markdown_content += "\n"
if hardware:
markdown_content += """## Hardware
| Rank | Platform | Sales | Total |
| -: | -- | -: | -: |
"""
for hard in hardware:
markdown_content += "| {rank} | {platform} | {num_past:,} | {num_total:,} |\n".format(**hard)
markdown_content += "\n"
return markdown_content.strip()
def save_markdown(data):
file_name = f"Json/{sub_name(data["date"])}.json"
try:
if os.path.exists(file_name):
with open(file_name, "r", -1, "utf-8") as json_file:
old_data = json.load(json_file)
if old_data == data:
return False
else:
if isinstance(old_data, list):
old_software = old_data
elif "software" in old_data:
old_software = old_data["software"]
else:
old_software = []
if "hardware" in old_data:
old_hardware = old_data["hardware"]
else:
old_hardware = []
if old_software and ((not "software" in data) or (len(data["software"]) < len(old_software))):
data.update({
"software": old_software
})
if old_hardware and ((not "hardware" in data) or (len(data["hardware"]) < len(old_hardware))):
data.update({
"hardware": old_hardware
})
if old_data["software"] == data["software"] and ((not "hardware" in old_data) or (old_data["hardware"] == data["hardware"])):
return False
except Exception as e:
print(f"Error: {e}\n on `save_markdown`.")
try_write(file_name, json.dumps(data, ensure_ascii = False))
file_name = f"Markdown/{sub_name(data["date"])}.md"
try_write(file_name, json_to_markdown(data))
return True
def save_html(text, path, date):
file_name = f"{path}/{sub_name(date)}.html"
try_write(file_name, text)
def download_software(proxies={}, headers={}):
return download_html("https://www.famitsu.com/ranking/game-sales/", proxies, headers)
def parse_software(text):
parser = BeautifulSoup(text, "html.parser")
date = parse_date(parser.find(class_="heading__sub-text-body").get_text())
cards = parser.find_all(class_="card-game-sale-rank")
software = []
for card in cards:
info = {
"rank": parse_num(card.find(class_="icon-ranking").get_text()),
"is_new": bool(card.find(class_="card-game-sale-rank__status-info")),
"platform": card.find(class_="icon-console").get_text(),
"title": card.find(class_="card-game-sale-rank__title").get_text(),
"publisher": card.find(class_="card-game-sale-rank__publisher").get_text(),
"num_past": parse_num(card.find(class_="card-game-sale-rank__sales-num-past").get_text()),
"num_total": parse_num(card.find(class_="card-game-sale-rank__sales-num-total").get_text()),
"sales_meter": card.find(class_="card-game-sale-rank__sales-meter-num").get_text()
}
software.append(info)
return {
"software": software,
"date": date
}
def download_hardware(proxies={}, headers={}):
download = requests.get(r"https://www.famitsu.com/search/?type=article&q=%E3%82%BD%E3%83%95%E3%83%88+%E3%83%8F%E3%83%BC%E3%83%89+%E9%80%B1%E9%96%93%E8%B2%A9%E5%A3%B2%E6%95%B0", proxies=proxies, headers=headers)
text = download.content.decode("utf-8")
parser = BeautifulSoup(text, "html.parser")
try:
link = parser.find(class_="card__title").find("a").get("href")
if link.startswith("//"):
link = "https:" + link
elif link.startswith("/"):
link = "https://www.famitsu.com" + link
elif not link.find("//"):
link = "https://www.famitsu.com/search/" + link
except Exception as e:
print(f"Error: {e}\n on `download_hardware`.")
return False
return download_html(link, proxies, headers)
def parse_hardware(text):
parser = BeautifulSoup(text, "html.parser")
body = parser.find(class_="article-body__contents").get_text("\n")
date = parse_date(re.search(r"(?<=集計期間は)(?:(\d+)年)?(\d+)月(\d+)日[~〜~](?:(\d+)年)?(\d+)月(\d+)日", body)[0])
if not (date[0] or date[3]):
year = parse_num(parser.find("time").get("datetime")[0:4])
date = (year, date[1], date[2], None, date[4], date[5])
software_start = re.search(r"^\s*ソフト.*本数.*$\s*", body, re.M).end()
software_all = re.findall(r"^\s*(\d+)位(?:((.*?)))?\s*([^ ]*) +(.*)\s*([\d万億兆]+)本\s*(?:(累計(?:販売本数)?:?\s*([\d万億兆]+)本)\s*)?/\s*(.*)\s*/\s*\d+年\d+月\d+日(?:発売)?\s*$", body[software_start : ], re.M)
software = []
for soft in software_all:
info = {
"rank": parse_num(soft[0]),
"is_new": soft[1] == "初登場",
"platform": soft[2],
"title": soft[3],
"publisher": soft[6],
"num_past": parse_num(soft[4]),
"num_total": parse_num(soft[5]),
"sales_meter": ""
}
if (not info["num_total"]):
if info["is_new"]:
info["num_total"] = info["num_past"]
else:
info["num_total"] = -1
software.append(info)
hardware_start = re.search(r"^\s*ハード.*台数\s*$\s*", body, re.M).end()
hardware_all = re.findall(r"^\s*(.*?)\s*/\s*([\d万億兆]+)台(?:\s*(累計(?:販売台数)?:?\s*([\d万億兆]+)台)\s*)?$", body[hardware_start : ], re.M)
hardware_all.sort(key = lambda x: parse_num(x[1]), reverse = True)
hardware = []
for (i, hard) in enumerate(hardware_all):
info = {
"rank": i + 1,
"platform": hard[0],
"num_past": parse_num(hard[1]),
"num_total": parse_num(hard[2])
}
if (not info["num_total"]):
info["num_total"] = -1
hardware.append(info)
return {
"software": software,
"hardware": hardware,
"date": date
}
if __name__ == "__main__":
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.0.0 Safari/537.36 XzonnArchive/0.1"
}
# Test if proxies are needed
proxies = {}
try:
google = requests.get(r"https://www.google.com/", headers=headers, timeout=5)
if not google.ok:
proxies = {
"http": "http://127.0.0.1:10809/",
"https": "http://127.0.0.1:10809/"
}
print("Proxies are needed")
except Exception as e:
proxies = {
"http": "http://127.0.0.1:10809/",
"https": "http://127.0.0.1:10809/"
}
print("Proxies are needed")
software_text = download_software(proxies, headers)
software = parse_software(software_text)
hardware_text = download_hardware(proxies, headers)
hardware = parse_hardware(hardware_text)
if sub_name(software["date"]) == sub_name(hardware["date"]):
if save_markdown({
"software": software["software"],
"hardware": hardware["hardware"],
"date": software["date"]
}):
save_html(software_text, "Html_Top30", software["date"])
save_html(hardware_text, "Html_Top10", hardware["date"])
else:
if save_markdown(software):
save_html(software_text, "Html_Top30", software["date"])
if save_markdown(hardware):
save_html(hardware_text, "Html_Top10", hardware["date"])
week_urls = ["https://www.famitsu.com/ranking/game-sales/last_week/", "https://www.famitsu.com/ranking/game-sales/before_last/"]
for week_url in week_urls:
week_text = download_html(week_url, proxies, headers)
week = parse_software(week_text)
if save_markdown(week):
save_html(week_text, "Html_Top30", week["date"])
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import datetime
import json
import os
import re
import requests
import time
from bs4 import BeautifulSoup
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS = 'ALL:@SECLEVEL=1'
def try_write(path, text):
paths = path.split("/")
sub_path = ""
for i in paths[:-1]:
sub_path += f"{i}/"
if not os.path.exists(sub_path):
os.mkdir(sub_path)
with open(path, "w", -1, "utf-8") as f:
f.write(text)
print(f"Saved successfully: {path}\n on `try_write`.")
return True
def try_archive(url, proxies, headers):
print("Sleep 10 seconds\n on `try_archive`.")
try_archive_time = 10
while try_archive_time:
time.sleep(110 - 10 * try_archive_time)
try:
archive = requests.get(f"https://web.archive.org/save/{url}", proxies=proxies, headers=headers, timeout=80)
if not archive.ok:
print(f"Archived failed: {url}\n {archive.status_code} {archive.reason}\n on `try_archive`.")
try_archive_time -= 1
if try_archive_time == 0:
return False
else:
continue
except Exception as e:
print(f"Error: {e}\n on `try_archive`.")
try_archive_time -= 1
if try_archive_time == 0:
return False
else:
continue
print(f"Archived successfully: {url}\n {archive.status_code} {archive.reason}\n on `try_archive`.")
return True
def parse_num(text):
if not text:
return 0
try:
parse = re.search(r"([\d,万億兆\.]+)", text).groups()[0]
parse = re.sub(r"[,万億兆]", "", parse)
return json.loads(parse)
except Exception as e:
print(f"Error: {e}\n on `parse_num`.")
return 0
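# Illustrative behaviour (an assumption based on the regex above, not from the
# original script): parse_num keeps the digit run, strips commas and the
# 万/億/兆 unit characters, and loads the remainder as a number.
def _example_parse_num():
    assert parse_num("3,456本") == 3456
    assert parse_num("12万5000台") == 125000
    assert parse_num("") == 0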
def parse_date(date):
return tuple((int(i) if i else None) for i in re.search(r"^(?:(\d+)年)?(\d+)月(\d+)日[~〜~](?:(\d+)年)?(\d+)月(\d+)日$", date).groups())
def sub_name(date):
y1, m1, d1, y2, m2, d2 = date
return f"{y1 or y2:04d}/{m1:02d}/{y1 or y2:04d}-{m1:02d}-{d1:02d}__{y2 or y1:04d}-{m2:02d}-{d2:02d}"
def download_html(url, proxies={}, headers={}):
download = requests.get(url, proxies=proxies, headers=headers)
text = download.content.decode("utf-8")
parser = BeautifulSoup(text, "html.parser")
try:
date = parser.find(class_="heading__sub-text-body").get_text()
re.search(r"^(\d+)年(\d+)月(\d+)日~(\d+)年(\d+)月(\d+)日$", date).groups()
print(f"Downloaded and parsed successfully: {url}\n on `download_html`.")
except Exception as e:
try:
date = parser.find(class_="article-body__contents").get_text()
re.search(r"(?:(\d+)年)?(\d+)月(\d+)日[~〜~](?:(\d+)年)?(\d+)月(\d+)日", date).groups()
print(f"Downloaded and parsed successfully: {url}\n on `download_html`.")
except Exception as e:
print(f"Downloaded successfully: {url}\n on `download_html`.")
print(f"Error: {e}\n on `download_html` when parsing date.")
try_write(f"Html_Temp/{datetime.datetime.now().strftime(f'%Y%m%d%H%M%S%f')}.html", text)
try_archive(url, proxies, headers)
return text
def json_to_markdown(data):
if "software" in data:
software = data["software"]
else:
software = []
if "hardware" in data:
hardware = data["hardware"]
else:
hardware = []
y1, m1, d1, y2, m2, d2 = data["date"]
now = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).isoformat(timespec = "seconds")
markdown_content = f"""---
from: {y1 or y2:04d}-{m1:02d}-{d1:02d}
to: {y2 or y1:04d}-{m2:02d}-{d2:02d}
top_30: {len(software) > 20}
top_10: {bool(hardware)}
last_modified_at: {now}
---
# Famitsu Sales: {y1 or y2:04d}-{m1:02d}-{d1:02d} ~ {y2 or y1:04d}-{m2:02d}-{d2:02d}
"""
if software:
markdown_content += """## Software
| Rank | Platform | Title | Publisher | Sales | Total | Rate | New |
| -: | -- | -- | -- | -: | -: | -: | -- |
"""
for soft in software:
markdown_content += "| {rank} | {platform} | {title} | {publisher} | {num_past:,} | {num_total:,} | {sales_meter} | {new} |\n".format(new = soft["is_new"] and "**New**" or "", **soft)
markdown_content += "\n"
if hardware:
markdown_content += """## Hardware
| Rank | Platform | Sales | Total |
| -: | -- | -: | -: |
"""
for hard in hardware:
markdown_content += "| {rank} | {platform} | {num_past:,} | {num_total:,} |\n".format(**hard)
markdown_content += "\n"
return markdown_content.strip()
def save_markdown(data):
file_name = f"Json/{sub_name(data['date'])}.json"
try:
if os.path.exists(file_name):
with open(file_name, "r", -1, "utf-8") as json_file:
old_data = json.load(json_file)
if old_data == data:
return False
else:
if isinstance(old_data, list):
old_software = old_data
elif "software" in old_data:
old_software = old_data["software"]
else:
old_software = []
if "hardware" in old_data:
old_hardware = old_data["hardware"]
else:
old_hardware = []
if old_software and ((not "software" in data) or (len(data["software"]) < len(old_software))):
data.update({
"software": old_software
})
if old_hardware and ((not "hardware" in data) or (len(data["hardware"]) < len(old_hardware))):
data.update({
"hardware": old_hardware
})
if old_data["software"] == data["software"] and ((not "hardware" in old_data) or (old_data["hardware"] == data["hardware"])):
return False
except Exception as e:
print(f"Error: {e}\n on `save_markdown`.")
try_write(file_name, json.dumps(data, ensure_ascii = False))
file_name = f"Markdown/{sub_name(data['date'])}.md"
try_write(file_name, json_to_markdown(data))
return True
def save_html(text, path, date):
file_name = f"{path}/{sub_name(date)}.html"
try_write(file_name, text)
def download_software(proxies={}, headers={}):
return download_html("https://www.famitsu.com/ranking/game-sales/", proxies, headers)
def parse_software(text):
parser = BeautifulSoup(text, "html.parser")
date = parse_date(parser.find(class_="heading__sub-text-body").get_text())
cards = parser.find_all(class_="card-game-sale-rank")
software = []
for card in cards:
info = {
"rank": parse_num(card.find(class_="icon-ranking").get_text()),
"is_new": bool(card.find(class_="card-game-sale-rank__status-info")),
"platform": card.find(class_="icon-console").get_text(),
"title": card.find(class_="card-game-sale-rank__title").get_text(),
"publisher": card.find(class_="card-game-sale-rank__publisher").get_text(),
"num_past": parse_num(card.find(class_="card-game-sale-rank__sales-num-past").get_text()),
"num_total": parse_num(card.find(class_="card-game-sale-rank__sales-num-total").get_text()),
"sales_meter": card.find(class_="card-game-sale-rank__sales-meter-num").get_text()
}
software.append(info)
return {
"software": software,
"date": date
}
def download_hardware(proxies={}, headers={}):
download = requests.get(r"https://www.famitsu.com/search/?type=article&q=%E3%82%BD%E3%83%95%E3%83%88+%E3%83%8F%E3%83%BC%E3%83%89+%E9%80%B1%E9%96%93%E8%B2%A9%E5%A3%B2%E6%95%B0", proxies=proxies, headers=headers)
text = download.content.decode("utf-8")
parser = BeautifulSoup(text, "html.parser")
try:
link = parser.find(class_="card__title").find("a").get("href")
if link.startswith("//"):
link = "https:" + link
elif link.startswith("/"):
link = "https://www.famitsu.com" + link
elif not link.find("//"):
link = "https://www.famitsu.com/search/" + link
except Exception as e:
print(f"Error: {e}\n on `download_hardware`.")
return False
return download_html(link, proxies, headers)
def parse_hardware(text):
parser = BeautifulSoup(text, "html.parser")
body = parser.find(class_="article-body__contents").get_text("\n")
date = parse_date(re.search(r"(?<=集計期間は)(?:(\d+)年)?(\d+)月(\d+)日[~〜~](?:(\d+)年)?(\d+)月(\d+)日", body)[0])
if not (date[0] or date[3]):
year = parse_num(parser.find("time").get("datetime")[0:4])
date = (year, date[1], date[2], None, date[4], date[5])
software_start = re.search(r"^\s*ソフト.*本数.*$\s*", body, re.M).end()
software_all = re.findall(r"^\s*(\d+)位(?:((.*?)))?\s*([^ ]*) +(.*)\s*([\d万億兆]+)本\s*(?:(累計(?:販売本数)?:?\s*([\d万億兆]+)本)\s*)?/\s*(.*)\s*/\s*\d+年\d+月\d+日(?:発売)?\s*$", body[software_start : ], re.M)
software = []
for soft in software_all:
info = {
"rank": parse_num(soft[0]),
"is_new": soft[1] == "初登場",
"platform": soft[2],
"title": soft[3],
"publisher": soft[6],
"num_past": parse_num(soft[4]),
"num_total": parse_num(soft[5]),
"sales_meter": ""
}
if (not info["num_total"]):
if info["is_new"]:
info["num_total"] = info["num_past"]
else:
info["num_total"] = -1
software.append(info)
hardware_start = re.search(r"^\s*ハード.*台数\s*$\s*", body, re.M).end()
hardware_all = re.findall(r"^\s*(.*?)\s*/\s*([\d万億兆]+)台(?:\s*(累計(?:販売台数)?:?\s*([\d万億兆]+)台)\s*)?$", body[hardware_start : ], re.M)
hardware_all.sort(key = lambda x: parse_num(x[1]), reverse = True)
hardware = []
for (i, hard) in enumerate(hardware_all):
info = {
"rank": i + 1,
"platform": hard[0],
"num_past": parse_num(hard[1]),
"num_total": parse_num(hard[2])
}
if (not info["num_total"]):
info["num_total"] = -1
hardware.append(info)
return {
"software": software,
"hardware": hardware,
"date": date
}
if __name__ == "__main__":
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.0.0 Safari/537.36 XzonnArchive/0.1"
}
# Test if proxies are needed
proxies = {}
try:
google = requests.get(r"https://www.google.com/", headers=headers, timeout=5)
if not google.ok:
proxies = {
"http": "http://127.0.0.1:10809/",
"https": "http://127.0.0.1:10809/"
}
print("Proxies are needed")
except Exception as e:
proxies = {
"http": "http://127.0.0.1:10809/",
"https": "http://127.0.0.1:10809/"
}
print("Proxies are needed")
software_text = download_software(proxies, headers)
software = parse_software(software_text)
hardware_text = download_hardware(proxies, headers)
hardware = parse_hardware(hardware_text)
if sub_name(software["date"]) == sub_name(hardware["date"]):
if save_markdown({
"software": software["software"],
"hardware": hardware["hardware"],
"date": software["date"]
}):
save_html(software_text, "Html_Top30", software["date"])
save_html(hardware_text, "Html_Top10", hardware["date"])
else:
if save_markdown(software):
save_html(software_text, "Html_Top30", software["date"])
if save_markdown(hardware):
save_html(hardware_text, "Html_Top10", hardware["date"])
week_urls = ["https://www.famitsu.com/ranking/game-sales/last_week/", "https://www.famitsu.com/ranking/game-sales/before_last/"]
for week_url in week_urls:
week_text = download_html(week_url, proxies, headers)
week = parse_software(week_text)
if save_markdown(week):
save_html(week_text, "Html_Top30", week["date"])
|
"""Common test functions."""
from pathlib import Path
import re
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from uuid import uuid4
from aiohttp import web
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest
from supervisor.api import RestAPI
from supervisor.bootstrap import initialize_coresys
from supervisor.const import REQUEST_FROM
from supervisor.coresys import CoreSys
from supervisor.dbus.network import NetworkManager
from supervisor.docker import DockerAPI
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
from supervisor.utils.gdbus import DBus
from tests.common import exists_fixture, load_fixture, load_json_fixture
# pylint: disable=redefined-outer-name, protected-access
async def mock_async_return_true() -> bool:
"""Mock methods to return True."""
return True
@pytest.fixture
def docker() -> DockerAPI:
"""Mock DockerAPI."""
images = [MagicMock(tags=["homeassistant/amd64-hassio-supervisor:latest"])]
with patch("docker.DockerClient", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.images", return_value=MagicMock()
), patch("supervisor.docker.DockerAPI.containers", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.api", return_value=MagicMock()
), patch(
"supervisor.docker.DockerAPI.images.list", return_value=images
), patch(
"supervisor.docker.DockerAPI.info",
return_value=MagicMock(),
), patch(
"supervisor.docker.DockerConfig",
return_value=MagicMock(),
):
docker_obj = DockerAPI()
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = "1.0.0"
docker_obj.config.registries = {}
yield docker_obj
@pytest.fixture
def dbus() -> DBus:
"""Mock DBUS."""
dbus_commands = []
async def mock_get_properties(dbus_obj, interface):
latest = dbus_obj.object_path.split("/")[-1]
fixture = interface.replace(".", "_")
if latest.isnumeric():
fixture = f"{fixture}_{latest}"
return load_json_fixture(f"{fixture}.json")
async def mock_wait_signal(_, __):
pass
async def mock_send(_, command, silent=False):
if silent:
return ""
fixture = command[6].replace("/", "_")[1:]
if command[1] == "introspect":
filetype = "xml"
if not exists_fixture(f"{fixture}.{filetype}"):
fixture = re.sub(r"_[0-9]+$", "", fixture)
# special case
if exists_fixture(f"{fixture}_*.{filetype}"):
fixture = f"{fixture}_*"
else:
fixture = f"{fixture}-{command[10].split(".")[-1]}"
filetype = "fixture"
dbus_commands.append(fixture)
return load_fixture(f"{fixture}.{filetype}")
with patch("supervisor.utils.gdbus.DBus._send", new=mock_send), patch(
"supervisor.utils.gdbus.DBus.wait_signal", new=mock_wait_signal
), patch(
"supervisor.dbus.interface.DBusInterface.is_connected",
return_value=True,
), patch(
"supervisor.utils.gdbus.DBus.get_properties", new=mock_get_properties
):
yield dbus_commands
@pytest.fixture
async def network_manager(dbus) -> NetworkManager:
"""Mock NetworkManager."""
nm_obj = NetworkManager()
nm_obj.dbus = dbus
# Init
await nm_obj.connect()
await nm_obj.update()
yield nm_obj
@pytest.fixture
async def coresys(loop, docker, network_manager, aiohttp_client) -> CoreSys:
"""Create a CoreSys Mock."""
with patch("supervisor.bootstrap.initialize_system_data"), patch(
"supervisor.bootstrap.setup_diagnostics"
), patch(
"supervisor.bootstrap.fetch_timezone",
return_value="Europe/Zurich",
), patch(
"aiohttp.ClientSession",
return_value=TestClient.session,
):
coresys_obj = await initialize_coresys()
# Mock save json
coresys_obj._ingress.save_data = MagicMock()
coresys_obj._auth.save_data = MagicMock()
coresys_obj._updater.save_data = MagicMock()
coresys_obj._config.save_data = MagicMock()
coresys_obj._jobs.save_data = MagicMock()
coresys_obj._resolution.save_data = MagicMock()
# Mock test client
coresys_obj.arch._default_arch = "amd64"
coresys_obj._machine = "qemux86-64"
coresys_obj._machine_id = uuid4()
# Mock host communication
coresys_obj._dbus._network = network_manager
# Mock docker
coresys_obj._docker = docker
# Set internet state
coresys_obj.supervisor._connectivity = True
coresys_obj.host.network._connectivity = True
# WebSocket
coresys_obj.homeassistant.api.check_api_state = mock_async_return_true
coresys_obj.homeassistant._websocket._client = AsyncMock(
ha_version=AwesomeVersion("2021.2.4")
)
yield coresys_obj
@pytest.fixture
def sys_machine():
"""Mock sys_machine."""
with patch("supervisor.coresys.CoreSys.machine", new_callable=PropertyMock) as mock:
yield mock
@pytest.fixture
def sys_supervisor():
"""Mock sys_supervisor."""
with patch(
"supervisor.coresys.CoreSys.supervisor", new_callable=PropertyMock
) as mock:
mock.return_value = MagicMock()
yield MagicMock
@pytest.fixture
async def api_client(aiohttp_client, coresys: CoreSys):
"""Fixture for RestAPI client."""
@web.middleware
    async def _security_middleware(request: web.Request, handler: web.RequestHandler):
        """Make requests appear to come from Core."""
request[REQUEST_FROM] = coresys.homeassistant
return await handler(request)
api = RestAPI(coresys)
api.webapp = web.Application(middlewares=[_security_middleware])
api.start = AsyncMock()
await api.load()
yield await aiohttp_client(api.webapp)
@pytest.fixture
def store_manager(coresys: CoreSys):
"""Fixture for the store manager."""
sm_obj = coresys.store
with patch("supervisor.store.data.StoreData.update", return_value=MagicMock()):
yield sm_obj
@pytest.fixture
def run_dir(tmp_path):
"""Fixture to inject hassio env."""
with patch("supervisor.core.RUN_SUPERVISOR_STATE") as mock_run:
tmp_state = Path(tmp_path, "supervisor")
mock_run.write_text = tmp_state.write_text
yield tmp_state
@pytest.fixture
def store_addon(coresys: CoreSys, tmp_path):
"""Store add-on fixture."""
addon_obj = AddonStore(coresys, "test_store_addon")
coresys.addons.store[addon_obj.slug] = addon_obj
coresys.store.data.addons[addon_obj.slug] = load_json_fixture("add-on.json")
yield addon_obj
@pytest.fixture
def repository(coresys: CoreSys):
"""Repository fixture."""
repository_obj = Repository(
coresys, "https://github.com/awesome-developer/awesome-repo"
)
coresys.store.repositories[repository_obj.slug] = repository_obj
yield repository_obj
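# Hedged usage sketch (illustrative only, not part of the original conftest):
# a test module in this suite would simply request the fixtures above by name
# and let pytest wire them together.
async def _example_repository_usage(coresys: CoreSys, repository: Repository):
    assert repository.slug in coresys.store.repositories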
|
"""Common test functions."""
from pathlib import Path
import re
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from uuid import uuid4
from aiohttp import web
from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest
from supervisor.api import RestAPI
from supervisor.bootstrap import initialize_coresys
from supervisor.const import REQUEST_FROM
from supervisor.coresys import CoreSys
from supervisor.dbus.network import NetworkManager
from supervisor.docker import DockerAPI
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
from supervisor.utils.gdbus import DBus
from tests.common import exists_fixture, load_fixture, load_json_fixture
# pylint: disable=redefined-outer-name, protected-access
async def mock_async_return_true() -> bool:
"""Mock methods to return True."""
return True
@pytest.fixture
def docker() -> DockerAPI:
"""Mock DockerAPI."""
images = [MagicMock(tags=["homeassistant/amd64-hassio-supervisor:latest"])]
with patch("docker.DockerClient", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.images", return_value=MagicMock()
), patch("supervisor.docker.DockerAPI.containers", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.api", return_value=MagicMock()
), patch(
"supervisor.docker.DockerAPI.images.list", return_value=images
), patch(
"supervisor.docker.DockerAPI.info",
return_value=MagicMock(),
), patch(
"supervisor.docker.DockerConfig",
return_value=MagicMock(),
):
docker_obj = DockerAPI()
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = "1.0.0"
docker_obj.config.registries = {}
yield docker_obj
@pytest.fixture
def dbus() -> DBus:
"""Mock DBUS."""
dbus_commands = []
async def mock_get_properties(dbus_obj, interface):
latest = dbus_obj.object_path.split("/")[-1]
fixture = interface.replace(".", "_")
if latest.isnumeric():
fixture = f"{fixture}_{latest}"
return load_json_fixture(f"{fixture}.json")
async def mock_wait_signal(_, __):
pass
async def mock_send(_, command, silent=False):
if silent:
return ""
fixture = command[6].replace("/", "_")[1:]
if command[1] == "introspect":
filetype = "xml"
if not exists_fixture(f"{fixture}.{filetype}"):
fixture = re.sub(r"_[0-9]+$", "", fixture)
# special case
if exists_fixture(f"{fixture}_*.{filetype}"):
fixture = f"{fixture}_*"
else:
fixture = f"{fixture}-{command[10].split('.')[-1]}"
filetype = "fixture"
dbus_commands.append(fixture)
return load_fixture(f"{fixture}.{filetype}")
with patch("supervisor.utils.gdbus.DBus._send", new=mock_send), patch(
"supervisor.utils.gdbus.DBus.wait_signal", new=mock_wait_signal
), patch(
"supervisor.dbus.interface.DBusInterface.is_connected",
return_value=True,
), patch(
"supervisor.utils.gdbus.DBus.get_properties", new=mock_get_properties
):
yield dbus_commands
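# Illustrative sketch (not part of the original conftest): how mock_send above turns a
# gdbus command into a fixture file name. It mirrors the indexing used in mock_send,
# where element 6 of the command list is the D-Bus object path; the concrete path used
# below is hypothetical.
def example_dbus_fixture_name(object_path="/org/freedesktop/NetworkManager"):
    """Derive the fixture stem the dbus fixture would look up for this object path."""
    # "/org/freedesktop/NetworkManager" -> "org_freedesktop_NetworkManager"
    return object_path.replace("/", "_")[1:]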
@pytest.fixture
async def network_manager(dbus) -> NetworkManager:
"""Mock NetworkManager."""
nm_obj = NetworkManager()
nm_obj.dbus = dbus
# Init
await nm_obj.connect()
await nm_obj.update()
yield nm_obj
@pytest.fixture
async def coresys(loop, docker, network_manager, aiohttp_client) -> CoreSys:
"""Create a CoreSys Mock."""
with patch("supervisor.bootstrap.initialize_system_data"), patch(
"supervisor.bootstrap.setup_diagnostics"
), patch(
"supervisor.bootstrap.fetch_timezone",
return_value="Europe/Zurich",
), patch(
"aiohttp.ClientSession",
return_value=TestClient.session,
):
coresys_obj = await initialize_coresys()
# Mock save json
coresys_obj._ingress.save_data = MagicMock()
coresys_obj._auth.save_data = MagicMock()
coresys_obj._updater.save_data = MagicMock()
coresys_obj._config.save_data = MagicMock()
coresys_obj._jobs.save_data = MagicMock()
coresys_obj._resolution.save_data = MagicMock()
# Mock test client
coresys_obj.arch._default_arch = "amd64"
coresys_obj._machine = "qemux86-64"
coresys_obj._machine_id = uuid4()
# Mock host communication
coresys_obj._dbus._network = network_manager
# Mock docker
coresys_obj._docker = docker
# Set internet state
coresys_obj.supervisor._connectivity = True
coresys_obj.host.network._connectivity = True
# WebSocket
coresys_obj.homeassistant.api.check_api_state = mock_async_return_true
coresys_obj.homeassistant._websocket._client = AsyncMock(
ha_version=AwesomeVersion("2021.2.4")
)
yield coresys_obj
@pytest.fixture
def sys_machine():
"""Mock sys_machine."""
with patch("supervisor.coresys.CoreSys.machine", new_callable=PropertyMock) as mock:
yield mock
@pytest.fixture
def sys_supervisor():
"""Mock sys_supervisor."""
with patch(
"supervisor.coresys.CoreSys.supervisor", new_callable=PropertyMock
) as mock:
mock.return_value = MagicMock()
yield MagicMock
@pytest.fixture
async def api_client(aiohttp_client, coresys: CoreSys):
"""Fixture for RestAPI client."""
@web.middleware
async def _security_middleware(request: web.Request, handler: web.RequestHandler):
"""Make request are from Core."""
request[REQUEST_FROM] = coresys.homeassistant
return await handler(request)
api = RestAPI(coresys)
api.webapp = web.Application(middlewares=[_security_middleware])
api.start = AsyncMock()
await api.load()
yield await aiohttp_client(api.webapp)
@pytest.fixture
def store_manager(coresys: CoreSys):
"""Fixture for the store manager."""
sm_obj = coresys.store
with patch("supervisor.store.data.StoreData.update", return_value=MagicMock()):
yield sm_obj
@pytest.fixture
def run_dir(tmp_path):
"""Fixture to inject hassio env."""
with patch("supervisor.core.RUN_SUPERVISOR_STATE") as mock_run:
tmp_state = Path(tmp_path, "supervisor")
mock_run.write_text = tmp_state.write_text
yield tmp_state
@pytest.fixture
def store_addon(coresys: CoreSys, tmp_path):
"""Store add-on fixture."""
addon_obj = AddonStore(coresys, "test_store_addon")
coresys.addons.store[addon_obj.slug] = addon_obj
coresys.store.data.addons[addon_obj.slug] = load_json_fixture("add-on.json")
yield addon_obj
@pytest.fixture
def repository(coresys: CoreSys):
"""Repository fixture."""
repository_obj = Repository(
coresys, "https://github.com/awesome-developer/awesome-repo"
)
coresys.store.repositories[repository_obj.slug] = repository_obj
yield repository_obj
|
# -*- coding: utf-8 -*-
"""
Functions for model training and evaluation (single-partner and multi-partner cases)
"""
import operator
import os
from abc import ABC, abstractmethod
from copy import deepcopy
from timeit import default_timer as timer
import numpy as np
import random
import tensorflow as tf
from loguru import logger
from sklearn.metrics import confusion_matrix
from tensorflow.keras import Input, Model
from tensorflow.keras.backend import clear_session
from tensorflow.keras.callbacks import EarlyStopping
from .utils import History
from ..utils import project_onto_the_simplex
from .. import constants
from ..models import NoiseAdaptationChannel, EnsemblePredictionsModel
from ..partner import Partner, PartnerMpl
ALLOWED_PARAMETERS = ('partners_list',
'epoch_count',
'minibatch_count',
'dataset',
'aggregation',
'is_early_stopping',
'is_save_data',
'save_folder',
'init_model_from',
'use_saved_weights')
class MultiPartnerLearning(ABC):
name = 'abstract'
def __init__(self, scenario, **kwargs):
"""
:type scenario: Scenario
"""
# Attributes related to the data and the model
self.dataset = scenario.dataset
self.partners_list = scenario.partners_list
self.init_model_from = scenario.init_model_from
self.use_saved_weights = scenario.use_saved_weights
self.amounts_per_partner = scenario.amounts_per_partner
self.val_set = scenario.val_set
self.test_set = scenario.test_set
# Attributes related to iterating at different levels
self.epoch_count = scenario.epoch_count
self.minibatch_count = scenario.minibatch_count
self.is_early_stopping = scenario.is_early_stopping
# Attributes to store results
self.save_folder = scenario.save_folder
# Erase the default parameters (which mostly come from the scenario) if some parameters have been specified
self.__dict__.update((k, v) for k, v in kwargs.items() if k in ALLOWED_PARAMETERS)
# Unpack dataset-related parameters
self.val_data = (self.dataset.x_val, self.dataset.y_val)
self.test_data = (self.dataset.x_test, self.dataset.y_test)
self.dataset_name = self.dataset.name
self.generate_new_model = self.dataset.generate_new_model
# Initialize the model
model = self.init_model()
self.model_weights = model.get_weights()
self.metrics_names = self.dataset.model_metrics_names
# Initialize iterators
self.epoch_index = 0
self.minibatch_index = 0
self.learning_computation_time = 0
# Convert partners to Mpl partners
for partner in self.partners_list:
assert isinstance(partner, Partner)
partners_list = sorted(self.partners_list, key=operator.attrgetter("id"))
logger.info(
f"## Preparation of model's training on partners with ids: {["#" + str(p.id) for p in partners_list]}")
self.partners_list = [PartnerMpl(partner, self) for partner in self.partners_list]
# Attributes related to the aggregation approach
self.aggregator = self.init_aggregation_function(scenario.aggregation)
# Initialize History
self.history = History(self)
# Initialize result folder
if self.save_folder is not None:
if 'custom_name' in kwargs:
self.save_folder = self.save_folder / kwargs["custom_name"]
else:
self.save_folder = self.save_folder / 'multi_partner_learning'
self.save_folder.mkdir(parents=True, exist_ok=False)
logger.debug("MultiPartnerLearning object instantiated.")
def __str__(self):
return f'{self.name}'
@property
def partners_count(self):
return len(self.partners_list)
def init_aggregation_function(self, aggregator):
return aggregator(self)
def build_model(self):
return self.build_model_from_weights(self.model_weights)
def build_model_from_weights(self, new_weights):
"""Generate a new model initialized with weights passed as arguments"""
new_model = self.generate_new_model()
new_model.set_weights(new_weights)
return new_model
def init_model(self):
new_model = self.generate_new_model()
if self.use_saved_weights:
logger.info("Init model with previous coalition model")
new_model.load_weights(self.init_model_from)
else:
logger.info("Init new model")
return new_model
def save_final_model(self):
"""Save final model weights"""
model_folder = os.path.join(self.save_folder, 'model')
if not os.path.isdir(model_folder):
os.makedirs(model_folder)
np.save(os.path.join(model_folder, self.dataset_name + '_final_weights.npy'), self.model_weights)
model_to_save = self.build_model()
model_to_save.save_weights(os.path.join(model_folder, self.dataset_name + '_final_weights.h5'))
def save_data(self):
if self.save_folder is None:
raise ValueError("The path to the save folder is None, history data cannot be saved, nor model weights")
self.save_final_model()
self.history.save_data()
def log_partner_perf(self, partner_id, partner_index, history):
for key_history in self.history.metrics:
self.history.history[partner_id][key_history][self.epoch_index,
self.minibatch_index] = history[key_history][-1]
epoch_nb_str = f"Epoch {str(self.epoch_index).zfill(2)}/{str(self.epoch_count - 1).zfill(2)}"
mb_nb_str = f"Minibatch {str(self.minibatch_index).zfill(2)}/{str(self.minibatch_count - 1).zfill(2)}"
partner_id_str = f"Partner partner_id #{partner_id} ({partner_index}/{self.partners_count - 1})"
val_acc_str = f"{round(history["val_accuracy"][-1], 2)}"
logger.debug(f"{epoch_nb_str} > {mb_nb_str} > {partner_id_str} > val_acc: {val_acc_str}")
def eval_and_log_model_val_perf(self):
model = self.build_model()
if self.val_set == 'global':
hist = model.evaluate(self.val_data[0],
self.val_data[1],
batch_size=constants.DEFAULT_BATCH_SIZE,
verbose=0,
)
elif self.val_set == 'local':
hist = [0.0, 0.0]
for p in self.partners_list:
hist_partner = model.evaluate(p.x_val,
p.y_val,
batch_size=constants.DEFAULT_BATCH_SIZE,
verbose=0,
)
hist[0] += hist_partner[0] / self.partners_count
hist[1] += hist_partner[1] / self.partners_count
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.history.history['mpl_model']['val_loss'][self.epoch_index, self.minibatch_index] = hist[0]
self.history.history['mpl_model']['val_accuracy'][self.epoch_index, self.minibatch_index] = hist[1]
if self.minibatch_index >= self.minibatch_count - 1:
epoch_nb_str = f"{str(self.epoch_index).zfill(2)}/{str(self.epoch_count - 1).zfill(2)}"
logger.info(f" Model evaluation at the end of the epoch "
f"{epoch_nb_str}: "
f"{["%.3f" % elem for elem in hist]}")
def eval_and_log_final_model_test_perf(self):
logger.info("### Evaluating model on test data:")
model = self.build_model()
if self.test_set == 'global':
hist = model.evaluate(self.test_data[0],
self.test_data[1],
batch_size=constants.DEFAULT_BATCH_SIZE,
verbose=0,
)
elif self.test_set == 'local':
hist = [0.0, 0.0]
for p in self.partners_list:
hist_partner = model.evaluate(p.x_test,
p.y_test,
batch_size=constants.DEFAULT_BATCH_SIZE,
verbose=0,
)
hist[0] += hist_partner[0] / self.partners_count
hist[1] += hist_partner[1] / self.partners_count
else:
raise ValueError("test set should be 'local' or 'global', not {self.val_set}")
self.history.score = hist[1]
self.history.nb_epochs_done = self.epoch_index + 1
logger.info(f" Model metrics names: {self.metrics_names}")
logger.info(f" Model metrics values: {["%.3f" % elem for elem in hist]}")
def split_in_minibatches(self):
"""Split the dataset passed as argument in mini-batches"""
for partner in self.partners_list:
partner.split_minibatches()
def early_stop(self):
logger.debug(" Checking if early stopping criteria are met:")
if self.is_early_stopping:
# Early stopping parameters
if (
self.epoch_index >= constants.PATIENCE
and self.history.history['mpl_model']['val_loss'][self.epoch_index,
self.minibatch_index] >
self.history.history['mpl_model']['val_loss'][self.epoch_index - constants.PATIENCE,
self.minibatch_index]
):
logger.debug(" -> Early stopping criteria are met, stopping here.")
return True
else:
logger.debug(" -> Early stopping criteria are not met, continuing with training.")
else:
return False
def fit(self):
"""Return the score on test data of a final aggregated model trained in a federated way on each partner"""
start = timer()
# Train model (iterate for each epoch and mini-batch)
while self.epoch_index < self.epoch_count:
self.fit_epoch() # perform an epoch on the self.model
if self.early_stop():
break
self.epoch_index += 1
# After last epoch or if early stopping was triggered, evaluate model on the global testset
self.eval_and_log_final_model_test_perf()
end = timer()
self.learning_computation_time = end - start
logger.info(f"Training and evaluation on multiple partners: "
f"done. ({np.round(self.learning_computation_time, 3)} seconds)")
if self.save_folder is not None:
self.save_data() # Save the model weights and the history data
@abstractmethod
def fit_epoch(self):
while self.minibatch_index < self.minibatch_count:
self.fit_minibatch()
self.minibatch_index += 1
self.eval_and_log_model_val_perf()
@abstractmethod
def fit_minibatch(self):
pass
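# Illustrative sketch (not part of the original module): driving one of the concrete
# subclasses defined below. `scenario` is assumed to be a fully prepared Scenario
# object; the keyword arguments are optional overrides handled through
# ALLOWED_PARAMETERS in MultiPartnerLearning.__init__, and the values shown are
# arbitrary.
def example_run_federated_averaging(scenario):
    """Train a coalition with FederatedAverageLearning and return its test score."""
    mpl = FederatedAverageLearning(scenario, epoch_count=5, minibatch_count=3)
    mpl.fit()  # trains, evaluates on the test set and (optionally) saves results
    return mpl.history.score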
class SinglePartnerLearning(MultiPartnerLearning):
name = 'Single Partner learning'
def __init__(self, scenario, **kwargs):
super(SinglePartnerLearning, self).__init__(scenario, **kwargs)
if self.partners_count != 1:
raise ValueError('More than one partner is provided')
self.partner = self.partners_list[0]
def fit(self):
"""Return the score on test data of a model trained on a single partner"""
start = timer()
logger.info(f"## Training and evaluating model on partner with partner_id #{self.partner.id}")
# Set up early stopping if needed
cb = []
es = None
if self.is_early_stopping:
es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=constants.PATIENCE)
cb.append(es)
# Train model
logger.info(" Training model...")
model = self.build_model()
if self.val_set == 'global':
history = model.fit(self.partner.x_train,
self.partner.y_train,
batch_size=self.partner.batch_size,
epochs=self.epoch_count,
verbose=0,
validation_data=self.val_data,
callbacks=cb)
elif self.val_set == 'local':
history = model.fit(self.partner.x_train,
self.partner.y_train,
batch_size=self.partner.batch_size,
epochs=self.epoch_count,
verbose=0,
validation_data=(self.partner.x_val, self.partner.y_val),
callbacks=cb)
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.model_weights = model.get_weights()
self.log_partner_perf(self.partner.id, 0, history.history)
del self.history.history['mpl_model']
# Evaluate trained model on test data
self.eval_and_log_final_model_test_perf()
self.history.nb_epochs_done = (es.stopped_epoch + 1) if es is not None and es.stopped_epoch != 0 else self.epoch_count
end = timer()
self.learning_computation_time = end - start
def fit_epoch(self):
pass
def fit_minibatch(self):
pass
class FederatedAverageLearning(MultiPartnerLearning):
name = 'Federated averaging'
def __init__(self, scenario, **kwargs):
# First, if only one partner, fall back to dedicated single partner function
super(FederatedAverageLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
def fit_epoch(self):
# Clear Keras' old models
clear_session()
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
self.minibatch_index = i
self.fit_minibatch()
# At the end of each minibatch, aggregate the models
self.model_weights = self.aggregator.aggregate_model_weights()
self.minibatch_index = 0
def fit_minibatch(self):
"""Proceed to a collaborative round with a federated averaging approach"""
logger.debug("Start new fedavg collaborative round ...")
# Starting model for each partner is the aggregated model from the previous mini-batch iteration
logger.info(f"(fedavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
# Evaluate and store accuracy of mini-batch start model
self.eval_and_log_model_val_perf()
# Iterate over partners for training each individual model
for partner_index, partner in enumerate(self.partners_list):
# Reference the partner's model
partner_model = partner.build_model()
# Train on partner local data set
if self.val_set == 'global':
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index],
batch_size=partner.batch_size,
verbose=0,
validation_data=self.val_data)
elif self.val_set == 'local':
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index],
batch_size=partner.batch_size,
verbose=0,
validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
# Log results of the round
self.log_partner_perf(partner.id, partner_index, history.history)
# Update the partner's model in the models' list
partner.model_weights = partner_model.get_weights()
logger.debug("End of fedavg collaborative round.")
class DistributionallyRobustFederatedAveragingLearning(MultiPartnerLearning):
"""
- This class implements the Distributionally Robust Federated Averaging (DRFA) algorithm:
in each collaborative learning round, only a subset of partners is chosen to participate,
based on a global mixing parameter called lambda
- Lambda is updated at the end of each collaborative learning round using its own update rule
- DRFA is considered a framework under which we can implement other FL algorithms such as FedAvg
- Link to the paper : https://arxiv.org/abs/2102.12660
"""
name = "Distributionally Robust Federated Averaging"
def __init__(self, scenario, **kwargs):
super(DistributionallyRobustFederatedAveragingLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
self.active_partners_count = scenario.active_partners_count
self.lambda_vector = self.init_lambda()
self.active_partners_list = list()
self.update_active_partners_list()
self.local_steps = scenario.gradient_updates_per_pass_count
self.partners_training_data = {}
self.partners_participation = self.initialize_participation_dict()
self.lambda_learning_rate = 8e-3
self.local_steps_index = 0
self.local_steps_index_t = 0
self.global_model_at_index_t = None
self.model_weights_at_index_t = list()
self.loss_for_model_at_index_t = np.zeros(self.partners_count)
self.subset_u_partners = list()
self.loss_vector_v = list()
def fit_epoch(self):
# Split the train dataset in mini-batches
self.split_in_minibatches()
# convert partners training data into tf Dataset, reference: fast_mpl
for partner_id, partner in enumerate(self.partners_list):
self.partners_training_data[partner.id] = list()
for minibatch_index in range(self.minibatch_count):
# convert training data
data_train = tf.data.Dataset.from_tensor_slices((partner.minibatched_x_train[minibatch_index],
partner.minibatched_y_train[minibatch_index]))
data_train = data_train.shuffle(len(partner.minibatched_x_train[minibatch_index]))
data_train = data_train.batch(partner.batch_size)
data_train = data_train.prefetch(1)
self.partners_training_data[partner.id].append(data_train)
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
self.minibatch_index = i
self.local_steps_index = 0
self.local_steps_index_t = np.random.randint(0, self.local_steps - 1)
logger.info(
f"Active partner in this round "
f"{["#"+str(active_partner.id) for active_partner in self.active_partners_list]} "
f"according to lambda vector > {self.lambda_vector}")
logger.info(f"Local step index t > {self.local_steps_index_t}")
self.fit_minibatch()
# update partner participations
self.partners_participation[self.epoch_index][self.minibatch_index][[p.id for p
in self.active_partners_list]] = 1
self.update_lambda()
self.update_active_partners_list()
self.log_partners_participation_rate()
self.minibatch_index = 0
def fit_minibatch(self):
"""Proceed to a collaborative round with a distributionally robust federated averaging approach"""
# Starting model for each partner is the aggregated model from the previous mini-batch iteration
logger.info(f"(drfa) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
# Evaluate and store accuracy of mini-batch start model
self.eval_and_log_model_val_perf()
# Iterate over partners for training
for partner_index, partner in enumerate(self.active_partners_list):
partner_model = partner.build_model()
# loop through each partner's minibatch
minibatched_x_y = self.partners_training_data[partner.id][self.minibatch_index]
for idx, batch_x_y in enumerate(minibatched_x_y):
with tf.GradientTape() as tape:
p_pred = partner_model(batch_x_y[0])
loss = partner_model.compiled_loss(batch_x_y[1], p_pred)
partner_model.optimizer.minimize(loss, partner_model.trainable_weights, tape=tape)
self.local_steps_index += 1
if self.local_steps_index == self.local_steps_index_t:
# save model weights for each partner at local step t
self.model_weights_at_index_t.append(partner.model_weights)
partner.model_weights = partner_model.get_weights()
self.local_steps_index = 0
# aggregate final global model weights
self.model_weights = self.aggregate_model_weights(self.active_partners_list)
# build the model for each partner using weights gathered at index t
for active_partner, weights_t in zip(self.active_partners_list, self.model_weights_at_index_t):
active_partner.model_weights = weights_t
# aggregate global model weights at index t
self.global_model_at_index_t = self.aggregate_model_weights(self.active_partners_list)
# sample a new subset of partners of size active_partners_count
subset_index = random.sample(range(self.partners_count), self.active_partners_count)
self.subset_u_partners = [self.partners_list[index] for index in subset_index]
logger.info(
f"Subset of partners chosen for lambda update "
f"{["#"+ str(partner.id) for partner in self.subset_u_partners]}")
# compute losses over a random batch using the global model at index t
for partner, index in zip(self.subset_u_partners, subset_index):
random_minibatch_index = np.random.randint(0, self.minibatch_count - 1)
random_minibatch = self.partners_training_data[partner.id][random_minibatch_index]
random_batch_index = np.random.randint(0, len(random_minibatch) - 1)
random_batch = list(random_minibatch)[random_batch_index]
partner_model = self.build_model_from_weights(self.global_model_at_index_t)
loss = partner_model.compiled_loss(random_batch[1], partner_model(random_batch[0]))
# compute (n/m)*loss and add it to the loss vector
# n is the total number of partners, m is the number of active partners
self.loss_for_model_at_index_t[index] = \
((self.partners_count / self.active_partners_count) * np.mean(loss.numpy()))
def init_lambda(self):
"""
- initialize lambda vector according to each partner's dataset size
- this is a probability vector of size partners_count
"""
return np.array(self.amounts_per_partner)
def update_lambda(self):
"""
The update rule for lambda is : lambda_vector(i) =
Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))
"""
self.lambda_vector += (self.local_steps_index_t * self.lambda_learning_rate * self.loss_for_model_at_index_t)
self.lambda_vector = project_onto_the_simplex(self.lambda_vector)
# The projection can produce zero probabilities for certain partners which prevents them from
# participating in the training. To avoid this, we assign 1e-3 to each probability smaller than this value.
if any(self.lambda_vector < 1e-3):
self.lambda_vector[self.lambda_vector < 1e-3] = 1e-3
# normalize the probability vector
self.lambda_vector = self.lambda_vector / np.sum(self.lambda_vector)
def update_active_partners_list(self):
"""
Update the active partners list according to lambda vector
"""
active_partners_indices = (-self.lambda_vector).argsort()[:self.active_partners_count]
self.active_partners_list = [self.partners_list[index] for index in active_partners_indices]
def initialize_participation_dict(self):
participation = {}
for epoch_index in range(self.epoch_count):
participation[epoch_index] = {}
for minibatch_index in range(self.minibatch_count):
participation[epoch_index][minibatch_index] = np.zeros(self.partners_count)
return participation
def log_partners_participation_rate(self):
epoch_participation_vector = np.zeros(self.partners_count)
percentages = []
for minibatch_index, vect in self.partners_participation[self.epoch_index].items():
epoch_participation_vector += vect
percentages = [str(np.round(p_v / self.minibatch_count, 2) * 100) + ' %'
for p_v in list(epoch_participation_vector)]
logger.info(f"Partners {["#" + str(p.id) for p in self.partners_list]} "
f"have the following participation rates, respectively : "
f"{percentages} "
f"at the end of Epoch > {self.epoch_index}")
final_participation_vector = np.zeros(self.partners_count)
if self.epoch_index == self.epoch_count - 1:
for epoch_index in range(self.epoch_count):
for minibatch_index, vect in self.partners_participation[epoch_index].items():
final_participation_vector += vect
percentages = [str(np.round(f_p_v / (self.minibatch_count * self.epoch_count), 2) * 100) + '%'
for f_p_v in list(final_participation_vector)]
logger.info(f"Partners {["#" + str(p.id) for p in self.partners_list]} "
f"have the following participation rates : "
f"{percentages} "
f"during the training")
@staticmethod
def aggregate_model_weights(partners_list):
""" This method is identical to the one in the aggregator class with few modifications.
I couldn't use the original aggregator method since it operates on the entire list of partners and
DRFA requires model aggregation over a subset of partners list only
"""
aggregation_weights = np.ones(len(partners_list), dtype='float32')
weights_per_layer = list(zip(*[partner.model_weights for partner in partners_list]))
new_weights = list()
for weights_for_layer in weights_per_layer:
avg_weights_for_layer = np.average(
np.array(weights_for_layer), axis=0, weights=aggregation_weights
)
new_weights.append(avg_weights_for_layer)
return new_weights
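# Illustrative sketch (not part of the original module): one DRFA lambda update on a
# toy three-partner coalition, following update_lambda above. The clip-and-renormalise
# step below is only a stand-in for project_onto_the_simplex (imported from ..utils),
# whose exact implementation is not reproduced here; all numbers are arbitrary.
def example_drfa_lambda_update():
    """Return an updated, renormalised lambda vector for a toy coalition."""
    lambda_vector = np.array([0.5, 0.3, 0.2])   # current mixing probabilities
    losses_at_t = np.array([0.9, 1.4, 0.4])     # per-partner losses at local step t
    local_step_t, learning_rate = 3, 8e-3
    lambda_vector = lambda_vector + local_step_t * learning_rate * losses_at_t
    lambda_vector = np.clip(lambda_vector, 1e-3, None)  # stand-in for projection + flooring
    return lambda_vector / lambda_vector.sum()          # back to a probability vector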
class SequentialLearning(MultiPartnerLearning): # seq-pure
name = 'Sequential learning'
def __init__(self, scenario, **kwargs):
super(SequentialLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
def fit_epoch(self):
# Clear Keras' old models
clear_session()
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
self.minibatch_index = i
logger.info(f"(seq-pure) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}")
self.fit_minibatch()
def fit_minibatch(self):
"""Proceed to a collaborative round with a sequential averaging approach"""
logger.debug("Start new seq collaborative round ...")
model_for_round = self.build_model()
# Evaluate and store accuracy of mini-batch start model
self.eval_and_log_model_val_perf()
# Iterate over partners for training each individual model
shuffled_indexes = np.random.permutation(self.partners_count)
logger.debug(f"(seq) Shuffled order for this seqavg collaborative round: {shuffled_indexes}")
for idx, partner_index in enumerate(shuffled_indexes):
partner = self.partners_list[partner_index]
# Train on partner local data set
if self.val_set == 'global':
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index],
batch_size=partner.batch_size,
verbose=0,
validation_data=self.val_data)
elif self.val_set == 'local':
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index],
batch_size=partner.batch_size,
verbose=0,
validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
# Log results
self.log_partner_perf(partner.id, idx, history.history)
# Save the partner's model in the models' list
partner.model_weights = model_for_round.get_weights()
self.model_weights = model_for_round.get_weights()
logger.debug("End of seq collaborative round.")
class SequentialWithFinalAggLearning(SequentialLearning):
name = 'Sequential learning with final aggregation'
def __init__(self, scenario, **kwargs):
super(SequentialWithFinalAggLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
def fit_epoch(self):
# Clear Keras' old models
clear_session()
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
logger.info(f"(seq-final-agg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init model with a copy of the global model")
self.minibatch_index = i
self.fit_minibatch()
# At the end of each epoch, aggregate the models
self.model_weights = self.aggregator.aggregate_model_weights()
class SequentialAverageLearning(SequentialLearning):
name = 'Sequential averaged learning'
def __init__(self, scenario, **kwargs):
super(SequentialAverageLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
def fit_epoch(self):
# Clear Keras' old models
clear_session()
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
logger.info(f"(seqavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init model with a copy of the global model")
self.minibatch_index = i
self.fit_minibatch()
# At the end of each minibatch, aggregate the models
self.model_weights = self.aggregator.aggregate_model_weights()
class FedAvgSmodel(FederatedAverageLearning):
name = 'Federated learning with label flipping'
def __init__(self, scenario, pretrain_epochs=0, epsilon=0.5, **kwargs):
super(FedAvgSmodel, self).__init__(scenario, **kwargs)
self.pretrain_epochs = pretrain_epochs
self.epsilon = epsilon
if pretrain_epochs > 0:
self.pretrain_mpl = FederatedAverageLearning(scenario=scenario,
epoch_count=self.pretrain_epochs,
is_save_data=False)
def fit(self):
if self.pretrain_epochs > 0:
logger.info('Start pre-train...')
self.pretrain_mpl.fit()
pretrain_model = self.pretrain_mpl.build_model()
for p in self.partners_list:
confusion = confusion_matrix(np.argmax(p.y_train, axis=1),
np.argmax(pretrain_model.predict(p.x_train), axis=1),
normalize='pred')
p.noise_layer_weights = [np.log(confusion.T + 1e-8)]
self.model_weights[:-1] = self.pretrain_mpl.model_weights[:-1]
else:
for p in self.partners_list:
confusion = np.identity(10) * (1 - self.epsilon) + (self.epsilon / 10)
p.noise_layer_weights = [np.log(confusion + 1e-8)]
super(FedAvgSmodel, self).fit()
def fit_minibatch(self):
"""Proceed to a collaborative round with a S-Model federated averaging approach"""
logger.debug("Start new S-Model collaborative round ...")
# Starting model for each partner is the aggregated model from the previous mini-batch iteration
logger.info(f"(S-Model) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
# Evaluate and store accuracy of mini-batch start model
self.eval_and_log_model_val_perf()
# Iterate over partners for training each individual model
for partner_index, partner in enumerate(self.partners_list):
# Reference the partner's model
partner_model = partner.build_model()
x_batch = partner.minibatched_x_train[self.minibatch_index]
y_batch = partner.minibatched_y_train[self.minibatch_index]
model_input = Input(shape=self.dataset.input_shape)
x = partner_model(model_input)
outputs = NoiseAdaptationChannel(weights=partner.noise_layer_weights, name='s-model')(x)
full_model = Model(inputs=model_input, outputs=outputs, name=f"full_model_partner_{partner_index}")
full_model.compile(
loss=partner_model.loss,
optimizer=partner_model.optimizer,
metrics='accuracy',
)
# Train on partner local data set
history = full_model.fit(x_batch,
y_batch,
batch_size=partner.batch_size,
verbose=0,
validation_data=self.val_data)
# Log results of the round
self.log_partner_perf(partner.id, partner_index, history.history)
# Update the partner's model in the models' list
partner.noise_layer_weights = full_model.get_layer('s-model').get_weights()
partner.model_weights = partner_model.get_weights()
logger.debug("End of S-Model collaborative round.")
class FederatedGradients(MultiPartnerLearning):
def __init__(self, scenario, **kwargs):
super(FederatedGradients, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
self.model = self.build_model()
def fit_epoch(self):
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
self.minibatch_index = i
self.fit_minibatch()
self.minibatch_index = 0
def fit_minibatch(self):
"""Proceed to a collaborative round with a federated averaging approach"""
logger.debug("Start new gradients fusion collaborative round ...")
# Starting model for each partner is the aggregated model from the previous mini-batch iteration
logger.info(f"(gradient fusion) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init each partner's models with a copy of the global model")
for partner in self.partners_list:
# Evaluate and store accuracy of mini-batch start model
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
# Iterate over partners for training each individual model
for partner_index, partner in enumerate(self.partners_list):
with tf.GradientTape() as tape:
loss = self.model.loss(partner.minibatched_y_train[self.minibatch_index],
self.model(partner.minibatched_x_train[self.minibatch_index]))
partner.grads = tape.gradient(loss, self.model.trainable_weights)
global_grad = self.aggregator.aggregate_gradients()
self.model.optimizer.apply_gradients(zip(global_grad, self.model.trainable_weights))
self.model_weights = self.model.get_weights()
for partner_index, partner in enumerate(self.partners_list):
val_history = self.model.evaluate(self.val_data[0], self.val_data[1], verbose=False)
history = self.model.evaluate(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index], verbose=False)
history = {
"loss": [history[0]],
'accuracy': [history[1]],
'val_loss': [val_history[0]],
'val_accuracy': [val_history[1]]
}
# Log results of the round
self.log_partner_perf(partner.id, partner_index, history)
logger.debug("End of grads-fusion collaborative round.")
class EnsemblePredictions(MultiPartnerLearning):
"""
Ensemble (average) prediction of several input models
This approach can only be used with the EnsemblePredictionsModel
"""
def __init__(self, scenario, **kwargs):
super(EnsemblePredictions, self).__init__(scenario, **kwargs)
# First, if only one partner, fall back to dedicated single partner function
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
partner_model_list = [self.dataset.generate_new_model() for _ in range(self.partners_count)]
self.model = EnsemblePredictionsModel(partner_model_list)
for partner in self.partners_list:
partner.model_weights = deepcopy(self.model_weights)
logger.debug(f"Partner model weights object id: {id(partner.model_weights)}")
logger.info("Init EnsemblePredictionsModel model")
def build_model(self):
partner_model_list = [partner.build_model() for partner in self.partners_list]
return EnsemblePredictionsModel(partner_model_list)
def fit_epoch(self):
# Clear Keras' old models
clear_session()
self.eval_and_log_model_val_perf()
for partner_index, partner in enumerate(self.partners_list):
partner_model = partner.build_model()
# Train on partner local data set
history = partner_model.fit(partner.x_train,
partner.y_train,
batch_size=partner.batch_size,
verbose=0,
validation_data=self.val_data)
# Log results of the round
self.log_partner_perf(partner.id, partner_index, history.history)
# Update the partner's model in the models' list
partner.model_weights = partner_model.get_weights()
def fit_minibatch(self):
pass
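# Illustrative sketch (not part of the original module): the kind of prediction
# averaging an EnsemblePredictionsModel is expected to perform. The actual class is
# imported from ..models and its internals are not reproduced here.
def example_ensemble_predict(models, x):
    """Average the per-model predictions (e.g. softmax outputs) over the ensemble."""
    return np.mean([model.predict(x) for model in models], axis=0)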
|
# -*- coding: utf-8 -*-
"""
Functions for model training and evaluation (single-partner and multi-partner cases)
"""
import operator
import os
from abc import ABC, abstractmethod
from copy import deepcopy
from timeit import default_timer as timer
import numpy as np
import random
import tensorflow as tf
from loguru import logger
from sklearn.metrics import confusion_matrix
from tensorflow.keras import Input, Model
from tensorflow.keras.backend import clear_session
from tensorflow.keras.callbacks import EarlyStopping
from .utils import History
from ..utils import project_onto_the_simplex
from .. import constants
from ..models import NoiseAdaptationChannel, EnsemblePredictionsModel
from ..partner import Partner, PartnerMpl
ALLOWED_PARAMETERS = ('partners_list',
'epoch_count',
'minibatch_count',
'dataset',
'aggregation',
'is_early_stopping',
'is_save_data',
'save_folder',
'init_model_from',
'use_saved_weights')
class MultiPartnerLearning(ABC):
name = 'abstract'
def __init__(self, scenario, **kwargs):
"""
:type scenario: Scenario
"""
# Attributes related to the data and the model
self.dataset = scenario.dataset
self.partners_list = scenario.partners_list
self.init_model_from = scenario.init_model_from
self.use_saved_weights = scenario.use_saved_weights
self.amounts_per_partner = scenario.amounts_per_partner
self.val_set = scenario.val_set
self.test_set = scenario.test_set
# Attributes related to iterating at different levels
self.epoch_count = scenario.epoch_count
self.minibatch_count = scenario.minibatch_count
self.is_early_stopping = scenario.is_early_stopping
# Attributes to store results
self.save_folder = scenario.save_folder
# Erase the default parameters (which mostly come from the scenario) if some parameters have been specified
self.__dict__.update((k, v) for k, v in kwargs.items() if k in ALLOWED_PARAMETERS)
# Unpack dataset-related parameters
self.val_data = (self.dataset.x_val, self.dataset.y_val)
self.test_data = (self.dataset.x_test, self.dataset.y_test)
self.dataset_name = self.dataset.name
self.generate_new_model = self.dataset.generate_new_model
# Initialize the model
model = self.init_model()
self.model_weights = model.get_weights()
self.metrics_names = self.dataset.model_metrics_names
# Initialize iterators
self.epoch_index = 0
self.minibatch_index = 0
self.learning_computation_time = 0
# Convert partners to Mpl partners
for partner in self.partners_list:
assert isinstance(partner, Partner)
partners_list = sorted(self.partners_list, key=operator.attrgetter("id"))
logger.info(
f"## Preparation of model's training on partners with ids: {['#' + str(p.id) for p in partners_list]}")
self.partners_list = [PartnerMpl(partner, self) for partner in self.partners_list]
# Attributes related to the aggregation approach
self.aggregator = self.init_aggregation_function(scenario.aggregation)
# Initialize History
self.history = History(self)
# Initialize result folder
if self.save_folder is not None:
if 'custom_name' in kwargs:
self.save_folder = self.save_folder / kwargs["custom_name"]
else:
self.save_folder = self.save_folder / 'multi_partner_learning'
self.save_folder.mkdir(parents=True, exist_ok=False)
logger.debug("MultiPartnerLearning object instantiated.")
def __str__(self):
return f'{self.name}'
@property
def partners_count(self):
return len(self.partners_list)
def init_aggregation_function(self, aggregator):
return aggregator(self)
def build_model(self):
return self.build_model_from_weights(self.model_weights)
def build_model_from_weights(self, new_weights):
"""Generate a new model initialized with weights passed as arguments"""
new_model = self.generate_new_model()
new_model.set_weights(new_weights)
return new_model
def init_model(self):
new_model = self.generate_new_model()
if self.use_saved_weights:
logger.info("Init model with previous coalition model")
new_model.load_weights(self.init_model_from)
else:
logger.info("Init new model")
return new_model
def save_final_model(self):
"""Save final model weights"""
model_folder = os.path.join(self.save_folder, 'model')
if not os.path.isdir(model_folder):
os.makedirs(model_folder)
np.save(os.path.join(model_folder, self.dataset_name + '_final_weights.npy'), self.model_weights)
model_to_save = self.build_model()
model_to_save.save_weights(os.path.join(model_folder, self.dataset_name + '_final_weights.h5'))
def save_data(self):
if self.save_folder is None:
raise ValueError("The path to the save folder is None, history data cannot be saved, nor model weights")
self.save_final_model()
self.history.save_data()
def log_partner_perf(self, partner_id, partner_index, history):
for key_history in self.history.metrics:
self.history.history[partner_id][key_history][self.epoch_index,
self.minibatch_index] = history[key_history][-1]
epoch_nb_str = f"Epoch {str(self.epoch_index).zfill(2)}/{str(self.epoch_count - 1).zfill(2)}"
mb_nb_str = f"Minibatch {str(self.minibatch_index).zfill(2)}/{str(self.minibatch_count - 1).zfill(2)}"
partner_id_str = f"Partner partner_id #{partner_id} ({partner_index}/{self.partners_count - 1})"
val_acc_str = f"{round(history['val_accuracy'][-1], 2)}"
logger.debug(f"{epoch_nb_str} > {mb_nb_str} > {partner_id_str} > val_acc: {val_acc_str}")
def eval_and_log_model_val_perf(self):
model = self.build_model()
if self.val_set == 'global':
hist = model.evaluate(self.val_data[0],
self.val_data[1],
batch_size=constants.DEFAULT_BATCH_SIZE,
verbose=0,
)
elif self.val_set == 'local':
hist = [0.0, 0.0]
for p in self.partners_list:
hist_partner = model.evaluate(p.x_val,
p.y_val,
batch_size=constants.DEFAULT_BATCH_SIZE,
verbose=0,
)
hist[0] += hist_partner[0] / self.partners_count
hist[1] += hist_partner[1] / self.partners_count
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.history.history['mpl_model']['val_loss'][self.epoch_index, self.minibatch_index] = hist[0]
self.history.history['mpl_model']['val_accuracy'][self.epoch_index, self.minibatch_index] = hist[1]
if self.minibatch_index >= self.minibatch_count - 1:
epoch_nb_str = f"{str(self.epoch_index).zfill(2)}/{str(self.epoch_count - 1).zfill(2)}"
logger.info(f" Model evaluation at the end of the epoch "
f"{epoch_nb_str}: "
f"{['%.3f' % elem for elem in hist]}")
def eval_and_log_final_model_test_perf(self):
logger.info("### Evaluating model on test data:")
model = self.build_model()
if self.test_set == 'global':
hist = model.evaluate(self.test_data[0],
self.test_data[1],
batch_size=constants.DEFAULT_BATCH_SIZE,
verbose=0,
)
elif self.test_set == 'local':
hist = [0.0, 0.0]
for p in self.partners_list:
hist_partner = model.evaluate(p.x_test,
p.y_test,
batch_size=constants.DEFAULT_BATCH_SIZE,
verbose=0,
)
hist[0] += hist_partner[0] / self.partners_count
hist[1] += hist_partner[1] / self.partners_count
else:
raise ValueError("test set should be 'local' or 'global', not {self.val_set}")
self.history.score = hist[1]
self.history.nb_epochs_done = self.epoch_index + 1
logger.info(f" Model metrics names: {self.metrics_names}")
logger.info(f" Model metrics values: {['%.3f' % elem for elem in hist]}")
def split_in_minibatches(self):
"""Split the dataset passed as argument in mini-batches"""
for partner in self.partners_list:
partner.split_minibatches()
def early_stop(self):
logger.debug(" Checking if early stopping criteria are met:")
if self.is_early_stopping:
# Early stopping parameters
if (
self.epoch_index >= constants.PATIENCE
and self.history.history['mpl_model']['val_loss'][self.epoch_index,
self.minibatch_index] >
self.history.history['mpl_model']['val_loss'][self.epoch_index - constants.PATIENCE,
self.minibatch_index]
):
logger.debug(" -> Early stopping criteria are met, stopping here.")
return True
else:
logger.debug(" -> Early stopping criteria are not met, continuing with training.")
else:
return False
def fit(self):
"""Return the score on test data of a final aggregated model trained in a federated way on each partner"""
start = timer()
# Train model (iterate for each epoch and mini-batch)
while self.epoch_index < self.epoch_count:
self.fit_epoch() # perform an epoch on the self.model
if self.early_stop():
break
self.epoch_index += 1
# After last epoch or if early stopping was triggered, evaluate model on the global testset
self.eval_and_log_final_model_test_perf()
end = timer()
self.learning_computation_time = end - start
logger.info(f"Training and evaluation on multiple partners: "
f"done. ({np.round(self.learning_computation_time, 3)} seconds)")
if self.save_folder is not None:
self.save_data() # Save the model weights and the history data
@abstractmethod
def fit_epoch(self):
while self.minibatch_index < self.minibatch_count:
self.fit_minibatch()
self.minibatch_index += 1
self.eval_and_log_model_val_perf()
@abstractmethod
def fit_minibatch(self):
pass
class SinglePartnerLearning(MultiPartnerLearning):
name = 'Single Partner learning'
def __init__(self, scenario, **kwargs):
super(SinglePartnerLearning, self).__init__(scenario, **kwargs)
if self.partners_count != 1:
raise ValueError('More than one partner is provided')
self.partner = self.partners_list[0]
def fit(self):
"""Return the score on test data of a model trained on a single partner"""
start = timer()
logger.info(f"## Training and evaluating model on partner with partner_id #{self.partner.id}")
# Set up early stopping if needed
cb = []
es = None
if self.is_early_stopping:
es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=constants.PATIENCE)
cb.append(es)
# Train model
logger.info(" Training model...")
model = self.build_model()
if self.val_set == 'global':
history = model.fit(self.partner.x_train,
self.partner.y_train,
batch_size=self.partner.batch_size,
epochs=self.epoch_count,
verbose=0,
validation_data=self.val_data,
callbacks=cb)
elif self.val_set == 'local':
history = model.fit(self.partner.x_train,
self.partner.y_train,
batch_size=self.partner.batch_size,
epochs=self.epoch_count,
verbose=0,
validation_data=(self.partner.x_val, self.partner.y_val),
callbacks=cb)
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.model_weights = model.get_weights()
self.log_partner_perf(self.partner.id, 0, history.history)
del self.history.history['mpl_model']
# Evaluate trained model on test data
self.eval_and_log_final_model_test_perf()
self.history.nb_epochs_done = (es.stopped_epoch + 1) if es is not None and es.stopped_epoch != 0 else self.epoch_count
end = timer()
self.learning_computation_time = end - start
def fit_epoch(self):
pass
def fit_minibatch(self):
pass
class FederatedAverageLearning(MultiPartnerLearning):
name = 'Federated averaging'
def __init__(self, scenario, **kwargs):
# First, if only one partner, fall back to dedicated single partner function
super(FederatedAverageLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
def fit_epoch(self):
# Clear Keras' old models
clear_session()
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
self.minibatch_index = i
self.fit_minibatch()
# At the end of each minibatch, aggregate the models
self.model_weights = self.aggregator.aggregate_model_weights()
self.minibatch_index = 0
def fit_minibatch(self):
"""Proceed to a collaborative round with a federated averaging approach"""
logger.debug("Start new fedavg collaborative round ...")
# Starting model for each partner is the aggregated model from the previous mini-batch iteration
logger.info(f"(fedavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
# Evaluate and store accuracy of mini-batch start model
self.eval_and_log_model_val_perf()
# Iterate over partners for training each individual model
for partner_index, partner in enumerate(self.partners_list):
# Reference the partner's model
partner_model = partner.build_model()
# Train on partner local data set
if self.val_set == 'global':
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index],
batch_size=partner.batch_size,
verbose=0,
validation_data=self.val_data)
elif self.val_set == 'local':
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index],
batch_size=partner.batch_size,
verbose=0,
validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
# Log results of the round
self.log_partner_perf(partner.id, partner_index, history.history)
# Update the partner's model in the models' list
partner.model_weights = partner_model.get_weights()
logger.debug("End of fedavg collaborative round.")
class DistributionallyRobustFederatedAveragingLearning(MultiPartnerLearning):
"""
- This class implements the Distributionally Robust Federated Averaging (DRFA) algorithm:
in each collaborative learning round, only a subset of partners is chosen to participate,
based on a global mixing parameter called lambda
- Lambda is updated at the end of each collaborative learning round using its own update rule
- DRFA is considered a framework under which we can implement other FL algorithms such as FedAvg
- Link to the paper : https://arxiv.org/abs/2102.12660
"""
name = "Distributionally Robust Federated Averaging"
def __init__(self, scenario, **kwargs):
super(DistributionallyRobustFederatedAveragingLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
self.active_partners_count = scenario.active_partners_count
self.lambda_vector = self.init_lambda()
self.active_partners_list = list()
self.update_active_partners_list()
self.local_steps = scenario.gradient_updates_per_pass_count
self.partners_training_data = {}
self.partners_participation = self.initialize_participation_dict()
self.lambda_learning_rate = 8e-3
self.local_steps_index = 0
self.local_steps_index_t = 0
self.global_model_at_index_t = None
self.model_weights_at_index_t = list()
self.loss_for_model_at_index_t = np.zeros(self.partners_count)
self.subset_u_partners = list()
self.loss_vector_v = list()
def fit_epoch(self):
# Split the train dataset in mini-batches
self.split_in_minibatches()
# convert partners training data into tf Dataset, reference: fast_mpl
for partner_id, partner in enumerate(self.partners_list):
self.partners_training_data[partner.id] = list()
for minibatch_index in range(self.minibatch_count):
# convert training data
data_train = tf.data.Dataset.from_tensor_slices((partner.minibatched_x_train[minibatch_index],
partner.minibatched_y_train[minibatch_index]))
data_train = data_train.shuffle(len(partner.minibatched_x_train[minibatch_index]))
data_train = data_train.batch(partner.batch_size)
data_train = data_train.prefetch(1)
self.partners_training_data[partner.id].append(data_train)
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
self.minibatch_index = i
self.local_steps_index = 0
self.local_steps_index_t = np.random.randint(0, self.local_steps - 1)
logger.info(
f"Active partner in this round "
f"{['#'+str(active_partner.id) for active_partner in self.active_partners_list]} "
f"according to lambda vector > {self.lambda_vector}")
logger.info(f"Local step index t > {self.local_steps_index_t}")
self.fit_minibatch()
# update partner participations
self.partners_participation[self.epoch_index][self.minibatch_index][[p.id for p
in self.active_partners_list]] = 1
self.update_lambda()
self.update_active_partners_list()
self.log_partners_participation_rate()
self.minibatch_index = 0
def fit_minibatch(self):
"""Proceed to a collaborative round with a distributionally robust federated averaging approach"""
# Starting model for each partner is the aggregated model from the previous mini-batch iteration
logger.info(f"(drfa) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
# Evaluate and store accuracy of mini-batch start model
self.eval_and_log_model_val_perf()
# Iterate over partners for training
for partner_index, partner in enumerate(self.active_partners_list):
partner_model = partner.build_model()
# loop through each partner's minibatch
minibatched_x_y = self.partners_training_data[partner.id][self.minibatch_index]
for idx, batch_x_y in enumerate(minibatched_x_y):
with tf.GradientTape() as tape:
p_pred = partner_model(batch_x_y[0])
loss = partner_model.compiled_loss(batch_x_y[1], p_pred)
partner_model.optimizer.minimize(loss, partner_model.trainable_weights, tape=tape)
self.local_steps_index += 1
if self.local_steps_index == self.local_steps_index_t:
# save model weights for each partner at local step t
self.model_weights_at_index_t.append(partner.model_weights)
partner.model_weights = partner_model.get_weights()
self.local_steps_index = 0
# aggregate final global model weights
self.model_weights = self.aggregate_model_weights(self.active_partners_list)
# build the model for each partner using weights gathered at index t
for active_partner, weights_t in zip(self.active_partners_list, self.model_weights_at_index_t):
active_partner.model_weights = weights_t
# aggregate global model weights at index t
self.global_model_at_index_t = self.aggregate_model_weights(self.active_partners_list)
# sample a new subset of partners of size active_partners_count
subset_index = random.sample(range(self.partners_count), self.active_partners_count)
self.subset_u_partners = [self.partners_list[index] for index in subset_index]
logger.info(
f"Subset of partners chosen for lambda update "
f"{['#'+ str(partner.id) for partner in self.subset_u_partners]}")
# compute losses over a random batch using the global model at index t
for partner, index in zip(self.subset_u_partners, subset_index):
random_minibatch_index = np.random.randint(0, self.minibatch_count - 1)
random_minibatch = self.partners_training_data[partner.id][random_minibatch_index]
random_batch_index = np.random.randint(0, len(random_minibatch) - 1)
random_batch = list(random_minibatch)[random_batch_index]
partner_model = self.build_model_from_weights(self.global_model_at_index_t)
loss = partner_model.compiled_loss(random_batch[1], partner_model(random_batch[0]))
# compute (n/m)*loss and add it to the loss vector
# n is the total number of partners, m is the number of active partners
self.loss_for_model_at_index_t[index] = \
((self.partners_count / self.active_partners_count) * np.mean(loss.numpy()))
def init_lambda(self):
"""
- initialize lambda vector according to each partner's dataset size
- this is a probability vector of size partners_count
"""
return np.array(self.amounts_per_partner)
def update_lambda(self):
"""
The update rule for lambda is : lambda_vector(i) =
Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))
"""
self.lambda_vector += (self.local_steps_index_t * self.lambda_learning_rate * self.loss_for_model_at_index_t)
self.lambda_vector = project_onto_the_simplex(self.lambda_vector)
# The projection can produce zero probabilities for certain partners which prevents them from
# participating in the training. To avoid this, we assign 1e-3 to each probability smaller than this value.
if any(self.lambda_vector < 1e-3):
self.lambda_vector[self.lambda_vector < 1e-3] = 1e-3
# normalize the probability vector
self.lambda_vector = self.lambda_vector / np.sum(self.lambda_vector)
def update_active_partners_list(self):
"""
Update the active partners list according to lambda vector
"""
active_partners_indices = (-self.lambda_vector).argsort()[:self.active_partners_count]
self.active_partners_list = [self.partners_list[index] for index in active_partners_indices]
def initialize_participation_dict(self):
participation = {}
for epoch_index in range(self.epoch_count):
participation[epoch_index] = {}
for minibatch_index in range(self.minibatch_count):
participation[epoch_index][minibatch_index] = np.zeros(self.partners_count)
return participation
def log_partners_participation_rate(self):
epoch_participation_vector = np.zeros(self.partners_count)
percentages = []
for minibatch_index, vect in self.partners_participation[self.epoch_index].items():
epoch_participation_vector += vect
percentages = [str(np.round(p_v / self.minibatch_count, 2) * 100) + ' %'
for p_v in list(epoch_participation_vector)]
logger.info(f"Partners {['#' + str(p.id) for p in self.partners_list]} "
f"have the following participation rates, respectively : "
f"{percentages} "
f"at the end of Epoch > {self.epoch_index}")
final_participation_vector = np.zeros(self.partners_count)
if self.epoch_index == self.epoch_count - 1:
for epoch_index in range(self.epoch_count):
for minibatch_index, vect in self.partners_participation[epoch_index].items():
final_participation_vector += vect
percentages = [str(np.round(f_p_v / (self.minibatch_count * self.epoch_count), 2) * 100) + '%'
for f_p_v in list(final_participation_vector)]
logger.info(f"Partners {['#' + str(p.id) for p in self.partners_list]} "
f"have the following participation rates : "
f"{percentages} "
f"during the training")
@staticmethod
def aggregate_model_weights(partners_list):
""" This method is identical to the one in the aggregator class with few modifications.
I couldn't use the original aggregator method since it operates on the entire list of partners and
DRFA requires model aggregation over a subset of partners list only
"""
aggregation_weights = np.ones(len(partners_list), dtype='float32')
weights_per_layer = list(zip(*[partner.model_weights for partner in partners_list]))
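        # zip(*...) regroups the weights layer by layer, so each element of weights_per_layer
        # holds one layer's weights coming from every partner in the subset.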
new_weights = list()
for weights_for_layer in weights_per_layer:
avg_weights_for_layer = np.average(
np.array(weights_for_layer), axis=0, weights=aggregation_weights
)
new_weights.append(avg_weights_for_layer)
return new_weights
class SequentialLearning(MultiPartnerLearning): # seq-pure
name = 'Sequential learning'
def __init__(self, scenario, **kwargs):
super(SequentialLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
def fit_epoch(self):
# Clear Keras' old models
clear_session()
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
self.minibatch_index = i
logger.info(f"(seq-pure) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}")
self.fit_minibatch()
def fit_minibatch(self):
"""Proceed to a collaborative round with a sequential averaging approach"""
logger.debug("Start new seq collaborative round ...")
model_for_round = self.build_model()
# Evaluate and store accuracy of mini-batch start model
self.eval_and_log_model_val_perf()
# Iterate over partners for training each individual model
shuffled_indexes = np.random.permutation(self.partners_count)
logger.debug(f"(seq) Shuffled order for this seqavg collaborative round: {shuffled_indexes}")
for idx, partner_index in enumerate(shuffled_indexes):
partner = self.partners_list[partner_index]
# Train on partner local data set
if self.val_set == 'global':
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index],
batch_size=partner.batch_size,
verbose=0,
validation_data=self.val_data)
elif self.val_set == 'local':
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index],
batch_size=partner.batch_size,
verbose=0,
validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
# Log results
self.log_partner_perf(partner.id, idx, history.history)
# Save the partner's model in the models' list
partner.model_weights = model_for_round.get_weights()
self.model_weights = model_for_round.get_weights()
logger.debug("End of seq collaborative round.")
class SequentialWithFinalAggLearning(SequentialLearning):
name = 'Sequential learning with final aggregation'
def __init__(self, scenario, **kwargs):
super(SequentialWithFinalAggLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
def fit_epoch(self):
# Clear Keras' old models
clear_session()
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
logger.info(f"(seq-final-agg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init model with a copy of the global model")
self.minibatch_index = i
self.fit_minibatch()
# At the end of each epoch, aggregate the models
self.model_weights = self.aggregator.aggregate_model_weights()
class SequentialAverageLearning(SequentialLearning):
name = 'Sequential averaged learning'
def __init__(self, scenario, **kwargs):
super(SequentialAverageLearning, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
def fit_epoch(self):
# Clear Keras' old models
clear_session()
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
logger.info(f"(seqavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init model with a copy of the global model")
self.minibatch_index = i
self.fit_minibatch()
# At the end of each minibatch, aggregate the models
self.model_weights = self.aggregator.aggregate_model_weights()
class FedAvgSmodel(FederatedAverageLearning):
name = 'Federated learning with label flipping'
def __init__(self, scenario, pretrain_epochs=0, epsilon=0.5, **kwargs):
super(FedAvgSmodel, self).__init__(scenario, **kwargs)
self.pretrain_epochs = pretrain_epochs
self.epsilon = epsilon
if pretrain_epochs > 0:
self.pretrain_mpl = FederatedAverageLearning(scenario=scenario,
epoch_count=self.pretrain_epochs,
is_save_data=False)
def fit(self):
if self.pretrain_epochs > 0:
logger.info('Start pre-train...')
self.pretrain_mpl.fit()
pretrain_model = self.pretrain_mpl.build_model()
for p in self.partners_list:
confusion = confusion_matrix(np.argmax(p.y_train, axis=1),
np.argmax(pretrain_model.predict(p.x_train), axis=1),
normalize='pred')
p.noise_layer_weights = [np.log(confusion.T + 1e-8)]
self.model_weights[:-1] = self.pretrain_mpl.model_weights[:-1]
else:
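            # Without pre-training, initialise every partner's noise channel with a symmetric prior:
            # the label is kept with probability 1 - epsilon and otherwise resampled uniformly over
            # the 10 (hard-coded) classes, which is exactly the matrix built below.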
for p in self.partners_list:
confusion = np.identity(10) * (1 - self.epsilon) + (self.epsilon / 10)
p.noise_layer_weights = [np.log(confusion + 1e-8)]
super(FedAvgSmodel, self).fit()
def fit_minibatch(self):
"""Proceed to a collaborative round with a S-Model federated averaging approach"""
logger.debug("Start new S-Model collaborative round ...")
# Starting model for each partner is the aggregated model from the previous mini-batch iteration
logger.info(f"(S-Model) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
# Evaluate and store accuracy of mini-batch start model
self.eval_and_log_model_val_perf()
# Iterate over partners for training each individual model
for partner_index, partner in enumerate(self.partners_list):
# Reference the partner's model
partner_model = partner.build_model()
x_batch = partner.minibatched_x_train[self.minibatch_index]
y_batch = partner.minibatched_y_train[self.minibatch_index]
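            # S-Model: stack a noise adaptation channel on top of the base network's output; its
            # weights (log of an estimated confusion matrix) model each partner's label noise.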
model_input = Input(shape=self.dataset.input_shape)
x = partner_model(model_input)
outputs = NoiseAdaptationChannel(weights=partner.noise_layer_weights, name='s-model')(x)
full_model = Model(inputs=model_input, outputs=outputs, name=f"full_model_partner_{partner_index}")
full_model.compile(
loss=partner_model.loss,
optimizer=partner_model.optimizer,
metrics='accuracy',
)
# Train on partner local data set
history = full_model.fit(x_batch,
y_batch,
batch_size=partner.batch_size,
verbose=0,
validation_data=self.val_data)
# Log results of the round
self.log_partner_perf(partner.id, partner_index, history.history)
# Update the partner's model in the models' list
partner.noise_layer_weights = full_model.get_layer('s-model').get_weights()
partner.model_weights = partner_model.get_weights()
logger.debug("End of S-Model collaborative round.")
class FederatedGradients(MultiPartnerLearning):
def __init__(self, scenario, **kwargs):
super(FederatedGradients, self).__init__(scenario, **kwargs)
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
self.model = self.build_model()
def fit_epoch(self):
# Split the train dataset in mini-batches
self.split_in_minibatches()
# Iterate over mini-batches and train
for i in range(self.minibatch_count):
self.minibatch_index = i
self.fit_minibatch()
self.minibatch_index = 0
def fit_minibatch(self):
"""Proceed to a collaborative round with a federated averaging approach"""
logger.debug("Start new gradients fusion collaborative round ...")
# Starting model for each partner is the aggregated model from the previous mini-batch iteration
logger.info(f"(gradient fusion) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, "
f"init each partner's models with a copy of the global model")
for partner in self.partners_list:
# Evaluate and store accuracy of mini-batch start model
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
        # Compute each partner's gradients on its local mini-batch (no individual training step here)
for partner_index, partner in enumerate(self.partners_list):
with tf.GradientTape() as tape:
loss = self.model.loss(partner.minibatched_y_train[self.minibatch_index],
self.model(partner.minibatched_x_train[self.minibatch_index]))
partner.grads = tape.gradient(loss, self.model.trainable_weights)
global_grad = self.aggregator.aggregate_gradients()
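        # A single optimizer step is applied to the shared model with the aggregated gradient, so the
        # partners move the global weights together instead of averaging separately trained models.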
self.model.optimizer.apply_gradients(zip(global_grad, self.model.trainable_weights))
self.model_weights = self.model.get_weights()
for partner_index, partner in enumerate(self.partners_list):
val_history = self.model.evaluate(self.val_data[0], self.val_data[1], verbose=False)
history = self.model.evaluate(partner.minibatched_x_train[self.minibatch_index],
partner.minibatched_y_train[self.minibatch_index], verbose=False)
history = {
"loss": [history[0]],
'accuracy': [history[1]],
'val_loss': [val_history[0]],
'val_accuracy': [val_history[1]]
}
# Log results of the round
self.log_partner_perf(partner.id, partner_index, history)
logger.debug("End of grads-fusion collaborative round.")
class EnsemblePredictions(MultiPartnerLearning):
"""
Ensemble (average) prediction of several input models
This approach can only be used with the EnsemblePredictionsModel
"""
def __init__(self, scenario, **kwargs):
super(EnsemblePredictions, self).__init__(scenario, **kwargs)
# First, if only one partner, fall back to dedicated single partner function
if self.partners_count == 1:
raise ValueError('Only one partner is provided. Please use the dedicated SinglePartnerLearning class')
partner_model_list = [self.dataset.generate_new_model() for _ in range(self.partners_count)]
self.model = EnsemblePredictionsModel(partner_model_list)
for partner in self.partners_list:
partner.model_weights = deepcopy(self.model_weights)
            logger.debug(f"Partner model weights object id: {id(partner.model_weights)}")
logger.info("Init EnsemblePredictionsModel model")
def build_model(self):
partner_model_list = [partner.build_model() for partner in self.partners_list]
return EnsemblePredictionsModel(partner_model_list)
def fit_epoch(self):
# Clear Keras' old models
clear_session()
self.eval_and_log_model_val_perf()
for partner_index, partner in enumerate(self.partners_list):
partner_model = partner.build_model()
# Train on partner local data set
history = partner_model.fit(partner.x_train,
partner.y_train,
batch_size=partner.batch_size,
verbose=0,
validation_data=self.val_data)
# Log results of the round
self.log_partner_perf(partner.id, partner_index, history.history)
# Update the partner's model in the models' list
partner.model_weights = partner_model.get_weights()
def fit_minibatch(self):
pass
|
""" Galaxy Process Management superclass and utilities
"""
import contextlib
import importlib
import inspect
import os
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from gravity.config_manager import ConfigManager
from gravity.io import error
from gravity.util import which
# If at some point we have additional process managers we can make a factory,
# but for the moment there's only supervisor.
@contextlib.contextmanager
def process_manager(*args, **kwargs):
# roulette!
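    # Import every non-underscore module of this package and yield an instance of the first
    # concrete BaseProcessManager subclass found; it is terminated when the context manager exits.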
for filename in os.listdir(os.path.dirname(__file__)):
if filename.endswith(".py") and not filename.startswith("_"):
mod = importlib.import_module("gravity.process_manager." + filename[: -len(".py")])
for name in dir(mod):
obj = getattr(mod, name)
if not name.startswith("_") and inspect.isclass(obj) and issubclass(obj, BaseProcessManager) and obj != BaseProcessManager:
pm = obj(*args, **kwargs)
try:
yield pm
finally:
pm.terminate()
return
class BaseProcessManager(object, metaclass=ABCMeta):
def __init__(self, state_dir=None, start_daemon=True, foreground=False):
self.config_manager = ConfigManager(state_dir=state_dir)
self.state_dir = self.config_manager.state_dir
self.tail = which("tail")
def _service_log_file(self, log_dir, program_name):
return os.path.join(log_dir, program_name + ".log")
def _service_program_name(self, instance_name, service):
return f"{instance_name}_{service["config_type"]}_{service["service_type"]}_{service["service_name"]}"
@abstractmethod
def start(self, instance_names):
""" """
@abstractmethod
def _process_config_changes(self, configs, meta_changes):
""" """
@abstractmethod
def terminate(self):
""" """
@abstractmethod
def stop(self, instance_names):
""" """
@abstractmethod
def restart(self, instance_names):
""" """
@abstractmethod
def reload(self, instance_names):
""" """
def follow(self, instance_names):
# supervisor has a built-in tail command but it only works on a single log file. `galaxyctl supervisorctl tail
# ...` can be used if desired, though
if not self.tail:
error("`tail` not found on $PATH, please install it")
return
if not instance_names:
instance_names = self.get_instance_names(instance_names)[0]
log_files = []
for instance_name in instance_names:
services = self.config_manager.get_instance_services(instance_name)
config = self.config_manager.get_instance_config(instance_name)
log_dir = config["attribs"]["log_dir"]
for service in services:
program_name = self._service_program_name(instance_name, service)
log_files.append(self._service_log_file(log_dir, program_name))
cmd = [self.tail, "-f"] + log_files
tail_popen = subprocess.Popen(cmd)
tail_popen.wait()
@abstractmethod
def graceful(self, instance_names):
""" """
@abstractmethod
def update(self, instance_names):
""" """
@abstractmethod
def shutdown(self, instance_names):
""" """
def get_instance_names(self, instance_names):
registered_instance_names = self.config_manager.get_registered_instances()
unknown_instance_names = []
if instance_names:
            # iterate over a copy so that removing elements does not skip entries
            for n in list(instance_names):
                if n not in registered_instance_names:
                    instance_names.remove(n)
                    unknown_instance_names.append(n)
elif registered_instance_names:
instance_names = registered_instance_names
else:
error("No instances registered (hint: `galaxyctl register /path/to/galaxy.yml`)")
sys.exit(1)
return instance_names, unknown_instance_names
|
""" Galaxy Process Management superclass and utilities
"""
import contextlib
import importlib
import inspect
import os
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from gravity.config_manager import ConfigManager
from gravity.io import error
from gravity.util import which
# If at some point we have additional process managers we can make a factory,
# but for the moment there's only supervisor.
@contextlib.contextmanager
def process_manager(*args, **kwargs):
# roulette!
for filename in os.listdir(os.path.dirname(__file__)):
if filename.endswith(".py") and not filename.startswith("_"):
mod = importlib.import_module("gravity.process_manager." + filename[: -len(".py")])
for name in dir(mod):
obj = getattr(mod, name)
if not name.startswith("_") and inspect.isclass(obj) and issubclass(obj, BaseProcessManager) and obj != BaseProcessManager:
pm = obj(*args, **kwargs)
try:
yield pm
finally:
pm.terminate()
return
class BaseProcessManager(object, metaclass=ABCMeta):
def __init__(self, state_dir=None, start_daemon=True, foreground=False):
self.config_manager = ConfigManager(state_dir=state_dir)
self.state_dir = self.config_manager.state_dir
self.tail = which("tail")
def _service_log_file(self, log_dir, program_name):
return os.path.join(log_dir, program_name + ".log")
def _service_program_name(self, instance_name, service):
return f"{instance_name}_{service['config_type']}_{service['service_type']}_{service['service_name']}"
@abstractmethod
def start(self, instance_names):
""" """
@abstractmethod
def _process_config_changes(self, configs, meta_changes):
""" """
@abstractmethod
def terminate(self):
""" """
@abstractmethod
def stop(self, instance_names):
""" """
@abstractmethod
def restart(self, instance_names):
""" """
@abstractmethod
def reload(self, instance_names):
""" """
def follow(self, instance_names):
# supervisor has a built-in tail command but it only works on a single log file. `galaxyctl supervisorctl tail
# ...` can be used if desired, though
if not self.tail:
error("`tail` not found on $PATH, please install it")
return
if not instance_names:
instance_names = self.get_instance_names(instance_names)[0]
log_files = []
for instance_name in instance_names:
services = self.config_manager.get_instance_services(instance_name)
config = self.config_manager.get_instance_config(instance_name)
log_dir = config["attribs"]["log_dir"]
for service in services:
program_name = self._service_program_name(instance_name, service)
log_files.append(self._service_log_file(log_dir, program_name))
cmd = [self.tail, "-f"] + log_files
tail_popen = subprocess.Popen(cmd)
tail_popen.wait()
@abstractmethod
def graceful(self, instance_names):
""" """
@abstractmethod
def update(self, instance_names):
""" """
@abstractmethod
def shutdown(self, instance_names):
""" """
def get_instance_names(self, instance_names):
registered_instance_names = self.config_manager.get_registered_instances()
unknown_instance_names = []
if instance_names:
            # iterate over a copy so that removing elements does not skip entries
            for n in list(instance_names):
                if n not in registered_instance_names:
                    instance_names.remove(n)
                    unknown_instance_names.append(n)
elif registered_instance_names:
instance_names = registered_instance_names
else:
error("No instances registered (hint: `galaxyctl register /path/to/galaxy.yml`)")
sys.exit(1)
return instance_names, unknown_instance_names
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import subprocess
import textwrap
from collections import defaultdict
from typing import Any, Dict, List, NamedTuple, Optional, Set
import click
from github import Github, Issue, PullRequest, UnknownObjectException
from rich.console import Console
from rich.progress import Progress
logger = logging.getLogger(__name__)
console = Console(width=400, color_system="standard")
MY_DIR_PATH = os.path.dirname(__file__)
SOURCE_DIR_PATH = os.path.abspath(os.path.join(MY_DIR_PATH, os.pardir))
PR_PATTERN = re.compile(r".*\(#([0-9]+)\)")
ISSUE_MATCH_IN_BODY = re.compile(r" #([0-9]+)[^0-9]")
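# For example, a (hypothetical) commit subject "Improve scheduler logging (#12345)" matches
# PR_PATTERN with group(1) == "12345", while ISSUE_MATCH_IN_BODY picks up references such as
# " #678 " inside a PR body.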
@click.group(context_settings={'help_option_names': ['-h', '--help'], 'max_content_width': 500})
def cli():
...
option_verbose = click.option(
"--verbose",
is_flag=True,
help="Print verbose information about performed steps",
)
option_previous_release = click.option(
"--previous-release",
type=str,
required=True,
help="commit reference (for example hash or tag) of the previous release.",
)
option_current_release = click.option(
"--current-release",
type=str,
required=True,
help="commit reference (for example hash or tag) of the current release.",
)
option_github_token = click.option(
"--github-token",
type=str,
required=True,
help=textwrap.dedent(
"""
Github token used to authenticate.
        You can omit it if you have the GITHUB_TOKEN env variable set
Can be generated with:
https://github.com/settings/tokens/new?description=Read%20sssues&scopes=repo:status"""
),
envvar='GITHUB_TOKEN',
)
option_excluded_pr_list = click.option(
"--excluded-pr-list", type=str, default='', help="Coma-separated list of PRs to exclude from the issue."
)
option_limit_pr_count = click.option(
"--limit-pr-count",
type=int,
default=None,
help="Limit PR count processes (useful for testing small subset of PRs).",
)
def get_git_log_command(
verbose: bool, from_commit: Optional[str] = None, to_commit: Optional[str] = None
) -> List[str]:
"""
Get git command to run for the current repo from the current folder (which is the package folder).
:param verbose: whether to print verbose info while getting the command
    :param from_commit: if present - base commit from which to start the log
    :param to_commit: if present - final commit at which to end the log
:return: git command to run
"""
git_cmd = [
"git",
"log",
"--pretty=format:%H %h %cd %s",
"--date=short",
]
if from_commit and to_commit:
git_cmd.append(f"{from_commit}...{to_commit}")
elif from_commit:
git_cmd.append(from_commit)
git_cmd.extend(['--', '.'])
if verbose:
console.print(f"Command to run: '{" ".join(git_cmd)}'")
return git_cmd
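# Sketch of the resulting command (hypothetical refs): get_git_log_command(False, "v2.2.3", "v2.2.4")
# returns ['git', 'log', '--pretty=format:%H %h %cd %s', '--date=short', 'v2.2.3...v2.2.4', '--', '.'].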
class Change(NamedTuple):
"""Stores details about commits"""
full_hash: str
short_hash: str
date: str
message: str
message_without_backticks: str
pr: Optional[int]
def get_change_from_line(line: str):
split_line = line.split(" ", maxsplit=3)
message = split_line[3]
pr = None
pr_match = PR_PATTERN.match(message)
if pr_match:
pr = pr_match.group(1)
return Change(
full_hash=split_line[0],
short_hash=split_line[1],
date=split_line[2],
message=message,
message_without_backticks=message.replace("`", "'").replace("&39;", "'"),
pr=int(pr) if pr else None,
)
def get_changes(verbose: bool, previous_release: str, current_release: str) -> List[Change]:
change_strings = subprocess.check_output(
get_git_log_command(verbose, from_commit=previous_release, to_commit=current_release),
cwd=SOURCE_DIR_PATH,
universal_newlines=True,
)
return [get_change_from_line(line) for line in change_strings.split("\n")]
def render_template(
template_name: str,
context: Dict[str, Any],
autoescape: bool = True,
keep_trailing_newline: bool = False,
) -> str:
"""
    Renders template based on its name. Reads the template from <name>_TEMPLATE.md.jinja2 in current dir.
:param template_name: name of the template to use
:param context: Jinja2 context
:param autoescape: Whether to autoescape HTML
:param keep_trailing_newline: Whether to keep the newline in rendered output
:return: rendered template
"""
import jinja2
template_loader = jinja2.FileSystemLoader(searchpath=MY_DIR_PATH)
template_env = jinja2.Environment(
loader=template_loader,
undefined=jinja2.StrictUndefined,
autoescape=autoescape,
keep_trailing_newline=keep_trailing_newline,
)
template = template_env.get_template(f"{template_name}_TEMPLATE.md.jinja2")
content: str = template.render(context)
return content
def print_issue_content(
current_release: str,
pull_requests: Dict[int, PullRequest.PullRequest],
linked_issues: Dict[int, List[Issue.Issue]],
users: Dict[int, Set[str]],
):
pr_list = list(pull_requests.keys())
pr_list.sort()
user_logins: Dict[int, str] = {pr: "@" + " @".join(users[pr]) for pr in users}
all_users: Set[str] = set()
for user_list in users.values():
all_users.update(user_list)
all_user_logins = "@" + " @".join(all_users)
content = render_template(
template_name='ISSUE',
context={
'version': current_release,
'pr_list': pr_list,
'pull_requests': pull_requests,
'linked_issues': linked_issues,
'users': users,
'user_logins': user_logins,
'all_user_logins': all_user_logins,
},
autoescape=False,
keep_trailing_newline=True,
)
print(content)
@cli.command()
@option_github_token
@option_previous_release
@option_current_release
@option_excluded_pr_list
@option_verbose
@option_limit_pr_count
def generate_issue_content(
github_token: str,
previous_release: str,
current_release: str,
excluded_pr_list: str,
verbose: bool,
limit_pr_count: Optional[int],
):
if excluded_pr_list:
excluded_prs = [int(pr) for pr in excluded_pr_list.split(",")]
else:
excluded_prs = []
changes = get_changes(verbose, previous_release, current_release)
prs = list(
filter(lambda pr: pr is not None and pr not in excluded_prs, [change.pr for change in changes])
)
g = Github(github_token)
repo = g.get_repo("apache/airflow")
pull_requests: Dict[int, PullRequest.PullRequest] = {}
linked_issues: Dict[int, List[Issue.Issue]] = defaultdict(lambda: [])
users: Dict[int, Set[str]] = defaultdict(lambda: set())
count_prs = len(prs)
if limit_pr_count:
count_prs = limit_pr_count
with Progress(console=console) as progress:
task = progress.add_task(f"Retrieving {count_prs} PRs ", total=count_prs)
for i in range(count_prs):
pr_number = prs[i]
progress.console.print(
f"Retrieving PR#{pr_number}: " f"https://github.com/apache/airflow/pull/{pr_number}"
)
try:
pr = repo.get_pull(pr_number)
except UnknownObjectException:
# Fallback to issue if PR not found
try:
pr = repo.get_issue(pr_number) # (same fields as PR)
except UnknownObjectException:
console.print(f"[red]The PR #{pr_number} could not be found[/]")
continue
# Ignore doc-only and skipped PRs
label_names = [label.name for label in pr.labels]
if "type:doc-only" in label_names or "changelog:skip" in label_names:
continue
pull_requests[pr_number] = pr
# GitHub does not have linked issues in PR - but we quite rigorously add Fixes/Closes
# Relate so we can find those from the body
if pr.body:
body = pr.body.replace("\n", " ").replace("\r", " ")
for issue_match in ISSUE_MATCH_IN_BODY.finditer(body):
linked_issue_number = int(issue_match.group(1))
progress.console.print(
f"Retrieving Linked issue PR#{linked_issue_number}: "
f"https://github.com/apache/airflow/issue/{linked_issue_number}"
)
try:
linked_issues[pr_number].append(repo.get_issue(linked_issue_number))
except UnknownObjectException:
progress.console.print(
f"Failed to retrieve linked issue #{linked_issue_number}: Unknown Issue"
)
users[pr_number].add(pr.user.login)
for linked_issue in linked_issues[pr_number]:
users[pr_number].add(linked_issue.user.login)
progress.advance(task)
print_issue_content(current_release, pull_requests, linked_issues, users)
if __name__ == "__main__":
cli()
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import subprocess
import textwrap
from collections import defaultdict
from typing import Any, Dict, List, NamedTuple, Optional, Set
import click
from github import Github, Issue, PullRequest, UnknownObjectException
from rich.console import Console
from rich.progress import Progress
logger = logging.getLogger(__name__)
console = Console(width=400, color_system="standard")
MY_DIR_PATH = os.path.dirname(__file__)
SOURCE_DIR_PATH = os.path.abspath(os.path.join(MY_DIR_PATH, os.pardir))
PR_PATTERN = re.compile(r".*\(#([0-9]+)\)")
ISSUE_MATCH_IN_BODY = re.compile(r" #([0-9]+)[^0-9]")
@click.group(context_settings={'help_option_names': ['-h', '--help'], 'max_content_width': 500})
def cli():
...
option_verbose = click.option(
"--verbose",
is_flag=True,
help="Print verbose information about performed steps",
)
option_previous_release = click.option(
"--previous-release",
type=str,
required=True,
help="commit reference (for example hash or tag) of the previous release.",
)
option_current_release = click.option(
"--current-release",
type=str,
required=True,
help="commit reference (for example hash or tag) of the current release.",
)
option_github_token = click.option(
"--github-token",
type=str,
required=True,
help=textwrap.dedent(
"""
Github token used to authenticate.
        You can omit it if you have the GITHUB_TOKEN env variable set
Can be generated with:
https://github.com/settings/tokens/new?description=Read%20sssues&scopes=repo:status"""
),
envvar='GITHUB_TOKEN',
)
option_excluded_pr_list = click.option(
"--excluded-pr-list", type=str, default='', help="Coma-separated list of PRs to exclude from the issue."
)
option_limit_pr_count = click.option(
"--limit-pr-count",
type=int,
default=None,
help="Limit PR count processes (useful for testing small subset of PRs).",
)
def get_git_log_command(
verbose: bool, from_commit: Optional[str] = None, to_commit: Optional[str] = None
) -> List[str]:
"""
Get git command to run for the current repo from the current folder (which is the package folder).
:param verbose: whether to print verbose info while getting the command
    :param from_commit: if present - base commit from which to start the log
    :param to_commit: if present - final commit at which to end the log
:return: git command to run
"""
git_cmd = [
"git",
"log",
"--pretty=format:%H %h %cd %s",
"--date=short",
]
if from_commit and to_commit:
git_cmd.append(f"{from_commit}...{to_commit}")
elif from_commit:
git_cmd.append(from_commit)
git_cmd.extend(['--', '.'])
if verbose:
console.print(f"Command to run: '{' '.join(git_cmd)}'")
return git_cmd
class Change(NamedTuple):
"""Stores details about commits"""
full_hash: str
short_hash: str
date: str
message: str
message_without_backticks: str
pr: Optional[int]
def get_change_from_line(line: str):
split_line = line.split(" ", maxsplit=3)
message = split_line[3]
pr = None
pr_match = PR_PATTERN.match(message)
if pr_match:
pr = pr_match.group(1)
return Change(
full_hash=split_line[0],
short_hash=split_line[1],
date=split_line[2],
message=message,
message_without_backticks=message.replace("`", "'").replace("&39;", "'"),
pr=int(pr) if pr else None,
)
def get_changes(verbose: bool, previous_release: str, current_release: str) -> List[Change]:
change_strings = subprocess.check_output(
get_git_log_command(verbose, from_commit=previous_release, to_commit=current_release),
cwd=SOURCE_DIR_PATH,
universal_newlines=True,
)
return [get_change_from_line(line) for line in change_strings.split("\n")]
def render_template(
template_name: str,
context: Dict[str, Any],
autoescape: bool = True,
keep_trailing_newline: bool = False,
) -> str:
"""
    Renders template based on its name. Reads the template from <name>_TEMPLATE.md.jinja2 in current dir.
:param template_name: name of the template to use
:param context: Jinja2 context
:param autoescape: Whether to autoescape HTML
:param keep_trailing_newline: Whether to keep the newline in rendered output
:return: rendered template
"""
import jinja2
template_loader = jinja2.FileSystemLoader(searchpath=MY_DIR_PATH)
template_env = jinja2.Environment(
loader=template_loader,
undefined=jinja2.StrictUndefined,
autoescape=autoescape,
keep_trailing_newline=keep_trailing_newline,
)
template = template_env.get_template(f"{template_name}_TEMPLATE.md.jinja2")
content: str = template.render(context)
return content
def print_issue_content(
current_release: str,
pull_requests: Dict[int, PullRequest.PullRequest],
linked_issues: Dict[int, List[Issue.Issue]],
users: Dict[int, Set[str]],
):
pr_list = list(pull_requests.keys())
pr_list.sort()
user_logins: Dict[int, str] = {pr: "@" + " @".join(users[pr]) for pr in users}
all_users: Set[str] = set()
for user_list in users.values():
all_users.update(user_list)
all_user_logins = "@" + " @".join(all_users)
content = render_template(
template_name='ISSUE',
context={
'version': current_release,
'pr_list': pr_list,
'pull_requests': pull_requests,
'linked_issues': linked_issues,
'users': users,
'user_logins': user_logins,
'all_user_logins': all_user_logins,
},
autoescape=False,
keep_trailing_newline=True,
)
print(content)
@cli.command()
@option_github_token
@option_previous_release
@option_current_release
@option_excluded_pr_list
@option_verbose
@option_limit_pr_count
def generate_issue_content(
github_token: str,
previous_release: str,
current_release: str,
excluded_pr_list: str,
verbose: bool,
limit_pr_count: Optional[int],
):
if excluded_pr_list:
excluded_prs = [int(pr) for pr in excluded_pr_list.split(",")]
else:
excluded_prs = []
changes = get_changes(verbose, previous_release, current_release)
prs = list(
filter(lambda pr: pr is not None and pr not in excluded_prs, [change.pr for change in changes])
)
g = Github(github_token)
repo = g.get_repo("apache/airflow")
pull_requests: Dict[int, PullRequest.PullRequest] = {}
linked_issues: Dict[int, List[Issue.Issue]] = defaultdict(lambda: [])
users: Dict[int, Set[str]] = defaultdict(lambda: set())
count_prs = len(prs)
if limit_pr_count:
count_prs = limit_pr_count
with Progress(console=console) as progress:
task = progress.add_task(f"Retrieving {count_prs} PRs ", total=count_prs)
for i in range(count_prs):
pr_number = prs[i]
progress.console.print(
f"Retrieving PR#{pr_number}: " f"https://github.com/apache/airflow/pull/{pr_number}"
)
try:
pr = repo.get_pull(pr_number)
except UnknownObjectException:
# Fallback to issue if PR not found
try:
pr = repo.get_issue(pr_number) # (same fields as PR)
except UnknownObjectException:
console.print(f"[red]The PR #{pr_number} could not be found[/]")
continue
# Ignore doc-only and skipped PRs
label_names = [label.name for label in pr.labels]
if "type:doc-only" in label_names or "changelog:skip" in label_names:
continue
pull_requests[pr_number] = pr
# GitHub does not have linked issues in PR - but we quite rigorously add Fixes/Closes
# Relate so we can find those from the body
if pr.body:
body = pr.body.replace("\n", " ").replace("\r", " ")
for issue_match in ISSUE_MATCH_IN_BODY.finditer(body):
linked_issue_number = int(issue_match.group(1))
progress.console.print(
f"Retrieving Linked issue PR#{linked_issue_number}: "
f"https://github.com/apache/airflow/issue/{linked_issue_number}"
)
try:
linked_issues[pr_number].append(repo.get_issue(linked_issue_number))
except UnknownObjectException:
progress.console.print(
f"Failed to retrieve linked issue #{linked_issue_number}: Unknown Issue"
)
users[pr_number].add(pr.user.login)
for linked_issue in linked_issues[pr_number]:
users[pr_number].add(linked_issue.user.login)
progress.advance(task)
print_issue_content(current_release, pull_requests, linked_issues, users)
if __name__ == "__main__":
cli()
|
#!/usr/bin/env python3
#
# ISC License
#
# Copyright (C) 2021 DS-Homebrew
# Copyright (C) 2021-present lifehackerhansol
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import discord
import aiohttp
import asyncio
import settings
from discord.ext import commands
from utils.utils import create_error_embed
from tortoise import Tortoise
cogs = [
'cogs.mod',
'cogs.roles',
'cogs.load',
'jishaku'
]
async def init():
await Tortoise.init(
db_url='sqlite://neptune.db',
modules={'models': ['utils.models']}
)
# Generate the schema
await Tortoise.generate_schemas()
class Neptune(commands.Bot):
def __init__(self, command_prefix, description):
intents = discord.Intents(guilds=True, members=True, bans=True, messages=True)
allowed_mentions = discord.AllowedMentions(everyone=False, roles=False)
activity = discord.Game(settings.STATUS)
status = discord.Status.online
super().__init__(
command_prefix=command_prefix,
description=description,
intents=intents,
allowed_mentions=allowed_mentions,
activity=activity,
status=status,
case_insensitive=True
)
self.session = aiohttp.ClientSession()
def load_cogs(self):
for cog in cogs:
try:
self.load_extension(cog)
print(f"Loaded cog {cog}")
except Exception as e:
exc = "{}: {}".format(type(e).__name__, e)
print("Failed to load cog {}\n{}".format(cog, exc))
async def close(self):
await Tortoise.close_connections()
await super().close()
await self.session.close()
async def is_owner(self, user: discord.User):
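        # In addition to the application owner recognised by the default check, treat members of the
        # configured guild holding any of the configured staff roles as bot owners.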
if settings.GUILD:
g = self.get_guild(settings.GUILD)
if g:
member = g.get_member(user.id)
if member and any(role.id in settings.staff_roles for role in member.roles):
return True
return await super().is_owner(user)
async def on_ready(self):
print("Neptune ready.")
async def on_command_error(self, ctx: commands.Context, exc: commands.CommandInvokeError):
author: discord.Member = ctx.author
command: commands.Command = ctx.command or '<unknown cmd>'
exc = getattr(exc, 'original', exc)
channel = ctx.channel
if isinstance(exc, commands.CommandNotFound):
return
elif isinstance(exc, commands.ArgumentParsingError):
await ctx.send_help(ctx.command)
elif isinstance(exc, commands.NoPrivateMessage):
await ctx.send(f'`{command}` cannot be used in direct messages.')
elif isinstance(exc, commands.MissingPermissions):
await ctx.send(f"{author.mention} You don't have permission to use `{command}`.")
elif isinstance(exc, commands.CheckFailure):
await ctx.send(f'{author.mention} You cannot use `{command}`.')
elif isinstance(exc, commands.BadArgument):
await ctx.send(f'{author.mention} A bad argument was given: `{exc}`\n')
await ctx.send_help(ctx.command)
elif isinstance(exc, commands.BadUnionArgument):
await ctx.send(f'{author.mention} A bad argument was given: `{exc}`\n')
elif isinstance(exc, commands.BadLiteralArgument):
            await ctx.send(f'{author.mention} A bad argument was given, expected one of {", ".join(exc.literals)}')
elif isinstance(exc, commands.MissingRequiredArgument):
await ctx.send(f'{author.mention} You are missing required argument {exc.param.name}.\n')
await ctx.send_help(ctx.command)
elif isinstance(exc, discord.NotFound):
await ctx.send("ID not found.")
elif isinstance(exc, discord.Forbidden):
await ctx.send(f"💢 I can't help you if you don't let me!\n`{exc.text}`.")
elif isinstance(exc, commands.CommandInvokeError):
await ctx.send(f'{author.mention} `{command}` raised an exception during usage')
embed = create_error_embed(ctx, exc)
await channel.send(embed=embed)
else:
await ctx.send(f'{author.mention} Unexpected exception occurred while using the command `{command}`')
embed = create_error_embed(ctx, exc)
await channel.send(embed=embed)
async def startup():
bot = Neptune(settings.PREFIX, description="Neptune, the 777 Air Cadets Discord bot")
bot.help_command = commands.DefaultHelpCommand()
print('Starting Neptune...')
bot.load_cogs()
await init()
await bot.start(settings.TOKEN)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(startup())
|
#!/usr/bin/env python3
#
# ISC License
#
# Copyright (C) 2021 DS-Homebrew
# Copyright (C) 2021-present lifehackerhansol
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import discord
import aiohttp
import asyncio
import settings
from discord.ext import commands
from utils.utils import create_error_embed
from tortoise import Tortoise
cogs = [
'cogs.mod',
'cogs.roles',
'cogs.load',
'jishaku'
]
async def init():
await Tortoise.init(
db_url='sqlite://neptune.db',
modules={'models': ['utils.models']}
)
# Generate the schema
await Tortoise.generate_schemas()
class Neptune(commands.Bot):
def __init__(self, command_prefix, description):
intents = discord.Intents(guilds=True, members=True, bans=True, messages=True)
allowed_mentions = discord.AllowedMentions(everyone=False, roles=False)
activity = discord.Game(settings.STATUS)
status = discord.Status.online
super().__init__(
command_prefix=command_prefix,
description=description,
intents=intents,
allowed_mentions=allowed_mentions,
activity=activity,
status=status,
case_insensitive=True
)
self.session = aiohttp.ClientSession()
def load_cogs(self):
for cog in cogs:
try:
self.load_extension(cog)
print(f"Loaded cog {cog}")
except Exception as e:
exc = "{}: {}".format(type(e).__name__, e)
print("Failed to load cog {}\n{}".format(cog, exc))
async def close(self):
await Tortoise.close_connections()
await super().close()
await self.session.close()
async def is_owner(self, user: discord.User):
if settings.GUILD:
g = self.get_guild(settings.GUILD)
if g:
member = g.get_member(user.id)
if member and any(role.id in settings.staff_roles for role in member.roles):
return True
return await super().is_owner(user)
async def on_ready(self):
print("Neptune ready.")
async def on_command_error(self, ctx: commands.Context, exc: commands.CommandInvokeError):
author: discord.Member = ctx.author
command: commands.Command = ctx.command or '<unknown cmd>'
exc = getattr(exc, 'original', exc)
channel = ctx.channel
if isinstance(exc, commands.CommandNotFound):
return
elif isinstance(exc, commands.ArgumentParsingError):
await ctx.send_help(ctx.command)
elif isinstance(exc, commands.NoPrivateMessage):
await ctx.send(f'`{command}` cannot be used in direct messages.')
elif isinstance(exc, commands.MissingPermissions):
await ctx.send(f"{author.mention} You don't have permission to use `{command}`.")
elif isinstance(exc, commands.CheckFailure):
await ctx.send(f'{author.mention} You cannot use `{command}`.')
elif isinstance(exc, commands.BadArgument):
await ctx.send(f'{author.mention} A bad argument was given: `{exc}`\n')
await ctx.send_help(ctx.command)
elif isinstance(exc, commands.BadUnionArgument):
await ctx.send(f'{author.mention} A bad argument was given: `{exc}`\n')
elif isinstance(exc, commands.BadLiteralArgument):
await ctx.send(f'{author.mention} A bad argument was given, expected one of {", ".join(exc.literals)}')
elif isinstance(exc, commands.MissingRequiredArgument):
await ctx.send(f'{author.mention} You are missing required argument {exc.param.name}.\n')
await ctx.send_help(ctx.command)
elif isinstance(exc, discord.NotFound):
await ctx.send("ID not found.")
elif isinstance(exc, discord.Forbidden):
await ctx.send(f"💢 I can't help you if you don't let me!\n`{exc.text}`.")
elif isinstance(exc, commands.CommandInvokeError):
await ctx.send(f'{author.mention} `{command}` raised an exception during usage')
embed = create_error_embed(ctx, exc)
await channel.send(embed=embed)
else:
await ctx.send(f'{author.mention} Unexpected exception occurred while using the command `{command}`')
embed = create_error_embed(ctx, exc)
await channel.send(embed=embed)
async def startup():
bot = Neptune(settings.PREFIX, description="Neptune, the 777 Air Cadets Discord bot")
bot.help_command = commands.DefaultHelpCommand()
print('Starting Neptune...')
bot.load_cogs()
await init()
await bot.start(settings.TOKEN)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(startup())
|
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
""" model configuration """
if 'CTC' in opt.Prediction:
if opt.baiduCTC:
converter = CTCLabelConverterForBaiduWarpctc(opt.character)
else:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling, opt.Prediction)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
""" setup loss """
if 'CTC' in opt.Prediction:
if opt.baiduCTC:
# need to install warpctc. see our guideline.
from warpctc_pytorch import CTCLoss
criterion = CTCLoss()
else:
criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
# loss averager
loss_avg = Averager()
    # keep only the parameters that require gradient descent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, model.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]
# setup optimizer
if opt.adam:
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
else:
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
start_time = time.time()
best_accuracy = -1
best_norm_ED = -1
iteration = start_iter
while (True):
# train part
image_tensors, labels = train_dataset.get_batch()
image = image_tensors.to(device)
text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
batch_size = image.size(0)
if 'CTC' in opt.Prediction:
preds = model(image, text)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
if opt.baiduCTC:
preds = preds.permute(1, 0, 2) # to use CTCLoss format
cost = criterion(preds, text, preds_size, length) / batch_size
else:
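                # torch.nn.CTCLoss expects log-probabilities shaped (T, N, C), hence the
                # log_softmax over the class dimension followed by the permute to time-major.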
preds = preds.log_softmax(2).permute(1, 0, 2)
cost = criterion(preds, text, preds_size, length)
else:
preds = model(image, text[:, :-1]) # align with Attention.forward
target = text[:, 1:] # without [GO] Symbol
cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
model.zero_grad()
cost.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) # gradient clipping with 5 (Default)
optimizer.step()
loss_avg.add(cost)
# validation part
if (
iteration + 1) % opt.valInterval == 0 or iteration == 0: # To see training progress, we also conduct validation when 'iteration == 0'
elapsed_time = time.time() - start_time
# for log
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
model.eval()
with torch.no_grad():
valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(
model, criterion, valid_loader, converter, opt)
model.train()
# training loss and validation loss
loss_log = f'[{iteration + 1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
loss_avg.reset()
                current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.2f}'
# keep best accuracy model (on valid dataset)
if current_accuracy > best_accuracy:
best_accuracy = current_accuracy
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
if current_norm_ED > best_norm_ED:
best_norm_ED = current_norm_ED
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
                best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.2f}'
loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
print(loss_model_log)
log.write(loss_model_log + '\n')
# show some predicted results
dashed_line = '-' * 80
                head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred = pred[:pred.find('[s]')]
predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
predicted_result_log += f'{dashed_line}'
print(predicted_result_log)
log.write(predicted_result_log + '\n')
# save model per 1e+5 iter.
if (iteration + 1) % 1e+5 == 0:
torch.save(
model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration + 1}.pth')
if (iteration + 1) == opt.num_iter:
print('end the training')
sys.exit()
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', required=True, help='path to training dataset', default="D:/data/data_lmdb_release/training")
parser.add_argument('--valid_data', required=True, help='path to validation dataset', default="D:/data/data_lmdb_release/validation")
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
    parser.add_argument('--baiduCTC', action='store_true', help='whether to use the Baidu warp-ctc implementation of CTC loss')
""" Data processing """
parser.add_argument('--select_data', type=str, default='MJ-ST',
help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True,
help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. CTC|Attn')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
        print('if you are stuck for a long time with the multi-GPU setting, try setting --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
""" previous version
    print('To equalize batch stats to the 1-GPU setting, batch_size is multiplied by num_gpu; the multiplied batch_size is ', opt.batch_size)
opt.batch_size = opt.batch_size * opt.num_gpu
print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
    If you don't care about it, just comment out these lines.
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
|
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
""" model configuration """
if 'CTC' in opt.Prediction:
if opt.baiduCTC:
converter = CTCLabelConverterForBaiduWarpctc(opt.character)
else:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
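    # Both converters prepend their own special tokens (a blank token for CTC, [GO]/[s] for
    # attention), so the class count is read from the converter rather than from opt.character.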
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling, opt.Prediction)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
""" setup loss """
if 'CTC' in opt.Prediction:
if opt.baiduCTC:
# need to install warpctc. see our guideline.
from warpctc_pytorch import CTCLoss
criterion = CTCLoss()
else:
criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
# loss averager
loss_avg = Averager()
    # keep only the parameters that require gradients (i.e., will receive gradient-descent updates)
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, model.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]
# setup optimizer
if opt.adam:
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
else:
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
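        # checkpoints saved below are named iter_<n>.pth, so the iteration counter
        # can be recovered from the filename when resuming training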
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
start_time = time.time()
best_accuracy = -1
best_norm_ED = -1
iteration = start_iter
while (True):
# train part
image_tensors, labels = train_dataset.get_batch()
image = image_tensors.to(device)
text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
batch_size = image.size(0)
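        # CTC branch: the model emits per-timestep class scores, and CTCLoss expects
        # log-probabilities in (T, N, C) order plus the output length of every sample.
        # Attention branch: teacher forcing feeds text[:, :-1] and the loss is computed
        # against the targets shifted by one position (text[:, 1:], i.e. without [GO]).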
if 'CTC' in opt.Prediction:
preds = model(image, text)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
if opt.baiduCTC:
preds = preds.permute(1, 0, 2) # to use CTCLoss format
cost = criterion(preds, text, preds_size, length) / batch_size
else:
preds = preds.log_softmax(2).permute(1, 0, 2)
cost = criterion(preds, text, preds_size, length)
else:
preds = model(image, text[:, :-1]) # align with Attention.forward
target = text[:, 1:] # without [GO] Symbol
cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
model.zero_grad()
cost.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) # gradient clipping with 5 (Default)
optimizer.step()
loss_avg.add(cost)
# validation part
if (
iteration + 1) % opt.valInterval == 0 or iteration == 0: # To see training progress, we also conduct validation when 'iteration == 0'
elapsed_time = time.time() - start_time
# for log
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
model.eval()
with torch.no_grad():
valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(
model, criterion, valid_loader, converter, opt)
model.train()
# training loss and validation loss
loss_log = f'[{iteration + 1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
loss_avg.reset()
current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.2f}'
# keep best accuracy model (on valid dataset)
if current_accuracy > best_accuracy:
best_accuracy = current_accuracy
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
if current_norm_ED > best_norm_ED:
best_norm_ED = current_norm_ED
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.2f}'
loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
print(loss_model_log)
log.write(loss_model_log + '\n')
# show some predicted results
dashed_line = '-' * 80
head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred = pred[:pred.find('[s]')]
predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
predicted_result_log += f'{dashed_line}'
print(predicted_result_log)
log.write(predicted_result_log + '\n')
# save model per 1e+5 iter.
if (iteration + 1) % 1e+5 == 0:
torch.save(
model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration + 1}.pth')
if (iteration + 1) == opt.num_iter:
print('end the training')
sys.exit()
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', required=True, help='path to training dataset', default="D:/data/data_lmdb_release/training")
parser.add_argument('--valid_data', required=True, help='path to validation dataset', default="D:/data/data_lmdb_release/validation")
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
    parser.add_argument('--baiduCTC', action='store_true', help='use Baidu warpctc (warpctc_pytorch) instead of torch.nn.CTCLoss for CTC training')
""" Data processing """
parser.add_argument('--select_data', type=str, default='MJ-ST',
                        help='select training data (default is MJ-ST, which means MJ and ST are used as training data)')
parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
                        help='total data usage ratio; this ratio is multiplied by the total number of data samples.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True,
help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. CTC|Attn')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
        print('if you are stuck for a long time with the multi-GPU setting, try setting --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
""" previous version
    print('To equalize batch stats to the 1-GPU setting, batch_size is multiplied by num_gpu; the multiplied batch_size is ', opt.batch_size)
opt.batch_size = opt.batch_size * opt.num_gpu
print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
    If you don't care about it, just comment out these lines.
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
|
from nussl import ml, datasets, evaluation
import tempfile
from torch import optim
import numpy as np
import logging
import os
import torch
from matplotlib import pyplot as plt
logging.basicConfig(
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO
)
fix_dir = 'tests/local/trainer'
def test_overfit_a(mix_source_folder):
tfms = datasets.transforms.Compose([
datasets.transforms.PhaseSensitiveSpectrumApproximation(),
datasets.transforms.ToSeparationModel(),
datasets.transforms.Cache('~/.nussl/tests/cache', overwrite=True),
datasets.transforms.GetExcerpt(400)
])
dataset = datasets.MixSourceFolder(
mix_source_folder, transform=tfms)
ml.train.cache_dataset(dataset)
dataset.cache_populated = True
dataloader = torch.utils.data.DataLoader(
dataset, shuffle=True, batch_size=len(dataset), num_workers=2)
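    # batch_size=len(dataset) loads the whole (small) dataset as a single batch,
    # which is convenient for this overfitting test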
# create the model, based on the first item in the dataset
# second bit of the shape is the number of features
n_features = dataset[0]['mix_magnitude'].shape[1]
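    # A deliberately small model: hidden size 50, one non-bidirectional recurrent layer,
    # no dropout, two estimated sources, sigmoid mask activation (see nussl's
    # build_recurrent_mask_inference builder for the exact parameter order).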
mi_config = ml.networks.builders.build_recurrent_mask_inference(
n_features, 50, 1, False, 0.0, 2, 'sigmoid',
)
model = ml.SeparationModel(mi_config)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if device == 'cuda':
epoch_length = 100
else:
epoch_length = 10
model = model.to(device)
# create optimizer
optimizer = optim.Adam(model.parameters(), lr=1e-3)
loss_dictionary = {
'L1Loss': {
'weight': 1.0
}
}
train_closure = ml.train.closures.TrainClosure(
loss_dictionary, optimizer, model)
val_closure = ml.train.closures.ValidationClosure(
loss_dictionary, model)
with tempfile.TemporaryDirectory() as tmpdir:
_dir = fix_dir if fix_dir else tmpdir
os.makedirs(os.path.join(_dir, 'plots'), exist_ok=True)
trainer, validator = ml.train.create_train_and_validation_engines(
train_closure, val_closure, device=device
)
# add handlers to engine
ml.train.add_stdout_handler(trainer, validator)
ml.train.add_validate_and_checkpoint(
_dir, model, optimizer, dataset,
trainer, val_data=dataloader, validator=validator)
ml.train.add_tensorboard_handler(_dir, trainer)
# run engine
trainer.run(dataloader, max_epochs=5, epoch_length=epoch_length)
model_path = os.path.join(
trainer.state.output_folder, 'checkpoints', 'best.model.pth')
state_dict = torch.load(
model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(state_dict['state_dict'])
history = state_dict['metadata']['trainer.state.epoch_history']
for key in history:
plt.figure(figsize=(10, 4))
plt.title(f"epoch:{key}")
plt.plot(np.array(history[key]).reshape(-1, ))
plt.savefig(os.path.join(
trainer.state.output_folder, 'plots',
            f"epoch:{key.replace('/', ':')}.png"))
|
from nussl import ml, datasets, evaluation
import tempfile
from torch import optim
import numpy as np
import logging
import os
import torch
from matplotlib import pyplot as plt
logging.basicConfig(
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO
)
fix_dir = 'tests/local/trainer'
def test_overfit_a(mix_source_folder):
tfms = datasets.transforms.Compose([
datasets.transforms.PhaseSensitiveSpectrumApproximation(),
datasets.transforms.ToSeparationModel(),
datasets.transforms.Cache('~/.nussl/tests/cache', overwrite=True),
datasets.transforms.GetExcerpt(400)
])
dataset = datasets.MixSourceFolder(
mix_source_folder, transform=tfms)
ml.train.cache_dataset(dataset)
dataset.cache_populated = True
dataloader = torch.utils.data.DataLoader(
dataset, shuffle=True, batch_size=len(dataset), num_workers=2)
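    # batch_size=len(dataset) loads the whole (small) dataset as a single batch,
    # which is convenient for this overfitting test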
# create the model, based on the first item in the dataset
# second bit of the shape is the number of features
n_features = dataset[0]['mix_magnitude'].shape[1]
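    # A deliberately small model: hidden size 50, one non-bidirectional recurrent layer,
    # no dropout, two estimated sources, sigmoid mask activation (see nussl's
    # build_recurrent_mask_inference builder for the exact parameter order).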
mi_config = ml.networks.builders.build_recurrent_mask_inference(
n_features, 50, 1, False, 0.0, 2, 'sigmoid',
)
model = ml.SeparationModel(mi_config)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if device == 'cuda':
epoch_length = 100
else:
epoch_length = 10
model = model.to(device)
# create optimizer
optimizer = optim.Adam(model.parameters(), lr=1e-3)
loss_dictionary = {
'L1Loss': {
'weight': 1.0
}
}
train_closure = ml.train.closures.TrainClosure(
loss_dictionary, optimizer, model)
val_closure = ml.train.closures.ValidationClosure(
loss_dictionary, model)
with tempfile.TemporaryDirectory() as tmpdir:
_dir = fix_dir if fix_dir else tmpdir
os.makedirs(os.path.join(_dir, 'plots'), exist_ok=True)
trainer, validator = ml.train.create_train_and_validation_engines(
train_closure, val_closure, device=device
)
# add handlers to engine
ml.train.add_stdout_handler(trainer, validator)
ml.train.add_validate_and_checkpoint(
_dir, model, optimizer, dataset,
trainer, val_data=dataloader, validator=validator)
ml.train.add_tensorboard_handler(_dir, trainer)
# run engine
trainer.run(dataloader, max_epochs=5, epoch_length=epoch_length)
model_path = os.path.join(
trainer.state.output_folder, 'checkpoints', 'best.model.pth')
state_dict = torch.load(
model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(state_dict['state_dict'])
history = state_dict['metadata']['trainer.state.epoch_history']
for key in history:
plt.figure(figsize=(10, 4))
plt.title(f"epoch:{key}")
plt.plot(np.array(history[key]).reshape(-1, ))
plt.savefig(os.path.join(
trainer.state.output_folder, 'plots',
f"epoch:{key.replace('/', ':')}.png"))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetScriptResult',
'AwaitableGetScriptResult',
'get_script',
'get_script_output',
]
@pulumi.output_type
class GetScriptResult:
"""
A collection of values returned by getScript.
"""
def __init__(__self__, dag_edges=None, dag_nodes=None, id=None, language=None, python_script=None, scala_code=None):
if dag_edges and not isinstance(dag_edges, list):
raise TypeError("Expected argument 'dag_edges' to be a list")
pulumi.set(__self__, "dag_edges", dag_edges)
if dag_nodes and not isinstance(dag_nodes, list):
raise TypeError("Expected argument 'dag_nodes' to be a list")
pulumi.set(__self__, "dag_nodes", dag_nodes)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if language and not isinstance(language, str):
raise TypeError("Expected argument 'language' to be a str")
pulumi.set(__self__, "language", language)
if python_script and not isinstance(python_script, str):
raise TypeError("Expected argument 'python_script' to be a str")
pulumi.set(__self__, "python_script", python_script)
if scala_code and not isinstance(scala_code, str):
raise TypeError("Expected argument 'scala_code' to be a str")
pulumi.set(__self__, "scala_code", scala_code)
@property
@pulumi.getter(name="dagEdges")
def dag_edges(self) -> Sequence['outputs.GetScriptDagEdgeResult']:
return pulumi.get(self, "dag_edges")
@property
@pulumi.getter(name="dagNodes")
def dag_nodes(self) -> Sequence['outputs.GetScriptDagNodeResult']:
return pulumi.get(self, "dag_nodes")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def language(self) -> Optional[str]:
return pulumi.get(self, "language")
@property
@pulumi.getter(name="pythonScript")
def python_script(self) -> str:
"""
The Python script generated from the DAG when the `language` argument is set to `PYTHON`.
"""
return pulumi.get(self, "python_script")
@property
@pulumi.getter(name="scalaCode")
def scala_code(self) -> str:
"""
The Scala code generated from the DAG when the `language` argument is set to `SCALA`.
"""
return pulumi.get(self, "scala_code")
class AwaitableGetScriptResult(GetScriptResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetScriptResult(
dag_edges=self.dag_edges,
dag_nodes=self.dag_nodes,
id=self.id,
language=self.language,
python_script=self.python_script,
scala_code=self.scala_code)
def get_script(dag_edges: Optional[Sequence[pulumi.InputType['GetScriptDagEdgeArgs']]] = None,
dag_nodes: Optional[Sequence[pulumi.InputType['GetScriptDagNodeArgs']]] = None,
language: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScriptResult:
"""
Use this data source to generate a Glue script from a Directed Acyclic Graph (DAG).
## Example Usage
### Generate Python Script
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_script(language="PYTHON",
dag_edges=[
aws.glue.GetScriptDagEdgeArgs(
source="datasource0",
target="applymapping1",
),
aws.glue.GetScriptDagEdgeArgs(
source="applymapping1",
target="selectfields2",
),
aws.glue.GetScriptDagEdgeArgs(
source="selectfields2",
target="resolvechoice3",
),
aws.glue.GetScriptDagEdgeArgs(
source="resolvechoice3",
target="datasink4",
),
],
dag_nodes=[
aws.glue.GetScriptDagNodeArgs(
id="datasource0",
node_type="DataSource",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['source']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['source']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="applymapping1",
node_type="ApplyMapping",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="mapping",
value="[(\"column1\", \"string\", \"column1\", \"string\")]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="selectfields2",
node_type="SelectFields",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="paths",
value="[\"column1\"]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="resolvechoice3",
node_type="ResolveChoice",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="choice",
value="\"MATCH_CATALOG\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="datasink4",
node_type="DataSink",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
])
pulumi.export("pythonScript", example.python_script)
```
### Generate Scala Code
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_script(language="SCALA",
dag_edges=[
aws.glue.GetScriptDagEdgeArgs(
source="datasource0",
target="applymapping1",
),
aws.glue.GetScriptDagEdgeArgs(
source="applymapping1",
target="selectfields2",
),
aws.glue.GetScriptDagEdgeArgs(
source="selectfields2",
target="resolvechoice3",
),
aws.glue.GetScriptDagEdgeArgs(
source="resolvechoice3",
target="datasink4",
),
],
dag_nodes=[
aws.glue.GetScriptDagNodeArgs(
id="datasource0",
node_type="DataSource",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['source']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['source']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="applymapping1",
node_type="ApplyMapping",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="mappings",
value="[(\"column1\", \"string\", \"column1\", \"string\")]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="selectfields2",
node_type="SelectFields",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="paths",
value="[\"column1\"]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="resolvechoice3",
node_type="ResolveChoice",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="choice",
value="\"MATCH_CATALOG\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="datasink4",
node_type="DataSink",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
])
pulumi.export("scalaCode", example.scala_code)
```
:param Sequence[pulumi.InputType['GetScriptDagEdgeArgs']] dag_edges: A list of the edges in the DAG. Defined below.
:param Sequence[pulumi.InputType['GetScriptDagNodeArgs']] dag_nodes: A list of the nodes in the DAG. Defined below.
:param str language: The programming language of the resulting code from the DAG. Defaults to `PYTHON`. Valid values are `PYTHON` and `SCALA`.
"""
__args__ = dict()
__args__['dagEdges'] = dag_edges
__args__['dagNodes'] = dag_nodes
__args__['language'] = language
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:glue/getScript:getScript', __args__, opts=opts, typ=GetScriptResult).value
return AwaitableGetScriptResult(
dag_edges=__ret__.dag_edges,
dag_nodes=__ret__.dag_nodes,
id=__ret__.id,
language=__ret__.language,
python_script=__ret__.python_script,
scala_code=__ret__.scala_code)
@_utilities.lift_output_func(get_script)
def get_script_output(dag_edges: Optional[pulumi.Input[Sequence[pulumi.InputType['GetScriptDagEdgeArgs']]]] = None,
dag_nodes: Optional[pulumi.Input[Sequence[pulumi.InputType['GetScriptDagNodeArgs']]]] = None,
language: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetScriptResult]:
"""
Use this data source to generate a Glue script from a Directed Acyclic Graph (DAG).
## Example Usage
### Generate Python Script
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_script(language="PYTHON",
dag_edges=[
aws.glue.GetScriptDagEdgeArgs(
source="datasource0",
target="applymapping1",
),
aws.glue.GetScriptDagEdgeArgs(
source="applymapping1",
target="selectfields2",
),
aws.glue.GetScriptDagEdgeArgs(
source="selectfields2",
target="resolvechoice3",
),
aws.glue.GetScriptDagEdgeArgs(
source="resolvechoice3",
target="datasink4",
),
],
dag_nodes=[
aws.glue.GetScriptDagNodeArgs(
id="datasource0",
node_type="DataSource",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['source']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['source']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="applymapping1",
node_type="ApplyMapping",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="mapping",
value="[(\"column1\", \"string\", \"column1\", \"string\")]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="selectfields2",
node_type="SelectFields",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="paths",
value="[\"column1\"]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="resolvechoice3",
node_type="ResolveChoice",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="choice",
value="\"MATCH_CATALOG\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="datasink4",
node_type="DataSink",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
])
pulumi.export("pythonScript", example.python_script)
```
### Generate Scala Code
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_script(language="SCALA",
dag_edges=[
aws.glue.GetScriptDagEdgeArgs(
source="datasource0",
target="applymapping1",
),
aws.glue.GetScriptDagEdgeArgs(
source="applymapping1",
target="selectfields2",
),
aws.glue.GetScriptDagEdgeArgs(
source="selectfields2",
target="resolvechoice3",
),
aws.glue.GetScriptDagEdgeArgs(
source="resolvechoice3",
target="datasink4",
),
],
dag_nodes=[
aws.glue.GetScriptDagNodeArgs(
id="datasource0",
node_type="DataSource",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['source']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['source']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="applymapping1",
node_type="ApplyMapping",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="mappings",
value="[(\"column1\", \"string\", \"column1\", \"string\")]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="selectfields2",
node_type="SelectFields",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="paths",
value="[\"column1\"]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="resolvechoice3",
node_type="ResolveChoice",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="choice",
value="\"MATCH_CATALOG\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="datasink4",
node_type="DataSink",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
                        value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
                        value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
])
pulumi.export("scalaCode", example.scala_code)
```
:param Sequence[pulumi.InputType['GetScriptDagEdgeArgs']] dag_edges: A list of the edges in the DAG. Defined below.
:param Sequence[pulumi.InputType['GetScriptDagNodeArgs']] dag_nodes: A list of the nodes in the DAG. Defined below.
:param str language: The programming language of the resulting code from the DAG. Defaults to `PYTHON`. Valid values are `PYTHON` and `SCALA`.
"""
...
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetScriptResult',
'AwaitableGetScriptResult',
'get_script',
'get_script_output',
]
@pulumi.output_type
class GetScriptResult:
"""
A collection of values returned by getScript.
"""
def __init__(__self__, dag_edges=None, dag_nodes=None, id=None, language=None, python_script=None, scala_code=None):
if dag_edges and not isinstance(dag_edges, list):
raise TypeError("Expected argument 'dag_edges' to be a list")
pulumi.set(__self__, "dag_edges", dag_edges)
if dag_nodes and not isinstance(dag_nodes, list):
raise TypeError("Expected argument 'dag_nodes' to be a list")
pulumi.set(__self__, "dag_nodes", dag_nodes)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if language and not isinstance(language, str):
raise TypeError("Expected argument 'language' to be a str")
pulumi.set(__self__, "language", language)
if python_script and not isinstance(python_script, str):
raise TypeError("Expected argument 'python_script' to be a str")
pulumi.set(__self__, "python_script", python_script)
if scala_code and not isinstance(scala_code, str):
raise TypeError("Expected argument 'scala_code' to be a str")
pulumi.set(__self__, "scala_code", scala_code)
@property
@pulumi.getter(name="dagEdges")
def dag_edges(self) -> Sequence['outputs.GetScriptDagEdgeResult']:
return pulumi.get(self, "dag_edges")
@property
@pulumi.getter(name="dagNodes")
def dag_nodes(self) -> Sequence['outputs.GetScriptDagNodeResult']:
return pulumi.get(self, "dag_nodes")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def language(self) -> Optional[str]:
return pulumi.get(self, "language")
@property
@pulumi.getter(name="pythonScript")
def python_script(self) -> str:
"""
The Python script generated from the DAG when the `language` argument is set to `PYTHON`.
"""
return pulumi.get(self, "python_script")
@property
@pulumi.getter(name="scalaCode")
def scala_code(self) -> str:
"""
The Scala code generated from the DAG when the `language` argument is set to `SCALA`.
"""
return pulumi.get(self, "scala_code")
class AwaitableGetScriptResult(GetScriptResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetScriptResult(
dag_edges=self.dag_edges,
dag_nodes=self.dag_nodes,
id=self.id,
language=self.language,
python_script=self.python_script,
scala_code=self.scala_code)
def get_script(dag_edges: Optional[Sequence[pulumi.InputType['GetScriptDagEdgeArgs']]] = None,
dag_nodes: Optional[Sequence[pulumi.InputType['GetScriptDagNodeArgs']]] = None,
language: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScriptResult:
"""
Use this data source to generate a Glue script from a Directed Acyclic Graph (DAG).
## Example Usage
### Generate Python Script
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_script(language="PYTHON",
dag_edges=[
aws.glue.GetScriptDagEdgeArgs(
source="datasource0",
target="applymapping1",
),
aws.glue.GetScriptDagEdgeArgs(
source="applymapping1",
target="selectfields2",
),
aws.glue.GetScriptDagEdgeArgs(
source="selectfields2",
target="resolvechoice3",
),
aws.glue.GetScriptDagEdgeArgs(
source="resolvechoice3",
target="datasink4",
),
],
dag_nodes=[
aws.glue.GetScriptDagNodeArgs(
id="datasource0",
node_type="DataSource",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['source']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['source']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="applymapping1",
node_type="ApplyMapping",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="mapping",
value="[(\"column1\", \"string\", \"column1\", \"string\")]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="selectfields2",
node_type="SelectFields",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="paths",
value="[\"column1\"]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="resolvechoice3",
node_type="ResolveChoice",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="choice",
value="\"MATCH_CATALOG\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="datasink4",
node_type="DataSink",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
])
pulumi.export("pythonScript", example.python_script)
```
### Generate Scala Code
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_script(language="SCALA",
dag_edges=[
aws.glue.GetScriptDagEdgeArgs(
source="datasource0",
target="applymapping1",
),
aws.glue.GetScriptDagEdgeArgs(
source="applymapping1",
target="selectfields2",
),
aws.glue.GetScriptDagEdgeArgs(
source="selectfields2",
target="resolvechoice3",
),
aws.glue.GetScriptDagEdgeArgs(
source="resolvechoice3",
target="datasink4",
),
],
dag_nodes=[
aws.glue.GetScriptDagNodeArgs(
id="datasource0",
node_type="DataSource",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['source']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['source']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="applymapping1",
node_type="ApplyMapping",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="mappings",
value="[(\"column1\", \"string\", \"column1\", \"string\")]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="selectfields2",
node_type="SelectFields",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="paths",
value="[\"column1\"]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="resolvechoice3",
node_type="ResolveChoice",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="choice",
value="\"MATCH_CATALOG\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="datasink4",
node_type="DataSink",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
])
pulumi.export("scalaCode", example.scala_code)
```
:param Sequence[pulumi.InputType['GetScriptDagEdgeArgs']] dag_edges: A list of the edges in the DAG. Defined below.
:param Sequence[pulumi.InputType['GetScriptDagNodeArgs']] dag_nodes: A list of the nodes in the DAG. Defined below.
:param str language: The programming language of the resulting code from the DAG. Defaults to `PYTHON`. Valid values are `PYTHON` and `SCALA`.
"""
__args__ = dict()
__args__['dagEdges'] = dag_edges
__args__['dagNodes'] = dag_nodes
__args__['language'] = language
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:glue/getScript:getScript', __args__, opts=opts, typ=GetScriptResult).value
return AwaitableGetScriptResult(
dag_edges=__ret__.dag_edges,
dag_nodes=__ret__.dag_nodes,
id=__ret__.id,
language=__ret__.language,
python_script=__ret__.python_script,
scala_code=__ret__.scala_code)
@_utilities.lift_output_func(get_script)
def get_script_output(dag_edges: Optional[pulumi.Input[Sequence[pulumi.InputType['GetScriptDagEdgeArgs']]]] = None,
dag_nodes: Optional[pulumi.Input[Sequence[pulumi.InputType['GetScriptDagNodeArgs']]]] = None,
language: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetScriptResult]:
"""
Use this data source to generate a Glue script from a Directed Acyclic Graph (DAG).
## Example Usage
### Generate Python Script
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_script(language="PYTHON",
dag_edges=[
aws.glue.GetScriptDagEdgeArgs(
source="datasource0",
target="applymapping1",
),
aws.glue.GetScriptDagEdgeArgs(
source="applymapping1",
target="selectfields2",
),
aws.glue.GetScriptDagEdgeArgs(
source="selectfields2",
target="resolvechoice3",
),
aws.glue.GetScriptDagEdgeArgs(
source="resolvechoice3",
target="datasink4",
),
],
dag_nodes=[
aws.glue.GetScriptDagNodeArgs(
id="datasource0",
node_type="DataSource",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['source']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['source']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="applymapping1",
node_type="ApplyMapping",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="mapping",
value="[(\"column1\", \"string\", \"column1\", \"string\")]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="selectfields2",
node_type="SelectFields",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="paths",
value="[\"column1\"]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="resolvechoice3",
node_type="ResolveChoice",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="choice",
value="\"MATCH_CATALOG\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="datasink4",
node_type="DataSink",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
])
pulumi.export("pythonScript", example.python_script)
```
### Generate Scala Code
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.get_script(language="SCALA",
dag_edges=[
aws.glue.GetScriptDagEdgeArgs(
source="datasource0",
target="applymapping1",
),
aws.glue.GetScriptDagEdgeArgs(
source="applymapping1",
target="selectfields2",
),
aws.glue.GetScriptDagEdgeArgs(
source="selectfields2",
target="resolvechoice3",
),
aws.glue.GetScriptDagEdgeArgs(
source="resolvechoice3",
target="datasink4",
),
],
dag_nodes=[
aws.glue.GetScriptDagNodeArgs(
id="datasource0",
node_type="DataSource",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['source']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['source']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="applymapping1",
node_type="ApplyMapping",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="mappings",
value="[(\"column1\", \"string\", \"column1\", \"string\")]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="selectfields2",
node_type="SelectFields",
args=[aws.glue.GetScriptDagNodeArgArgs(
name="paths",
value="[\"column1\"]",
)],
),
aws.glue.GetScriptDagNodeArgs(
id="resolvechoice3",
node_type="ResolveChoice",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="choice",
value="\"MATCH_CATALOG\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
aws.glue.GetScriptDagNodeArgs(
id="datasink4",
node_type="DataSink",
args=[
aws.glue.GetScriptDagNodeArgArgs(
name="database",
value=f"\"{aws_glue_catalog_database['destination']['name']}\"",
),
aws.glue.GetScriptDagNodeArgArgs(
name="table_name",
value=f"\"{aws_glue_catalog_table['destination']['name']}\"",
),
],
),
])
pulumi.export("scalaCode", example.scala_code)
```
:param Sequence[pulumi.InputType['GetScriptDagEdgeArgs']] dag_edges: A list of the edges in the DAG. Defined below.
:param Sequence[pulumi.InputType['GetScriptDagNodeArgs']] dag_nodes: A list of the nodes in the DAG. Defined below.
:param str language: The programming language of the resulting code from the DAG. Defaults to `PYTHON`. Valid values are `PYTHON` and `SCALA`.
"""
...
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2018-11
import base64
import json
import logging
import time
from collections import namedtuple
from enum import Enum
from functools import partial
from typing import List, Union, Callable
import ipaddress
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtCore import pyqtSlot, Qt, QTimerEvent, QTimer
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import QDialog, QApplication, QToolButton, QAction, QWidget
from bitcoinrpc.authproxy import EncodeDecimal, JSONRPCException
import app_cache
import app_defs
import hw_intf
from app_config import MasternodeConfig, AppConfig, InputKeyType
from app_defs import FEE_DUFF_PER_BYTE
from bip44_wallet import Bip44Wallet, BreakFetchTransactionsException, find_wallet_addresses
from common import CancelException
from crown_utils import generate_bls_privkey, generate_wif_privkey, validate_address, wif_privkey_to_address, \
validate_wif_privkey, bls_privkey_to_pubkey
from crownd_intf import CrowndInterface
from thread_fun_dlg import CtrlObject
from ui import ui_reg_masternode_dlg
from wallet_common import Bip44AccountType, Bip44AddressType
from wnd_utils import WndUtils
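# Identifiers of the wizard pages of this dialog, in the order the user steps through them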
STEP_MN_DATA = 1
STEP_DASHD_TYPE = 2
STEP_AUTOMATIC_RPC_NODE = 3
STEP_MANUAL_OWN_NODE = 4
STEP_SUMMARY = 5
NODE_TYPE_PUBLIC_RPC = 1
NODE_TYPE_OWN = 2
CACHE_ITEM_SHOW_FIELD_HINTS = 'RegMasternodeDlg_ShowFieldHints'
log = logging.getLogger('cmt.reg_masternode')
class RegMasternodeDlg(QDialog, ui_reg_masternode_dlg.Ui_RegMasternodeDlg, WndUtils):
def __init__(self, main_dlg, config: AppConfig, crownd_intf: CrowndInterface, masternode: MasternodeConfig,
on_proregtx_success_callback: Callable):
QDialog.__init__(self, main_dlg)
ui_reg_masternode_dlg.Ui_RegMasternodeDlg.__init__(self)
WndUtils.__init__(self, main_dlg.app_config)
self.main_dlg = main_dlg
self.masternode = masternode
self.app_config = config
        self.crownd_intf: CrowndInterface = crownd_intf
self.on_proregtx_success_callback = on_proregtx_success_callback
self.style = '<style>.info{color:darkblue} .warning{color:#ff6600} .error{background-color:red;color:white}</style>'
self.operator_reward_saved = None
self.owner_pkey_generated: str = None
self.operator_pkey_generated: str = None
self.voting_pkey_generated: str = None
self.current_step = STEP_MN_DATA
self.step_stack: List[int] = []
self.proregtx_prepare_thread_ref = None
self.deterministic_mns_spork_active = True
self.dmn_collateral_tx: str = None
self.dmn_collateral_tx_index: int = None
self.dmn_collateral_tx_address: str = None
self.dmn_collateral_tx_address_path: str = None
self.dmn_ip: str = None
self.dmn_tcp_port: int = None
self.dmn_owner_payout_addr: str = None
self.dmn_operator_reward: int = 0
self.dmn_owner_privkey: str = None
self.dmn_owner_address: str = None
self.dmn_operator_privkey: str = None
self.dmn_operator_pubkey: str = None
self.dmn_voting_privkey: str = None
self.dmn_voting_address: str = None
self.dmn_owner_key_type = InputKeyType.PRIVATE
self.dmn_operator_key_type = InputKeyType.PRIVATE
self.dmn_voting_key_type = InputKeyType.PRIVATE
self.collateral_validation_err_msg = ''
self.ip_port_validation_err_msg = ''
self.payout_address_validation_err_msg = ''
self.operator_reward_validation_err_msg = ''
self.owner_key_validation_err_msg = ''
self.operator_key_validation_err_msg = ''
self.voting_key_validation_err_msg = ''
self.dmn_reg_tx_hash = ''
self.manual_signed_message = False
self.last_manual_prepare_string: str = None
self.wait_for_confirmation_timer_id = None
self.show_field_hinds = True
self.summary_info = []
if self.masternode:
self.dmn_collateral_tx_address_path = self.masternode.collateralBip32Path
self.bip44_wallet = Bip44Wallet(self.app_config.hw_coin_name, self.main_dlg.hw_session,
self.app_config.db_intf, self.crownd_intf, self.app_config.crown_network)
self.finishing = False
self.setupUi()
def setupUi(self):
ui_reg_masternode_dlg.Ui_RegMasternodeDlg.setupUi(self, self)
self.closeEvent = self.closeEvent
self.restore_cache_settings()
self.edtCollateralTx.setText(self.masternode.collateralTx)
if self.masternode.collateralTx:
sz = self.edtCollateralTx.fontMetrics().size(0, self.masternode.collateralTx + '000')
self.edtCollateralTx.setMinimumWidth(sz.width())
self.edtCollateralIndex.setText(self.masternode.collateralTxIndex)
self.edtIP.setText(self.masternode.ip)
self.edtPort.setText(self.masternode.port)
self.edtPayoutAddress.setText(self.masternode.collateralAddress)
self.chbWholeMNReward.setChecked(True)
self.lblProtxSummary2.linkActivated.connect(self.save_summary_info)
self.lblCollateralTxMsg.sizePolicy().setHeightForWidth(True)
self.prepare_keys()
self.btnClose.hide()
self.setIcon(self.btnManualFundingAddressPaste, 'content-paste@16px.png')
self.setIcon(self.btnManualProtxPrepareCopy, 'content-copy@16px.png')
self.setIcon(self.btnManualProtxPrepareResultPaste, 'content-paste@16px.png')
self.setIcon(self.btnManualProtxSubmitCopy, 'content-copy@16px.png')
self.setIcon(self.btnManualTxHashPaste, 'content-paste@16px.png')
self.setIcon(self.btnSummaryDMNOperatorKeyCopy, 'content-copy@16px.png')
self.edtSummaryDMNOperatorKey.setStyleSheet("QLineEdit{background-color: white} "
"QLineEdit:read-only{background-color: white}")
doc_url = app_defs.get_doc_url('deterministic-mn-migration.md')
if doc_url:
self.lblDocumentation.setText(f'<a href="{doc_url}">Documentation</a>')
self.update_dynamic_labels()
self.update_ctrls_visibility()
self.update_ctrl_state()
self.update_step_tab_ui()
self.update_show_hints_label()
self.minimize_dialog_height()
def closeEvent(self, event):
self.finishing = True
if self.wait_for_confirmation_timer_id is not None:
self.killTimer(self.wait_for_confirmation_timer_id)
self.save_cache_settings()
def restore_cache_settings(self):
app_cache.restore_window_size(self)
self.show_field_hinds = app_cache.get_value(CACHE_ITEM_SHOW_FIELD_HINTS, True, bool)
def save_cache_settings(self):
app_cache.save_window_size(self)
app_cache.set_value(CACHE_ITEM_SHOW_FIELD_HINTS, self.show_field_hinds)
def minimize_dialog_height(self):
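        # defer adjustSize() with a single-shot timer so it runs after the pending layout updates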
def set():
self.adjustSize()
self.tm_resize_dlg = QTimer(self)
self.tm_resize_dlg.setSingleShot(True)
self.tm_resize_dlg.singleShot(100, set)
def update_dynamic_labels(self):
def style_to_color(style: str) -> str:
if style == 'hl1':
color = 'color:#00802b'
else:
color = ''
return color
        def get_label_text(prefix: str, key_type: str, tooltip_anchor: str, style: str):
lbl = prefix + ' ' + \
{'privkey': 'private key', 'pubkey': 'public key', 'address': 'Crown address'}.get(key_type, '???')
change_mode = f'(<a href="{tooltip_anchor}">use {tooltip_anchor}</a>)'
return f'<table style="float:right;{style_to_color(style)}"><tr><td><b>{lbl}</b></td><td>{change_mode}</td></tr></table>'
if self.masternode:
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
key_type, tooltip_anchor, placeholder_text = ('privkey', 'address', 'Enter the owner private key')
style = ''
else:
key_type, tooltip_anchor, placeholder_text = ('address', 'privkey', 'Enter the owner Crown address')
style = 'hl1'
self.lblOwnerKey.setText(get_label_text('Owner', key_type, tooltip_anchor, style))
self.edtOwnerKey.setPlaceholderText(placeholder_text)
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
key_type, tooltip_anchor, placeholder_text = ('privkey', 'pubkey', 'Enter the operator private key')
style = ''
else:
key_type, tooltip_anchor, placeholder_text = ('pubkey', 'privkey', 'Enter the operator public key')
style = 'hl1'
self.lblOperatorKey.setText(get_label_text('Operator', key_type, tooltip_anchor, style))
self.edtOperatorKey.setPlaceholderText(placeholder_text)
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
                key_type, tooltip_anchor, placeholder_text = ('privkey', 'address', 'Enter the voting private key')
style = ''
else:
key_type, tooltip_anchor, placeholder_text = ('address', 'privkey', 'Enter the voting Crown address')
style = 'hl1'
self.lblVotingKey.setText(get_label_text('Voting', key_type, tooltip_anchor, style))
self.edtVotingKey.setPlaceholderText(placeholder_text)
@pyqtSlot(str)
def on_lblOwnerKey_linkActivated(self, link):
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
self.dmn_owner_key_type = InputKeyType.PUBLIC
self.dmn_owner_privkey = self.edtOwnerKey.text()
self.edtOwnerKey.setText(self.dmn_owner_address)
else:
self.dmn_owner_key_type = InputKeyType.PRIVATE
self.dmn_owner_address = self.edtOwnerKey.text()
self.edtOwnerKey.setText(self.dmn_owner_privkey)
self.update_dynamic_labels()
self.update_ctrls_visibility()
self.upd_owner_key_info(False)
@pyqtSlot(str)
def on_lblOperatorKey_linkActivated(self, link):
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
self.dmn_operator_key_type = InputKeyType.PUBLIC
self.dmn_operator_privkey = self.edtOperatorKey.text()
self.edtOperatorKey.setText(self.dmn_operator_pubkey)
else:
self.dmn_operator_key_type = InputKeyType.PRIVATE
self.dmn_operator_pubkey = self.edtOperatorKey.text()
self.edtOperatorKey.setText(self.dmn_operator_privkey)
self.update_dynamic_labels()
self.update_ctrls_visibility()
self.upd_operator_key_info(False)
@pyqtSlot(str)
def on_lblVotingKey_linkActivated(self, link):
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
self.dmn_voting_key_type = InputKeyType.PUBLIC
self.dmn_voting_privkey = self.edtVotingKey.text()
self.edtVotingKey.setText(self.dmn_voting_address)
else:
self.dmn_voting_key_type = InputKeyType.PRIVATE
self.dmn_voting_address = self.edtVotingKey.text()
self.edtVotingKey.setText(self.dmn_voting_privkey)
self.update_dynamic_labels()
self.update_ctrls_visibility()
self.upd_voting_key_info(False)
@pyqtSlot(str)
def on_lblOwnerKey_linkHovered(self, link):
if link == 'address':
tt = 'Change input type to Crown address'
else:
tt = 'Change input type to private key'
self.lblOwnerKey.setToolTip(tt)
@pyqtSlot(str)
def on_lblOperatorKey_linkHovered(self, link):
if link == 'pubkey':
tt = 'Change input type to public key'
else:
tt = 'Change input type to private key'
self.lblOperatorKey.setToolTip(tt)
@pyqtSlot(str)
def on_lblVotingKey_linkHovered(self, link):
if link == 'address':
tt = 'Change input type to Crown address'
else:
tt = 'Change input type to private key'
self.lblVotingKey.setToolTip(tt)
def prepare_keys(self):
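        # Pre-fill the owner/operator/voting key fields from the masternode configuration,
        # generating a fresh key when the stored key is missing or already appears in an
        # on-chain ProTx for this masternode.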
gen_owner = False
gen_operator = False
gen_voting = False
# if any of the owner/operator/voting key used in the configuration is the same as the corresponding
# key shown in the blockchain, replace that key by a new one
found_protx = False
protx_state = {}
try:
for protx in self.crownd_intf.protx('list', 'registered', True):
protx_state = protx.get('state')
if (protx_state and protx_state.get('service') == self.masternode.ip + ':' + self.masternode.port) or \
(protx.get('collateralHash') == self.masternode.collateralTx and
str(protx.get('collateralIndex')) == str(self.masternode.collateralTxIndex)):
found_protx = True
break
except Exception as e:
pass
if found_protx:
if self.masternode.get_dmn_owner_public_address(self.app_config.crown_network) == \
protx_state.get('ownerAddress'):
gen_owner = True
if self.masternode.get_dmn_operator_pubkey() == protx_state.get('pubKeyOperator'):
gen_operator = True
if self.masternode.get_dmn_voting_public_address(self.app_config.crown_network) == \
protx_state.get('votingAddress'):
gen_voting = True
if (self.masternode.dmn_owner_key_type == InputKeyType.PRIVATE and
not self.masternode.dmn_owner_private_key) or \
(self.masternode.dmn_owner_key_type == InputKeyType.PUBLIC and
not self.masternode.dmn_owner_address):
gen_owner = True
if (self.masternode.dmn_operator_key_type == InputKeyType.PRIVATE and
not self.masternode.dmn_operator_private_key) or \
(self.masternode.dmn_operator_key_type == InputKeyType.PUBLIC and
not self.masternode.dmn_operator_public_key):
gen_operator = True
if (self.masternode.dmn_voting_key_type == InputKeyType.PRIVATE and
not self.masternode.dmn_voting_private_key) or \
(self.masternode.dmn_voting_key_type == InputKeyType.PUBLIC and
not self.masternode.dmn_voting_address):
gen_voting = True
if gen_owner:
self.owner_pkey_generated = generate_wif_privkey(self.app_config.crown_network, compressed=True)
self.edtOwnerKey.setText(self.owner_pkey_generated)
else:
if self.masternode.dmn_owner_key_type == InputKeyType.PRIVATE:
self.edtOwnerKey.setText(self.masternode.dmn_owner_private_key)
else:
self.edtOwnerKey.setText(self.masternode.dmn_owner_address)
self.dmn_owner_key_type = self.masternode.dmn_owner_key_type
if gen_operator:
try:
self.operator_pkey_generated = generate_bls_privkey()
self.edtOperatorKey.setText(self.operator_pkey_generated)
except Exception as e:
self.errorMsg(str(e))
else:
if self.masternode.dmn_operator_key_type == InputKeyType.PRIVATE:
self.edtOperatorKey.setText(self.masternode.dmn_operator_private_key)
else:
self.edtOperatorKey.setText(self.masternode.dmn_operator_public_key)
self.dmn_operator_key_type = self.masternode.dmn_operator_key_type
if self.deterministic_mns_spork_active:
if gen_voting:
self.voting_pkey_generated = generate_wif_privkey(self.app_config.crown_network, compressed=True)
self.edtVotingKey.setText(self.voting_pkey_generated)
else:
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
self.edtVotingKey.setText(self.masternode.dmn_voting_private_key)
else:
self.edtVotingKey.setText(self.masternode.dmn_voting_address)
@pyqtSlot(bool)
    def on_btnCancel_clicked(self, checked):
self.close()
@pyqtSlot(bool)
    def on_btnClose_clicked(self, checked):
self.close()
@pyqtSlot(bool)
def on_btnGenerateOwnerKey_clicked(self, active):
k = generate_wif_privkey(self.app_config.crown_network, compressed=True)
self.edtOwnerKey.setText(k)
self.edtOwnerKey.repaint()
@pyqtSlot(bool)
def on_btnGenerateOperatorKey_clicked(self, active):
self.edtOperatorKey.setText(generate_bls_privkey())
self.edtOperatorKey.repaint() # qt 5.11.3 has issue with automatic repainting after setText on mac
@pyqtSlot(bool)
def on_btnGenerateVotingKey_clicked(self, active):
k = generate_wif_privkey(self.app_config.crown_network, compressed=True)
self.edtVotingKey.setText(k)
self.edtVotingKey.repaint()
def set_ctrl_message(self, control, message: str, style: str):
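        """Show 'message' inside the given control using the CSS class 'style' (info/warning/error);
        hide the control when the message is empty."""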
if message:
control.setText(f'{self.style}<span class="{style}">{message}</span>')
control.setVisible(True)
# control.repaint()
else:
control.setVisible(False)
def update_ctrls_visibility(self):
if not self.deterministic_mns_spork_active:
# hide controls related to the voting key - if spork 15 is not active, voting key has to be the same
# as the owner key
self.lblVotingMsg.hide()
self.lblVotingKey.hide()
self.edtVotingKey.hide()
self.btnGenerateVotingKey.hide()
else:
self.btnGenerateVotingKey.setVisible(self.dmn_voting_key_type == InputKeyType.PRIVATE)
self.btnGenerateOwnerKey.setVisible(self.dmn_owner_key_type == InputKeyType.PRIVATE)
self.btnGenerateOperatorKey.setVisible(self.dmn_operator_key_type == InputKeyType.PRIVATE)
def update_fields_info(self, show_invalid_data_msg: bool):
"""
:param show_data_invalid_msg: if the argument is true and the data is invalid, an error message is shown
below the control; the argument is set to True if before moving to the next step there are some errors
found in the data provided by the user.
"""
self.upd_collateral_tx_info(show_invalid_data_msg)
self.upd_ip_info(show_invalid_data_msg)
self.upd_payout_addr_info(show_invalid_data_msg)
self.upd_oper_reward_info(show_invalid_data_msg)
self.upd_owner_key_info(show_invalid_data_msg)
self.upd_operator_key_info(show_invalid_data_msg)
self.upd_voting_key_info(show_invalid_data_msg)
def upd_collateral_tx_info(self, show_invalid_data_msg: bool):
"""
:param show_data_invalid_msg: if the argument is true and the data is invalid, an error message is shown
below the control; the argument is set to True if before moving to the next step there are some errors
found in the data provided by the user.
"""
msg = ''
style = 'info'
if show_invalid_data_msg and self.collateral_validation_err_msg:
msg = self.collateral_validation_err_msg
style = 'error'
self.set_ctrl_message(self.lblCollateralTxMsg, msg, style)
def upd_ip_info(self, show_invalid_data_msg: bool):
"""
:param show_data_invalid_msg: if the argument is true and the data is invalid, an error message is shown
below the control; the argument is set to True if before moving to the next step there are some errors
found in the data provided by the user.
"""
msg = ''
style = ''
if show_invalid_data_msg and self.ip_port_validation_err_msg:
msg = self.ip_port_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.edtIP.text().strip():
msg = 'You can leave the IP address and port fields empty if you want to delegate the operator ' \
'role to an external entity and you don\'t know their values in advance.'
style = 'info'
else:
                    msg = 'If you don\'t set the IP address and port fields, the masternode operator will ' \
                          'have to issue a ProUpServTx transaction using the Crown wallet.'
style = 'warning'
self.set_ctrl_message(self.lblIPMsg, msg, style)
def upd_payout_addr_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if show_invalid_data_msg and self.payout_address_validation_err_msg:
msg = self.payout_address_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
msg = 'The owner\'s payout address can be set to any valid Crown address - it no longer ' \
'has to be the same as the collateral address.'
style = 'info'
self.set_ctrl_message(self.lblPayoutMsg, msg, style)
def upd_oper_reward_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if show_invalid_data_msg and self.operator_reward_validation_err_msg:
msg = self.operator_reward_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.chbWholeMNReward.isChecked():
msg = 'Here you can specify how much of the masternode earnings will go to the ' \
'masternode operator.'
style = 'info'
else:
msg = 'The masternode operator will have to specify his reward payee address in a ProUpServTx ' \
'transaction, otherwise the full reward will go to the masternode owner.'
style = 'warning'
self.set_ctrl_message(self.lblOperatorRewardMsg, msg, style)
def upd_owner_key_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if show_invalid_data_msg and self.owner_key_validation_err_msg:
msg = self.owner_key_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
if self.edtOwnerKey.text().strip() == self.owner_pkey_generated:
msg = 'This is an automatically generated owner private key. You can enter your own or ' \
'generate a new one by pressing the button on the right.'
elif not self.edtOwnerKey.text().strip():
msg = 'Enter the owner private key or generate a new one by clicking the button on the right.'
style = 'info'
else:
                    msg = 'You can use a Crown address if the related private key is stored elsewhere, e.g. in ' \
                          'the Crown Core wallet.<br><span class="warning">Note that if you provide an address ' \
                          'instead of a private key, you will not be able to publish the ProRegTx ' \
                          'transaction through public RPC nodes in the next steps.</span>'
style = 'info'
self.set_ctrl_message(self.lblOwnerMsg, msg, style)
def upd_operator_key_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if show_invalid_data_msg and self.operator_key_validation_err_msg:
msg = self.operator_key_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
if self.edtOperatorKey.text().strip() == self.operator_pkey_generated:
msg = 'This is an automatically generated operator BLS private key. You can enter your ' \
'own or generate a new one by pressing the button on the right.'
elif not self.edtOperatorKey.text().strip():
msg = 'Enter the operator private key or generate a new one by clicking the button on ' \
'the right.'
style = 'info'
else:
                    msg = 'You can use a public key if your masternode is managed by a separate entity (operator) ' \
'that controls the related private key or if you prefer to keep the private key outside ' \
'the program. If necessary, you can revoke this key by sending a new ProRegTx ' \
'transaction with a new operator key.'
style = 'info'
self.set_ctrl_message(self.lblOperatorMsg, msg, style)
def upd_voting_key_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if self.deterministic_mns_spork_active:
if show_invalid_data_msg and self.voting_key_validation_err_msg:
msg = self.voting_key_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
if self.edtVotingKey.text().strip() == self.voting_pkey_generated:
msg = 'This is an automatically generated private key for voting. You can enter your own or ' \
'generate a new one by pressing the button on the right.'
elif not self.edtVotingKey.text().strip():
msg = 'Enter the private key for voting or generate a new one by clicking the button on ' \
'the right.'
style = 'info'
else:
                        msg = 'You can use a Crown address if the related private key is stored elsewhere, e.g. in ' \
                              'the Crown Core wallet.<br><span class="warning">Note that providing an address instead of ' \
'a private key will prevent you from voting on proposals in this program.</span>'
style = 'info'
self.set_ctrl_message(self.lblVotingMsg, msg, style)
def get_crown_node_type(self):
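        """Return the selected way of sending the ProRegTx: a public RPC node or the user's own node
        (None when neither radio button is checked)."""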
if self.rbCMTCrownNodeType.isChecked():
return NODE_TYPE_PUBLIC_RPC
elif self.rbOwnCrownNodeType.isChecked():
return NODE_TYPE_OWN
else:
return None
def upd_node_type_info(self):
nt = self.get_crown_node_type()
msg = ''
if nt is None:
            msg = 'DIP-3 masternode registration involves sending a special transaction via the v0.13 Crown node ' \
                  '(e.g. Crown-Qt). <b>Note that this requires paying a transaction fee, as with any ' \
                  'other ("normal") transaction.</b>'
elif nt == NODE_TYPE_PUBLIC_RPC:
msg = 'The ProRegTx transaction will be processed via the remote RPC node stored in the app configuration.' \
'<br><br>' \
'<b>Note 1:</b> this operation will involve signing transaction data with your <span style="color:red">owner key on the remote node</span>, ' \
'so use this method only if you trust the operator of that node (nodes <i>alice(luna, suzy).crown-masternode-tool.org</i> are maintained by the author of this application).<br><br>' \
'<b>Note 2:</b> if the operation fails (e.g. due to a lack of funds), choose the manual method ' \
'using your own Crown wallet.'
elif nt == NODE_TYPE_OWN:
msg = 'A Crown Core wallet (v0.13) with sufficient funds to cover transaction fees is required to ' \
'complete the next steps.'
self.lblCrownNodeTypeMessage.setText(msg)
def update_ctrl_state(self):
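        """Enable or disable the operator reward edit box depending on the 'whole MN reward' checkbox."""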
self.edtOperatorReward.setDisabled(self.chbWholeMNReward.isChecked())
@pyqtSlot(str)
def on_edtCollateralTx_textChanged(self, text):
self.upd_collateral_tx_info(False)
@pyqtSlot(str)
def on_edtCollateralIndex_textChanged(self, text):
self.upd_collateral_tx_info(False)
@pyqtSlot(str)
def on_edtIP_textChanged(self, text):
self.upd_ip_info(False)
@pyqtSlot(str)
def on_edtPayoutAddress_textChanged(self, text):
self.upd_payout_addr_info(False)
@pyqtSlot(bool)
def on_chbWholeMNReward_toggled(self, checked):
if checked:
self.operator_reward_saved = self.edtOperatorReward.value()
self.edtOperatorReward.setValue(0.0)
else:
            if self.operator_reward_saved is not None:
self.edtOperatorReward.setValue(self.operator_reward_saved)
self.update_ctrl_state()
self.upd_oper_reward_info(False)
@pyqtSlot(str)
def on_edtOwnerKey_textChanged(self, text):
self.upd_owner_key_info(False)
@pyqtSlot(str)
def on_edtOperatorKey_textChanged(self, text):
self.upd_operator_key_info(False)
@pyqtSlot(str)
def on_edtVotingKey_textChanged(self, text):
self.upd_voting_key_info(False)
@pyqtSlot(str)
def save_summary_info(self, link: str):
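        """Write the summary lines (label/value pairs separated with tabs) to a text file selected by the user."""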
file_name = WndUtils.save_file_query(self.main_dlg, self.app_config,
'Enter the file name',
filter="TXT files (*.txt);;All Files (*)")
if file_name:
with open(file_name, 'wt') as fptr:
for l in self.summary_info:
lbl, val = l.split('\t')
fptr.write(f'{lbl}:\t{val}\n')
def update_step_tab_ui(self):
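        """Switch the stacked widget to the page of the current wizard step and refresh the step-related
        controls; for the summary step, build the summary text and the masternodeblsprivkey hint."""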
def show_hide_tabs(tab_idx_to_show: int):
self.edtManualProtxPrepare.setVisible(tab_idx_to_show == 3)
self.edtManualProtxPrepareResult.setVisible(tab_idx_to_show == 3)
self.edtManualProtxSubmit.setVisible(tab_idx_to_show == 3)
pass
self.btnContinue.setEnabled(False)
if self.current_step == STEP_MN_DATA:
self.stackedWidget.setCurrentIndex(0)
self.update_fields_info(False)
self.btnContinue.show()
self.btnContinue.setEnabled(True)
self.btnCancel.setEnabled(True)
elif self.current_step == STEP_DASHD_TYPE:
self.stackedWidget.setCurrentIndex(1)
self.upd_node_type_info()
self.btnContinue.setEnabled(True)
self.btnContinue.show()
self.btnCancel.setEnabled(True)
elif self.current_step == STEP_AUTOMATIC_RPC_NODE:
self.stackedWidget.setCurrentIndex(2)
self.upd_node_type_info()
elif self.current_step == STEP_MANUAL_OWN_NODE:
self.stackedWidget.setCurrentIndex(3)
self.upd_node_type_info()
self.btnContinue.setEnabled(True)
elif self.current_step == STEP_SUMMARY:
self.stackedWidget.setCurrentIndex(4)
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
owner_privkey = self.dmn_owner_privkey
else:
owner_privkey = '<not available>'
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
operator_privkey = self.dmn_operator_privkey
else:
operator_privkey = '<not available>'
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
voting_privkey = self.dmn_voting_privkey
else:
voting_privkey = '<not available>'
self.summary_info = \
[f'Network address\t{self.dmn_ip}:{self.dmn_tcp_port}',
f'Payout address\t{self.dmn_owner_payout_addr}',
f'Owner private key\t{owner_privkey}',
f'Owner public address\t{self.dmn_owner_address}',
f'Operator private key\t{operator_privkey}',
f'Operator public key\t{self.dmn_operator_pubkey}',
f'Voting private key\t{voting_privkey}',
f'Voting public address\t{self.dmn_voting_address}',
f'Protx hash\t{self.dmn_reg_tx_hash}']
text = '<table>'
for l in self.summary_info:
lbl, val = l.split('\t')
text += f'<tr><td style="white-space: nowrap"><b>{lbl}:</b> </td><td>{val}</td></tr>'
text += '</table>'
self.edtProtxSummary.setText(text)
self.edtProtxSummary.show()
self.lblProtxSummary2.show()
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
                operator_message = '<b><span style="color:red">One more thing... </span></b>copy the following ' \
'line to the <code>crown.conf</code> file on your masternode server ' \
'(and restart <i>crownd</i>) or pass it to the masternode operator:'
else:
                operator_message = '<b><span style="color:red">One more thing... </span></b>copy the following ' \
'line to the <code>crown.conf</code> file on your masternode server, replacing ' \
'"<your-operator-bls-private-key>" with the appropriate value or ask the operator ' \
'for it:'
self.lblProtxSummary3.setText(operator_message)
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
operator_privkey = self.dmn_operator_privkey
else:
operator_privkey = '<your-operator-bls-private-key>'
self.edtSummaryDMNOperatorKey.setText(f'masternodeblsprivkey={operator_privkey}')
self.btnCancel.hide()
self.btnBack.hide()
self.btnContinue.hide()
self.btnClose.show()
self.btnClose.setEnabled(True)
self.btnClose.repaint()
else:
raise Exception('Invalid step')
show_hide_tabs(self.stackedWidget.currentIndex())
self.lblFieldHints.setVisible(self.stackedWidget.currentIndex() == 0)
self.btnBack.setEnabled(len(self.step_stack) > 0)
self.btnContinue.repaint()
self.btnCancel.repaint()
self.btnBack.repaint()
def validate_data(self):
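        """Validate the data entered in the first step, storing a per-field error message for every
        problem found and raising an exception when there are any; finally, verify the collateral
        transaction and look up its address on the hardware wallet. Returns True on success."""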
self.dmn_collateral_tx = self.edtCollateralTx.text().strip()
self.collateral_validation_err_msg = ''
error_count = 0
try:
if not self.dmn_collateral_tx:
                self.collateral_validation_err_msg = 'Collateral transaction ID is required.'
self.edtCollateralTx.setFocus()
else:
self.dmn_collateral_tx_index = int(self.edtCollateralIndex.text())
if self.dmn_collateral_tx_index < 0:
self.collateral_validation_err_msg = 'Invalid collateral transaction index.'
except Exception:
self.edtCollateralIndex.setFocus()
            self.collateral_validation_err_msg = 'Invalid collateral transaction index: it should be an integer ' \
                                                 'value greater than or equal to 0.'
if self.collateral_validation_err_msg:
self.upd_collateral_tx_info(True)
error_count += 1
self.ip_port_validation_err_msg = ''
try:
self.dmn_ip = self.edtIP.text().strip()
if self.dmn_ip:
ipaddress.ip_address(self.dmn_ip)
except Exception as e:
self.edtIP.setFocus()
self.ip_port_validation_err_msg = 'Invalid masternode IP address: %s.' % str(e)
self.upd_ip_info(True)
error_count += 1
try:
if self.dmn_ip:
self.dmn_tcp_port = int(self.edtPort.text())
else:
self.dmn_tcp_port = None
except Exception:
self.edtPort.setFocus()
self.ip_port_validation_err_msg = 'Invalid TCP port: should be integer.'
self.upd_ip_info(True)
error_count += 1
self.payout_address_validation_err_msg = ''
addr = self.edtPayoutAddress.text().strip()
if not addr:
self.payout_address_validation_err_msg = 'Owner payout address is required.'
else:
self.dmn_owner_payout_addr = addr
if not validate_address(self.dmn_owner_payout_addr, self.app_config.crown_network):
self.payout_address_validation_err_msg = 'Invalid owner payout address.'
if self.payout_address_validation_err_msg:
self.edtPayoutAddress.setFocus()
self.upd_payout_addr_info(True)
error_count += 1
self.operator_reward_validation_err_msg = ''
if self.chbWholeMNReward.isChecked():
self.dmn_operator_reward = 0
else:
self.dmn_operator_reward = self.edtOperatorReward.value()
if self.dmn_operator_reward > 100 or self.dmn_operator_reward < 0:
self.edtOperatorReward.setFocus()
self.operator_reward_validation_err_msg = 'Invalid operator reward value: should be a value ' \
'between 0 and 100.'
if self.operator_reward_validation_err_msg:
self.upd_oper_reward_info(True)
error_count += 1
self.owner_key_validation_err_msg = ''
key = self.edtOwnerKey.text().strip()
if not key:
self.owner_key_validation_err_msg = 'Owner key/address is required.'
else:
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
self.dmn_owner_privkey = key
if not validate_wif_privkey(self.dmn_owner_privkey, self.app_config.crown_network):
self.edtOwnerKey.setFocus()
self.owner_key_validation_err_msg = 'Invalid owner private key.'
else:
self.dmn_owner_address = wif_privkey_to_address(self.dmn_owner_privkey, self.app_config.crown_network)
else:
self.dmn_owner_address = key
self.dmn_owner_privkey = ''
if not validate_address(self.dmn_owner_address, self.app_config.crown_network):
self.edtOwnerKey.setFocus()
self.owner_key_validation_err_msg = 'Invalid owner Crown address.'
if self.owner_key_validation_err_msg:
self.upd_owner_key_info(True)
error_count += 1
self.operator_key_validation_err_msg = ''
key = self.edtOperatorKey.text().strip()
if not key:
self.operator_key_validation_err_msg = 'Operator key is required.'
else:
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
try:
self.dmn_operator_privkey = key
try:
b = bytes.fromhex(self.dmn_operator_privkey)
if len(b) != 32:
raise Exception('invalid length (' + str(len(b)) + ')')
except Exception as e:
self.edtOperatorKey.setFocus()
self.operator_key_validation_err_msg = 'Invalid operator private key: ' + str(e)
self.dmn_operator_pubkey = bls_privkey_to_pubkey(self.dmn_operator_privkey)
except Exception as e:
self.edtOperatorKey.setFocus()
self.operator_key_validation_err_msg = 'Invalid operator private key: ' + str(e)
else:
self.dmn_operator_pubkey = key
self.dmn_operator_privkey = ''
try:
b = bytes.fromhex(self.dmn_operator_pubkey)
if len(b) != 48:
raise Exception('invalid length (' + str(len(b)) + ')')
except Exception as e:
self.edtOperatorKey.setFocus()
self.operator_key_validation_err_msg = 'Invalid operator public key: ' + str(e)
if self.operator_key_validation_err_msg:
self.upd_operator_key_info(True)
error_count += 1
self.voting_key_validation_err_msg = ''
if self.deterministic_mns_spork_active:
key = self.edtVotingKey.text().strip()
if not key:
self.voting_key_validation_err_msg = 'Voting key/address is required.'
else:
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
self.dmn_voting_privkey = key
if not validate_wif_privkey(self.dmn_voting_privkey, self.app_config.crown_network):
self.edtVotingKey.setFocus()
self.voting_key_validation_err_msg = 'Invalid voting private key.'
else:
self.dmn_voting_address = wif_privkey_to_address(self.dmn_voting_privkey, self.app_config.crown_network)
else:
self.dmn_voting_address = key
self.dmn_voting_privkey = ''
if not validate_address(self.dmn_voting_address, self.app_config.crown_network):
self.edtVotingKey.setFocus()
self.voting_key_validation_err_msg = 'Invalid voting Crown address.'
else:
# spork 15 not active - use the owner private key for voting
self.dmn_voting_address = self.dmn_owner_address
self.dmn_voting_privkey = self.dmn_owner_privkey
self.dmn_voting_key_type = self.dmn_owner_key_type
if self.voting_key_validation_err_msg:
self.upd_voting_key_info(True)
error_count += 1
if error_count > 1:
raise Exception('Errors were encountered in the input data. You must correct them before you can continue.')
elif error_count == 1:
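            # exactly one of the validation messages is non-empty at this point, so max() over the tuple
            # returns that message (any non-empty string compares greater than an empty one)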
raise Exception(max((self.collateral_validation_err_msg, self.ip_port_validation_err_msg,
self.payout_address_validation_err_msg, self.operator_reward_validation_err_msg,
self.owner_key_validation_err_msg, self.operator_key_validation_err_msg,
self.voting_key_validation_err_msg)))
break_scanning = False
def check_break_scanning():
nonlocal break_scanning
return break_scanning
def do_break_scanning():
nonlocal break_scanning
break_scanning = True
return False
self.btnContinue.setEnabled(False)
try:
ret = WndUtils.run_thread_dialog(self.get_collateral_tx_address_thread, (check_break_scanning,), True,
force_close_dlg_callback=do_break_scanning)
except Exception as e:
log.exception(str(e))
raise Exception(str(e))
self.btnContinue.setEnabled(True)
return ret
def get_collateral_tx_address_thread(self, ctrl: CtrlObject, check_break_scanning_ext: Callable[[], bool]):
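        """Worker thread: fetch the collateral transaction, read the collateral address from the selected
        output and find its BIP32 path, scanning the wallet if the path from the configuration is missing
        or does not match."""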
txes_cnt = 0
msg = ''
break_scanning = False
ctrl.dlg_config_fun(dlg_title="Validating collateral transaction.", show_progress_bar=False)
ctrl.display_msg_fun('Verifying collateral transaction...')
def check_break_scanning():
nonlocal break_scanning
if self.finishing or break_scanning:
# stop the scanning process if the dialog finishes or the address/bip32path has been found
raise BreakFetchTransactionsException()
if check_break_scanning_ext is not None and check_break_scanning_ext():
raise BreakFetchTransactionsException()
def fetch_txes_feeback(tx_cnt: int):
nonlocal msg, txes_cnt
txes_cnt += tx_cnt
ctrl.display_msg_fun(msg + '<br><br>' + 'Number of transactions fetched so far: ' + str(txes_cnt))
def on_msg_link_activated(link: str):
nonlocal break_scanning
if link == 'break':
break_scanning = True
try:
tx = self.crownd_intf.getrawtransaction(self.dmn_collateral_tx, 1, skip_cache=True)
except Exception as e:
            raise Exception('Cannot get the collateral transaction due to the following error: ' + str(e))
vouts = tx.get('vout')
if vouts:
if self.dmn_collateral_tx_index < len(vouts):
vout = vouts[self.dmn_collateral_tx_index]
spk = vout.get('scriptPubKey')
if not spk:
raise Exception(f'The collateral transaction ({self.dmn_collateral_tx}) output '
                                    f'({self.dmn_collateral_tx_index}) doesn\'t have a value in the scriptPubKey '
f'field.')
ads = spk.get('addresses')
                if not ads:
                    raise Exception('The collateral transaction output doesn\'t have a Crown address assigned.')
if vout.get('valueSat') != 10000e8:
raise Exception('The value of the collateral transaction output is not equal to 10000 Crown.')
self.dmn_collateral_tx_address = ads[0]
else:
raise Exception(f'Transaction {self.dmn_collateral_tx} doesn\'t have output with index: '
f'{self.dmn_collateral_tx_index}')
else:
raise Exception('Invalid collateral transaction')
ctrl.display_msg_fun('Verifying the collateral transaction address on your hardware wallet.')
if not self.main_dlg.connect_hardware_wallet():
return False
if self.dmn_collateral_tx_address_path:
try:
addr = hw_intf.get_address(self.main_dlg.hw_session, self.dmn_collateral_tx_address_path)
except CancelException:
return False
msg = ''
if addr != self.dmn_collateral_tx_address:
log.warning(
f'The address returned by the hardware wallet ({addr}) for the BIP32 path '
                    f'{self.dmn_collateral_tx_address_path} differs from the address stored in the mn configuration '
                    f'({self.dmn_collateral_tx_address}). Need to scan the wallet for the correct BIP32 path.')
                msg = '<span style="color:red">The BIP32 path of the collateral address from your mn config is incorrect.<br></span>' \
f'Trying to find the BIP32 path of the address {self.dmn_collateral_tx_address} in your wallet.' \
f'<br>This may take a while (<a href="break">break</a>)...'
self.dmn_collateral_tx_address_path = ''
else:
msg = 'Looking for a BIP32 path of the Crown address related to the masternode collateral.<br>' \
'This may take a while (<a href="break">break</a>)....'
if not self.dmn_collateral_tx_address_path and not self.finishing:
lbl = ctrl.get_msg_label_control()
if lbl:
def set():
lbl.setOpenExternalLinks(False)
lbl.setTextInteractionFlags(lbl.textInteractionFlags() & ~Qt.TextSelectableByMouse)
lbl.linkActivated.connect(on_msg_link_activated)
lbl.repaint()
WndUtils.call_in_main_thread(set)
ctrl.display_msg_fun(msg)
# fetch the transactions that involved the addresses stored in the wallet - during this
# all the used addresses are revealed
addr = self.bip44_wallet.scan_wallet_for_address(self.dmn_collateral_tx_address, check_break_scanning,
fetch_txes_feeback)
if not addr:
if not break_scanning:
WndUtils.errorMsg(f'Couldn\'t find a BIP32 path of the collateral address ({self.dmn_collateral_tx_address}).')
return False
else:
self.dmn_collateral_tx_address_path = addr.bip32_path
return True
def next_step(self):
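        """Move the wizard to the next step, performing the checks required by the current step first."""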
cs = None
if self.current_step == STEP_MN_DATA:
if self.validate_data():
cs = STEP_DASHD_TYPE
else:
return
self.step_stack.append(self.current_step)
elif self.current_step == STEP_DASHD_TYPE:
if self.get_crown_node_type() == NODE_TYPE_PUBLIC_RPC:
cs = STEP_AUTOMATIC_RPC_NODE
elif self.get_crown_node_type() == NODE_TYPE_OWN:
cs = STEP_MANUAL_OWN_NODE
else:
self.errorMsg('You have to choose one of the two options.')
return
self.step_stack.append(self.current_step)
elif self.current_step == STEP_AUTOMATIC_RPC_NODE:
cs = STEP_SUMMARY
# in this case don't allow to start the automatic process again when the user clicks <Back>
elif self.current_step == STEP_MANUAL_OWN_NODE:
            # check if the user provided the protx transaction hash
if not self.manual_signed_message:
self.errorMsg('It looks like you have not signed a "protx register_prepare" result.')
return
self.dmn_reg_tx_hash = self.edtManualTxHash.text().strip()
if not self.dmn_reg_tx_hash:
self.edtManualTxHash.setFocus()
self.errorMsg('Invalid transaction hash.')
return
try:
bytes.fromhex(self.dmn_reg_tx_hash)
except Exception:
log.warning('Invalid transaction hash.')
self.edtManualTxHash.setFocus()
self.errorMsg('Invalid transaction hash.')
return
cs = STEP_SUMMARY
else:
self.errorMsg('Invalid step')
return
prev_step = self.current_step
self.current_step = cs
self.update_step_tab_ui()
try:
if self.current_step == STEP_AUTOMATIC_RPC_NODE:
self.start_automatic_process()
elif self.current_step == STEP_MANUAL_OWN_NODE:
self.start_manual_process()
elif self.current_step == STEP_SUMMARY:
                self.lblProtxSummary1.setText('<b><span style="color:green">Congratulations! The transaction for your DIP-3 '
                                              'masternode has been submitted and is currently awaiting confirmations.'
                                              '</span></b>')
if self.on_proregtx_success_callback:
self.on_proregtx_success_callback(self.masternode)
if not self.check_tx_confirmation():
self.wait_for_confirmation_timer_id = self.startTimer(5000)
except Exception:
self.current_step = prev_step
self.update_step_tab_ui()
raise
def previous_step(self):
if self.step_stack:
self.current_step = self.step_stack.pop()
else:
raise Exception('Invalid step')
self.update_step_tab_ui()
@pyqtSlot(bool)
def on_btnContinue_clicked(self, active):
self.next_step()
@pyqtSlot(bool)
def on_btnBack_clicked(self, active):
self.previous_step()
@pyqtSlot(bool)
def on_rbCMTCrownNodeType_toggled(self, active):
if active:
self.upd_node_type_info()
@pyqtSlot(bool)
def on_rbOwnCrownNodeType_toggled(self, active):
if active:
self.upd_node_type_info()
def sign_protx_message_with_hw(self, msg_to_sign) -> str:
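        """Sign the ProRegTx payload message with the hardware wallet using the collateral address key;
        returns the signature encoded with base64, raising when the signing address does not match."""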
sig = WndUtils.call_in_main_thread(
hw_intf.hw_sign_message, self.main_dlg.hw_session, self.dmn_collateral_tx_address_path,
msg_to_sign, 'Click the confirmation button on your hardware wallet to sign the ProTx payload message.')
if sig.address != self.dmn_collateral_tx_address:
log.error(f'Protx payload signature address mismatch. Is: {sig.address}, should be: '
f'{self.dmn_collateral_tx_address}.')
raise Exception(f'Protx payload signature address mismatch. Is: {sig.address}, should be: '
f'{self.dmn_collateral_tx_address}.')
else:
sig_bin = base64.b64encode(sig.signature)
payload_sig_str = sig_bin.decode('ascii')
return payload_sig_str
def start_automatic_process(self):
self.lblProtxTransaction1.hide()
self.lblProtxTransaction2.hide()
self.lblProtxTransaction3.hide()
self.lblProtxTransaction4.hide()
self.btnContinue.setEnabled(False)
self.btnContinue.repaint()
self.run_thread(self, self.proregtx_automatic_thread, (), on_thread_finish=self.finished_automatic_process)
def finished_automatic_process(self):
self.btnCancel.setEnabled(True)
self.btnCancel.repaint()
self.update_step_tab_ui()
def proregtx_automatic_thread(self, ctrl):
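        """Worker thread of the automatic path: call 'protx register_prepare' on the remote node, sign the
        returned message with the hardware wallet and publish the result with 'protx register_submit'."""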
log.debug('Starting proregtx_prepare_thread')
def set_text(widget, text: str):
def call(widget, text):
widget.setText(text)
widget.repaint()
widget.setVisible(True)
WndUtils.call_in_main_thread(call, widget, text)
def finished_with_success():
def call():
self.next_step()
WndUtils.call_in_main_thread(call)
try:
try:
mn_reg_support = self.crownd_intf.checkfeaturesupport('protx_register', self.app_config.app_version)
# is the "registration" feature enabled on the current rpc node?
if not mn_reg_support.get('enabled'):
if mn_reg_support.get('message'):
raise Exception(mn_reg_support.get('message'))
else:
raise Exception('The \'protx_register\' function is not supported by the RPC node '
'you are connected to.')
public_proxy_node = True
active = self.app_config.feature_register_dmn_automatic.get_value()
if not active:
msg = self.app_config.feature_register_dmn_automatic.get_message()
if not msg:
msg = 'The functionality of the automatic execution of the ProRegTx command on the ' \
'"public" RPC nodes is inactive. Use the manual method or contact the program author ' \
'for details.'
raise Exception(msg)
except JSONRPCException as e:
public_proxy_node = False # it's not a "public" rpc node
# preparing protx message
try:
funding_address = ''
if not public_proxy_node:
try:
# find an address to be used as the source of the transaction fees
min_fee = round(1024 * FEE_DUFF_PER_BYTE / 1e8, 8)
balances = self.crownd_intf.listaddressbalances(min_fee)
bal_list = []
for addr in balances:
bal_list.append({'address': addr, 'amount': balances[addr]})
bal_list.sort(key = lambda x: x['amount'])
if not bal_list:
raise Exception("No address can be found in the node's wallet with sufficient funds to "
"cover the transaction fees.")
funding_address = bal_list[0]['address']
except JSONRPCException as e:
log.info("Couldn't list the node address balances. We assume you are using a public RPC node and "
"the funding address for the transaction fees will be estimated during the "
"`register_prepare` call")
set_text(self.lblProtxTransaction1, '<b>1. Preparing a ProRegTx transaction on a remote node...</b>')
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
owner_key = self.dmn_owner_privkey
else:
owner_key = self.dmn_owner_address
params = ['register_prepare', self.dmn_collateral_tx, self.dmn_collateral_tx_index,
self.dmn_ip + ':' + str(self.dmn_tcp_port) if self.dmn_ip else '0', owner_key,
self.dmn_operator_pubkey, self.dmn_voting_address, str(round(self.dmn_operator_reward, 2)),
self.dmn_owner_payout_addr]
if funding_address:
params.append(funding_address)
call_ret = self.crownd_intf.rpc_call(True, False, 'protx', *tuple(params))
call_ret_str = json.dumps(call_ret, default=EncodeDecimal)
msg_to_sign = call_ret.get('signMessage', '')
protx_tx = call_ret.get('tx')
log.debug('register_prepare returned: ' + call_ret_str)
set_text(self.lblProtxTransaction1,
'<b>1. Preparing a ProRegTx transaction on a remote node.</b> <span style="color:green">'
'Success.</span>')
except Exception as e:
set_text(
self.lblProtxTransaction1,
'<b>1. Preparing a ProRegTx transaction on a remote node.</b> <span style="color:red">Failed '
f'with the following error: {str(e)}</span>')
return
set_text(self.lblProtxTransaction2, '<b>Message to be signed:</b><br><code>' + msg_to_sign + '</code>')
# signing message:
set_text(self.lblProtxTransaction3, '<b>2. Signing message with hardware wallet...</b>')
try:
payload_sig_str = self.sign_protx_message_with_hw(msg_to_sign)
set_text(self.lblProtxTransaction3, '<b>2. Signing message with hardware wallet.</b> '
'<span style="color:green">Success.</span>')
except CancelException:
set_text(self.lblProtxTransaction3,
'<b>2. Signing message with hardware wallet.</b> <span style="color:red">Cancelled.</span>')
return
except Exception as e:
log.exception('Signature failed.')
set_text(self.lblProtxTransaction3,
'<b>2. Signing message with hardware wallet.</b> <span style="color:red">Failed with the '
f'following error: {str(e)}.</span>')
return
# submitting signed transaction
set_text(self.lblProtxTransaction4, '<b>3. Submitting the signed protx transaction to the remote node...</b>')
try:
self.dmn_reg_tx_hash = self.crownd_intf.rpc_call(True, False, 'protx', 'register_submit', protx_tx,
payload_sig_str)
log.debug('protx register_submit returned: ' + str(self.dmn_reg_tx_hash))
set_text(self.lblProtxTransaction4,
'<b>3. Submitting the signed protx transaction to the remote node.</b> <span style="'
'color:green">Success.</span>')
finished_with_success()
except Exception as e:
log.exception('protx register_submit failed')
set_text(self.lblProtxTransaction4,
'<b>3. Submitting the signed protx transaction to the remote node.</b> '
f'<span style="color:red">Failed with the following error: {str(e)}</span>')
except Exception as e:
log.exception('Exception occurred')
set_text(self.lblProtxTransaction1, f'<span style="color:red">{str(e)}</span>')
@pyqtSlot(bool)
    def on_btnManualSignProtx_clicked(self, checked):
prepare_result = self.edtManualProtxPrepareResult.toPlainText().strip()
if not prepare_result:
self.errorMsg('You need to enter a result of the "protx register_prepare" command.')
self.edtManualProtxPrepareResult.setFocus()
return
try:
prepare_result_dict = json.loads(prepare_result)
msg_to_sign = prepare_result_dict.get('signMessage', '')
protx_tx = prepare_result_dict.get('tx')
try:
payload_sig_str = self.sign_protx_message_with_hw(msg_to_sign)
protx_submit = f'protx register_submit "{protx_tx}" "{payload_sig_str}"'
self.edtManualProtxSubmit.setPlainText(protx_submit)
self.btnContinue.setEnabled(True)
self.btnContinue.repaint()
self.manual_signed_message = True
except CancelException:
return
except Exception as e:
log.exception('Signature failed.')
self.errorMsg(str(e))
return
except Exception as e:
self.errorMsg('Invalid "protx register_prepare" result. Note that the text must be copied along '
'with curly braces.')
return
def start_manual_process(self):
self.edtManualFundingAddress.setFocus()
self.update_manual_protx_prepare_command()
def update_manual_protx_prepare_command(self):
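        """Rebuild the 'protx register_prepare' command shown to the user from the entered funding address
        and the data collected in the first step; clear the dependent fields when the command changes."""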
addr = self.edtManualFundingAddress.text().strip()
if addr:
valid = validate_address(addr, self.app_config.crown_network)
if valid:
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
owner_key = self.dmn_owner_privkey
else:
owner_key = self.dmn_owner_address
                # compute ip:port separately to avoid nesting the same quote type inside the f-string expression
                ip_port = self.dmn_ip + ':' + str(self.dmn_tcp_port) if self.dmn_ip else '0'
                cmd = f'protx register_prepare "{self.dmn_collateral_tx}" "{self.dmn_collateral_tx_index}" ' \
                      f'"{ip_port}" ' \
f'"{owner_key}" "{self.dmn_operator_pubkey}" "{self.dmn_voting_address}" ' \
f'"{str(round(self.dmn_operator_reward, 2))}" "{self.dmn_owner_payout_addr}" "{addr}"'
else:
                cmd = 'Enter a valid funding address in the edit box above'
else:
cmd = ''
self.edtManualProtxPrepare.setPlainText(cmd)
if cmd != self.last_manual_prepare_string:
self.last_manual_prepare_string = cmd
self.edtManualProtxSubmit.clear()
self.edtManualProtxPrepareResult.clear()
self.edtManualTxHash.clear()
self.dmn_reg_tx_hash = ''
self.manual_signed_message = False
def timerEvent(self, event: QTimerEvent):
""" Timer controlling the confirmation of the proreg transaction. """
if self.check_tx_confirmation():
self.killTimer(event.timerId())
def check_tx_confirmation(self):
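        """Return True when the ProRegTx transaction has at least one confirmation, updating the summary
        label with the block height in which it was confirmed."""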
try:
tx = self.crownd_intf.getrawtransaction(self.dmn_reg_tx_hash, 1, skip_cache=True)
conf = tx.get('confirmations')
if conf:
h = tx.get('height')
                self.lblProtxSummary1.setText(
                    '<b><span style="color:green">Congratulations! The transaction for your DIP-3 masternode has been '
                    f'confirmed in block {h}.</span></b>')
return True
except Exception:
pass
return False
def update_show_hints_label(self):
if self.show_field_hinds:
lbl = '<a href="hide">Hide field descriptions</a>'
else:
lbl = '<a href="show">Show field descriptions</a>'
self.lblFieldHints.setText(lbl)
@pyqtSlot(str)
def on_lblFieldHints_linkActivated(self, link):
if link == 'show':
self.show_field_hinds = True
else:
self.show_field_hinds = False
self.update_show_hints_label()
self.update_fields_info(False)
self.minimize_dialog_height()
@pyqtSlot(str)
def on_edtManualFundingAddress_textChanged(self, text):
self.update_manual_protx_prepare_command()
@pyqtSlot(bool)
def on_btnManualFundingAddressPaste_clicked(self, checked):
cl = QApplication.clipboard()
self.edtManualFundingAddress.setText(cl.text())
@pyqtSlot(bool)
def on_btnManualProtxPrepareCopy_clicked(self, checked):
text = self.edtManualProtxPrepare.toPlainText()
cl = QApplication.clipboard()
cl.setText(text)
@pyqtSlot(bool)
def on_btnManualProtxPrepareResultPaste_clicked(self, checked):
cl = QApplication.clipboard()
self.edtManualProtxPrepareResult.setPlainText(cl.text())
@pyqtSlot(bool)
def on_btnManualProtxSubmitCopy_clicked(self, checked):
text = self.edtManualProtxSubmit.toPlainText()
cl = QApplication.clipboard()
cl.setText(text)
@pyqtSlot(bool)
def on_btnManualTxHashPaste_clicked(self, checked):
cl = QApplication.clipboard()
self.edtManualTxHash.setText(cl.text())
@pyqtSlot(bool)
def on_btnSummaryDMNOperatorKeyCopy_clicked(self, checked):
text = self.edtSummaryDMNOperatorKey.text()
cl = QApplication.clipboard()
cl.setText(text)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2018-11
import base64
import json
import logging
import time
from collections import namedtuple
from enum import Enum
from functools import partial
from typing import List, Union, Callable
import ipaddress
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtCore import pyqtSlot, Qt, QTimerEvent, QTimer
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import QDialog, QApplication, QToolButton, QAction, QWidget
from bitcoinrpc.authproxy import EncodeDecimal, JSONRPCException
import app_cache
import app_defs
import hw_intf
from app_config import MasternodeConfig, AppConfig, InputKeyType
from app_defs import FEE_DUFF_PER_BYTE
from bip44_wallet import Bip44Wallet, BreakFetchTransactionsException, find_wallet_addresses
from common import CancelException
from crown_utils import generate_bls_privkey, generate_wif_privkey, validate_address, wif_privkey_to_address, \
validate_wif_privkey, bls_privkey_to_pubkey
from crownd_intf import CrowndInterface
from thread_fun_dlg import CtrlObject
from ui import ui_reg_masternode_dlg
from wallet_common import Bip44AccountType, Bip44AddressType
from wnd_utils import WndUtils
STEP_MN_DATA = 1
STEP_DASHD_TYPE = 2
STEP_AUTOMATIC_RPC_NODE = 3
STEP_MANUAL_OWN_NODE = 4
STEP_SUMMARY = 5
NODE_TYPE_PUBLIC_RPC = 1
NODE_TYPE_OWN = 2
CACHE_ITEM_SHOW_FIELD_HINTS = 'RegMasternodeDlg_ShowFieldHints'
log = logging.getLogger('cmt.reg_masternode')
class RegMasternodeDlg(QDialog, ui_reg_masternode_dlg.Ui_RegMasternodeDlg, WndUtils):
def __init__(self, main_dlg, config: AppConfig, crownd_intf: CrowndInterface, masternode: MasternodeConfig,
on_proregtx_success_callback: Callable):
QDialog.__init__(self, main_dlg)
ui_reg_masternode_dlg.Ui_RegMasternodeDlg.__init__(self)
WndUtils.__init__(self, main_dlg.app_config)
self.main_dlg = main_dlg
self.masternode = masternode
self.app_config = config
self.crownd_intf:CrowndInterface = crownd_intf
self.on_proregtx_success_callback = on_proregtx_success_callback
self.style = '<style>.info{color:darkblue} .warning{color:#ff6600} .error{background-color:red;color:white}</style>'
self.operator_reward_saved = None
self.owner_pkey_generated: str = None
self.operator_pkey_generated: str = None
self.voting_pkey_generated: str = None
self.current_step = STEP_MN_DATA
self.step_stack: List[int] = []
self.proregtx_prepare_thread_ref = None
self.deterministic_mns_spork_active = True
self.dmn_collateral_tx: str = None
self.dmn_collateral_tx_index: int = None
self.dmn_collateral_tx_address: str = None
self.dmn_collateral_tx_address_path: str = None
self.dmn_ip: str = None
self.dmn_tcp_port: int = None
self.dmn_owner_payout_addr: str = None
self.dmn_operator_reward: int = 0
self.dmn_owner_privkey: str = None
self.dmn_owner_address: str = None
self.dmn_operator_privkey: str = None
self.dmn_operator_pubkey: str = None
self.dmn_voting_privkey: str = None
self.dmn_voting_address: str = None
self.dmn_owner_key_type = InputKeyType.PRIVATE
self.dmn_operator_key_type = InputKeyType.PRIVATE
self.dmn_voting_key_type = InputKeyType.PRIVATE
self.collateral_validation_err_msg = ''
self.ip_port_validation_err_msg = ''
self.payout_address_validation_err_msg = ''
self.operator_reward_validation_err_msg = ''
self.owner_key_validation_err_msg = ''
self.operator_key_validation_err_msg = ''
self.voting_key_validation_err_msg = ''
self.dmn_reg_tx_hash = ''
self.manual_signed_message = False
self.last_manual_prepare_string: str = None
self.wait_for_confirmation_timer_id = None
self.show_field_hinds = True
self.summary_info = []
if self.masternode:
self.dmn_collateral_tx_address_path = self.masternode.collateralBip32Path
self.bip44_wallet = Bip44Wallet(self.app_config.hw_coin_name, self.main_dlg.hw_session,
self.app_config.db_intf, self.crownd_intf, self.app_config.crown_network)
self.finishing = False
self.setupUi()
def setupUi(self):
ui_reg_masternode_dlg.Ui_RegMasternodeDlg.setupUi(self, self)
self.closeEvent = self.closeEvent
self.restore_cache_settings()
self.edtCollateralTx.setText(self.masternode.collateralTx)
if self.masternode.collateralTx:
sz = self.edtCollateralTx.fontMetrics().size(0, self.masternode.collateralTx + '000')
self.edtCollateralTx.setMinimumWidth(sz.width())
self.edtCollateralIndex.setText(self.masternode.collateralTxIndex)
self.edtIP.setText(self.masternode.ip)
self.edtPort.setText(self.masternode.port)
self.edtPayoutAddress.setText(self.masternode.collateralAddress)
self.chbWholeMNReward.setChecked(True)
self.lblProtxSummary2.linkActivated.connect(self.save_summary_info)
self.lblCollateralTxMsg.sizePolicy().setHeightForWidth(True)
self.prepare_keys()
self.btnClose.hide()
self.setIcon(self.btnManualFundingAddressPaste, 'content-paste@16px.png')
self.setIcon(self.btnManualProtxPrepareCopy, 'content-copy@16px.png')
self.setIcon(self.btnManualProtxPrepareResultPaste, 'content-paste@16px.png')
self.setIcon(self.btnManualProtxSubmitCopy, 'content-copy@16px.png')
self.setIcon(self.btnManualTxHashPaste, 'content-paste@16px.png')
self.setIcon(self.btnSummaryDMNOperatorKeyCopy, 'content-copy@16px.png')
self.edtSummaryDMNOperatorKey.setStyleSheet("QLineEdit{background-color: white} "
"QLineEdit:read-only{background-color: white}")
doc_url = app_defs.get_doc_url('deterministic-mn-migration.md')
if doc_url:
self.lblDocumentation.setText(f'<a href="{doc_url}">Documentation</a>')
self.update_dynamic_labels()
self.update_ctrls_visibility()
self.update_ctrl_state()
self.update_step_tab_ui()
self.update_show_hints_label()
self.minimize_dialog_height()
def closeEvent(self, event):
self.finishing = True
if self.wait_for_confirmation_timer_id is not None:
self.killTimer(self.wait_for_confirmation_timer_id)
self.save_cache_settings()
def restore_cache_settings(self):
app_cache.restore_window_size(self)
self.show_field_hinds = app_cache.get_value(CACHE_ITEM_SHOW_FIELD_HINTS, True, bool)
def save_cache_settings(self):
app_cache.save_window_size(self)
app_cache.set_value(CACHE_ITEM_SHOW_FIELD_HINTS, self.show_field_hinds)
def minimize_dialog_height(self):
def set():
self.adjustSize()
self.tm_resize_dlg = QTimer(self)
self.tm_resize_dlg.setSingleShot(True)
self.tm_resize_dlg.singleShot(100, set)
def update_dynamic_labels(self):
def style_to_color(style: str) -> str:
if style == 'hl1':
color = 'color:#00802b'
else:
color = ''
return color
def get_label_text(prefix:str, key_type: str, tooltip_anchor: str, style: str):
lbl = prefix + ' ' + \
{'privkey': 'private key', 'pubkey': 'public key', 'address': 'Crown address'}.get(key_type, '???')
change_mode = f'(<a href="{tooltip_anchor}">use {tooltip_anchor}</a>)'
return f'<table style="float:right;{style_to_color(style)}"><tr><td><b>{lbl}</b></td><td>{change_mode}</td></tr></table>'
if self.masternode:
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
key_type, tooltip_anchor, placeholder_text = ('privkey', 'address', 'Enter the owner private key')
style = ''
else:
key_type, tooltip_anchor, placeholder_text = ('address', 'privkey', 'Enter the owner Crown address')
style = 'hl1'
self.lblOwnerKey.setText(get_label_text('Owner', key_type, tooltip_anchor, style))
self.edtOwnerKey.setPlaceholderText(placeholder_text)
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
key_type, tooltip_anchor, placeholder_text = ('privkey', 'pubkey', 'Enter the operator private key')
style = ''
else:
key_type, tooltip_anchor, placeholder_text = ('pubkey', 'privkey', 'Enter the operator public key')
style = 'hl1'
self.lblOperatorKey.setText(get_label_text('Operator', key_type, tooltip_anchor, style))
self.edtOperatorKey.setPlaceholderText(placeholder_text)
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
key_type, tooltip_anchor, placeholder_text = ('privkey','address', 'Enter the voting private key')
style = ''
else:
key_type, tooltip_anchor, placeholder_text = ('address', 'privkey', 'Enter the voting Crown address')
style = 'hl1'
self.lblVotingKey.setText(get_label_text('Voting', key_type, tooltip_anchor, style))
self.edtVotingKey.setPlaceholderText(placeholder_text)
@pyqtSlot(str)
def on_lblOwnerKey_linkActivated(self, link):
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
self.dmn_owner_key_type = InputKeyType.PUBLIC
self.dmn_owner_privkey = self.edtOwnerKey.text()
self.edtOwnerKey.setText(self.dmn_owner_address)
else:
self.dmn_owner_key_type = InputKeyType.PRIVATE
self.dmn_owner_address = self.edtOwnerKey.text()
self.edtOwnerKey.setText(self.dmn_owner_privkey)
self.update_dynamic_labels()
self.update_ctrls_visibility()
self.upd_owner_key_info(False)
@pyqtSlot(str)
def on_lblOperatorKey_linkActivated(self, link):
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
self.dmn_operator_key_type = InputKeyType.PUBLIC
self.dmn_operator_privkey = self.edtOperatorKey.text()
self.edtOperatorKey.setText(self.dmn_operator_pubkey)
else:
self.dmn_operator_key_type = InputKeyType.PRIVATE
self.dmn_operator_pubkey = self.edtOperatorKey.text()
self.edtOperatorKey.setText(self.dmn_operator_privkey)
self.update_dynamic_labels()
self.update_ctrls_visibility()
self.upd_operator_key_info(False)
@pyqtSlot(str)
def on_lblVotingKey_linkActivated(self, link):
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
self.dmn_voting_key_type = InputKeyType.PUBLIC
self.dmn_voting_privkey = self.edtVotingKey.text()
self.edtVotingKey.setText(self.dmn_voting_address)
else:
self.dmn_voting_key_type = InputKeyType.PRIVATE
self.dmn_voting_address = self.edtVotingKey.text()
self.edtVotingKey.setText(self.dmn_voting_privkey)
self.update_dynamic_labels()
self.update_ctrls_visibility()
self.upd_voting_key_info(False)
@pyqtSlot(str)
def on_lblOwnerKey_linkHovered(self, link):
if link == 'address':
tt = 'Change input type to Crown address'
else:
tt = 'Change input type to private key'
self.lblOwnerKey.setToolTip(tt)
@pyqtSlot(str)
def on_lblOperatorKey_linkHovered(self, link):
if link == 'pubkey':
tt = 'Change input type to public key'
else:
tt = 'Change input type to private key'
self.lblOperatorKey.setToolTip(tt)
@pyqtSlot(str)
def on_lblVotingKey_linkHovered(self, link):
if link == 'address':
tt = 'Change input type to Crown address'
else:
tt = 'Change input type to private key'
self.lblVotingKey.setToolTip(tt)
def prepare_keys(self):
gen_owner = False
gen_operator = False
gen_voting = False
# if any of the owner/operator/voting key used in the configuration is the same as the corresponding
# key shown in the blockchain, replace that key by a new one
found_protx = False
protx_state = {}
try:
for protx in self.crownd_intf.protx('list', 'registered', True):
protx_state = protx.get('state')
if (protx_state and protx_state.get('service') == self.masternode.ip + ':' + self.masternode.port) or \
(protx.get('collateralHash') == self.masternode.collateralTx and
str(protx.get('collateralIndex')) == str(self.masternode.collateralTxIndex)):
found_protx = True
break
except Exception as e:
pass
if found_protx:
if self.masternode.get_dmn_owner_public_address(self.app_config.crown_network) == \
protx_state.get('ownerAddress'):
gen_owner = True
if self.masternode.get_dmn_operator_pubkey() == protx_state.get('pubKeyOperator'):
gen_operator = True
if self.masternode.get_dmn_voting_public_address(self.app_config.crown_network) == \
protx_state.get('votingAddress'):
gen_voting = True
if (self.masternode.dmn_owner_key_type == InputKeyType.PRIVATE and
not self.masternode.dmn_owner_private_key) or \
(self.masternode.dmn_owner_key_type == InputKeyType.PUBLIC and
not self.masternode.dmn_owner_address):
gen_owner = True
if (self.masternode.dmn_operator_key_type == InputKeyType.PRIVATE and
not self.masternode.dmn_operator_private_key) or \
(self.masternode.dmn_operator_key_type == InputKeyType.PUBLIC and
not self.masternode.dmn_operator_public_key):
gen_operator = True
if (self.masternode.dmn_voting_key_type == InputKeyType.PRIVATE and
not self.masternode.dmn_voting_private_key) or \
(self.masternode.dmn_voting_key_type == InputKeyType.PUBLIC and
not self.masternode.dmn_voting_address):
gen_voting = True
if gen_owner:
self.owner_pkey_generated = generate_wif_privkey(self.app_config.crown_network, compressed=True)
self.edtOwnerKey.setText(self.owner_pkey_generated)
else:
if self.masternode.dmn_owner_key_type == InputKeyType.PRIVATE:
self.edtOwnerKey.setText(self.masternode.dmn_owner_private_key)
else:
self.edtOwnerKey.setText(self.masternode.dmn_owner_address)
self.dmn_owner_key_type = self.masternode.dmn_owner_key_type
if gen_operator:
try:
self.operator_pkey_generated = generate_bls_privkey()
self.edtOperatorKey.setText(self.operator_pkey_generated)
except Exception as e:
self.errorMsg(str(e))
else:
if self.masternode.dmn_operator_key_type == InputKeyType.PRIVATE:
self.edtOperatorKey.setText(self.masternode.dmn_operator_private_key)
else:
self.edtOperatorKey.setText(self.masternode.dmn_operator_public_key)
self.dmn_operator_key_type = self.masternode.dmn_operator_key_type
if self.deterministic_mns_spork_active:
if gen_voting:
self.voting_pkey_generated = generate_wif_privkey(self.app_config.crown_network, compressed=True)
self.edtVotingKey.setText(self.voting_pkey_generated)
else:
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
self.edtVotingKey.setText(self.masternode.dmn_voting_private_key)
else:
self.edtVotingKey.setText(self.masternode.dmn_voting_address)
@pyqtSlot(bool)
def on_btnCancel_clicked(self):
self.close()
@pyqtSlot(bool)
def on_btnClose_clicked(self):
self.close()
@pyqtSlot(bool)
def on_btnGenerateOwnerKey_clicked(self, active):
k = generate_wif_privkey(self.app_config.crown_network, compressed=True)
self.edtOwnerKey.setText(k)
self.edtOwnerKey.repaint()
@pyqtSlot(bool)
def on_btnGenerateOperatorKey_clicked(self, active):
self.edtOperatorKey.setText(generate_bls_privkey())
self.edtOperatorKey.repaint() # qt 5.11.3 has issue with automatic repainting after setText on mac
@pyqtSlot(bool)
def on_btnGenerateVotingKey_clicked(self, active):
k = generate_wif_privkey(self.app_config.crown_network, compressed=True)
self.edtVotingKey.setText(k)
self.edtVotingKey.repaint()
def set_ctrl_message(self, control, message: str, style: str):
if message:
control.setText(f'{self.style}<span class="{style}">{message}</span>')
control.setVisible(True)
# control.repaint()
else:
control.setVisible(False)
def update_ctrls_visibility(self):
if not self.deterministic_mns_spork_active:
# hide controls related to the voting key - if spork 15 is not active, voting key has to be the same
# as the owner key
self.lblVotingMsg.hide()
self.lblVotingKey.hide()
self.edtVotingKey.hide()
self.btnGenerateVotingKey.hide()
else:
self.btnGenerateVotingKey.setVisible(self.dmn_voting_key_type == InputKeyType.PRIVATE)
self.btnGenerateOwnerKey.setVisible(self.dmn_owner_key_type == InputKeyType.PRIVATE)
self.btnGenerateOperatorKey.setVisible(self.dmn_operator_key_type == InputKeyType.PRIVATE)
def update_fields_info(self, show_invalid_data_msg: bool):
"""
:param show_data_invalid_msg: if the argument is true and the data is invalid, an error message is shown
below the control; the argument is set to True if before moving to the next step there are some errors
found in the data provided by the user.
"""
self.upd_collateral_tx_info(show_invalid_data_msg)
self.upd_ip_info(show_invalid_data_msg)
self.upd_payout_addr_info(show_invalid_data_msg)
self.upd_oper_reward_info(show_invalid_data_msg)
self.upd_owner_key_info(show_invalid_data_msg)
self.upd_operator_key_info(show_invalid_data_msg)
self.upd_voting_key_info(show_invalid_data_msg)
def upd_collateral_tx_info(self, show_invalid_data_msg: bool):
"""
:param show_data_invalid_msg: if the argument is true and the data is invalid, an error message is shown
below the control; the argument is set to True if before moving to the next step there are some errors
found in the data provided by the user.
"""
msg = ''
style = 'info'
if show_invalid_data_msg and self.collateral_validation_err_msg:
msg = self.collateral_validation_err_msg
style = 'error'
self.set_ctrl_message(self.lblCollateralTxMsg, msg, style)
def upd_ip_info(self, show_invalid_data_msg: bool):
"""
:param show_data_invalid_msg: if the argument is true and the data is invalid, an error message is shown
below the control; the argument is set to True if before moving to the next step there are some errors
found in the data provided by the user.
"""
msg = ''
style = ''
if show_invalid_data_msg and self.ip_port_validation_err_msg:
msg = self.ip_port_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.edtIP.text().strip():
msg = 'You can leave the IP address and port fields empty if you want to delegate the operator ' \
'role to an external entity and you don\'t know their values in advance.'
style = 'info'
else:
                    msg = 'If you don\'t set the IP address and port fields, the masternode operator will ' \
                          'have to issue a ProUpServTx transaction using the Crown wallet.'
style = 'warning'
self.set_ctrl_message(self.lblIPMsg, msg, style)
def upd_payout_addr_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if show_invalid_data_msg and self.payout_address_validation_err_msg:
msg = self.payout_address_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
msg = 'The owner\'s payout address can be set to any valid Crown address - it no longer ' \
'has to be the same as the collateral address.'
style = 'info'
self.set_ctrl_message(self.lblPayoutMsg, msg, style)
def upd_oper_reward_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if show_invalid_data_msg and self.operator_reward_validation_err_msg:
msg = self.operator_reward_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.chbWholeMNReward.isChecked():
msg = 'Here you can specify how much of the masternode earnings will go to the ' \
'masternode operator.'
style = 'info'
else:
msg = 'The masternode operator will have to specify his reward payee address in a ProUpServTx ' \
'transaction, otherwise the full reward will go to the masternode owner.'
style = 'warning'
self.set_ctrl_message(self.lblOperatorRewardMsg, msg, style)
def upd_owner_key_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if show_invalid_data_msg and self.owner_key_validation_err_msg:
msg = self.owner_key_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
if self.edtOwnerKey.text().strip() == self.owner_pkey_generated:
msg = 'This is an automatically generated owner private key. You can enter your own or ' \
'generate a new one by pressing the button on the right.'
elif not self.edtOwnerKey.text().strip():
msg = 'Enter the owner private key or generate a new one by clicking the button on the right.'
style = 'info'
else:
msg = 'You can use a Crown address if the related private key is stored elsewhere, e.g. in ' \
'the Crown Core wallet.<br><span class="warning">Note that if you provide an address ' \
'instead of a private key, you will not be able to publish the ProRegTx ' \
'transaction through public RPC nodes in the next steps.</span>'
style = 'info'
self.set_ctrl_message(self.lblOwnerMsg, msg, style)
def upd_operator_key_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if show_invalid_data_msg and self.operator_key_validation_err_msg:
msg = self.operator_key_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
if self.edtOperatorKey.text().strip() == self.operator_pkey_generated:
msg = 'This is an automatically generated operator BLS private key. You can enter your ' \
'own or generate a new one by pressing the button on the right.'
elif not self.edtOperatorKey.text().strip():
msg = 'Enter the operator private key or generate a new one by clicking the button on ' \
'the right.'
style = 'info'
else:
msg = 'You can use a public key if your masternode is managed by a separate entity (operator) ' \
'that controls the related private key or if you prefer to keep the private key outside ' \
'the program. If necessary, you can revoke this key by sending a new ProRegTx ' \
'transaction with a new operator key.'
style = 'info'
self.set_ctrl_message(self.lblOperatorMsg, msg, style)
def upd_voting_key_info(self, show_invalid_data_msg: bool):
msg = ''
style = ''
if self.deterministic_mns_spork_active:
if show_invalid_data_msg and self.voting_key_validation_err_msg:
msg = self.voting_key_validation_err_msg
style = 'error'
else:
if self.show_field_hinds:
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
if self.edtVotingKey.text().strip() == self.voting_pkey_generated:
msg = 'This is an automatically generated private key for voting. You can enter your own or ' \
'generate a new one by pressing the button on the right.'
elif not self.edtVotingKey.text().strip():
msg = 'Enter the private key for voting or generate a new one by clicking the button on ' \
'the right.'
style = 'info'
else:
msg = 'You can use a Crown address if the related private key is stored elsewhere, e.g. in ' \
'the Crown Core wallet.<br><span class="warning">Note that providing an address instead of ' \
'a private key will prevent you from voting on proposals in this program.</span>'
style = 'info'
self.set_ctrl_message(self.lblVotingMsg, msg, style)
def get_crown_node_type(self):
if self.rbCMTCrownNodeType.isChecked():
return NODE_TYPE_PUBLIC_RPC
elif self.rbOwnCrownNodeType.isChecked():
return NODE_TYPE_OWN
else:
return None
def upd_node_type_info(self):
nt = self.get_crown_node_type()
msg = ''
if nt is None:
msg = 'DIP-3 masternode registration involves sending a special transaction via a v0.13 Crown node ' \
'(e.g. Crown-Qt). <b>Note that this requires incurring a certain transaction fee, as with any ' \
'other ("normal") transaction.</b>'
elif nt == NODE_TYPE_PUBLIC_RPC:
msg = 'The ProRegTx transaction will be processed via the remote RPC node stored in the app configuration.' \
'<br><br>' \
'<b>Note 1:</b> this operation will involve signing transaction data with your <span style="color:red">owner key on the remote node</span>, ' \
'so use this method only if you trust the operator of that node (nodes <i>alice(luna, suzy).crown-masternode-tool.org</i> are maintained by the author of this application).<br><br>' \
'<b>Note 2:</b> if the operation fails (e.g. due to a lack of funds), choose the manual method ' \
'using your own Crown wallet.'
elif nt == NODE_TYPE_OWN:
msg = 'A Crown Core wallet (v0.13) with sufficient funds to cover transaction fees is required to ' \
'complete the next steps.'
self.lblCrownNodeTypeMessage.setText(msg)
def update_ctrl_state(self):
self.edtOperatorReward.setDisabled(self.chbWholeMNReward.isChecked())
@pyqtSlot(str)
def on_edtCollateralTx_textChanged(self, text):
self.upd_collateral_tx_info(False)
@pyqtSlot(str)
def on_edtCollateralIndex_textChanged(self, text):
self.upd_collateral_tx_info(False)
@pyqtSlot(str)
def on_edtIP_textChanged(self, text):
self.upd_ip_info(False)
@pyqtSlot(str)
def on_edtPayoutAddress_textChanged(self, text):
self.upd_payout_addr_info(False)
@pyqtSlot(bool)
def on_chbWholeMNReward_toggled(self, checked):
if checked:
self.operator_reward_saved = self.edtOperatorReward.value()
self.edtOperatorReward.setValue(0.0)
else:
if self.operator_reward_saved is not None:
self.edtOperatorReward.setValue(self.operator_reward_saved)
self.update_ctrl_state()
self.upd_oper_reward_info(False)
@pyqtSlot(str)
def on_edtOwnerKey_textChanged(self, text):
self.upd_owner_key_info(False)
@pyqtSlot(str)
def on_edtOperatorKey_textChanged(self, text):
self.upd_operator_key_info(False)
@pyqtSlot(str)
def on_edtVotingKey_textChanged(self, text):
self.upd_voting_key_info(False)
@pyqtSlot(str)
def save_summary_info(self, link: str):
file_name = WndUtils.save_file_query(self.main_dlg, self.app_config,
'Enter the file name',
filter="TXT files (*.txt);;All Files (*)")
if file_name:
with open(file_name, 'wt') as fptr:
for l in self.summary_info:
lbl, val = l.split('\t')
fptr.write(f'{lbl}:\t{val}\n')
def update_step_tab_ui(self):
def show_hide_tabs(tab_idx_to_show: int):
self.edtManualProtxPrepare.setVisible(tab_idx_to_show == 3)
self.edtManualProtxPrepareResult.setVisible(tab_idx_to_show == 3)
self.edtManualProtxSubmit.setVisible(tab_idx_to_show == 3)
pass
self.btnContinue.setEnabled(False)
if self.current_step == STEP_MN_DATA:
self.stackedWidget.setCurrentIndex(0)
self.update_fields_info(False)
self.btnContinue.show()
self.btnContinue.setEnabled(True)
self.btnCancel.setEnabled(True)
elif self.current_step == STEP_DASHD_TYPE:
self.stackedWidget.setCurrentIndex(1)
self.upd_node_type_info()
self.btnContinue.setEnabled(True)
self.btnContinue.show()
self.btnCancel.setEnabled(True)
elif self.current_step == STEP_AUTOMATIC_RPC_NODE:
self.stackedWidget.setCurrentIndex(2)
self.upd_node_type_info()
elif self.current_step == STEP_MANUAL_OWN_NODE:
self.stackedWidget.setCurrentIndex(3)
self.upd_node_type_info()
self.btnContinue.setEnabled(True)
elif self.current_step == STEP_SUMMARY:
self.stackedWidget.setCurrentIndex(4)
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
owner_privkey = self.dmn_owner_privkey
else:
owner_privkey = '<not available>'
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
operator_privkey = self.dmn_operator_privkey
else:
operator_privkey = '<not available>'
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
voting_privkey = self.dmn_voting_privkey
else:
voting_privkey = '<not available>'
self.summary_info = \
[f'Network address\t{self.dmn_ip}:{self.dmn_tcp_port}',
f'Payout address\t{self.dmn_owner_payout_addr}',
f'Owner private key\t{owner_privkey}',
f'Owner public address\t{self.dmn_owner_address}',
f'Operator private key\t{operator_privkey}',
f'Operator public key\t{self.dmn_operator_pubkey}',
f'Voting private key\t{voting_privkey}',
f'Voting public address\t{self.dmn_voting_address}',
f'Protx hash\t{self.dmn_reg_tx_hash}']
text = '<table>'
for l in self.summary_info:
lbl, val = l.split('\t')
text += f'<tr><td style="white-space: nowrap"><b>{lbl}:</b> </td><td>{val}</td></tr>'
text += '</table>'
self.edtProtxSummary.setText(text)
self.edtProtxSummary.show()
self.lblProtxSummary2.show()
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
operator_message = '<b><span style="color:red">One more thing... </span></b>copy the following ' \
'line to the <code>crown.conf</code> file on your masternode server ' \
'(and restart <i>crownd</i>) or pass it to the masternode operator:'
else:
operator_message = '<b><span style="color:red">One more thing... </span></b>copy the following ' \
'line to the <code>crown.conf</code> file on your masternode server, replacing ' \
'"<your-operator-bls-private-key>" with the appropriate value or ask the operator ' \
'for it:'
self.lblProtxSummary3.setText(operator_message)
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
operator_privkey = self.dmn_operator_privkey
else:
operator_privkey = '<your-operator-bls-private-key>'
self.edtSummaryDMNOperatorKey.setText(f'masternodeblsprivkey={operator_privkey}')
self.btnCancel.hide()
self.btnBack.hide()
self.btnContinue.hide()
self.btnClose.show()
self.btnClose.setEnabled(True)
self.btnClose.repaint()
else:
raise Exception('Invalid step')
show_hide_tabs(self.stackedWidget.currentIndex())
self.lblFieldHints.setVisible(self.stackedWidget.currentIndex() == 0)
self.btnBack.setEnabled(len(self.step_stack) > 0)
self.btnContinue.repaint()
self.btnCancel.repaint()
self.btnBack.repaint()
def validate_data(self):
self.dmn_collateral_tx = self.edtCollateralTx.text().strip()
self.collateral_validation_err_msg = ''
error_count = 0
try:
if not self.dmn_collateral_tx:
self.collateral_validation_err_msg = 'Collateral transaction ID is required.'
self.edtCollateralTx.setFocus()
else:
self.dmn_collateral_tx_index = int(self.edtCollateralIndex.text())
if self.dmn_collateral_tx_index < 0:
self.collateral_validation_err_msg = 'Invalid collateral transaction index.'
except Exception:
self.edtCollateralIndex.setFocus()
self.collateral_validation_err_msg = 'Invalid collateral transaction index: should be an integer ' \
'value greater than or equal to 0.'
if self.collateral_validation_err_msg:
self.upd_collateral_tx_info(True)
error_count += 1
self.ip_port_validation_err_msg = ''
try:
self.dmn_ip = self.edtIP.text().strip()
if self.dmn_ip:
ipaddress.ip_address(self.dmn_ip)
except Exception as e:
self.edtIP.setFocus()
self.ip_port_validation_err_msg = 'Invalid masternode IP address: %s.' % str(e)
self.upd_ip_info(True)
error_count += 1
try:
if self.dmn_ip:
self.dmn_tcp_port = int(self.edtPort.text())
else:
self.dmn_tcp_port = None
except Exception:
self.edtPort.setFocus()
self.ip_port_validation_err_msg = 'Invalid TCP port: should be an integer.'
self.upd_ip_info(True)
error_count += 1
self.payout_address_validation_err_msg = ''
addr = self.edtPayoutAddress.text().strip()
if not addr:
self.payout_address_validation_err_msg = 'Owner payout address is required.'
else:
self.dmn_owner_payout_addr = addr
if not validate_address(self.dmn_owner_payout_addr, self.app_config.crown_network):
self.payout_address_validation_err_msg = 'Invalid owner payout address.'
if self.payout_address_validation_err_msg:
self.edtPayoutAddress.setFocus()
self.upd_payout_addr_info(True)
error_count += 1
self.operator_reward_validation_err_msg = ''
if self.chbWholeMNReward.isChecked():
self.dmn_operator_reward = 0
else:
self.dmn_operator_reward = self.edtOperatorReward.value()
if self.dmn_operator_reward > 100 or self.dmn_operator_reward < 0:
self.edtOperatorReward.setFocus()
self.operator_reward_validation_err_msg = 'Invalid operator reward value: should be a value ' \
'between 0 and 100.'
if self.operator_reward_validation_err_msg:
self.upd_oper_reward_info(True)
error_count += 1
self.owner_key_validation_err_msg = ''
key = self.edtOwnerKey.text().strip()
if not key:
self.owner_key_validation_err_msg = 'Owner key/address is required.'
else:
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
self.dmn_owner_privkey = key
if not validate_wif_privkey(self.dmn_owner_privkey, self.app_config.crown_network):
self.edtOwnerKey.setFocus()
self.owner_key_validation_err_msg = 'Invalid owner private key.'
else:
self.dmn_owner_address = wif_privkey_to_address(self.dmn_owner_privkey, self.app_config.crown_network)
else:
self.dmn_owner_address = key
self.dmn_owner_privkey = ''
if not validate_address(self.dmn_owner_address, self.app_config.crown_network):
self.edtOwnerKey.setFocus()
self.owner_key_validation_err_msg = 'Invalid owner Crown address.'
if self.owner_key_validation_err_msg:
self.upd_owner_key_info(True)
error_count += 1
self.operator_key_validation_err_msg = ''
key = self.edtOperatorKey.text().strip()
if not key:
self.operator_key_validation_err_msg = 'Operator key is required.'
else:
if self.dmn_operator_key_type == InputKeyType.PRIVATE:
try:
self.dmn_operator_privkey = key
try:
b = bytes.fromhex(self.dmn_operator_privkey)
if len(b) != 32:
raise Exception('invalid length (' + str(len(b)) + ')')
except Exception as e:
self.edtOperatorKey.setFocus()
self.operator_key_validation_err_msg = 'Invalid operator private key: ' + str(e)
self.dmn_operator_pubkey = bls_privkey_to_pubkey(self.dmn_operator_privkey)
except Exception as e:
self.edtOperatorKey.setFocus()
self.operator_key_validation_err_msg = 'Invalid operator private key: ' + str(e)
else:
self.dmn_operator_pubkey = key
self.dmn_operator_privkey = ''
try:
b = bytes.fromhex(self.dmn_operator_pubkey)
if len(b) != 48:
raise Exception('invalid length (' + str(len(b)) + ')')
except Exception as e:
self.edtOperatorKey.setFocus()
self.operator_key_validation_err_msg = 'Invalid operator public key: ' + str(e)
if self.operator_key_validation_err_msg:
self.upd_operator_key_info(True)
error_count += 1
self.voting_key_validation_err_msg = ''
if self.deterministic_mns_spork_active:
key = self.edtVotingKey.text().strip()
if not key:
self.voting_key_validation_err_msg = 'Voting key/address is required.'
else:
if self.dmn_voting_key_type == InputKeyType.PRIVATE:
self.dmn_voting_privkey = key
if not validate_wif_privkey(self.dmn_voting_privkey, self.app_config.crown_network):
self.edtVotingKey.setFocus()
self.voting_key_validation_err_msg = 'Invalid voting private key.'
else:
self.dmn_voting_address = wif_privkey_to_address(self.dmn_voting_privkey, self.app_config.crown_network)
else:
self.dmn_voting_address = key
self.dmn_voting_privkey = ''
if not validate_address(self.dmn_voting_address, self.app_config.crown_network):
self.edtVotingKey.setFocus()
self.voting_key_validation_err_msg = 'Invalid voting Crown address.'
else:
# spork 15 not active - use the owner private key for voting
self.dmn_voting_address = self.dmn_owner_address
self.dmn_voting_privkey = self.dmn_owner_privkey
self.dmn_voting_key_type = self.dmn_owner_key_type
if self.voting_key_validation_err_msg:
self.upd_voting_key_info(True)
error_count += 1
if error_count > 1:
raise Exception('Errors were encountered in the input data. You must correct them before you can continue.')
elif error_count == 1:
raise Exception(max((self.collateral_validation_err_msg, self.ip_port_validation_err_msg,
self.payout_address_validation_err_msg, self.operator_reward_validation_err_msg,
self.owner_key_validation_err_msg, self.operator_key_validation_err_msg,
self.voting_key_validation_err_msg)))
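# note: when error_count == 1 exactly one of the validation messages above is non-empty, and
# max() of the tuple returns it because an empty string compares lower than any non-empty string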
break_scanning = False
def check_break_scanning():
nonlocal break_scanning
return break_scanning
def do_break_scanning():
nonlocal break_scanning
break_scanning = True
return False
self.btnContinue.setEnabled(False)
try:
ret = WndUtils.run_thread_dialog(self.get_collateral_tx_address_thread, (check_break_scanning,), True,
force_close_dlg_callback=do_break_scanning)
except Exception as e:
log.exception(str(e))
raise Exception(str(e))
self.btnContinue.setEnabled(True)
return ret
def get_collateral_tx_address_thread(self, ctrl: CtrlObject, check_break_scanning_ext: Callable[[], bool]):
txes_cnt = 0
msg = ''
break_scanning = False
ctrl.dlg_config_fun(dlg_title="Validating collateral transaction.", show_progress_bar=False)
ctrl.display_msg_fun('Verifying collateral transaction...')
def check_break_scanning():
nonlocal break_scanning
if self.finishing or break_scanning:
# stop the scanning process if the dialog finishes or the address/bip32path has been found
raise BreakFetchTransactionsException()
if check_break_scanning_ext is not None and check_break_scanning_ext():
raise BreakFetchTransactionsException()
def fetch_txes_feeback(tx_cnt: int):
nonlocal msg, txes_cnt
txes_cnt += tx_cnt
ctrl.display_msg_fun(msg + '<br><br>' + 'Number of transactions fetched so far: ' + str(txes_cnt))
def on_msg_link_activated(link: str):
nonlocal break_scanning
if link == 'break':
break_scanning = True
try:
tx = self.crownd_intf.getrawtransaction(self.dmn_collateral_tx, 1, skip_cache=True)
except Exception as e:
raise Exception('Cannot get the collateral transaction due to the following error: ' + str(e))
vouts = tx.get('vout')
if vouts:
if self.dmn_collateral_tx_index < len(vouts):
vout = vouts[self.dmn_collateral_tx_index]
spk = vout.get('scriptPubKey')
if not spk:
raise Exception(f'The collateral transaction ({self.dmn_collateral_tx}) output '
f'({self.dmn_collateral_tx_index}) doesn\'t have a value in the scriptPubKey '
f'field.')
ads = spk.get('addresses')
if not ads:
raise Exception('The collateral transaction output doesn\'t have a Crown address assigned.')
if vout.get('valueSat') != 10000e8:
raise Exception('The value of the collateral transaction output is not equal to 10000 Crown.')
self.dmn_collateral_tx_address = ads[0]
else:
raise Exception(f'Transaction {self.dmn_collateral_tx} doesn\'t have output with index: '
f'{self.dmn_collateral_tx_index}')
else:
raise Exception('Invalid collateral transaction')
ctrl.display_msg_fun('Verifying the collateral transaction address on your hardware wallet.')
if not self.main_dlg.connect_hardware_wallet():
return False
if self.dmn_collateral_tx_address_path:
try:
addr = hw_intf.get_address(self.main_dlg.hw_session, self.dmn_collateral_tx_address_path)
except CancelException:
return False
msg = ''
if addr != self.dmn_collateral_tx_address:
log.warning(
f'The address returned by the hardware wallet ({addr}) for the BIP32 path '
f'{self.dmn_collateral_tx_address_path} differs from the address stored in the mn configuration '
f'({self.dmn_collateral_tx_address}). Need to scan the wallet for the correct BIP32 path.')
msg = '<span style="color:red">The BIP32 path of the collateral address from your mn config is incorrect.<br></span>' \
f'Trying to find the BIP32 path of the address {self.dmn_collateral_tx_address} in your wallet.' \
f'<br>This may take a while (<a href="break">break</a>)...'
self.dmn_collateral_tx_address_path = ''
else:
msg = 'Looking for a BIP32 path of the Crown address related to the masternode collateral.<br>' \
'This may take a while (<a href="break">break</a>)...'
if not self.dmn_collateral_tx_address_path and not self.finishing:
lbl = ctrl.get_msg_label_control()
if lbl:
def set():
lbl.setOpenExternalLinks(False)
lbl.setTextInteractionFlags(lbl.textInteractionFlags() & ~Qt.TextSelectableByMouse)
lbl.linkActivated.connect(on_msg_link_activated)
lbl.repaint()
WndUtils.call_in_main_thread(set)
ctrl.display_msg_fun(msg)
# fetch the transactions that involved the addresses stored in the wallet - during this
# all the used addresses are revealed
addr = self.bip44_wallet.scan_wallet_for_address(self.dmn_collateral_tx_address, check_break_scanning,
fetch_txes_feeback)
if not addr:
if not break_scanning:
WndUtils.errorMsg(f'Couldn\'t find a BIP32 path of the collateral address ({self.dmn_collateral_tx_address}).')
return False
else:
self.dmn_collateral_tx_address_path = addr.bip32_path
return True
def next_step(self):
cs = None
if self.current_step == STEP_MN_DATA:
if self.validate_data():
cs = STEP_DASHD_TYPE
else:
return
self.step_stack.append(self.current_step)
elif self.current_step == STEP_DASHD_TYPE:
if self.get_crown_node_type() == NODE_TYPE_PUBLIC_RPC:
cs = STEP_AUTOMATIC_RPC_NODE
elif self.get_crown_node_type() == NODE_TYPE_OWN:
cs = STEP_MANUAL_OWN_NODE
else:
self.errorMsg('You have to choose one of the two options.')
return
self.step_stack.append(self.current_step)
elif self.current_step == STEP_AUTOMATIC_RPC_NODE:
cs = STEP_SUMMARY
# in this case don't allow to start the automatic process again when the user clicks <Back>
elif self.current_step == STEP_MANUAL_OWN_NODE:
# check if the user entered the protx transaction hash
if not self.manual_signed_message:
self.errorMsg('It looks like you have not signed a "protx register_prepare" result.')
return
self.dmn_reg_tx_hash = self.edtManualTxHash.text().strip()
if not self.dmn_reg_tx_hash:
self.edtManualTxHash.setFocus()
self.errorMsg('Invalid transaction hash.')
return
try:
bytes.fromhex(self.dmn_reg_tx_hash)
except Exception:
log.warning('Invalid transaction hash.')
self.edtManualTxHash.setFocus()
self.errorMsg('Invalid transaction hash.')
return
cs = STEP_SUMMARY
else:
self.errorMsg('Invalid step')
return
prev_step = self.current_step
self.current_step = cs
self.update_step_tab_ui()
try:
if self.current_step == STEP_AUTOMATIC_RPC_NODE:
self.start_automatic_process()
elif self.current_step == STEP_MANUAL_OWN_NODE:
self.start_manual_process()
elif self.current_step == STEP_SUMMARY:
self.lblProtxSummary1.setText('<b><span style="color:green">Congratulations! The transaction for your DIP-3 '
'masternode has been submitted and is currently awaiting confirmations.'
'</span></b>')
if self.on_proregtx_success_callback:
self.on_proregtx_success_callback(self.masternode)
if not self.check_tx_confirmation():
self.wait_for_confirmation_timer_id = self.startTimer(5000)
except Exception:
self.current_step = prev_step
self.update_step_tab_ui()
raise
def previous_step(self):
if self.step_stack:
self.current_step = self.step_stack.pop()
else:
raise Exception('Invalid step')
self.update_step_tab_ui()
@pyqtSlot(bool)
def on_btnContinue_clicked(self, active):
self.next_step()
@pyqtSlot(bool)
def on_btnBack_clicked(self, active):
self.previous_step()
@pyqtSlot(bool)
def on_rbCMTCrownNodeType_toggled(self, active):
if active:
self.upd_node_type_info()
@pyqtSlot(bool)
def on_rbOwnCrownNodeType_toggled(self, active):
if active:
self.upd_node_type_info()
def sign_protx_message_with_hw(self, msg_to_sign) -> str:
sig = WndUtils.call_in_main_thread(
hw_intf.hw_sign_message, self.main_dlg.hw_session, self.dmn_collateral_tx_address_path,
msg_to_sign, 'Click the confirmation button on your hardware wallet to sign the ProTx payload message.')
if sig.address != self.dmn_collateral_tx_address:
log.error(f'Protx payload signature address mismatch. Is: {sig.address}, should be: '
f'{self.dmn_collateral_tx_address}.')
raise Exception(f'Protx payload signature address mismatch. Is: {sig.address}, should be: '
f'{self.dmn_collateral_tx_address}.')
else:
sig_bin = base64.b64encode(sig.signature)
payload_sig_str = sig_bin.decode('ascii')
return payload_sig_str
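# Note: the value returned above is the base64-encoded collateral-address signature of the
# 'signMessage' payload, which is exactly what the 'protx register_submit' call expects as its
# second argument (see both the automatic and the manual submission paths below).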
def start_automatic_process(self):
self.lblProtxTransaction1.hide()
self.lblProtxTransaction2.hide()
self.lblProtxTransaction3.hide()
self.lblProtxTransaction4.hide()
self.btnContinue.setEnabled(False)
self.btnContinue.repaint()
self.run_thread(self, self.proregtx_automatic_thread, (), on_thread_finish=self.finished_automatic_process)
def finished_automatic_process(self):
self.btnCancel.setEnabled(True)
self.btnCancel.repaint()
self.update_step_tab_ui()
def proregtx_automatic_thread(self, ctrl):
log.debug('Starting proregtx_prepare_thread')
def set_text(widget, text: str):
def call(widget, text):
widget.setText(text)
widget.repaint()
widget.setVisible(True)
WndUtils.call_in_main_thread(call, widget, text)
def finished_with_success():
def call():
self.next_step()
WndUtils.call_in_main_thread(call)
try:
try:
mn_reg_support = self.crownd_intf.checkfeaturesupport('protx_register', self.app_config.app_version)
# is the "registration" feature enabled on the current rpc node?
if not mn_reg_support.get('enabled'):
if mn_reg_support.get('message'):
raise Exception(mn_reg_support.get('message'))
else:
raise Exception('The \'protx_register\' function is not supported by the RPC node '
'you are connected to.')
public_proxy_node = True
active = self.app_config.feature_register_dmn_automatic.get_value()
if not active:
msg = self.app_config.feature_register_dmn_automatic.get_message()
if not msg:
msg = 'The functionality of the automatic execution of the ProRegTx command on the ' \
'"public" RPC nodes is inactive. Use the manual method or contact the program author ' \
'for details.'
raise Exception(msg)
except JSONRPCException as e:
public_proxy_node = False # it's not a "public" rpc node
# preparing protx message
try:
funding_address = ''
if not public_proxy_node:
try:
# find an address to be used as the source of the transaction fees
min_fee = round(1024 * FEE_DUFF_PER_BYTE / 1e8, 8)
balances = self.crownd_intf.listaddressbalances(min_fee)
bal_list = []
for addr in balances:
bal_list.append({'address': addr, 'amount': balances[addr]})
bal_list.sort(key=lambda x: x['amount'])
if not bal_list:
raise Exception("No address can be found in the node's wallet with sufficient funds to "
"cover the transaction fees.")
funding_address = bal_list[0]['address']
except JSONRPCException as e:
log.info("Couldn't list the node address balances. We assume you are using a public RPC node and "
"the funding address for the transaction fees will be estimated during the "
"`register_prepare` call")
set_text(self.lblProtxTransaction1, '<b>1. Preparing a ProRegTx transaction on a remote node...</b>')
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
owner_key = self.dmn_owner_privkey
else:
owner_key = self.dmn_owner_address
params = ['register_prepare', self.dmn_collateral_tx, self.dmn_collateral_tx_index,
self.dmn_ip + ':' + str(self.dmn_tcp_port) if self.dmn_ip else '0', owner_key,
self.dmn_operator_pubkey, self.dmn_voting_address, str(round(self.dmn_operator_reward, 2)),
self.dmn_owner_payout_addr]
if funding_address:
params.append(funding_address)
call_ret = self.crownd_intf.rpc_call(True, False, 'protx', *tuple(params))
call_ret_str = json.dumps(call_ret, default=EncodeDecimal)
msg_to_sign = call_ret.get('signMessage', '')
protx_tx = call_ret.get('tx')
log.debug('register_prepare returned: ' + call_ret_str)
set_text(self.lblProtxTransaction1,
'<b>1. Preparing a ProRegTx transaction on a remote node.</b> <span style="color:green">'
'Success.</span>')
except Exception as e:
set_text(
self.lblProtxTransaction1,
'<b>1. Preparing a ProRegTx transaction on a remote node.</b> <span style="color:red">Failed '
f'with the following error: {str(e)}</span>')
return
set_text(self.lblProtxTransaction2, '<b>Message to be signed:</b><br><code>' + msg_to_sign + '</code>')
# signing message:
set_text(self.lblProtxTransaction3, '<b>2. Signing message with hardware wallet...</b>')
try:
payload_sig_str = self.sign_protx_message_with_hw(msg_to_sign)
set_text(self.lblProtxTransaction3, '<b>2. Signing message with hardware wallet.</b> '
'<span style="color:green">Success.</span>')
except CancelException:
set_text(self.lblProtxTransaction3,
'<b>2. Signing message with hardware wallet.</b> <span style="color:red">Cancelled.</span>')
return
except Exception as e:
log.exception('Signature failed.')
set_text(self.lblProtxTransaction3,
'<b>2. Signing message with hardware wallet.</b> <span style="color:red">Failed with the '
f'following error: {str(e)}.</span>')
return
# submitting signed transaction
set_text(self.lblProtxTransaction4, '<b>3. Submitting the signed protx transaction to the remote node...</b>')
try:
self.dmn_reg_tx_hash = self.crownd_intf.rpc_call(True, False, 'protx', 'register_submit', protx_tx,
payload_sig_str)
log.debug('protx register_submit returned: ' + str(self.dmn_reg_tx_hash))
set_text(self.lblProtxTransaction4,
'<b>3. Submitting the signed protx transaction to the remote node.</b> <span style="'
'color:green">Success.</span>')
finished_with_success()
except Exception as e:
log.exception('protx register_submit failed')
set_text(self.lblProtxTransaction4,
'<b>3. Submitting the signed protx transaction to the remote node.</b> '
f'<span style="color:red">Failed with the following error: {str(e)}</span>')
except Exception as e:
log.exception('Exception occurred')
set_text(self.lblProtxTransaction1, f'<span style="color:red">{str(e)}</span>')
@pyqtSlot(bool)
def on_btnManualSignProtx_clicked(self, checked):
prepare_result = self.edtManualProtxPrepareResult.toPlainText().strip()
if not prepare_result:
self.errorMsg('You need to enter a result of the "protx register_prepare" command.')
self.edtManualProtxPrepareResult.setFocus()
return
try:
prepare_result_dict = json.loads(prepare_result)
msg_to_sign = prepare_result_dict.get('signMessage', '')
protx_tx = prepare_result_dict.get('tx')
try:
payload_sig_str = self.sign_protx_message_with_hw(msg_to_sign)
protx_submit = f'protx register_submit "{protx_tx}" "{payload_sig_str}"'
self.edtManualProtxSubmit.setPlainText(protx_submit)
self.btnContinue.setEnabled(True)
self.btnContinue.repaint()
self.manual_signed_message = True
except CancelException:
return
except Exception as e:
log.exception('Signature failed.')
self.errorMsg(str(e))
return
except Exception as e:
self.errorMsg('Invalid "protx register_prepare" result. Note that the text must be copied along '
'with curly braces.')
return
def start_manual_process(self):
self.edtManualFundingAddress.setFocus()
self.update_manual_protx_prepare_command()
def update_manual_protx_prepare_command(self):
addr = self.edtManualFundingAddress.text().strip()
if addr:
valid = validate_address(addr, self.app_config.crown_network)
if valid:
if self.dmn_owner_key_type == InputKeyType.PRIVATE:
owner_key = self.dmn_owner_privkey
else:
owner_key = self.dmn_owner_address
cmd = f'protx register_prepare "{self.dmn_collateral_tx}" "{self.dmn_collateral_tx_index}" ' \
f'"{self.dmn_ip + ":" + str(self.dmn_tcp_port) if self.dmn_ip else "0"}" ' \
f'"{owner_key}" "{self.dmn_operator_pubkey}" "{self.dmn_voting_address}" ' \
f'"{str(round(self.dmn_operator_reward, 2))}" "{self.dmn_owner_payout_addr}" "{addr}"'
else:
cmd = 'Enter a valid funding address in the edit box above'
else:
cmd = ''
self.edtManualProtxPrepare.setPlainText(cmd)
if cmd != self.last_manual_prepare_string:
self.last_manual_prepare_string = cmd
self.edtManualProtxSubmit.clear()
self.edtManualProtxPrepareResult.clear()
self.edtManualTxHash.clear()
self.dmn_reg_tx_hash = ''
self.manual_signed_message = False
def timerEvent(self, event: QTimerEvent):
""" Timer controlling the confirmation of the proreg transaction. """
if self.check_tx_confirmation():
self.killTimer(event.timerId())
def check_tx_confirmation(self):
try:
tx = self.crownd_intf.getrawtransaction(self.dmn_reg_tx_hash, 1, skip_cache=True)
conf = tx.get('confirmations')
if conf:
h = tx.get('height')
self.lblProtxSummary1.setText(
'<b><span style="color:green">Congratulations! The transaction for your DIP-3 masternode has been '
f'confirmed in block {h}.</span></b> ')
return True
except Exception:
pass
return False
def update_show_hints_label(self):
if self.show_field_hinds:
lbl = '<a href="hide">Hide field descriptions</a>'
else:
lbl = '<a href="show">Show field descriptions</a>'
self.lblFieldHints.setText(lbl)
@pyqtSlot(str)
def on_lblFieldHints_linkActivated(self, link):
if link == 'show':
self.show_field_hinds = True
else:
self.show_field_hinds = False
self.update_show_hints_label()
self.update_fields_info(False)
self.minimize_dialog_height()
@pyqtSlot(str)
def on_edtManualFundingAddress_textChanged(self, text):
self.update_manual_protx_prepare_command()
@pyqtSlot(bool)
def on_btnManualFundingAddressPaste_clicked(self, checked):
cl = QApplication.clipboard()
self.edtManualFundingAddress.setText(cl.text())
@pyqtSlot(bool)
def on_btnManualProtxPrepareCopy_clicked(self, checked):
text = self.edtManualProtxPrepare.toPlainText()
cl = QApplication.clipboard()
cl.setText(text)
@pyqtSlot(bool)
def on_btnManualProtxPrepareResultPaste_clicked(self, checked):
cl = QApplication.clipboard()
self.edtManualProtxPrepareResult.setPlainText(cl.text())
@pyqtSlot(bool)
def on_btnManualProtxSubmitCopy_clicked(self, checked):
text = self.edtManualProtxSubmit.toPlainText()
cl = QApplication.clipboard()
cl.setText(text)
@pyqtSlot(bool)
def on_btnManualTxHashPaste_clicked(self, checked):
cl = QApplication.clipboard()
self.edtManualTxHash.setText(cl.text())
@pyqtSlot(bool)
def on_btnSummaryDMNOperatorKeyCopy_clicked(self, checked):
text = self.edtSummaryDMNOperatorKey.text()
cl = QApplication.clipboard()
cl.setText(text)
|
"""DEFINES THE INVERSEDYNAMICS SOLVER, A Solver for solving the joint based model of a dog."""
from scipy import optimize, signal
from data.data_loader import C3DData, load_force_plate_data, ForcePlateData, SMALData, get_delay_between, DataSources, \
path_join
from vis.utils import *
from vis import visualisations
from dynamics.footfall_detector import FootfallDetector
from tqdm import tqdm
# pure constants (no optimisation needed)
g = 9.81
freq_forceplate = 100 # Hz
foot_joint_labels = ["front left", "front right", "rear left", "rear right"]
foot_joint_indices = [0, 9, 23, 20] # for set 2 3r3
class Model:
"""ID Model, with all parameters derived/optimised"""
def __init__(self):
# CONSTANTS
self.paws = {}
self.bone_density = 1950 # Estimate - needs refining! From paper: Development of a neuromusculoskeletal computer model in a chondrodystrophic dog.
self.muscle_density = 1060 # From above
# params to optimise
self.bone_length_definitions = {
"normal": lambda l: dict(inner_radius=0.01, outer_radius=0.05, displacement=0),
"body": lambda l: dict(inner_radius=l / 20, outer_radius=l / 7, displacement=l / 4 - l / 20), }
# Paw parameters. All scaled to be in standard form - exponent in separate dict.
self.paw_params_normalised = {
"L0_front": 6.9, # 6.9 # in .1mm
"L0_rear": 6.9, # in .1mm
"k_front": 3.42 * .18, # in kN/m
"k_rear": 2.0 * .21, # in kN/m
"c_front": 20,
"c_rear": 20,
"k_rear_prop": 0.85, # k = k_rear * m **.85
"frame_delay": 0 # Used for analysis of paw treadmill forces. Not used for normal ID solver
}
self.paw_exponents = {
"L0_front": -4,
"L0_rear": -4,
"k_front": 3,
"k_rear": 3,
"c_front": 0,
"c_rear": 0,
"k_rear_prop": 0,
"frame_delay": 0
}
self.calc_paw_params()
self.freq_par_data = 200
# weightings used in dynamics calculations
self.equation_weighting = {
"Inertial": 2,
"Rotational": 2,
"Leg spring": 0.5,
"Paw spring": 1,
}
def calc_paw_params(self):
"""Calculates paw parameters (separate function for optimisation purposes)"""
for param, val in self.paw_params_normalised.items():
self.paws[param] = val * 10 ** (self.paw_exponents[param])
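# Illustrative note: with the defaults above, calc_paw_params() gives, for example,
# self.paws["L0_front"] = 6.9 * 10 ** -4 m (i.e. 0.69 mm, consistent with the ".1mm" unit comment)
# and self.paws["k_front"] = 3.42 * 0.18 * 10 ** 3 ≈ 616 N/m (the "kN/m" entry scaled to SI units).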
def edit_paw_param(self, param, val):
"""Edit paw parameter (separate for optimisation purposes)"""
self.paw_params_normalised[param] = val
self.calc_paw_params()
model = Model()
def time_deriv(X, dt):
"""Finds the time derivative of a given series of data.
Always treats the first dimension as time - works for any number of dimensions (n_frames, M, N, O, ...).
For all except the first and last values, the difference is calculated over 2 timesteps"""
diff = np.zeros_like(X)
diff[0] = X[1] - X[0]
diff[1:-1] = (X[2:] - X[:-2]) / 2
diff[-1] = X[-1] - X[-2]
return diff * 1 / dt
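# Illustrative sketch (not used by the solver): for samples X = np.array([0., 1., 4., 9.]) and dt = 0.1,
# time_deriv(X, 0.1) uses a forward difference at the first frame, central differences in between and a
# backward difference at the last frame, returning approximately [10., 20., 40., 50.].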
def nth_time_deriv(X, dt, n=2):
"""Recursively get the nth time derivative"""
if n == 1:
return time_deriv(X, dt)
else:
return time_deriv(nth_time_deriv(X, dt, n=n - 1), dt)
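# e.g. nth_time_deriv(X, dt, n=2) gives accelerations from positions by applying time_deriv twice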
def get_principal_axes(vector=Vector(1, 0, 0), cardinal=np.identity(3)):
"""Given a vector, devise a basis of principle axis with any two perpendicular vectors (for application of an
axisymmetric object - cylinder) """
i, j, k = cardinal
K = vector.unit()
# Now find any two perp vectors to K
if not K.is_parallel(i):
I = K.cross(i).unit()
J = K.cross(I).unit()
else:
I = K.cross(j).unit()
J = K.cross(I).unit()
return np.array([I, J, K])
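# Illustrative sketch (assuming the Vector helper behaves like an ordinary 3-vector):
# get_principal_axes(Vector(0, 0, 2)) returns a 3x3 orthonormal basis whose last row is the unit
# vector (0, 0, 1) along the input, with the first two rows any two unit vectors perpendicular to it.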
def I_cylinder(density, length, radius):
mass = density * np.pi * (radius ** 2) * length
Ixx, Izz = (length ** 2) / 12 + (radius ** 2) / 4, radius ** 2 / 2
return mass * np.diag([Ixx, Ixx, Izz])
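# The diagonal above is the standard inertia tensor of a solid cylinder about its centroid, with the
# z axis along the centreline: Ixx = Iyy = m * (L^2 / 12 + r^2 / 4), Izz = m * r^2 / 2.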
class DoubleCylinder:
"""An object comprised of a cylinder of given length between two end points, of radius inner_radius and density bone_density,
and an outer cylinder that does NOT share the same central axis, of radius outer_radius, displaced by a distance <displacement> normally from the centerline.
Cylinder is defined with the centerline vertical (z direction), and the displacement always in the normal closest to the z direction downwards.
For InverseDynamics calculations, this object will have a start and end index, which correspond to the joint indices in which the end point data is held.
"""
def __init__(self, start, end, length, inner_radius, outer_radius, displacement, freq=50.0, name=""):
self.name = name
self.freq = freq # Frequency, in Hz
self.start = start
self.end = end
self.length = length
self.displacement = displacement
if outer_radius is None: outer_radius = inner_radius
self.inner_mass = model.bone_density * np.pi * inner_radius ** 2 * self.length
self.outer_mass = model.muscle_density * np.pi * self.length * (outer_radius ** 2 - inner_radius ** 2)
self.mass = self.inner_mass + self.outer_mass
I_bone = I_cylinder(model.bone_density, length, inner_radius)
I_muscle = I_cylinder(model.muscle_density, length, outer_radius) - I_cylinder(model.muscle_density, length,
inner_radius)
# By parallel axis theorem, add component of I due to outer radius being displaced from the centerline axis
I_axis_displacement = np.zeros((3, 3))
I_axis_displacement[0, 0] = self.outer_mass * displacement ** 2
self.I = I_bone + I_muscle + I_axis_displacement # Inertia tensor in a reference frame in which the bone is lengthwise facing upwards
def get_kinematics(self, data):
"""Given a numpy array of time, data, of shape (n_frames, 2, 3),
giving the position data of both ends of the cylinder over time, compute the kinematics of the cylinder"""
X = self.X = np.array(data) # positions
V = self.V = time_deriv(X, 1 / self.freq) # velocities
A = self.A = time_deriv(V, 1 / self.freq) # accelerations
self.XG = np.mean(X, axis=1) # average over X
self.VG = np.mean(V, axis=1) # average over V
self.AG = np.mean(A, axis=1) # average over A
# Rotational
R = self.R = [Vector(*x[1]) - Vector(*x[0]) for x in X] # Vector from bone start to end in each frame
local_axes = [get_principal_axes(r) for r in R] # Get principal axes for each frame
# theta_g = (n_frame, 3) of angular rotation about i, j, k for each frame
# angular rotation about each axis is defined as 0 for the next vector in the cycle
# i.e. angular rotation about i = 0 for a vector parallel to j
zero_angles = [[0, 1, 0], [0, 0, 1], [1, 0, 0]] # definition of 'zero angle' vector for i, j, k
theta_g = []
# Compute theta_g in local axes first, where K is the unit vector
for n_frame in range(len(X) - 1):
local_ax = local_axes[n_frame]
# representation as a single rotation theta about an axis e (https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula)
a = R[n_frame] # rotation from one frame...
b = R[n_frame + 1] # ...to the next
if np.array_equal(a, b):
theta_g += [[0, 0, 0]] # If no rotation, return 0
else:
axis = np.cross(a, b) / (np.linalg.norm(np.cross(a, b))) # unit vector of omega
with np.errstate(invalid='raise'):
try:
alignment = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
alignment = np.clip(alignment, a_min=-1,
a_max=1) # clip between -1 and 1 to deal with rounding errors
angle = np.arccos(alignment) # magnitude of theta
except Exception:
print((np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))
raise ValueError("INVALID ANGLE", a, b)
theta_g += [axis * angle]
theta_g = np.array(theta_g)
self.theta_g = signal.savgol_filter(theta_g, window_length=19, polyorder=2, axis=0)
self.omega_g = time_deriv(self.theta_g, dt=1 / self.freq)
self.alpha_g = time_deriv(self.omega_g, dt=1 / self.freq) # angular acceleration
self.I_fixed = [la.T @ self.I @ la for la in local_axes] # compute I in fixed reference frame at each frame
def get_dynamics(self):
"""Compute dynamics (F_net, Torque_net) at each frame"""
self.F_net = [self.mass * a_g for a_g in self.AG]
self.tau_net = [I_f @ alpha for (I_f, alpha) in zip(self.I_fixed, self.alpha_g)]
class Body(DoubleCylinder):
"""A unique case of double cylinder, where the (multiple) joints connect at the cylindrical surface at either end.
These joint attachments are defined by an angle from the i direction normal to the centerline at initialisation.
Dynamics for the body then must be calculated using a separate set of equations. Define the body such that all
joint bones go into it rather than out of it (i.e. all input forces are positive on the body). """
def __init__(self, start_joints, end_joints, all_joint_positions, **cylinder_kwargs):
"""From the indices given by start_joints and end_joints, identify a cylinder shape that best fits these
points on either side, and create that as the cylinder. """
self.start_joints = start_joints
self.end_joints = end_joints
start_pos = Vector(*np.mean(all_joint_positions[40, start_joints], axis=0))
end_pos = Vector(*np.mean(all_joint_positions[40, end_joints], axis=0))
length = start_pos > end_pos
super().__init__(start=None, end=None, length=length, **model.bone_length_definitions["body"](length),
**cylinder_kwargs)
def get_centre_of_gravity(self, start: 'Vector', end: 'Vector'):
"""Calculates the centre of gravity based on the displacement from the centerline."""
centreline_g = 0.5 * (start + end)
# to find a normal that is closest to z, find N possible equispaced normals, and see which one has the greatest .k product
normal = (start - end).find_normal()
N = 20 # number of normals to consider
all_normals = [normal.rotate_about((start - end).unit(), angle=(n * 2 * np.pi / N)) for n in range(N)]
idx = np.argmax([v.dot(Vector(0, 0, -1)) for v in all_normals])
chosen_normal = all_normals[idx] # choose most downwards normal
return centreline_g + self.displacement * chosen_normal
def get_kinematics(self, data):
"""For body, data is of shape (n_frames, 2, 2, 3), where it is split by rear and front.
So average across rear and front to get behaviour of centerline, and then run normal get_kinematics"""
super().get_kinematics(np.mean(data, axis=2))
def weighted_bound_least_squares(A, b, weights=None, bounds=None, **kwargs):
"""Completes a least squares solve of the equation A x = b, to solve N unknowns from M equations
where A is an M x N matrix, x is an N x 1 vector, and b is an M x 1 vector.
Applies weightings to each row to favour certain datapoints. weights is an M x 1 vector.
Applies bounds where bounds is an M x 2 array. each tuple in the array gives the LB and UB for the given equation"""
if weights is None: weights = np.ones_like(b) # If no weight given, equal weight to all
w = np.array(weights)
weighted_A, weighted_b = np.array(A) * w[:, np.newaxis], np.array(b) * w # Apply weights to A, b
try:
solve = optimize.lsq_linear(weighted_A, weighted_b, bounds=list(zip(*bounds)), tol=1e-2)
return solve["x"]
except np.linalg.LinAlgError as e:
out = f"SVD did not converge in Lin Least Sq. Printing params: {A}, {b}"
raise ArithmeticError(out)
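# Minimal usage sketch (illustrative values only): solve [x0, x1] from
#   1*x0 + 0*x1 = 1 (weight 2) and 0*x0 + 1*x1 = 3 (weight 1), with both unknowns bounded to [0, 10]:
#   weighted_bound_least_squares([[1, 0], [0, 1]], [1, 3], weights=[2, 1], bounds=[(0, 10), (0, 10)])
# which returns a vector close to [1.0, 3.0].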
class InverseDynamicsSolver:
"""Through scipy optimisation, Skeleton finds a set of force data that corresponds to the correct kinematic data.
Takes a skeleton, and the relevant bones and joints, and solves the set of forces that correspond to correct kinematics."""
def __init__(self, joint_data, target_bones, body_joints, no_torque_joints=None, no_reaction_joints=None,
foot_joints=None, leg_spring_joints=None, model=Model(),
freq=50.0, name="output", is_mocap=True):
# replace None arguments with empty lists (rebinding a loop variable would not affect the originals)
if no_torque_joints is None: no_torque_joints = []
if no_reaction_joints is None: no_reaction_joints = []
if foot_joints is None: foot_joints = []
if leg_spring_joints is None: leg_spring_joints = []
self.name = name
self.freq = freq
self.n_frames, self.n_joints, _ = joint_data.shape
self.model = model
self.is_mocap = is_mocap
# Preprocess joint data - basic smoothing
if is_mocap:
window_length = self.freq // 2
else:
window_length = 0.75 * self.freq
if window_length % 2 == 0: window_length -= 1
self.T = self.n_frames / self.freq
self.smooth = lambda X, p=5: signal.savgol_filter(X, window_length=int(window_length), polyorder=p, axis=0)
p = 5 if self.is_mocap else 2
self.unsmoothed_data = joint_data # save unsmoothed data for other uses
self.joint_pos = self.smooth(joint_data, p=p)
self.joint_vel = time_deriv(self.joint_pos, 1 / freq)
self.joint_accel = time_deriv(self.smooth(self.joint_vel), 1 / freq)
self.foot_joints = foot_joints
self.body_joints = body_joints
self.get_foot_joint_from_index = {} # Identify which foot from the index
for fj in self.foot_joints:
for bone, (j1, j2) in target_bones.items():
if fj in [j1, j2]:
self.get_foot_joint_from_index[fj] = bone
self.no_torque_joints = no_torque_joints
self.no_reaction_joints = no_reaction_joints
self.target_bones_dict = target_bones # for use in plotting
self.target_bones = []
self.total_mass = 0
for bone, (joint1, joint2) in target_bones.items():
# Calculate length using the initial positions of jointA and B.
# Smoothing functions can cause issues for the first few frames, so take the average over later frames
frames = [50, 51, 52, 53, 54, 55, 56]
n_averaging = len(frames)
length = 0
for frame in frames:
posA = Vector(*self.joint_pos[frame, joint1])
posB = Vector(*self.joint_pos[frame, joint2])
if posA.length() == 0 or posB.length() == 0:
n_averaging -= 1
else:
length += posA > posB
length = length / n_averaging # avg of all the frames data taken from
if length == 0:
print(f"Warning: Error in calculating length of '{bone}'")
length = 0.01
b = DoubleCylinder(start=joint1, end=joint2, length=length, name=bone, freq=freq,
**self.model.bone_length_definitions["normal"](length))
self.target_bones.append(b) # add bone to list
self.total_mass += b.mass
self.body = Body(*body_joints, self.joint_pos, freq=freq, name="body")
self.body.get_kinematics(
np.stack([self.joint_pos[:, body_joints[0]], self.joint_pos[:, body_joints[1]]], axis=1))
self.body.get_dynamics()
self.total_mass += self.body.mass
# Paw parameters
m = self.total_mass
paw_d = self.model.paws
self.L0_paws = {"front": paw_d["L0_front"] * m, "rear": paw_d["L0_rear"] * m}
self.k_paws = {"front": paw_d["k_front"] * m, "rear": paw_d["k_rear"] * m ** paw_d["k_rear_prop"]}
self.c_paws = {"front": paw_d["c_front"] * m, "rear": paw_d["c_rear"] * m}
# if self.model.equation_weighting['Paw spring'] > 0:
self.set_paw_equilibrium()
self.get_dynamics()
self.leg_spring_joints = leg_spring_joints
self.calc_leg_lengths()
self.equation_weighting = model.equation_weighting
def get_dynamics(self):
"""Gets dynamics of centre of mass of each bone & body"""
for bone in self.target_bones:
bone.get_kinematics(self.joint_pos[:, [bone.start, bone.end]])
bone.get_dynamics()
body = self.body
body.get_kinematics(
np.stack([self.joint_pos[:, body.start_joints], self.joint_pos[:, body.end_joints]], axis=1))
body.get_dynamics()
def calculate_forces(self, n_frame, report_equations=True):
"""
Sets up a system of linear equations governing the motion of the skeleton at a given frame.
These equations are:
- FREE JOINTS:
The torques at free joints are zero. Free joints are joints connected to only one bone, at the ends of the body, e.g. the feet
- INERTIA:
On each bone, the sum of the two joint forces is equal to the mass * acceleration of the bone
- ROTATION:
On each bone, the net torque about the bone is equal to the I * alpha_g of the bone
- BODY:
The body is set up as a slightly different type of bone, in which it has several joints connected at either end, and its position is dictated by all of those joints.
See the code for it below, it has its own set of inertial and rotational equations.
This is set up as a least squares problem Ax = b, where A is a matrix of coefficients to multiply the unknowns by,
x is the unknowns (in the form [F_1_x, F_1_y, F_1_z, F_2_x, ..., T_1, T_2, ...]),
b is the result of the equations.
A weighting is also applied to each row to weight the least squares problem (e.g. to prioritise the free-joint equations).
The problem also has bounds applied to it. For now, these bounds are simply that the foot joint vertical reaction forces are non-negative.
Improvements:
- Replace the current spinal system with a large non axisymmetric cylinder to represent the body
- Add a sphere to represent the head
"""
# Consult report for explanation of system
A = []
b = []
weights = [] # Collect weightings for each equation as they are added to the system
equation_weighting = self.equation_weighting
# Reasonable bounds for each force, and for each torque. Current limits set at 10 * weight for mass, 10 * mass at one metre for torque
max_force = 3 * self.total_mass * g
max_torque = 3 * self.total_mass * g
# bounds can be adjusted further for specific joints (e.g. no downward reaction at the feet)
bounds = [(-max_force, max_force)] * (3 * self.n_joints) + [(-max_torque, max_torque)] * (self.n_joints)
def A_row(vals={}):
"""Returns a row of 0s length 4 * self.n_joints, with other vectors in any indices in vals.
vals is a dict of index:vector"""
row = [0.0] * 4 * self.n_joints
for index, val in vals.items():
row[index] = val
return row
def add_blank_row():
A.append(A_row({}))
b.append(0)
weights.append(0)
def add_n_blank_rows(n=1):
for i in range(n): add_blank_row()
null, unit, g_vec = Vector(0, 0, 0), Vector(1, 1, 1), Vector(0, 0, -g)
n_joints = self.n_joints
def get_index(joint, dimension=0, is_force=True):
"""Get correct index of D"""
return (3 * n_joints * int(not is_force)) + ([1, 3][is_force] * joint) + dimension
# dimension = 0 for x, 1 for y, 2 for z
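# Illustrative layout of the unknown vector (assuming n_joints = 2):
# [F0_x, F0_y, F0_z, F1_x, F1_y, F1_z, T0, T1], so get_index(1, 2, is_force=True) -> 5
# and get_index(1, is_force=False) -> 7.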
# First, add the equations to show that the torques in each of the foot joints are zero
for no_torque_joint in self.no_torque_joints:
# Set up the equation 1 * tau_{foot_joint} = 0
# BOUNDARY CONDITIONS ARE FIXED, RATHER THAN AN ADDITIONAL EQUATION. SO INCORPORATE THEM INTO BOUNDS
bounds[get_index(no_torque_joint, is_force=False)] = (0, 1e-10)
for no_reaction_joint in self.no_reaction_joints: # BC : no reactions
for dim in [0, 1, 2]:
bounds[get_index(no_reaction_joint, dimension=dim, is_force=True)] = (0, 1e-10)
for foot_joint in self.foot_joints:
## If the feet are a certain amount off the ground for that foot, also assign the reaction forces to be zero
bone_name = self.get_foot_joint_from_index[foot_joint]
end = bone_name.split(" ")[1] # get 'front' or 'rear'
L0 = self.L0_paws[end] # get stiffness from 'front' or 'rear' in bone name
# L0 = self.paw_equilibrium_values[foot_joint]
k_paw = self.k_paws[end]
c_paw = self.c_paws[end]
paw_disp = self.paw_disps[foot_joint][n_frame]
# BC: no reaction when the foot is off the ground. The height-based test (joint z >= L0) is
# superseded by the footfall-detector displacement: the paw counts as off the ground when its
# spring displacement is zero.
paw_off_ground = paw_disp == 0
if paw_off_ground: # BC: no reaction in foot off ground
for dim in [0, 1, 2]:
bounds[get_index(foot_joint, dimension=dim, is_force=True)] = (0, 1e-10)
add_n_blank_rows(4) # for consistency of number of eqns
else: # If paw near ground, add force due to spring
height = self.unsmoothed_data[n_frame, foot_joint, 2]
eps = L0 - height # min((L0 - height), L0/2)
eps_dot = self.joint_vel[n_frame, foot_joint, 2]
F_damp = 0 # c_paw * eps_dot
if self.model.equation_weighting['Paw spring'] > 0:
## PAW SPRING MODEL
eps = paw_disp
F_spring = k_paw * eps + c_paw * eps_dot
if foot_joint != 20:
A.append(A_row({get_index(foot_joint, dimension=2, is_force=True): 1}))
b.append(F_spring + F_damp)
weights.append(equation_weighting["Paw spring"])
if self.model.equation_weighting['Leg spring'] > 0:
## LEG SPRING MODEL
K = 3000 if end == "front" else 2000
for dim in [0, 1, 2]:
# component = self.leg_vecs[foot_joint][n_frame][dim]
F_spring = self.leg_disps[foot_joint][n_frame] * K # * component
A.append(A_row({get_index(foot_joint, dimension=dim, is_force=True): 1}))
b.append(F_spring + F_damp)
weights.append(equation_weighting["Leg spring"])
# Set bounds for foot joints to only have positive vertical reactions
bounds[get_index(foot_joint, dimension=2, is_force=True)] = (0, max_force)
bounds[get_index(foot_joint, dimension=1, is_force=True)] = (0, 1e-10) # set Fy=0
for bone in self.target_bones:
j_1, j_2 = bone.start, bone.end
x_1, x_2 = bone.X[n_frame]
# F_1 + F_2 + F_grav = F_net
F_net = bone.F_net[n_frame]
for dim in [0, 1, 2]:
A.append(A_row({get_index(j_1, dim): 1, get_index(j_2, dim): - 1}))
b.append((F_net - bone.mass * g_vec)[dim])
weights.append(equation_weighting["Inertial"])
tau_net = bone.tau_net[n_frame]
x_g = bone.XG[n_frame]
r_1, r_2 = (x_1 - x_g), (x_2 - x_g)
# direction of each T is perpendicular to the bones that the joint is on
adjacent_1_bone = [b for b in self.target_bones if b.end == j_1 and b != bone]
if len(adjacent_1_bone) == 1: # if there is an adjacent bone
adj_bone = adjacent_1_bone[0]
T_1_dir = Vector(*r_1).cross((adj_bone.X[n_frame, 1] - adj_bone.XG[n_frame])).unit()
if len(adjacent_1_bone) == 0 or np.isnan(T_1_dir).any(): # if no adjacent, or if above calc causes error
T_1_dir = (0, 1, 0) # Improve later, for now say all torques about y axis
adjacent_2_bone = [b for b in self.target_bones if b.start == j_2 and b != bone]
if len(adjacent_2_bone) == 1: # if there is an adjacent bone
adj_bone = adjacent_2_bone[0]
T_2_dir = Vector(*r_2).cross((adj_bone.X[n_frame, 0] - adj_bone.XG[n_frame])).unit()
if len(adjacent_2_bone) == 0 or np.isnan(T_2_dir).any(): # if no adjacent, or if above calc causes error
T_2_dir = (0, 1, 0) # Improve later, for now say all torques about y axis
for dim in [0, 1, 2]:
# This loop essentially writes out the following equations into A and b for each dimension (x,y,z):
# r1 x F1 + r2 x F2 + T1 + T2 = T_net
# The cross product of r = (x,y,z) and F = (Fx, Fy, Fz) yields (Fz*y - Fy*z, ...)
# Take the x component, x -> Fz*y - Fy*z
# Notice that Fy is negative and Fz is positive. In general, for the forces, the component one dimension
# below the current one enters with a positive sign and the one above with a negative sign (cyclic relations)
# - this pattern is used below
# Get dim above and below, wrapping round for below x and above z
dim_below = (dim - 1) % 3
dim_above = (dim + 1) % 3
coeff_dict = {
get_index(j_1, dim): 0,
# eg no effect of F_x in the x directional torque (not relevant statement, only here for readability)
get_index(j_1, dim_above): - r_1[dim_below], # eg multiply - z by Fy in the x direction
get_index(j_1, dim_below): r_1[dim_above], # eg multiply y by Fz in the x direction
# Reversed polarity for joint 2 as the desired force is - F2
get_index(j_2, dim_above): r_2[dim_below],
get_index(j_2, dim_below): - r_2[dim_above],
# Add the torques on each joint
get_index(j_1, is_force=False): T_1_dir[dim],
get_index(j_2, is_force=False): -T_2_dir[dim]
}
A.append(A_row(coeff_dict))
b.append(tau_net[dim])
weights.append(equation_weighting["Rotational"])
### SOLVE FORCES ON BODY. Note body defined so all joint forces/torques on it are positive
body = self.body
F_net = body.F_net[n_frame]
# BODY INERTIAL FORCES
for dim in [0, 1, 2]:
A.append(A_row({get_index(j, dim): 1 for j in self.body.start_joints + self.body.end_joints}))
b.append((F_net - body.mass * g_vec)[dim])
weights.append(equation_weighting["Inertial"])
# BODY ROTATIONAL FORCES - same as for bones
x_g = body.XG[n_frame]
tau_net = body.tau_net[n_frame]
# Improve above later, for now say all torques about y axis
T_dir = (0, 1, 0)
for dim in [0, 1, 2]:
coeff_dict = {}
for joint in body.start_joints + body.end_joints:
x_j = self.joint_pos[n_frame, joint]
r_j = (x_j - x_g) # position vector to centre
# Get dim above and below, wrapping round for below x and above z
dim_below, dim_above = (dim - 1) % 3, (dim + 1) % 3
coeff_dict[get_index(joint, dim_above)] = -r_j[dim_below] # eg multiply - z by Fy in the x direction
coeff_dict[get_index(joint, dim_below)] = r_j[dim_above] # eg multiply y by Fz in the x direction
coeff_dict[get_index(joint, is_force=False)] = T_dir[dim] # Add pure torque of pin
A.append(A_row(coeff_dict))
b.append(tau_net[dim])
weights.append(equation_weighting["Rotational"])
# print each line of the equations defined by A, b, with the final result
# Only print variables with both non-zero values, and non-zero coefficients
if report_equations:
print(f"----Frame {n_frame}----")
params = []
for joint in range(self.n_joints):
for dim in "xyz":
params.append(F"F_{joint}_{dim}") # Add forces by joint
for joint in range(self.n_joints):
params.append(F"T_{joint}") # Add torques by joint
for n, (coeffs, result) in enumerate(zip(A, b)):
s = []
for j, (coeff, param) in enumerate(zip(coeffs, params)):
if coeff != 0:
s.append(f"{round(coeff, 3)} * {param}")
# b_actual = np.dot(A[n], D)
# pct_error = abs(100 * (b_actual - result) / b_actual)
if n <= 7:
print(f"{" + ".join(s)} = {round(result, 3)}") # ({round(b_actual, 3)}) [{round(pct_error, 2)}%]")
return A, b, weights, bounds
def solve_forces(self, report_equations=False, end_frames_disregarded=5, prefix="",
save=True):
"""Solves the forces at each frame for the system, collects them and saves them to .npy files.
Note: Currently, due to smoothing, the first 5 and last 5 frames are disregarded"""
self.get_dynamics()
n_joints = self.n_joints
if report_equations:
print("Solving system...")
print(f"Total mass {round(self.total_mass, 2)} kg.")
# If dir doesn't exist, make it
dir = path_join(DataSources.dynamics_data, self.name)
if self.name not in os.listdir(DataSources.dynamics_data):
os.mkdir(dir)
forces, torques = [], []
f_shape, t_shape = (self.n_joints, 3), (self.n_joints,)
# Pad with zeros at either end for the frames that cannot be calculated (the first and last end_frames_disregarded frames)
for i in range(end_frames_disregarded):
forces.append(np.zeros(f_shape))
torques.append(np.zeros(t_shape))
calc_forces = []
calc_torques = []
progress = tqdm(total=self.n_frames - 2 * end_frames_disregarded)
for n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):
A, b, weights, bounds = self.calculate_forces(n_frame, report_equations=report_equations)
D = weighted_bound_least_squares(A, b, weights, bounds, rcond=None)
f, tau = D[:(3 * n_joints)], D[(3 * n_joints):]
f, tau = f.reshape((n_joints, 3)), tau.reshape((n_joints))
calc_forces.append(f)
calc_torques.append(tau)
progress.update()
forces[end_frames_disregarded: - end_frames_disregarded] = calc_forces
torques += calc_torques
for i in range(end_frames_disregarded):
forces.append(np.zeros(f_shape))
torques.append(np.zeros(t_shape))
if save:
np.save(path_join(dir, prefix + "forces.npy"), forces)
np.save(path_join(dir, prefix + "torques.npy"), torques)
return np.array(forces), np.array(torques)
def get_com_position(self):
"""Calculates the position of the centre of mass of the whole system at each timestep"""
return sum(b.XG * b.mass for b in self.target_bones + [self.body]) / self.total_mass
def return_equations(self, end_frames_disregarded=5):
"""For each frame, return the equation vector b"""
self.get_dynamics()
bs = []
for n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):
A, b, weights, bounds = self.calculate_forces(n_frame, report_equations=False)
bs.append(b)
return np.array(bs)
def set_paw_equilibrium(self):
"""Get paw equilibrium from mocap data by finding the drop of the paw.
This method will work for the current dataset, but is likely not robust, so can be replaced with
a better method of finding the paw equilibrium at a later date"""
if self.is_mocap:
paw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]
else:
paw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]
self.paw_disps = {} # paw joint: displacement over time, for paw spring model
min_contacts_detected = 3 # minimum requirement to use peak detection mode
plot = True
if plot:
fig, axes = plt.subplots(nrows=2, ncols=2)
footfall_detector = FootfallDetector(train=False, load=True, name=["smal", "mocap"][self.is_mocap])
for n, paw in enumerate(self.foot_joints):
contact_ends_failed = False
disp = np.zeros((self.n_frames)) # will give eps - the displacement of the paw from equilibrium
# for when the paw is in contact with the ground
Z = paw_z_heights[:, n]
on_ground = footfall_detector.process_clip(Z)
on_ground_idxs = np.where(on_ground > 0)[0]
if plot:
axes[n // 2, n % 2].plot(Z.mean() * (on_ground), color="red", alpha=0.3)
min_footfall_width = 3 # 3 frames long minimum to count as a footfall
footfalls = consecutive(on_ground_idxs)
trigger_height = np.percentile(np.array([Z[ff].max() for ff in footfalls]), 25) # 25th-percentile trigger height across footfalls
for footfall in footfalls:
if len(footfall) > min_footfall_width:
# disp[footfall] = Z[footfall].max() - Z[footfall] # old
disp[footfall] = np.clip(trigger_height - Z[footfall], a_min=0, a_max=None)
self.paw_disps[paw] = disp
if plot:
ax = axes[n // 2, n % 2]
ax.plot(Z)
Z_on_ground = Z.copy()
Z_on_ground[disp == 0] = np.nan
ax.plot(Z_on_ground, color="green")
ax.plot(disp)
Z_smoothed = self.joint_pos[:, paw, 2]
ax.set_title(n)
if plot:
plt.show(block=False)
plt.draw()
plt.pause(1e-8)
def view_ground_displacements(self, deriv=0):
"""Plot and show a graph of vertical displacement against frames for each paw - identifying L0 for each paw"""
fig, axes = plt.subplots(nrows=4)
for n, j in enumerate(self.foot_joints):
label = foot_joint_labels[n]
ax = axes[n]
if deriv == 0:
X = self.joint_pos[:, j, 2]
X_unsmoothed = self.unsmoothed_data[:, j, 2]
ax.plot(X)
ax.plot(X_unsmoothed, alpha=.6)
# ax.axhline(self.paw_equilibrium_values[j], ls = "--")
ax.axhline(self.L0_paws[label.split(" ")[0]])
elif deriv == 1:
ax.plot(self.joint_vel[:, j, 2])
ax.set_title(label)
plt.show()
def view_com_displacements(self, deriv=0):
"""Plot and show graph of X, Y, and Z motion of CoM of dog.
If deriv > 0, plot that derivative of the displacement"""
fig, ax = plt.subplots()
com_data = self.get_com_position()
if deriv > 0:
com_data = nth_time_deriv(com_data, 1 / self.freq, n=deriv)
for i in [0, 1, 2]:
ax.plot(com_data[:, i], label="xyz"[i])
ax.legend()
plt.show()
def calc_leg_lengths(self):
"""Uses the compliant-legged walking model estimation to work out the average length of legs.
Assume legs are undeformed while off ground. Work out avg distance from leg to COM"""
self.leg_disps = {} # length of leg over time for each paw
self.leg_vecs = {} # normalised vector of leg spring direction for each paw
plot = True
if plot: fig, axes = plt.subplots(nrows=2, ncols=2, sharex="all", sharey="row")
for n, paw in enumerate(self.foot_joints):
is_front = n < 2 # Assumes order of f left, f right, r left, r right
tol = 1e-3
on_ground = self.paw_disps[paw] > tol
off_ground = self.paw_disps[paw] <= tol
# centre_of_rot = self.body.XG[:]#self.body.X[:, int(is_front)]
# centre_of_rot = self.unsmoothed_data[:, self.body_joints[is_front][n%2]]
if self.is_mocap:
centre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]
paw_pos = self.unsmoothed_data[:, paw]
else:
centre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]
paw_pos = self.unsmoothed_data[:, paw]
X, Z = np.swapaxes(centre_of_rot[:, [0, 2]], 0, 1) # get X, Z position of CoM
X_PAW, Z_PAW = np.swapaxes(paw_pos[:, [0, 2]], 0, 1) # get X, Z position of CoM
THETA = np.arctan((X_PAW - X) / (Z - Z_PAW)) # angle between spring and vertical
L = ((X - X_PAW) ** 2 + (Z - Z_PAW) ** 2) ** .5
L0 = (L).max()
z_disp = (L - L0) * np.cos(THETA)
x_disp = (L - L0) * np.sin(THETA)
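# (L - L0) is the spring deflection along the leg; the two lines above resolve it into vertical (z_disp)
# and horizontal (x_disp) components using THETA, the leg's angle from vertical. Descriptive note only.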
# get z displacement by footfall
disp = np.zeros(self.n_frames)
# if self.is_mocap:
for ff in consecutive(np.where(on_ground)[0]):
if len(ff) < 3: continue # min width of footfall required
disp[ff] = z_disp[ff].max() - z_disp[ff]
# else:
# disp = -z_disp
self.leg_disps[paw] = disp
if plot:
ax = axes[n // 2, n % 2]
# ax.plot(L)
ax.plot(L - L0)
ax.plot(disp, color="green")
if plot:
plt.tight_layout()
# plt.show()
plt.show(block=False)
plt.draw()
plt.pause(1e-8)
def norm_kin_data(kin_data, targ_markers=None):
"""Normalise kinematic data.
If targ_markers given, normalise so these markers are at desired height"""
norm_height = 0.4 # 0.635 # fixed to Ally height for now
# scale so minimum is at (0,0,0)
for dim in [0, 1, 2]:
kin_data[:, :, dim] -= kin_data[:, :, dim].min()
if targ_markers is None:
kin_data = norm_height * kin_data / np.max(kin_data[:, :, 2])
elif targ_markers is not None:
height_target = kin_data[:, targ_markers, 2].mean()
kin_data = norm_height * kin_data / height_target
return kin_data
def get_dyn_data(dynamic_src, clip_length, mass, is_mocap=True, target_freq=100):
"""Loads and returns kinematic data"""
force_plate_data, force_plate_tdelay = load_force_plate_data(dynamic_src, is_mocap)
raw_dyn_data = force_plate_data
raw_dyn_data *= 1 / (mass * 9.81)
# resample if requested
if target_freq != freq_forceplate:
target_frames = int(len(raw_dyn_data) * target_freq / freq_forceplate)
dyn_data = signal.resample(raw_dyn_data, target_frames)
# this resampling causes a jumpiness for the periods of zero value. Fix that here:
tol = 1e-4
for paw in range(dyn_data.shape[1]):
# get indices where should be 0
antifootfalls = consecutive(np.where(raw_dyn_data[:, paw] < tol)[0])
min_width = 10 # in frames
for aff in antifootfalls:
if len(aff) < min_width: continue
start, end = aff[0] * target_freq / freq_forceplate, aff[-1] * target_freq / freq_forceplate
# ^ start and end indices, in remapped frame
dyn_data[int(start):int(end), paw] = 0 # set to 0
freq = target_freq
else:
freq = freq_forceplate
dyn_data = raw_dyn_data
frame_delay = int(freq * force_plate_tdelay)
n_frames_forceplate = int(clip_length * freq) # number of frames for forceplate to be same time length as mocap
if frame_delay == 0:
return dyn_data[:n_frames_forceplate]
if frame_delay > 0: # crop forceplate data
return dyn_data[frame_delay: frame_delay + n_frames_forceplate] # crop forceplate data to match mocap/SMAL data
else: # frame_delay < 0, pad forceplate data
return np.pad(dyn_data, ((int(-frame_delay), 0), (0, 0)))[:n_frames_forceplate]
kin_src_to_solver_name = lambda s: s.replace("/", " ").replace(" ", "_").replace(".c3d", "")
def load_solver(kin_src, clip_length, mocap=True, resample_freq=100):
if mocap:
joint_data = C3DData(ax=None, src=kin_src, interpolate=True, crop=clip_length,
fix_rotations="3 kph" in kin_src) # only fix rotations for 3 kph for now
else:
joint_data = SMALData(kin_src, freq=30, norm=True, crop=clip_length, smooth=True)
joint_data.resample_at(resample_freq) ### TRY RESAMPLING DATA TO 100 Hz
target_bones, body_joints, no_torque_joints, leg_spring_joints = joint_data.generate_skeleton_mapping()
# Normalise data based on z height so the dog is at the fixed standing height used in norm_kin_data (smoothing is done inside the solver)
kin_data = np.array(joint_data.all_data)
kin_data = norm_kin_data(kin_data, targ_markers=leg_spring_joints)
solver_kwargs = dict(target_bones=target_bones,
body_joints=body_joints, no_torque_joints=no_torque_joints,
foot_joints=no_torque_joints, leg_spring_joints=leg_spring_joints,
freq=joint_data.freq,
name=kin_src_to_solver_name(kin_src))
solver = InverseDynamicsSolver(joint_data=kin_data, **solver_kwargs, is_mocap=mocap)
print(f"Solver loaded. Mass = {solver.total_mass:.1f} kg.")
return solver
|
"""DEFINES THE INVERSEDYNAMICS SOLVER, A Solver for solving the joint based model of a dog."""
from scipy import optimize, signal
from data.data_loader import C3DData, load_force_plate_data, ForcePlateData, SMALData, get_delay_between, DataSources, \
path_join
from vis.utils import *
from vis import visualisations
from dynamics.footfall_detector import FootfallDetector
from tqdm import tqdm
# pure constants (no optimisation needed)
g = 9.81
freq_forceplate = 100 # Hz
foot_joint_labels = ["front left", "front right", "rear left", "rear right"]
foot_joint_indices = [0, 9, 23, 20] # for set 2 3r3
class Model:
"""ID Model, with all parameters derived/optimised"""
def __init__(self):
# CONSTANTS
self.paws = {}
self.bone_density = 1950 # Estimate - needs refining! From paper: Development of a neuromusculoskeletal computer model in a chondrodystrophic dog.
self.muscle_density = 1060 # From above
# params to optimise
self.bone_length_definitions = {
"normal": lambda l: dict(inner_radius=0.01, outer_radius=0.05, displacement=0),
"body": lambda l: dict(inner_radius=l / 20, outer_radius=l / 7, displacement=l / 4 - l / 20), }
# Paw parameters. All scaled to be in standard form - exponent in separate dict.
self.paw_params_normalised = {
"L0_front": 6.9, # 6.9 # in .1mm
"L0_rear": 6.9, # in .1mm
"k_front": 3.42 * .18, # in kN/m
"k_rear": 2.0 * .21, # in kN/m
"c_front": 20,
"c_rear": 20,
"k_rear_prop": 0.85, # k = k_rear * m **.85
"frame_delay": 0 # Used for analysis of paw treadmill forces. Not used for normal ID solver
}
self.paw_exponents = {
"L0_front": -4,
"L0_rear": -4,
"k_front": 3,
"k_rear": 3,
"c_front": 0,
"c_rear": 0,
"k_rear_prop": 0,
"frame_delay": 0
}
self.calc_paw_params()
self.freq_par_data = 200
# weightings used in dynamics calculations
self.equation_weighting = {
"Inertial": 2,
"Rotational": 2,
"Leg spring": 0.5,
"Paw spring": 1,
}
def calc_paw_params(self):
"""Calculates paw parameters (separate function for optimisation purposes)"""
for param, val in self.paw_params_normalised.items():
self.paws[param] = val * 10 ** (self.paw_exponents[param])
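# Illustrative values from the defaults above (for clarity, not new behaviour):
# paws["L0_front"] = 6.9 * 10**-4 m and paws["k_front"] = 3.42 * 0.18 * 10**3 ≈ 616 N/m;
# these per-unit-mass values are later scaled by total body mass in InverseDynamicsSolver
# (k_rear by mass**k_rear_prop).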
def edit_paw_param(self, param, val):
"""Edit paw parameter (separate for optimisation purposes)"""
self.paw_params_normalised[param] = val
self.calc_paw_params()
model = Model()
def time_deriv(X, dt):
"""Finds the time derivative of a given series of data.
Always treats the first dimension as time - works for any number of dimensions (n_frames, M, N, O, ...).
For all except the first and last values, calculates a central difference over 2 timesteps"""
diff = np.zeros_like(X)
diff[0] = X[1] - X[0]
diff[1:-1] = (X[2:] - X[:-2]) / 2
diff[-1] = X[-1] - X[-2]
return diff * 1 / dt
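# Illustrative check (toy numbers, not from the original): with X = [0, 1, 4, 9] and dt = 1,
# time_deriv returns [1, 2, 4, 5] - one-sided differences at the ends, central differences elsewhere.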
def nth_time_deriv(X, dt, n=2):
"""Recursively get the nth time derivative"""
if n == 1:
return time_deriv(X, dt)
else:
return time_deriv(nth_time_deriv(X, dt, n=n - 1), dt)
def get_principal_axes(vector=Vector(1, 0, 0), cardinal=np.identity(3)):
"""Given a vector, devise a basis of principle axis with any two perpendicular vectors (for application of an
axisymmetric object - cylinder) """
i, j, k = cardinal
K = vector.unit()
# Now find any two perp vectors to K
if not K.is_parallel(i):
I = K.cross(i).unit()
J = K.cross(I).unit()
else:
I = K.cross(j).unit()
J = K.cross(I).unit()
return np.array([I, J, K])
def I_cylinder(density, length, radius):
mass = density * np.pi * (radius ** 2) * length
Ixx, Izz = (length ** 2) / 12 + (radius ** 2) / 4, radius ** 2 / 2
return mass * np.diag([Ixx, Ixx, Izz])
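# (Standard result for a solid cylinder about its centroid with z along the axis:
#  Ixx = Iyy = m*(L**2/12 + r**2/4), Izz = m*r**2/2.)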
class DoubleCylinder:
"""An object comprised of a cylinder of given length between two end points, of radius inner_radius and density bone_density,
and an outer cylinder that does NOT share the same central axis, of radius outer_radius, displaced by a distance <displacement> normally from the centerline.
Cylinder is defined with the centerline vertical (z direction), and the displacement always in the normal closest to the z direction downwards.
For InverseDynamics calculations, this object will have a start and end index, which correspond to the joint indices in which the end point data is held.
"""
def __init__(self, start, end, length, inner_radius, outer_radius, displacement, freq=50.0, name=""):
self.name = name
self.freq = freq # Frequency, in Hz
self.start = start
self.end = end
self.length = length
self.displacement = displacement
if outer_radius is None: outer_radius = inner_radius
self.inner_mass = model.bone_density * np.pi * inner_radius ** 2 * self.length
self.outer_mass = model.muscle_density * np.pi * self.length * (outer_radius ** 2 - inner_radius ** 2)
self.mass = self.inner_mass + self.outer_mass
I_bone = I_cylinder(model.bone_density, length, inner_radius)
I_muscle = I_cylinder(model.muscle_density, length, outer_radius) - I_cylinder(model.muscle_density, length,
inner_radius)
# By parallel axis theorem, add component of I due to outer radius being displaced from the centerline axis
I_axis_displacement = np.zeros((3, 3))
I_axis_displacement[0, 0] = self.outer_mass * displacement ** 2
self.I = I_bone + I_muscle + I_axis_displacement # Inertia tensor in a reference frame in which the bone is lengthwise facing upwards
def get_kinematics(self, data):
"""Given a numpy array of time, data, of shape (n_frames, 2, 3),
giving the position data of both ends of the cylinder over time, compute the kinematics of the cylinder"""
X = self.X = np.array(data) # positions
V = self.V = time_deriv(X, 1 / self.freq) # velocities
A = self.A = time_deriv(V, 1 / self.freq) # accelerations
self.XG = np.mean(X, axis=1) # average over X
self.VG = np.mean(V, axis=1) # average over V
self.AG = np.mean(A, axis=1) # average over A
# Rotational
R = self.R = [Vector(*x[1]) - Vector(*x[0]) for x in X] # Vector from bone start to end in each frame
local_axes = [get_principal_axes(r) for r in R] # Get principal axes for each frame
# theta_g = (n_frame, 3) of angular rotation about i, j, k for each frame
# angular rotation about each axis is defined as 0 for the next vector in the cycle
# i.e. angular rotation about i = 0 for a vector parallel to j
zero_angles = [[0, 1, 0], [0, 0, 1], [1, 0, 0]] # definition of 'zero angle' vector for i, j, k
theta_g = []
# Compute theta_g in local axes first, where K is the unit vector
for n_frame in range(len(X) - 1):
local_ax = local_axes[n_frame]
# representation as a single rotation theta about an axis e (https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula)
a = R[n_frame] # rotation from one frame...
b = R[n_frame + 1] # ...to the next
if np.array_equal(a, b):
theta_g += [[0, 0, 0]] # If no rotation, return 0
else:
axis = np.cross(a, b) / (np.linalg.norm(np.cross(a, b))) # unit vector of omega
with np.errstate(invalid='raise'):
try:
alignment = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
alignment = np.clip(alignment, a_min=-1,
a_max=1) # clip between -1 and 1 to deal with rounding errors
angle = np.arccos(alignment) # magnitude of theta
except:
print((np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))
raise ValueError("INVALID ANGLE", a, b)
theta_g += [axis * angle]
theta_g = np.array(theta_g)
self.theta_g = signal.savgol_filter(theta_g, window_length=19, polyorder=2, axis=0)
self.omega_g = time_deriv(self.theta_g, dt=1 / self.freq)
self.alpha_g = time_deriv(self.omega_g, dt=1 / self.freq) # angular acceleration
self.I_fixed = [la.T @ self.I @ la for la in local_axes] # compute I in fixed reference frame at each frame
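# (la has the principal axes as rows, so la.T @ I @ la expresses the body-frame inertia tensor
#  in the fixed frame: I_fixed = R^T I R.)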
def get_dynamics(self):
"""Compute dynamics (F_net, Torque_net) at each frame"""
self.F_net = [self.mass * a_g for a_g in self.AG]
self.tau_net = [I_f @ alpha for (I_f, alpha) in zip(self.I_fixed, self.alpha_g)]
class Body(DoubleCylinder):
"""A unique case of double cylinder, where the (multiple) joints connect at the cylindrical surface at either end.
These joint attachments are defined by an angle from the i direction normal to the centerline at initialisation.
Dynamics for the body then must be calculated using a separate set of equations. Define the body such that all
joint bones point into it rather than out of it (i.e. all input forces are positive on the body) """
def __init__(self, start_joints, end_joints, all_joint_positions, **cylinder_kwaargs):
"""From the indices given by start_joints and end_joints, identify a cylinder shape that best fits these
points on either side, and create that as the cylinder. """
self.start_joints = start_joints
self.end_joints = end_joints
start_pos = Vector(*np.mean(all_joint_positions[40, start_joints], axis=0))
end_pos = Vector(*np.mean(all_joint_positions[40, end_joints], axis=0))
length = start_pos > end_pos
super().__init__(start=None, end=None, length=length, **model.bone_length_definitions["body"](length),
**cylinder_kwaargs)
def get_centre_of_gravity(self, start: 'Vector', end: 'Vector'):
"""Calculates the centre of gravity based on the displacement from the centerline."""
centreline_g = 0.5 * (start + end)
# to find the most downward normal, generate N equispaced normals and pick the one with the greatest dot product with -k
normal = (start - end).find_normal()
N = 20 # number of normals to consider
all_normals = [normal.rotate_about((start - end).unit(), angle=(n * 2 * np.pi / N)) for n in range(N)]
idx = np.argmax([v.dot(Vector(0, 0, -1)) for v in all_normals])
chosen_normal = all_normals[idx] # choose most downwards normal
return centreline_g + self.displacement * chosen_normal
def get_kinematics(self, data):
"""For body, data is of shape (n_frames, 2, 2, 3), where it is split by rear and front.
So average across rear and front to get behaviour of centerline, and then run normal get_kinematics"""
super().get_kinematics(np.mean(data, axis=2))
def weighted_bound_least_squares(A, b, weights=None, bounds=None, **kwargs):
"""Completes a least squares solve of the equation A x = b, to solve N unknowns from M equations
where A is an M x N matrix, x is an N x 1 vector, and b is an M x 1 vector.
Applies weightings to each row to favour certain datapoints. weights is an M x 1 vector.
Applies bounds, where bounds is an N x 2 array; each tuple gives the lower and upper bound for the corresponding unknown"""
if weights is None: weights = np.ones_like(b) # If no weight given, equal weight to all
w = np.array(weights)
weighted_A, weighted_b = np.array(A) * w[:, np.newaxis], np.array(b) * w # Apply weights to A, b
try:
solve = optimize.lsq_linear(weighted_A, weighted_b, bounds=list(zip(*bounds)), tol=1e-2)
return solve["x"]
except np.linalg.LinAlgError as e:
out = f"SVD did not converge in Lin Least Sq. Printing params: {A}, {b}"
raise ArithmeticError(out)
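# Minimal illustrative call (assumed toy numbers, not from the original code):
#   x = weighted_bound_least_squares([[1, 0], [0, 1], [1, 1]], [1.0, 2.0, 3.1],
#                                    weights=[1, 1, 0.5], bounds=[(0, 10), (0, 10)])
# solves two bounded unknowns from three weighted equations.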
class InverseDynamicsSolver:
"""Through scipy optimisation, Skeleton finds a set of force data that corresponds to the correct kinematic data.
Takes a skeleton, and the relevant bones and joints, and solves the set of forces that correspond to correct kinematics."""
def __init__(self, joint_data, target_bones, body_joints, no_torque_joints=None, no_reaction_joints=None,
foot_joints=None, leg_spring_joints=None, model=Model(),
freq=50.0, name="output", is_mocap=True):
for var in [foot_joints, leg_spring_joints, no_reaction_joints, no_torque_joints]:
if var is None:
var = []
self.name = name
self.freq = freq
self.n_frames, self.n_joints, _ = joint_data.shape
self.model = model
self.is_mocap = is_mocap
# Preprocess joint data - basic smoothing
if is_mocap:
window_length = self.freq // 2
else:
window_length = 0.75 * self.freq
if window_length % 2 == 0: window_length -= 1
self.T = self.n_frames / self.freq
self.smooth = lambda X, p=5: signal.savgol_filter(X, window_length=int(window_length), polyorder=p, axis=0)
p = 5 if self.is_mocap else 2
self.unsmoothed_data = joint_data # save unsmoothed data for other uses
self.joint_pos = self.smooth(joint_data, p=p)
self.joint_vel = time_deriv(self.joint_pos, 1 / freq)
self.joint_accel = time_deriv(self.smooth(self.joint_vel), 1 / freq)
self.foot_joints = foot_joints
self.body_joints = body_joints
self.get_foot_joint_from_index = {} # Identify which foot from the index
for fj in self.foot_joints:
for bone, (j1, j2) in target_bones.items():
if fj in [j1, j2]:
self.get_foot_joint_from_index[fj] = bone
self.no_torque_joints = no_torque_joints
self.no_reaction_joints = no_reaction_joints
self.target_bones_dict = target_bones # for use in plotting
self.target_bones = []
self.total_mass = 0
for bone, (joint1, joint2) in target_bones.items():
# Calculate length using the initial positions of jointA and B.
# Smoothing functions can cause the issues for the first few frames, so take avg of later frames
frames = [50, 51, 52, 53, 54, 55, 56]
n_averaging = len(frames)
length = 0
for frame in frames:
posA = Vector(*self.joint_pos[frame, joint1])
posB = Vector(*self.joint_pos[frame, joint2])
if posA.length() == 0 or posB.length() == 0:
n_averaging -= 1
else:
length += posA > posB
length = length / n_averaging # avg of all the frames data taken from
if length == 0:
print(f"Warning: Error in calculating length of '{bone}'")
length = 0.01
b = DoubleCylinder(start=joint1, end=joint2, length=length, name=bone, freq=freq,
**self.model.bone_length_definitions["normal"](length))
self.target_bones.append(b) # add bone to list
self.total_mass += b.mass
self.body = Body(*body_joints, self.joint_pos, freq=freq, name="body")
self.body.get_kinematics(
np.stack([self.joint_pos[:, body_joints[0]], self.joint_pos[:, body_joints[1]]], axis=1))
self.body.get_dynamics()
self.total_mass += self.body.mass
# Paw parameters
m = self.total_mass
paw_d = self.model.paws
self.L0_paws = {"front": paw_d["L0_front"] * m, "rear": paw_d["L0_rear"] * m}
self.k_paws = {"front": paw_d["k_front"] * m, "rear": paw_d["k_rear"] * m ** paw_d["k_rear_prop"]}
self.c_paws = {"front": paw_d["c_front"] * m, "rear": paw_d["c_rear"] * m}
# if self.model.equation_weighting['Paw spring'] > 0:
self.set_paw_equilibrium()
self.get_dynamics()
self.leg_spring_joints = leg_spring_joints
self.calc_leg_lengths()
self.equation_weighting = model.equation_weighting
def get_dynamics(self):
"""Gets dynamics of centre of mass of each bone & body"""
for bone in self.target_bones:
bone.get_kinematics(self.joint_pos[:, [bone.start, bone.end]])
bone.get_dynamics()
body = self.body
body.get_kinematics(
np.stack([self.joint_pos[:, body.start_joints], self.joint_pos[:, body.end_joints]], axis=1))
body.get_dynamics()
def calculate_forces(self, n_frame, report_equations=True):
"""
Sets up a system of linear equations governing the motion of the skeleton at a given frame.
These equations are:
- FREE JOINTS:
The torques at free joints are zero. Free joints are joints only connected to one bone, on the end of the body eg the feet
- INERTIA:
On each bone, the sum of the two joint forces is equal to the mass * acceleration of the bone
- ROTATION:
On each bone, the net torque about the bone is equal to the I * alpha_g of the bone
- BODY:
The body is set up as a slightly different type of bone, in which it has several joints connected at either end, and its position is dictated by all of those joints.
See the code for it below, it has its own set of inertial and rotational equations.
This is set up as a least squares problem Ax = b, where A is a matrix of coefficients to multiply the unknowns by,
x is the vector of unknowns (in the form [F_1_x, F_1_y, F_1_z, F_2_x, ..., T_1, T_2, ...]), and
b is the result of the equations.
A weighting is also applied to each row to weight the least squares problem (e.g. to prioritise the free-joint equations).
The problem also has bounds applied to it. For now, these bounds are simply that foot joint vertical reaction forces are non-negative.
Improvements:
- Replace the current spinal system with a large non axisymmetric cylinder to represent the body
- Add a sphere to represent the head
"""
# Consult report for explanation of system
A = []
b = []
weights = [] # Collect weightings for each equation as they are added to the system
equation_weighting = self.equation_weighting
# Reasonable bounds for each force and each torque. Current limits set at 3 * body weight for forces, and 3 * body weight at one metre for torques
max_force = 3 * self.total_mass * g
max_torque = 3 * self.total_mass * g
# bounds can be adjusted further for specific joints (e.g. no downwards reaction at the feet)
bounds = [(-max_force, max_force)] * (3 * self.n_joints) + [(-max_torque, max_torque)] * (self.n_joints)
def A_row(vals={}):
"""Returns a row of 0s length 4 * self.n_joints, with other vectors in any indices in vals.
vals is a dict of index:vector"""
row = [0.0] * 4 * self.n_joints
for index, val in vals.items():
row[index] = val
return row
def add_blank_row():
A.append(A_row({}))
b.append(0)
weights.append(0)
def add_n_blank_rows(n=1):
for i in range(n): add_blank_row()
null, unit, g_vec = Vector(0, 0, 0), Vector(1, 1, 1), Vector(0, 0, -g)
n_joints = self.n_joints
def get_index(joint, dimension=0, is_force=True):
"""Get correct index of D"""
return (3 * n_joints * int(not is_force)) + ([1, 3][is_force] * joint) + dimension
# dimension = 0 for x, 1 for y, 2 for z
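# Layout of the unknown vector D implied by get_index (spelled out for clarity):
#   D = [F_0_x, F_0_y, F_0_z, F_1_x, ..., F_{n-1}_z, T_0, T_1, ..., T_{n-1}]
# e.g. get_index(1, 2) -> 5 (F_1_z) and get_index(1, is_force=False) -> 3*n_joints + 1 (T_1).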
# First, add the equations to show that the torques in each of the foot joints are zero
for no_torque_joint in self.no_torque_joints:
# Set up the equation 1 * tau_{foot_joint} = 0
# BOUNDARY CONDITIONS ARE FIXED, RATHER THAN AN ADDITIONAL EQUATION. SO INCORPORATE THEM INTO BOUNDS
bounds[get_index(no_torque_joint, is_force=False)] = (0, 1e-10)
for no_reaction_joint in self.no_reaction_joints: # BC : no reactions
for dim in [0, 1, 2]:
bounds[get_index(no_reaction_joint, dimension=dim, is_force=True)] = (0, 1e-10)
for foot_joint in self.foot_joints:
## If the feet are a certain amount off the ground for that foot, also assign the reaction forces to be zero
bone_name = self.get_foot_joint_from_index[foot_joint]
end = bone_name.split(" ")[1] # get 'front' or 'rear'
L0 = self.L0_paws[end] # get the natural (equilibrium) paw length for 'front' or 'rear' from the bone name
# L0 = self.paw_equilibrium_values[foot_joint]
k_paw = self.k_paws[end]
c_paw = self.c_paws[end]
paw_disp = self.paw_disps[foot_joint][n_frame]
paw_off_ground = self.joint_pos[n_frame, foot_joint, 2] >= L0 # BC: no reaction in foot off ground
paw_off_ground = paw_disp == 0
if paw_off_ground: # BC: no reaction in foot off ground
for dim in [0, 1, 2]:
bounds[get_index(foot_joint, dimension=dim, is_force=True)] = (0, 1e-10)
add_n_blank_rows(4) # for consistency of number of eqns
else: # If paw near ground, add force due to spring
height = self.unsmoothed_data[n_frame, foot_joint, 2]
eps = L0 - height # min((L0 - height), L0/2)
eps_dot = self.joint_vel[n_frame, foot_joint, 2]
F_damp = 0 # c_paw * eps_dot
if self.model.equation_weighting['Paw spring'] > 0:
## PAW SPRING MODEL
eps = paw_disp
F_spring = k_paw * eps + c_paw * eps_dot
if foot_joint != 20:
A.append(A_row({get_index(foot_joint, dimension=2, is_force=True): 1}))
b.append(F_spring + F_damp)
weights.append(equation_weighting["Paw spring"])
if self.model.equation_weighting['Leg spring'] > 0:
## LEG SPRING MODEL
K = 3000 if end == "front" else 2000
for dim in [0, 1, 2]:
# component = self.leg_vecs[foot_joint][n_frame][dim]
F_spring = self.leg_disps[foot_joint][n_frame] * K # * component
A.append(A_row({get_index(foot_joint, dimension=dim, is_force=True): 1}))
b.append(F_spring + F_damp)
weights.append(equation_weighting["Leg spring"])
# Set bounds for foot joints to only have positive vertical reactions
bounds[get_index(foot_joint, dimension=2, is_force=True)] = (0, max_force)
bounds[get_index(foot_joint, dimension=1, is_force=True)] = (0, 1e-10) # set Fy=0
for bone in self.target_bones:
j_1, j_2 = bone.start, bone.end
x_1, x_2 = bone.X[n_frame]
# F_1 + F_2 + F_grav = F_net
F_net = bone.F_net[n_frame]
for dim in [0, 1, 2]:
A.append(A_row({get_index(j_1, dim): 1, get_index(j_2, dim): - 1}))
b.append((F_net - bone.mass * g_vec)[dim])
weights.append(equation_weighting["Inertial"])
tau_net = bone.tau_net[n_frame]
x_g = bone.XG[n_frame]
r_1, r_2 = (x_1 - x_g), (x_2 - x_g)
# direction of each T is perpendicular to the bones that the joint is on
adjacent_1_bone = [b for b in self.target_bones if b.end == j_1 and b != bone]
if len(adjacent_1_bone) == 1: # if there is an adjacent bone
adj_bone = adjacent_1_bone[0]
T_1_dir = Vector(*r_1).cross((adj_bone.X[n_frame, 1] - adj_bone.XG[n_frame])).unit()
if len(adjacent_1_bone) == 0 or np.isnan(T_1_dir).any(): # if no adjacent, or if above calc causes error
T_1_dir = (0, 1, 0) # Improve later, for now say all torques about y axis
adjacent_2_bone = [b for b in self.target_bones if b.start == j_2 and b != bone]
if len(adjacent_2_bone) == 1: # if there is an adjacent bone
adj_bone = adjacent_2_bone[0]
T_2_dir = Vector(*r_2).cross((adj_bone.X[n_frame, 0] - adj_bone.XG[n_frame])).unit()
if len(adjacent_2_bone) == 0 or np.isnan(T_2_dir).any(): # if no adjacent, or if above calc causes error
T_2_dir = (0, 1, 0) # Improve later, for now say all torques about y axis
for dim in [0, 1, 2]:
# This loop essentially writes out the following equations into A and b for each dimension (x,y,z):
# r1 x F1 + r2 x F2 + T1 + T2 = T_net
# The cross product of r = (x,y,z) and F = (Fx, Fy, Fz) yields (Fz*y - Fy*z, ...)
# Take the x component, x -> Fz*y - Fy*z
# Note that the Fy coefficient is negative and the Fz coefficient is positive. This holds in general for the forces:
# the component one dimension below the current one gets a positive coefficient, and the one above gets a negative one
# (cyclic relation) - this pattern is used below
# Get dim above and below, wrapping round for below x and above z
dim_below = (dim - 1) % 3
dim_above = (dim + 1) % 3
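# Worked instance for clarity (dim = 0, i.e. the x-row; this mirrors the coefficients set just below):
# dim_below = 2 (z), dim_above = 1 (y), so the row encodes
#   r1_y*F1_z - r1_z*F1_y - (r2_y*F2_z - r2_z*F2_y) + T_1_dir_x*T1 - T_2_dir_x*T2 = tau_net_x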
coeff_dict = {
get_index(j_1, dim): 0,
# e.g. F_x contributes nothing to the x-component of torque (zero coefficient kept only for readability)
get_index(j_1, dim_above): - r_1[dim_below], # eg multiply - z by Fy in the x direction
get_index(j_1, dim_below): r_1[dim_above], # eg multiply y by Fz in the x direction
# Reversed polarity for joint 2 as the desired force is - F2
get_index(j_2, dim_above): r_2[dim_below],
get_index(j_2, dim_below): - r_2[dim_above],
# Add the torques on each joint
get_index(j_1, is_force=False): T_1_dir[dim],
get_index(j_2, is_force=False): -T_2_dir[dim]
}
A.append(A_row(coeff_dict))
b.append(tau_net[dim])
weights.append(equation_weighting["Rotational"])
### SOLVE FORCES ON BODY. Note body defined so all joint forces/torques on it are positive
body = self.body
F_net = body.F_net[n_frame]
# BODY INERTIAL FORCES
for dim in [0, 1, 2]:
A.append(A_row({get_index(j, dim): 1 for j in self.body.start_joints + self.body.end_joints}))
b.append((F_net - body.mass * g_vec)[dim])
weights.append(equation_weighting["Inertial"])
# BODY ROTATIONAL FORCES - same as for bones
x_g = body.XG[n_frame]
tau_net = body.tau_net[n_frame]
# Improve above later, for now say all torques about y axis
T_dir = (0, 1, 0)
for dim in [0, 1, 2]:
coeff_dict = {}
for joint in body.start_joints + body.end_joints:
x_j = self.joint_pos[n_frame, joint]
r_j = (x_j - x_g) # position vector to centre
# Get dim above and below, wrapping round for below x and above z
dim_below, dim_above = (dim - 1) % 3, (dim + 1) % 3
coeff_dict[get_index(joint, dim_above)] = -r_j[dim_below] # eg multiply - z by Fy in the x direction
coeff_dict[get_index(joint, dim_below)] = r_j[dim_above] # eg multiply y by Fz in the x direction
coeff_dict[get_index(joint, is_force=False)] = T_dir[dim] # Add pure torque of pin
A.append(A_row(coeff_dict))
b.append(tau_net[dim])
weights.append(equation_weighting["Rotational"])
# print each line of the equations defined by A, b, with the final result
# Only print terms with non-zero coefficients
if report_equations:
print(f"----Frame {n_frame}----")
params = []
for joint in range(self.n_joints):
for dim in "xyz":
params.append(F"F_{joint}_{dim}") # Add forces by joint
for joint in range(self.n_joints):
params.append(F"T_{joint}") # Add torques by joint
for n, (coeffs, result) in enumerate(zip(A, b)):
s = []
for j, (coeff, param) in enumerate(zip(coeffs, params)):
if coeff != 0:
s.append(f"{round(coeff, 3)} * {param}")
# b_actual = np.dot(A[n], D)
# pct_error = abs(100 * (b_actual - result) / b_actual)
if n <= 7:
print(f"{' + '.join(s)} = {round(result, 3)}") # ({round(b_actual, 3)}) [{round(pct_error, 2)}%]")
return A, b, weights, bounds
def solve_forces(self, report_equations=False, end_frames_disregarded=5, prefix="",
save=True):
"""Solves the forces at each frame for the system, collects them and saves them to .npy files.
Note: Currently, due to smoothing, the first 5 and last 5 frames are disregarded"""
self.get_dynamics()
n_joints = self.n_joints
if report_equations:
print("Solving system...")
print(f"Total mass {round(self.total_mass, 2)} kg.")
# If dir doesn't exist, make it
dir = path_join(DataSources.dynamics_data, self.name)
if self.name not in os.listdir(DataSources.dynamics_data):
os.mkdir(dir)
forces, torques = [], []
f_shape, t_shape = (self.n_joints, 3), (self.n_joints,)
# Pad with zeros at either end for the frames that cannot be calculated (the first and last end_frames_disregarded frames)
for i in range(end_frames_disregarded):
forces.append(np.zeros(f_shape))
torques.append(np.zeros(t_shape))
calc_forces = []
calc_torques = []
progress = tqdm(total=self.n_frames - 2 * end_frames_disregarded)
for n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):
A, b, weights, bounds = self.calculate_forces(n_frame, report_equations=report_equations)
D = weighted_bound_least_squares(A, b, weights, bounds, rcond=None)
f, tau = D[:(3 * n_joints)], D[(3 * n_joints):]
f, tau = f.reshape((n_joints, 3)), tau.reshape((n_joints))
calc_forces.append(f)
calc_torques.append(tau)
progress.update()
forces[end_frames_disregarded: - end_frames_disregarded] = calc_forces
torques += calc_torques
for i in range(end_frames_disregarded):
forces.append(np.zeros(f_shape))
torques.append(np.zeros(t_shape))
if save:
np.save(path_join(dir, prefix + "forces.npy"), forces)
np.save(path_join(dir, prefix + "torques.npy"), torques)
return np.array(forces), np.array(torques)
def get_com_position(self):
"""Calculates the position of the centre of mass of the whole system at each timestep"""
return sum(b.XG * b.mass for b in self.target_bones + [self.body]) / self.total_mass
def return_equations(self, end_frames_disregarded=5):
"""For each frame, return the equation vector b"""
self.get_dynamics()
bs = []
for n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):
A, b, weights, bounds = self.calculate_forces(n_frame, report_equations=False)
bs.append(b)
return np.array(bs)
def set_paw_equilibrium(self):
"""Get paw equilibrium from mocap data by finding the drop of the paw.
This method will work for the current dataset, but is likely not robust, so can be replaced with
a better method of finding the paw equilibrium at a later date"""
if self.is_mocap:
paw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]
else:
paw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]
self.paw_disps = {} # paw joint: displacement over time, for paw spring model
min_contacts_detected = 3 # minimum requirement to use peak detection mode
plot = True
if plot:
fig, axes = plt.subplots(nrows=2, ncols=2)
footfall_detector = FootfallDetector(train=False, load=True, name=["smal", "mocap"][self.is_mocap])
for n, paw in enumerate(self.foot_joints):
contact_ends_failed = False
disp = np.zeros((self.n_frames)) # will give eps - the displacement of the paw from equilibrium
# for when the paw is in contact with the ground
Z = paw_z_heights[:, n]
on_ground = footfall_detector.process_clip(Z)
on_ground_idxs = np.where(on_ground > 0)[0]
if plot:
axes[n // 2, n % 2].plot(Z.mean() * (on_ground), color="red", alpha=0.3)
min_footfall_width = 3 # 3 frames long minimum to count as a footfall
footfalls = consecutive(on_ground_idxs)
trigger_height = np.percentile(np.array([Z[ff].max() for ff in footfalls]), 25) # 25th-percentile trigger height across footfalls
for footfall in footfalls:
if len(footfall) > min_footfall_width:
# disp[footfall] = Z[footfall].max() - Z[footfall] # old
disp[footfall] = np.clip(trigger_height - Z[footfall], a_min=0, a_max=None)
self.paw_disps[paw] = disp
if plot:
ax = axes[n // 2, n % 2]
ax.plot(Z)
Z_on_ground = Z.copy()
Z_on_ground[disp == 0] = np.nan
ax.plot(Z_on_ground, color="green")
ax.plot(disp)
Z_smoothed = self.joint_pos[:, paw, 2]
ax.set_title(n)
if plot:
plt.show(block=False)
plt.draw()
plt.pause(1e-8)
def view_ground_displacements(self, deriv=0):
"""Plot and show a graph of vertical displacement against frames for each paw - identifying L0 for each paw"""
fig, axes = plt.subplots(nrows=4)
for n, j in enumerate(self.foot_joints):
label = foot_joint_labels[n]
ax = axes[n]
if deriv == 0:
X = self.joint_pos[:, j, 2]
X_unsmoothed = self.unsmoothed_data[:, j, 2]
ax.plot(X)
ax.plot(X_unsmoothed, alpha=.6)
# ax.axhline(self.paw_equilibrium_values[j], ls = "--")
ax.axhline(self.L0_paws[label.split(" ")[0]])
elif deriv == 1:
ax.plot(self.joint_vel[:, j, 2])
ax.set_title(label)
plt.show()
def view_com_displacements(self, deriv=0):
"""Plot and show graph of X, Y, and Z motion of CoM of dog.
If deriv > 0, plot that derivative of the displacement"""
fig, ax = plt.subplots()
com_data = self.get_com_position()
if deriv > 0:
com_data = nth_time_deriv(com_data, 1 / self.freq, n=deriv)
for i in [0, 1, 2]:
ax.plot(com_data[:, i], label="xyz"[i])
ax.legend()
plt.show()
def calc_leg_lengths(self):
"""Uses the compliant-legged walking model estimation to work out the average length of legs.
Assume legs are undeformed while off ground. Work out avg distance from leg to COM"""
self.leg_disps = {} # length of leg over time for each paw
self.leg_vecs = {} # normalised vector of leg spring direction for each paw
plot = True
if plot: fig, axes = plt.subplots(nrows=2, ncols=2, sharex="all", sharey="row")
for n, paw in enumerate(self.foot_joints):
is_front = n < 2 # Assumes order of f left, f right, r left, r right
tol = 1e-3
on_ground = self.paw_disps[paw] > tol
off_ground = self.paw_disps[paw] <= tol
# centre_of_rot = self.body.XG[:]#self.body.X[:, int(is_front)]
# centre_of_rot = self.unsmoothed_data[:, self.body_joints[is_front][n%2]]
if self.is_mocap:
centre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]
paw_pos = self.unsmoothed_data[:, paw]
else:
centre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]
paw_pos = self.unsmoothed_data[:, paw]
X, Z = np.swapaxes(centre_of_rot[:, [0, 2]], 0, 1) # get X, Z position of CoM
X_PAW, Z_PAW = np.swapaxes(paw_pos[:, [0, 2]], 0, 1) # get X, Z position of CoM
THETA = np.arctan((X_PAW - X) / (Z - Z_PAW)) # angle between spring and vertical
L = ((X - X_PAW) ** 2 + (Z - Z_PAW) ** 2) ** .5
L0 = (L).max()
z_disp = (L - L0) * np.cos(THETA)
x_disp = (L - L0) * np.sin(THETA)
# get z displacement by footfall
disp = np.zeros(self.n_frames)
# if self.is_mocap:
for ff in consecutive(np.where(on_ground)[0]):
if len(ff) < 3: continue # min width of footfall required
disp[ff] = z_disp[ff].max() - z_disp[ff]
# else:
# disp = -z_disp
self.leg_disps[paw] = disp
if plot:
ax = axes[n // 2, n % 2]
# ax.plot(L)
ax.plot(L - L0)
ax.plot(disp, color="green")
if plot:
plt.tight_layout()
# plt.show()
plt.show(block=False)
plt.draw()
plt.pause(1e-8)
def norm_kin_data(kin_data, targ_markers=None):
"""Normalise kinematic data.
If targ_markers given, normalise so these markers are at desired height"""
norm_height = 0.4 # 0.635 # fixed to Ally height for now
# scale so minimum is at (0,0,0)
for dim in [0, 1, 2]:
kin_data[:, :, dim] -= kin_data[:, :, dim].min()
if targ_markers is None:
kin_data = norm_height * kin_data / np.max(kin_data[:, :, 2])
elif targ_markers is not None:
height_target = kin_data[:, targ_markers, 2].mean()
kin_data = norm_height * kin_data / height_target
return kin_data
def get_dyn_data(dynamic_src, clip_length, mass, is_mocap=True, target_freq=100):
"""Loads and returns kinematic data"""
force_plate_data, force_plate_tdelay = load_force_plate_data(dynamic_src, is_mocap)
raw_dyn_data = force_plate_data
raw_dyn_data *= 1 / (mass * 9.81)
# resample if requested
if target_freq != freq_forceplate:
target_frames = int(len(raw_dyn_data) * target_freq / freq_forceplate)
dyn_data = signal.resample(raw_dyn_data, target_frames)
# this resampling causes a jumpiness for the periods of zero value. Fix that here:
tol = 1e-4
for paw in range(dyn_data.shape[1]):
# get indices where should be 0
antifootfalls = consecutive(np.where(raw_dyn_data[:, paw] < tol)[0])
min_width = 10 # in frames
for aff in antifootfalls:
if len(aff) < min_width: continue
start, end = aff[0] * target_freq / freq_forceplate, aff[-1] * target_freq / freq_forceplate
# ^ start and end indices, in remapped frame
dyn_data[int(start):int(end), paw] = 0 # set to 0
freq = target_freq
else:
freq = freq_forceplate
dyn_data = raw_dyn_data
frame_delay = int(freq * force_plate_tdelay)
n_frames_forceplate = int(clip_length * freq) # number of frames for forceplate to be same time length as mocap
if frame_delay == 0:
return dyn_data[:n_frames_forceplate]
if frame_delay > 0: # crop forceplate data
return dyn_data[frame_delay: frame_delay + n_frames_forceplate] # crop forceplate data to match mocap/SMAL data
else: # frame_delay < 0, pad forceplate data
return np.pad(dyn_data, ((int(-frame_delay), 0), (0, 0)))[:n_frames_forceplate]
kin_src_to_solver_name = lambda s: s.replace("/", " ").replace(" ", "_").replace(".c3d", "")
def load_solver(kin_src, clip_length, mocap=True, resample_freq=100):
if mocap:
joint_data = C3DData(ax=None, src=kin_src, interpolate=True, crop=clip_length,
fix_rotations="3 kph" in kin_src) # only fix rotations for 3 kph for now
else:
joint_data = SMALData(kin_src, freq=30, norm=True, crop=clip_length, smooth=True)
joint_data.resample_at(resample_freq) ### TRY RESAMPLING DATA TO 100 Hz
target_bones, body_joints, no_torque_joints, leg_spring_joints = joint_data.generate_skeleton_mapping()
# Normalise data based on z height so the dog is at the fixed standing height used in norm_kin_data (smoothing is done inside the solver)
kin_data = np.array(joint_data.all_data)
kin_data = norm_kin_data(kin_data, targ_markers=leg_spring_joints)
solver_kwargs = dict(target_bones=target_bones,
body_joints=body_joints, no_torque_joints=no_torque_joints,
foot_joints=no_torque_joints, leg_spring_joints=leg_spring_joints,
freq=joint_data.freq,
name=kin_src_to_solver_name(kin_src))
solver = InverseDynamicsSolver(joint_data=kin_data, **solver_kwargs, is_mocap=mocap)
print(f"Solver loaded. Mass = {solver.total_mass:.1f} kg.")
return solver
|
"""Implementation for Eldes Cloud"""
import asyncio
import async_timeout
import logging
import aiohttp
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED
)
from ..const import API_URL, API_PATHS
_LOGGER = logging.getLogger(__name__)
ALARM_STATES_MAP = {
"DISARMED": STATE_ALARM_DISARMED,
"ARMED": STATE_ALARM_ARMED_AWAY,
"ARMSTAY": STATE_ALARM_ARMED_HOME
}
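# Illustrative usage sketch (assumed flow, not part of the integration itself):
#   eldes = EldesCloud(session, "user@example.com", "secret")
#   await eldes.login()
#   devices = await eldes.get_devices()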
class EldesCloud:
"""Interacts with Eldes via public API."""
def __init__(self, session: aiohttp.ClientSession, username: str, password: str):
"""Performs login and save session cookie."""
self.timeout = 10
self.headers = {
'X-Requested-With': 'XMLHttpRequest',
'x-whitelable': 'eldes'
}
self.refresh_token = ''
self._http_session = session
self._username = username
self._password = password
async def _setOAuthHeader(self, data):
if 'refreshToken' in data:
self.refresh_token = data['refreshToken']
if 'token' in data:
self.headers['Authorization'] = f"Bearer {data["token"]}"
return data
async def _api_call(self, url, method, data=None):
try:
async with async_timeout.timeout(self.timeout):
req = await self._http_session.request(
method,
url,
json=data,
headers=self.headers
)
req.raise_for_status()
return req
except aiohttp.ClientError as err:
_LOGGER.error("Client error on API %s request %s", url, err)
raise
except asyncio.TimeoutError:
_LOGGER.error("Client timeout error on API request %s", url)
raise
async def login(self):
data = {
'email': self._username,
'password': self._password,
'hostDeviceId': ''
}
url = f"{API_URL}{API_PATHS["AUTH"]}login"
resp = await self._api_call(url, "POST", data)
result = await resp.json()
_LOGGER.debug(
"login result: %s",
result
)
return await self._setOAuthHeader(result)
async def renew_token(self):
"""Updates auth token."""
headers = self.headers
headers['Authorization'] = f"Bearer {self.refresh_token}"
url = f"{API_URL}{API_PATHS["AUTH"]}token"
response = await self._http_session.get(
url,
timeout=self.timeout,
headers=headers
)
result = await response.json()
_LOGGER.debug(
"renew_token result: %s",
result
)
return await self._setOAuthHeader(result)
async def get_devices(self):
"""Gets device list."""
url = f"{API_URL}{API_PATHS["DEVICE"]}list"
response = await self._api_call(url, "GET")
result = await response.json()
devices = result.get("deviceListEntries", [])
_LOGGER.debug(
"get_devices result: %s",
devices
)
return devices
async def get_device_info(self, imei):
"""Gets device information."""
url = f"{API_URL}{API_PATHS["DEVICE"]}info?imei={imei}"
response = await self._api_call(url, "GET")
result = await response.json()
_LOGGER.debug(
"get_device_info result: %s",
result
)
return result
async def get_device_partitions(self, imei):
"""Gets device partitions/zones."""
data = {
'imei': imei
}
url = f"{API_URL}{API_PATHS["DEVICE"]}partition/list?imei={imei}"
response = await self._api_call(url, "POST", data)
result = await response.json()
partitions = result.get("partitions", [])
# Replace Eldes state with HA state name
for partitionIndex, _ in enumerate(partitions):
partitions[partitionIndex]["state"] = ALARM_STATES_MAP[partitions[partitionIndex].get("state", STATE_ALARM_DISARMED)]
_LOGGER.debug(
"get_device_partitions result: %s",
partitions
)
return partitions
async def get_device_outputs(self, imei):
"""Gets device outputs/automations."""
data = {
'imei': imei
}
url = f"{API_URL}{API_PATHS["DEVICE"]}list-outputs/{imei}"
response = await self._api_call(url, "POST", data)
result = await response.json()
outputs = result.get("deviceOutputs", [])
_LOGGER.debug(
"get_device_outputs result: %s",
outputs
)
return outputs
async def set_alarm(self, mode, imei, zone_id):
"""Sets alarm to provided mode."""
data = {
'imei': imei,
'partitionIndex': zone_id
}
url = f"{API_URL}{API_PATHS["DEVICE"]}action/{mode}"
response = await self._api_call(url, "POST", data)
result = await response.text()
_LOGGER.debug(
"set_alarm result: %s",
result
)
return result
async def turn_on_output(self, imei, output_id):
"""Turns on output."""
data = {
"": ""
}
url = f"{API_URL}{API_PATHS["DEVICE"]}control/enable/{imei}/{output_id}"
response = await self._api_call(url, "PUT", data)
_LOGGER.debug(
"turn_on_output response: %s",
response
)
return response
async def turn_off_output(self, imei, output_id):
"""Turns off output."""
data = {
"": ""
}
url = f"{API_URL}{API_PATHS["DEVICE"]}control/disable/{imei}/{output_id}"
response = await self._api_call(url, "PUT", data)
_LOGGER.debug(
"turn_off_output response: %s",
response
)
return response
|
"""Implementation for Eldes Cloud"""
import asyncio
import async_timeout
import logging
import aiohttp
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED
)
from ..const import API_URL, API_PATHS
_LOGGER = logging.getLogger(__name__)
ALARM_STATES_MAP = {
"DISARMED": STATE_ALARM_DISARMED,
"ARMED": STATE_ALARM_ARMED_AWAY,
"ARMSTAY": STATE_ALARM_ARMED_HOME
}
class EldesCloud:
"""Interacts with Eldes via public API."""
def __init__(self, session: aiohttp.ClientSession, username: str, password: str):
"""Performs login and save session cookie."""
self.timeout = 10
self.headers = {
'X-Requested-With': 'XMLHttpRequest',
'x-whitelable': 'eldes'
}
self.refresh_token = ''
self._http_session = session
self._username = username
self._password = password
async def _setOAuthHeader(self, data):
if 'refreshToken' in data:
self.refresh_token = data['refreshToken']
if 'token' in data:
self.headers['Authorization'] = f"Bearer {data['token']}"
return data
async def _api_call(self, url, method, data=None):
try:
async with async_timeout.timeout(self.timeout):
req = await self._http_session.request(
method,
url,
json=data,
headers=self.headers
)
req.raise_for_status()
return req
except aiohttp.ClientError as err:
_LOGGER.error("Client error on API %s request %s", url, err)
raise
except asyncio.TimeoutError:
_LOGGER.error("Client timeout error on API request %s", url)
raise
async def login(self):
data = {
'email': self._username,
'password': self._password,
'hostDeviceId': ''
}
url = f"{API_URL}{API_PATHS['AUTH']}login"
resp = await self._api_call(url, "POST", data)
result = await resp.json()
_LOGGER.debug(
"login result: %s",
result
)
return await self._setOAuthHeader(result)
async def renew_token(self):
"""Updates auth token."""
headers = self.headers
headers['Authorization'] = f"Bearer {self.refresh_token}"
url = f"{API_URL}{API_PATHS['AUTH']}token"
response = await self._http_session.get(
url,
timeout=self.timeout,
headers=headers
)
result = await response.json()
_LOGGER.debug(
"renew_token result: %s",
result
)
return await self._setOAuthHeader(result)
async def get_devices(self):
"""Gets device list."""
url = f"{API_URL}{API_PATHS['DEVICE']}list"
response = await self._api_call(url, "GET")
result = await response.json()
devices = result.get("deviceListEntries", [])
_LOGGER.debug(
"get_devices result: %s",
devices
)
return devices
async def get_device_info(self, imei):
"""Gets device information."""
url = f"{API_URL}{API_PATHS['DEVICE']}info?imei={imei}"
response = await self._api_call(url, "GET")
result = await response.json()
_LOGGER.debug(
"get_device_info result: %s",
result
)
return result
async def get_device_partitions(self, imei):
"""Gets device partitions/zones."""
data = {
'imei': imei
}
url = f"{API_URL}{API_PATHS['DEVICE']}partition/list?imei={imei}"
response = await self._api_call(url, "POST", data)
result = await response.json()
partitions = result.get("partitions", [])
# Replace Eldes state with HA state name
for partitionIndex, _ in enumerate(partitions):
partitions[partitionIndex]["state"] = ALARM_STATES_MAP[partitions[partitionIndex].get("state", STATE_ALARM_DISARMED)]
_LOGGER.debug(
"get_device_partitions result: %s",
partitions
)
return partitions
async def get_device_outputs(self, imei):
"""Gets device outputs/automations."""
data = {
'imei': imei
}
url = f"{API_URL}{API_PATHS['DEVICE']}list-outputs/{imei}"
response = await self._api_call(url, "POST", data)
result = await response.json()
outputs = result.get("deviceOutputs", [])
_LOGGER.debug(
"get_device_outputs result: %s",
outputs
)
return outputs
async def set_alarm(self, mode, imei, zone_id):
"""Sets alarm to provided mode."""
data = {
'imei': imei,
'partitionIndex': zone_id
}
url = f"{API_URL}{API_PATHS['DEVICE']}action/{mode}"
response = await self._api_call(url, "POST", data)
result = await response.text()
_LOGGER.debug(
"set_alarm result: %s",
result
)
return result
async def turn_on_output(self, imei, output_id):
"""Turns on output."""
data = {
"": ""
}
url = f"{API_URL}{API_PATHS['DEVICE']}control/enable/{imei}/{output_id}"
response = await self._api_call(url, "PUT", data)
_LOGGER.debug(
"turn_on_output response: %s",
response
)
return response
async def turn_off_output(self, imei, output_id):
"""Turns off output."""
data = {
"": ""
}
url = f"{API_URL}{API_PATHS['DEVICE']}control/disable/{imei}/{output_id}"
response = await self._api_call(url, "PUT", data)
_LOGGER.debug(
"turn_off_output response: %s",
response
)
return response
|
import os
import json
import shutil
from lib.config import Config
from lib.variables import Variables, HACKERMODE_FOLDER_NAME
RED = '\033[1;31m'
GREEN = '\033[1;32m'
YELLOW = '\033[1;33m'
NORMAL = '\033[0m'
UNDERLINE = '\033[4m'
BOLD = '\033[1m'
with open(os.path.join(Variables.HACKERMODE_PATH, 'packages.json')) as fp:
INSTALL_DATA = json.load(fp)
class HackerModeInstaller:
def python_system_modules(self) -> list:
"""this
function return all modules that installed in system."""
return os.popen("pip3 freeze").read().split("\n")
def is_installed(self, module, python_modules):
for python_module in python_modules:
if module in python_module:
return [module, python_module]
return False
def installed_message(self, package, show=True):
if show:
default_message = f'{package.split('=')[0]} installed successfully.'
print(f'{NORMAL}[{GREEN}✔{NORMAL}] {GREEN}{default_message}{NORMAL}')
def failed_message(self, package, show=True, is_base=False):
if show:
default_message = f'not able to install "{package}".'
color = RED if is_base else YELLOW
print(f'{NORMAL}[{color}{'✗' if is_base else '!'}{NORMAL}] {color}{default_message}{NORMAL}')
def check(self, show_output=True) -> dict:
"""this
function check packages and modules
and return all packages that not installed.
"""
modules: list = []
packages: list = []
python_modules = self.python_system_modules()
if show_output:
print("\nCHECKING:")
print("python modules:")
for module in INSTALL_DATA["PYTHON3_MODULES"]:
if self.is_installed(module, python_modules) or os.path.exists(
os.popen(f"realpath $(command -v {module}) 2> /dev/null").read().strip()):
self.installed_message(module, show=show_output)
else:
modules.append(module)
self.failed_message(module, show=show_output)
if show_output:
print("packages:")
for package in INSTALL_DATA["PACKAGES"].keys():
if not INSTALL_DATA["PACKAGES"][package][Variables.PLATFORME]:
continue
if os.path.exists(os.popen(f"realpath $(command -v {package.strip()})").read().strip()):
self.installed_message(package, show=show_output)
else:
packages.append(package)
self.failed_message(package, show=show_output)
return {"packages": packages, "modules": modules}
def install(self):
# check platform
if not Variables.PLATFORME in ('termux', 'linux'):
if Variables.PLATFORME == 'unknown':
print("# The tool could not recognize the system!")
print("# Do You want to continue anyway?")
while True:
if input('# [Y/N]: ').lower() == 'y':
break
else:
print('# good bye :D')
return
else:
print(f"# The tool does not support {Variables.PLATFORME}")
print('# good bye :D')
return
# install packages
need_to_install = self.check(show_output=False)
for package in need_to_install["packages"]:
for command in INSTALL_DATA["PACKAGES"][package][Variables.PLATFORME]:
os.system(command)
# install modules
for module in need_to_install["modules"]:
os.system(f"pip3 install {module}")
# move HackerMode to install path
if Config.get('actions', 'DEBUG', False):
print("# can't move the HackerMode folder ")
print("# to install path in debug mode!")
return None
if os.path.isdir(HACKERMODE_FOLDER_NAME):
try:
shutil.move(HACKERMODE_FOLDER_NAME, Variables.HACKERMODE_INSTALL_PATH)
self.install_tools_packages()
Config.set('actions', 'IS_INSTALLED', True)
self.check()
print(f'# {GREEN}HackerMode installed successfully...{NORMAL}')
except shutil.Error as e:
self.delete(show_message=False)
print(e)
print('# installed failed!')
else:
self.delete(show_message=False)
print(f'{RED}# Error: the tool path was not found!{NORMAL}')
print(f'# try to run the tool using\n# {GREEN}"python3 HackerMode install"{NORMAL}')
print('# installation failed!')
def update(self):
if not Config.get('actions', 'DEBUG', cast=bool, default=False):
hackermode_command_line_path = os.environ.get("_").split("bin/")[0] + "bin/HackerMode"
if os.path.exists(hackermode_command_line_path):
os.remove(hackermode_command_line_path)
os.system(
f'curl https://raw.githubusercontent.com/Arab-developers/HackerMode/future/install.sh > HackerModeInstall && bash HackerModeInstall')
print(f'# {GREEN}HackerMode updated successfully...{NORMAL}')
else:
print("# can't update in the DEUBG mode!")
def add_shortcut(self):
# add HackerMode shortcut...
try:
with open(Variables.BASHRIC_FILE_PATH, "r") as f:
data = f.read()
if data.find(Variables.HACKERMODE_SHORTCUT.strip()) == -1:
with open(Variables.BASHRIC_FILE_PATH, "w") as f:
f.write(data + Variables.HACKERMODE_SHORTCUT)
except PermissionError:
print(NORMAL + "# add HackerMode shortcut:")
print(f"# '{YELLOW}{Variables.HACKERMODE_SHORTCUT}{NORMAL}'")
print("# to this path:")
print("# " + Variables.HACKERMODE_BIN_PATH)
def delete(self, show_message=True):
if show_message:
status = input("# Do you really want to delete the tool?\n [n/y]: ").lower()
else:
status = "y"
if status in ("y", "yes", "ok", "yep"):
bin_path = os.path.join(os.environ["SHELL"].split("/bin/")[0], "/bin/HackerMode")
tool_path = os.path.join(os.environ["HOME"], ".HackerMode")
if os.path.exists(bin_path):
os.remove(bin_path)
if os.path.exists(tool_path):
shutil.rmtree(tool_path)
try:
with open(Variables.BASHRIC_FILE_PATH, "r") as f:
data = f.read()
if data.find(Variables.HACKERMODE_SHORTCUT.strip()) != -1:
with open(Variables.BASHRIC_FILE_PATH, "w") as f:
f.write(data.replace(Variables.HACKERMODE_SHORTCUT, ""))
except PermissionError:
if show_message:
print("# cannot remove HackerMode shortcut!")
if show_message:
print("# The deletion was successful...")
def install_tools_packages(self):
# compile shell file
old_path = os.getcwd()
os.chdir(os.path.join(os.environ.get("HOME"), ".HackerMode/HackerMode/lib"))
os.system("bash setup.sh")
os.chdir(old_path)
# install tools packages
tools_path = os.path.join(os.environ.get("HOME"), ".HackerMode/HackerMode/tools")
for root, dirs, files in os.walk(tools_path):
for dir in dirs:
if os.path.exists(os.path.join(root, dir, "setup.sh")):
print(f"installing {dir} packages:")
old_path = os.getcwd()
os.chdir(os.path.join(root, dir))
os.system("bash setup.sh")
os.chdir(old_path)
if __name__ == "__main__":
x = HackerModeInstaller()
x.check()
x.install()
|
import os
import json
import shutil
from lib.config import Config
from lib.variables import Variables, HACKERMODE_FOLDER_NAME
RED = '\033[1;31m'
GREEN = '\033[1;32m'
YELLOW = '\033[1;33m'
NORMAL = '\033[0m'
UNDERLINE = '\033[4m'
BOLD = '\033[1m'
with open(os.path.join(Variables.HACKERMODE_PATH, 'packages.json')) as fp:
INSTALL_DATA = json.load(fp)
class HackerModeInstaller:
def python_system_modules(self) -> list:
"""this
function return all modules that installed in system."""
return os.popen("pip3 freeze").read().split("\n")
def is_installed(self, module, python_modules):
for python_module in python_modules:
if module in python_module:
return [module, python_module]
return False
def installed_message(self, package, show=True):
if show:
default_message = f'{package.split("=")[0]} installed successfully.'
print(f'{NORMAL}[{GREEN}✔{NORMAL}] {GREEN}{default_message}{NORMAL}')
def failed_message(self, package, show=True, is_base=False):
if show:
default_message = f'not able to install "{package}".'
color = RED if is_base else YELLOW
print(f'{NORMAL}[{color}{"✗" if is_base else "!"}{NORMAL}] {color}{default_message}{NORMAL}')
def check(self, show_output=True) -> dict:
"""this
function check packages and modules
and return all packages that not installed.
"""
modules: list = []
packages: list = []
python_modules = self.python_system_modules()
if show_output:
print("\nCHECKING:")
print("python modules:")
for module in INSTALL_DATA["PYTHON3_MODULES"]:
if self.is_installed(module, python_modules) or os.path.exists(
os.popen(f"realpath $(command -v {module}) 2> /dev/null").read().strip()):
self.installed_message(module, show=show_output)
else:
modules.append(module)
self.failed_message(module, show=show_output)
if show_output:
print("packages:")
for package in INSTALL_DATA["PACKAGES"].keys():
if not INSTALL_DATA["PACKAGES"][package][Variables.PLATFORME]:
continue
if os.path.exists(os.popen(f"realpath $(command -v {package.strip()})").read().strip()):
self.installed_message(package, show=show_output)
else:
packages.append(package)
self.failed_message(package, show=show_output)
return {"packages": packages, "modules": modules}
def install(self):
# check platform
if Variables.PLATFORME not in ('termux', 'linux'):
if Variables.PLATFORME == 'unknown':
print("# The tool could not recognize the system!")
print("# Do You want to continue anyway?")
while True:
if input('# [Y/N]: ').lower() == 'y':
break
else:
print('# good bye :D')
return
else:
print(f"# The tool does not support {Variables.PLATFORME}")
print('# good bye :D')
return
# install packages
need_to_install = self.check(show_output=False)
for package in need_to_install["packages"]:
for command in INSTALL_DATA["PACKAGES"][package][Variables.PLATFORME]:
os.system(command)
# install modules
for module in need_to_install["modules"]:
os.system(f"pip3 install {module}")
# move HackerMode to install path
if Config.get('actions', 'DEBUG', False):
print("# can't move the HackerMode folder ")
print("# to install path in debug mode!")
return None
if os.path.isdir(HACKERMODE_FOLDER_NAME):
try:
shutil.move(HACKERMODE_FOLDER_NAME, Variables.HACKERMODE_INSTALL_PATH)
self.install_tools_packages()
Config.set('actions', 'IS_INSTALLED', True)
self.check()
print(f'# {GREEN}HackerMode installed successfully...{NORMAL}')
except shutil.Error as e:
self.delete(show_message=False)
print(e)
print('# installation failed!')
else:
self.delete(show_message=False)
print(f'{RED}# Error: the tool path was not found!{NORMAL}')
print(f'# try to run the tool using\n# {GREEN}"python3 HackerMode install"{NORMAL}')
print('# installation failed!')
def update(self):
if not Config.get('actions', 'DEBUG', cast=bool, default=False):
hackermode_command_line_path = os.environ.get("_").split("bin/")[0] + "bin/HackerMode"
if os.path.exists(hackermode_command_line_path):
os.remove(hackermode_command_line_path)
os.system(
f'curl https://raw.githubusercontent.com/Arab-developers/HackerMode/future/install.sh > HackerModeInstall && bash HackerModeInstall')
print(f'# {GREEN}HackerMode updated successfully...{NORMAL}')
else:
print("# can't update in the DEUBG mode!")
def add_shortcut(self):
# add HackerMode shortcut...
try:
with open(Variables.BASHRIC_FILE_PATH, "r") as f:
data = f.read()
if data.find(Variables.HACKERMODE_SHORTCUT.strip()) == -1:
with open(Variables.BASHRIC_FILE_PATH, "w") as f:
f.write(data + Variables.HACKERMODE_SHORTCUT)
except PermissionError:
print(NORMAL + "# add HackerMode shortcut:")
print(f"# '{YELLOW}{Variables.HACKERMODE_SHORTCUT}{NORMAL}'")
print("# to this path:")
print("# " + Variables.HACKERMODE_BIN_PATH)
def delete(self, show_message=True):
if show_message:
status = input("# Do you really want to delete the tool?\n [n/y]: ").lower()
else:
status = "y"
if status in ("y", "yes", "ok", "yep"):
bin_path = os.path.join(os.environ["SHELL"].split("/bin/")[0], "/bin/HackerMode")
tool_path = os.path.join(os.environ["HOME"], ".HackerMode")
if os.path.exists(bin_path):
os.remove(bin_path)
if os.path.exists(tool_path):
shutil.rmtree(tool_path)
try:
with open(Variables.BASHRIC_FILE_PATH, "r") as f:
data = f.read()
if data.find(Variables.HACKERMODE_SHORTCUT.strip()) != -1:
with open(Variables.BASHRIC_FILE_PATH, "w") as f:
f.write(data.replace(Variables.HACKERMODE_SHORTCUT, ""))
except PermissionError:
if show_message:
print("# cannot remove HackerMode shortcut!")
if show_message:
print("# The deletion was successful...")
def install_tools_packages(self):
# compile shell file
old_path = os.getcwd()
os.chdir(os.path.join(os.environ.get("HOME"), ".HackerMode/HackerMode/lib"))
os.system("bash setup.sh")
os.chdir(old_path)
# install tools packages
tools_path = os.path.join(os.environ.get("HOME"), ".HackerMode/HackerMode/tools")
for root, dirs, files in os.walk(tools_path):
for dir in dirs:
if os.path.exists(os.path.join(root, dir, "setup.sh")):
print(f"installing {dir} packages:")
old_path = os.getcwd()
os.chdir(os.path.join(root, dir))
os.system("bash setup.sh")
os.chdir(old_path)
if __name__ == "__main__":
x = HackerModeInstaller()
x.check()
x.install()
|
"""Interact with Taskwarrior."""
import datetime
import os
import re
import threading
import traceback
from pathlib import Path
from shutil import which
from subprocess import PIPE, Popen
from typing import List, Optional, Tuple, Union
import albert as v0 # type: ignore
import dateutil
import gi
import taskw
from fuzzywuzzy import process
from overrides import overrides
from taskw_gcal_sync import TaskWarriorSide
gi.require_version("Notify", "0.7") # isort:skip
gi.require_version("GdkPixbuf", "2.0") # isort:skip
from gi.repository import GdkPixbuf, Notify # isort:skip # type: ignore
# metadata ------------------------------------------------------------------------------------
__title__ = "Taskwarrior interaction"
__version__ = "0.4.0"
__triggers__ = "t "
__authors__ = "Nikos Koukis"
__homepage__ = "https://github.com/bergercookie/awesome-albert-plugins"
__simplename__ = "taskwarrior"
# initial checks ------------------------------------------------------------------------------
# icon ----------------------------------------------------------------------------------------
icon_path = os.path.join(os.path.dirname(__file__), "taskwarrior.svg")
icon_path_b = os.path.join(os.path.dirname(__file__), "taskwarrior_blue.svg")
icon_path_r = os.path.join(os.path.dirname(__file__), "taskwarrior_red.svg")
icon_path_y = os.path.join(os.path.dirname(__file__), "taskwarrior_yellow.svg")
icon_path_c = os.path.join(os.path.dirname(__file__), "taskwarrior_cyan.svg")
icon_path_g = os.path.join(os.path.dirname(__file__), "taskwarrior_green.svg")
# initial configuration -----------------------------------------------------------------------
# should the plugin show relevant some info without the trigger?
show_items_wo_trigger = True
failure_tag = "fail"
cache_path = Path(v0.cacheLocation()) / __simplename__
config_path = Path(v0.configLocation()) / __simplename__
data_path = Path(v0.dataLocation()) / __simplename__
reminders_tag_path = config_path / "reminders_tag"
reminders_tag = "remindme"
class FileBackedVar:
def __init__(self, varname, convert_fn=str, init_val=None):
self._fpath = config_path / varname
self._convert_fn = convert_fn
if init_val:
with open(self._fpath, "w") as f:
f.write(str(init_val))
else:
self._fpath.touch()
def get(self):
with open(self._fpath, "r") as f:
return self._convert_fn(f.read().strip())
def set(self, val):
with open(self._fpath, "w") as f:
return f.write(str(val))
class TaskWarriorSideWLock:
"""Multithreading-safe version of TaskWarriorSide."""
def __init__(self):
self.tw = TaskWarriorSide(enable_caching=True)
self.tw_lock = threading.Lock()
def start(self, *args, **kargs):
with self.tw_lock:
return self.tw.start(*args, **kargs)
def get_all_items(self, *args, **kargs):
with self.tw_lock:
return self.tw.get_all_items(*args, **kargs)
def get_task_id(self, *args, **kargs):
with self.tw_lock:
return self.tw.get_task_id(*args, **kargs)
@property
def reload_items(self):
return self.tw.reload_items
@reload_items.setter
def reload_items(self, val: bool):
self.tw.reload_items = val
def update_item(self, *args, **kargs):
self.tw.update_item(*args, **kargs)
tw_side = TaskWarriorSideWLock()
last_used_date = FileBackedVar(
"last_date_used",
convert_fn=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d").date(),
init_val=datetime.datetime.today().date(),
)
dev_mode = True
# regular expression to match URLs
# https://gist.github.com/gruber/8891611
url_re = re.compile(
r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""
)
# plugin main functions -----------------------------------------------------------------------
def do_notify(msg: str, image=None):
app_name = "Taskwarrior"
Notify.init(app_name)
image = image
n = Notify.Notification.new(app_name, msg, image)
n.show()
def date_only_tzlocal(datetime: datetime.datetime):
return datetime.astimezone(dateutil.tz.tzlocal()).date() # type: ignore
def get_tasks_of_date(date: datetime.date):
tasks = tw_side.get_all_items(skip_completed=True)
# The comparison has to be done in the local timezone. TaskWarrior stores tasks in UTC,
# so the effective date*time* may not match the given date parameter because of the
# time difference.
tasks = [t for t in tasks if "due" in t.keys() and date_only_tzlocal(t["due"]) == date]
return tasks
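# Illustrative example (the offset below is assumed, not taken from the code above):
# with a local timezone of UTC+3, a task stored as due 2021-05-01T22:30:00 UTC falls on
# 2021-05-02 01:30 local time, so comparing the raw UTC date against `date` would match
# the wrong day; date_only_tzlocal() converts to the local zone before taking the date.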
def initialize():
# Called when the extension is loaded (ticked in the settings) - blocking
# create cache location
config_path.mkdir(parents=False, exist_ok=True)
def finalize():
pass
def handleQuery(query):
results = []
# we're into the new day, create and assign a fresh instance
last_used = last_used_date.get()
current_date = datetime.datetime.today().date()
global tw_side, subcommands
if last_used < current_date:
tw_side = TaskWarriorSideWLock()
subcommands = create_subcommands()
last_used_date.set(current_date)
elif last_used > current_date:
# maybe due to NTP?
v0.critical(
f"Current date {current_date} < last_used date {last_used} ?! Overriding current date, please report this if it persists"
)
tw_side = TaskWarriorSideWLock()
subcommands = create_subcommands()
last_used_date.set(current_date)
if not query.isTriggered:
if show_items_wo_trigger and len(query.string) < 2:
results = [
ActiveTasks().get_as_albert_item(),
TodayTasks().get_as_albert_item(),
*results,
]
else:
# join any previously launched worker threads
while workers:
workers.pop().join(2)
try:
query.disableSort()
results_setup = setup(query)
if results_setup:
return results_setup
tasks = tw_side.get_all_items(skip_completed=True)
query_str = query.string
if len(query_str) < 2:
results.extend([s.get_as_albert_item() for s in subcommands])
results.append(
get_as_item(
text="Reload list of tasks",
actions=[v0.FuncAction("Reload", async_reload_items)],
)
)
tasks.sort(key=lambda t: t["urgency"], reverse=True)
results.extend([get_tw_item(task) for task in tasks])
else:
subcommand_query = get_subcommand_query(query_str)
if subcommand_query:
results.extend(
subcommand_query.command.get_as_albert_items_full(
subcommand_query.query
)
)
if not results:
results.append(get_as_item(text="No results"))
else:
# find relevant results
desc_to_task = {task["description"]: task for task in tasks}
matched = process.extract(query_str, list(desc_to_task.keys()), limit=30)
for m in [elem[0] for elem in matched]:
task = desc_to_task[m]
results.append(get_tw_item(task))
except Exception: # user to report error
if dev_mode:
v0.critical(traceback.format_exc())
raise
results.insert(
0,
v0.Item(
id=__title__,
icon=icon_path,
text="Something went wrong! Press [ENTER] to copy error and report it",
actions=[
v0.ClipAction(
f"Copy error - report it to {__homepage__[8:]}",
f"{traceback.format_exc()}",
)
],
),
)
return results
def get_as_item(**kargs) -> v0.Item:
if "icon" in kargs:
icon = kargs.pop("icon")
else:
icon = icon_path
return v0.Item(id=__title__, icon=icon, **kargs)
# supplementary functions ---------------------------------------------------------------------
workers: List[threading.Thread] = []
def async_reload_items():
def do_reload():
v0.info("TaskWarrior: Updating list of tasks...")
tw_side.reload_items = True
tw_side.get_all_items(skip_completed=True)
t = threading.Thread(target=do_reload)
t.start()
workers.append(t)
def setup(query): # type: ignore
results = []
if not which("task"):
results.append(
v0.Item(
id=__title__,
icon=icon_path,
text=f'"taskwarrior" is not installed.',
subtext='Please install and configure "taskwarrior" accordingly.',
actions=[
v0.UrlAction(
'Open "taskwarrior" website', "https://taskwarrior.org/download/"
)
],
)
)
return results
return results
def save_data(data: str, data_name: str):
"""Save a piece of data in the configuration directory."""
with open(config_path / data_name, "w") as f:
f.write(data)
def load_data(data_name) -> str:
"""Load a piece of data from the configuration directory."""
with open(config_path / data_name, "r") as f:
data = f.readline().strip().split()[0]
return data
def get_as_subtext_field(field, field_title=None):
s = ""
if field:
s = f"{field} | "
else:
return ""
if field_title:
s = f"{field_title}:" + s
return s
def urgency_to_visuals(prio: Union[float, None]) -> Tuple[Union[str, None], Path]:
if prio is None:
return None, Path(icon_path)
elif prio < 4:
return "↓", Path(icon_path_b)
elif prio < 8:
return "↘", Path(icon_path_c)
elif prio < 11:
return "-", Path(icon_path_g)
elif prio < 15:
return "↗", Path(icon_path_y)
else:
return "↑", Path(icon_path_r)
def fail_task(task_id: list):
run_tw_action(args_list=[task_id, "modify", "+fail"])
run_tw_action(args_list=[task_id, "done"])
def run_tw_action(args_list: list, need_pty=False):
args_list = ["task", "rc.recurrence.confirmation=no", "rc.confirmation=off", *args_list]
if need_pty:
args_list.insert(0, "x-terminal-emulator")
args_list.insert(1, "-e")
proc = Popen(args_list, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
image = icon_path_r
msg = f'stdout: {stdout.decode("utf-8")} | stderr: {stderr.decode("utf-8")}'
else:
image = icon_path
msg = stdout.decode("utf-8")
do_notify(msg=msg, image=image)
async_reload_items()
def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore
"""Get a single TW task as an Albert Item."""
field = get_as_subtext_field
task_id = tw_side.get_task_id(task)
actions = [
v0.FuncAction(
"Complete task",
lambda args_list=["done", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Delete task",
lambda args_list=["delete", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Start task",
lambda args_list=["start", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Stop task",
lambda args_list=["stop", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Edit task interactively",
lambda args_list=["edit", task_id]: run_tw_action(args_list, need_pty=True),
),
v0.FuncAction(
"Fail task",
lambda task_id=task_id: fail_task(task_id=task_id),
),
v0.ClipAction("Copy task UUID", f"{task_id}"),
]
found_urls = url_re.findall(task["description"])
if "annotations" in task.keys():
found_urls.extend(url_re.findall(" ".join(task["annotations"])))
for url in found_urls[-1::-1]:
actions.insert(0, v0.UrlAction(f"Open {url}", url))
if reminders_tag_path.is_file():
global reminders_tag
reminders_tag = load_data(reminders_tag_path)
else:
save_data("remindme", str(reminders_tag_path))
actions.append(
v0.FuncAction(
f"Add to Reminders (+{reminders_tag})",
lambda args_list=[
"modify",
task_id,
f"+{reminders_tag}",
]: run_tw_action(args_list),
)
)
urgency_str, icon = urgency_to_visuals(task.get("urgency"))
text = f'{task["description"]}'
if "start" in task:
text = f'<p style="color:orange;">{text}</p>'
due = None
if "due" in task:
due = task["due"].astimezone(dateutil.tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S") # type: ignore
return get_as_item(
text=text,
subtext="{}{}{}{}{}".format(
field(urgency_str),
"ID: {}... | ".format(tw_side.get_task_id(task)[:8]),
field(task["status"]),
field(task.get("tags"), "tags"),
field(due, "due"),
)[:-2],
icon=str(icon),
completion=f'{__triggers__}{task["description"]}',
actions=actions,
)
# subcommands ---------------------------------------------------------------------------------
class Subcommand:
def __init__(self, *, name, desc):
self.name = name
self.desc = desc
self.subcommand_prefix = f"{__triggers__}{self.name}"
def get_as_albert_item(self):
return get_as_item(text=self.desc, completion=f"{self.subcommand_prefix} ")
def get_as_albert_items_full(self, query_str):
return [self.get_as_albert_item()]
def __str__(self) -> str:
return f"Name: {self.name} | Description: {self.desc}"
class AddSubcommand(Subcommand):
def __init__(self):
super(AddSubcommand, self).__init__(name="add", desc="Add a new task")
@overrides
def get_as_albert_items_full(self, query_str):
items = []
add_item = self.get_as_albert_item()
add_item.subtext = query_str
add_item.completion = f"{self.subcommand_prefix} {query_str}"
add_item.addAction(
v0.FuncAction(
"Add task",
lambda args_list=["add", *query_str.split()]: run_tw_action(args_list),
)
)
items.append(add_item)
to_reminders = self.get_as_albert_item()
to_reminders = v0.Item(
id=__title__,
text=f"Add +{reminders_tag} tag",
subtext="Add +remindme on [TAB]",
icon=icon_path_y,
completion=f"{self.subcommand_prefix} {query_str} +remindme",
)
items.append(to_reminders)
def item_at_date(date: datetime.date, time_24h: int):
dt_str = f'{date.strftime("%Y%m%d")}T{time_24h}0000'
return v0.Item(
id=__title__,
text=f"Due {date}, at {time_24h}:00",
subtext="Add due:dt_str on [TAB]",
icon=icon_path_c,
completion=f"{self.subcommand_prefix} {query_str} due:{dt_str}",
)
items.append(item_at_date(datetime.date.today(), time_24h=15))
items.append(item_at_date(datetime.date.today(), time_24h=19))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=10))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=15))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=19))
return items
class LogSubcommand(Subcommand):
def __init__(self):
super(LogSubcommand, self).__init__(name="log", desc="Log an already done task")
@overrides
def get_as_albert_items_full(self, query_str):
item = self.get_as_albert_item()
item.subtext = query_str
item.addAction(
v0.FuncAction(
"Log task",
lambda args_list=["log", *query_str.split()]: run_tw_action(args_list),
)
)
return [item]
class ActiveTasks(Subcommand):
def __init__(self):
super(ActiveTasks, self).__init__(name="active", desc="Active tasks")
@overrides
def get_as_albert_items_full(self, query_str):
return [
get_tw_item(t) for t in tw_side.get_all_items(skip_completed=True) if "start" in t
]
def move_tasks_of_date_to_next_day(date: datetime.date):
for t in get_tasks_of_date(date):
tw_side.update_item(item_id=str(t["uuid"]), due=t["due"] + datetime.timedelta(days=1))
class DateTasks(Subcommand):
"""
Common parent to classes like TodayTasks, and YesterdayTasks so as to not repeat ourselves.
"""
def __init__(self, date: datetime.date, *args, **kargs):
super(DateTasks, self).__init__(*args, **kargs)
self.date = date
@overrides
def get_as_albert_item(self):
item = super().get_as_albert_item()
item.addAction(
v0.FuncAction(
"Move tasks to the day after",
lambda date=self.date: move_tasks_of_date_to_next_day(date),
)
)
return item
@overrides
def get_as_albert_items_full(self, query_str):
return [get_tw_item(t) for t in get_tasks_of_date(self.date)]
class TodayTasks(DateTasks):
def __init__(self):
super(TodayTasks, self).__init__(
date=datetime.date.today(), name="today", desc="Today's tasks"
)
class YesterdayTasks(DateTasks):
def __init__(self):
super(YesterdayTasks, self).__init__(
date=datetime.date.today() - datetime.timedelta(days=1),
name="yesterday",
desc="Yesterday's tasks",
)
class TomorrowTasks(DateTasks):
def __init__(self):
super(TomorrowTasks, self).__init__(
date=datetime.date.today() + datetime.timedelta(days=1),
name="tomorrow",
desc="Tomorrow's tasks",
)
class SubcommandQuery:
def __init__(self, subcommand: Subcommand, query: str):
"""
Query for a specific subcommand.
:query: Query text - doesn't include the subcommand itself
"""
self.command = subcommand
self.query = query
def __str__(self) -> str:
return f"Command: {self.command}\nQuery Text: {self.query}"
def create_subcommands():
return [
AddSubcommand(),
LogSubcommand(),
ActiveTasks(),
TodayTasks(),
YesterdayTasks(),
TomorrowTasks(),
]
subcommands = create_subcommands()
def get_subcommand_for_name(name: str) -> Optional[Subcommand]:
"""Get a subcommand with the indicated name."""
matching = [s for s in subcommands if s.name.lower() == name.lower()]
if matching:
return matching[0]
def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:
"""
Determine whether the current query is for a subcommand.
If so, return the corresponding SubcommandQuery object.
"""
if not query_str:
return None
# split:
# "subcommand_name rest of query" -> ["subcommand_name", "rest of query"]
query_parts = query_str.strip().split(None, maxsplit=1)
if len(query_parts) < 2:
query_str = ""
else:
query_str = query_parts[1]
subcommand = get_subcommand_for_name(query_parts[0])
if subcommand:
return SubcommandQuery(subcommand=subcommand, query=query_str)
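# Hedged usage sketch (not part of the plugin; the queries below are made up):
# with __triggers__ == "t ", typing "t add buy milk" hands "add buy milk" to
# get_subcommand_query(), which resolves the "add" Subcommand and the remaining text:
#
#   sq = get_subcommand_query("add buy milk")
#   # sq.command.name == "add" and sq.query == "buy milk"
#
# Typing just "t today" resolves to the TodayTasks subcommand with an empty query.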
|
"""Interact with Taskwarrior."""
import datetime
import os
import re
import threading
import traceback
from pathlib import Path
from shutil import which
from subprocess import PIPE, Popen
from typing import List, Optional, Tuple, Union
import albert as v0 # type: ignore
import dateutil
import gi
import taskw
from fuzzywuzzy import process
from overrides import overrides
from taskw_gcal_sync import TaskWarriorSide
gi.require_version("Notify", "0.7") # isort:skip
gi.require_version("GdkPixbuf", "2.0") # isort:skip
from gi.repository import GdkPixbuf, Notify # isort:skip # type: ignore
# metadata ------------------------------------------------------------------------------------
__title__ = "Taskwarrior interaction"
__version__ = "0.4.0"
__triggers__ = "t "
__authors__ = "Nikos Koukis"
__homepage__ = "https://github.com/bergercookie/awesome-albert-plugins"
__simplename__ = "taskwarrior"
# initial checks ------------------------------------------------------------------------------
# icon ----------------------------------------------------------------------------------------
icon_path = os.path.join(os.path.dirname(__file__), "taskwarrior.svg")
icon_path_b = os.path.join(os.path.dirname(__file__), "taskwarrior_blue.svg")
icon_path_r = os.path.join(os.path.dirname(__file__), "taskwarrior_red.svg")
icon_path_y = os.path.join(os.path.dirname(__file__), "taskwarrior_yellow.svg")
icon_path_c = os.path.join(os.path.dirname(__file__), "taskwarrior_cyan.svg")
icon_path_g = os.path.join(os.path.dirname(__file__), "taskwarrior_green.svg")
# initial configuration -----------------------------------------------------------------------
# should the plugin show relevant some info without the trigger?
show_items_wo_trigger = True
failure_tag = "fail"
cache_path = Path(v0.cacheLocation()) / __simplename__
config_path = Path(v0.configLocation()) / __simplename__
data_path = Path(v0.dataLocation()) / __simplename__
reminders_tag_path = config_path / "reminders_tag"
reminders_tag = "remindme"
class FileBackedVar:
def __init__(self, varname, convert_fn=str, init_val=None):
self._fpath = config_path / varname
self._convert_fn = convert_fn
if init_val:
with open(self._fpath, "w") as f:
f.write(str(init_val))
else:
self._fpath.touch()
def get(self):
with open(self._fpath, "r") as f:
return self._convert_fn(f.read().strip())
def set(self, val):
with open(self._fpath, "w") as f:
return f.write(str(val))
class TaskWarriorSideWLock:
"""Multithreading-safe version of TaskWarriorSide."""
def __init__(self):
self.tw = TaskWarriorSide(enable_caching=True)
self.tw_lock = threading.Lock()
def start(self, *args, **kargs):
with self.tw_lock:
return self.tw.start(*args, **kargs)
def get_all_items(self, *args, **kargs):
with self.tw_lock:
return self.tw.get_all_items(*args, **kargs)
def get_task_id(self, *args, **kargs):
with self.tw_lock:
return self.tw.get_task_id(*args, **kargs)
@property
def reload_items(self):
return self.tw.reload_items
@reload_items.setter
def reload_items(self, val: bool):
self.tw.reload_items = val
def update_item(self, *args, **kargs):
self.tw.update_item(*args, **kargs)
tw_side = TaskWarriorSideWLock()
last_used_date = FileBackedVar(
"last_date_used",
convert_fn=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d").date(),
init_val=datetime.datetime.today().date(),
)
dev_mode = True
# regular expression to match URLs
# https://gist.github.com/gruber/8891611
url_re = re.compile(
r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""
)
# plugin main functions -----------------------------------------------------------------------
def do_notify(msg: str, image=None):
app_name = "Taskwarrior"
Notify.init(app_name)
image = image
n = Notify.Notification.new(app_name, msg, image)
n.show()
def date_only_tzlocal(datetime: datetime.datetime):
return datetime.astimezone(dateutil.tz.tzlocal()).date() # type: ignore
def get_tasks_of_date(date: datetime.date):
tasks = tw_side.get_all_items(skip_completed=True)
# The comparison has to be done in the local timezone. TaskWarrior stores tasks in UTC,
# so the effective date*time* may not match the given date parameter because of the
# time difference.
tasks = [t for t in tasks if "due" in t.keys() and date_only_tzlocal(t["due"]) == date]
return tasks
def initialize():
# Called when the extension is loaded (ticked in the settings) - blocking
# create cache location
config_path.mkdir(parents=False, exist_ok=True)
def finalize():
pass
def handleQuery(query):
results = []
# we're into the new day, create and assign a fresh instance
last_used = last_used_date.get()
current_date = datetime.datetime.today().date()
global tw_side, subcommands
if last_used < current_date:
tw_side = TaskWarriorSideWLock()
subcommands = create_subcommands()
last_used_date.set(current_date)
elif last_used > current_date:
# maybe due to NTP?
v0.critical(
f"Current date {current_date} < last_used date {last_used} ?! Overriding current date, please report this if it persists"
)
tw_side = TaskWarriorSideWLock()
subcommands = create_subcommands()
last_used_date.set(current_date)
if not query.isTriggered:
if show_items_wo_trigger and len(query.string) < 2:
results = [
ActiveTasks().get_as_albert_item(),
TodayTasks().get_as_albert_item(),
*results,
]
else:
# join any previously launched worker threads
while workers:
workers.pop().join(2)
try:
query.disableSort()
results_setup = setup(query)
if results_setup:
return results_setup
tasks = tw_side.get_all_items(skip_completed=True)
query_str = query.string
if len(query_str) < 2:
results.extend([s.get_as_albert_item() for s in subcommands])
results.append(
get_as_item(
text="Reload list of tasks",
actions=[v0.FuncAction("Reload", async_reload_items)],
)
)
tasks.sort(key=lambda t: t["urgency"], reverse=True)
results.extend([get_tw_item(task) for task in tasks])
else:
subcommand_query = get_subcommand_query(query_str)
if subcommand_query:
results.extend(
subcommand_query.command.get_as_albert_items_full(
subcommand_query.query
)
)
if not results:
results.append(get_as_item(text="No results"))
else:
# find relevant results
desc_to_task = {task["description"]: task for task in tasks}
matched = process.extract(query_str, list(desc_to_task.keys()), limit=30)
for m in [elem[0] for elem in matched]:
task = desc_to_task[m]
results.append(get_tw_item(task))
except Exception: # user to report error
if dev_mode:
v0.critical(traceback.format_exc())
raise
results.insert(
0,
v0.Item(
id=__title__,
icon=icon_path,
text="Something went wrong! Press [ENTER] to copy error and report it",
actions=[
v0.ClipAction(
f"Copy error - report it to {__homepage__[8:]}",
f"{traceback.format_exc()}",
)
],
),
)
return results
def get_as_item(**kargs) -> v0.Item:
if "icon" in kargs:
icon = kargs.pop("icon")
else:
icon = icon_path
return v0.Item(id=__title__, icon=icon, **kargs)
# supplementary functions ---------------------------------------------------------------------
workers: List[threading.Thread] = []
def async_reload_items():
def do_reload():
v0.info("TaskWarrior: Updating list of tasks...")
tw_side.reload_items = True
tw_side.get_all_items(skip_completed=True)
t = threading.Thread(target=do_reload)
t.start()
workers.append(t)
def setup(query): # type: ignore
results = []
if not which("task"):
results.append(
v0.Item(
id=__title__,
icon=icon_path,
text=f'"taskwarrior" is not installed.',
subtext='Please install and configure "taskwarrior" accordingly.',
actions=[
v0.UrlAction(
'Open "taskwarrior" website', "https://taskwarrior.org/download/"
)
],
)
)
return results
return results
def save_data(data: str, data_name: str):
"""Save a piece of data in the configuration directory."""
with open(config_path / data_name, "w") as f:
f.write(data)
def load_data(data_name) -> str:
"""Load a piece of data from the configuration directory."""
with open(config_path / data_name, "r") as f:
data = f.readline().strip().split()[0]
return data
def get_as_subtext_field(field, field_title=None):
s = ""
if field:
s = f"{field} | "
else:
return ""
if field_title:
s = f"{field_title}:" + s
return s
def urgency_to_visuals(prio: Union[float, None]) -> Tuple[Union[str, None], Path]:
if prio is None:
return None, Path(icon_path)
elif prio < 4:
return "↓", Path(icon_path_b)
elif prio < 8:
return "↘", Path(icon_path_c)
elif prio < 11:
return "-", Path(icon_path_g)
elif prio < 15:
return "↗", Path(icon_path_y)
else:
return "↑", Path(icon_path_r)
def fail_task(task_id: list):
run_tw_action(args_list=[task_id, "modify", "+fail"])
run_tw_action(args_list=[task_id, "done"])
def run_tw_action(args_list: list, need_pty=False):
args_list = ["task", "rc.recurrence.confirmation=no", "rc.confirmation=off", *args_list]
if need_pty:
args_list.insert(0, "x-terminal-emulator")
args_list.insert(1, "-e")
proc = Popen(args_list, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
image = icon_path_r
msg = f'stdout: {stdout.decode("utf-8")} | stderr: {stderr.decode("utf-8")}'
else:
image = icon_path
msg = stdout.decode("utf-8")
do_notify(msg=msg, image=image)
async_reload_items()
def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore
"""Get a single TW task as an Albert Item."""
field = get_as_subtext_field
task_id = tw_side.get_task_id(task)
actions = [
v0.FuncAction(
"Complete task",
lambda args_list=["done", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Delete task",
lambda args_list=["delete", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Start task",
lambda args_list=["start", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Stop task",
lambda args_list=["stop", task_id]: run_tw_action(args_list),
),
v0.FuncAction(
"Edit task interactively",
lambda args_list=["edit", task_id]: run_tw_action(args_list, need_pty=True),
),
v0.FuncAction(
"Fail task",
lambda task_id=task_id: fail_task(task_id=task_id),
),
v0.ClipAction("Copy task UUID", f"{task_id}"),
]
found_urls = url_re.findall(task["description"])
if "annotations" in task.keys():
found_urls.extend(url_re.findall(" ".join(task["annotations"])))
for url in found_urls[-1::-1]:
actions.insert(0, v0.UrlAction(f"Open {url}", url))
if reminders_tag_path.is_file():
global reminders_tag
reminders_tag = load_data(reminders_tag_path)
else:
save_data("remindme", str(reminders_tag_path))
actions.append(
v0.FuncAction(
f"Add to Reminders (+{reminders_tag})",
lambda args_list=[
"modify",
task_id,
f"+{reminders_tag}",
]: run_tw_action(args_list),
)
)
urgency_str, icon = urgency_to_visuals(task.get("urgency"))
text = f'{task["description"]}'
if "start" in task:
text = f'<p style="color:orange;">{text}</p>'
due = None
if "due" in task:
due = task["due"].astimezone(dateutil.tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S") # type: ignore
return get_as_item(
text=text,
subtext="{}{}{}{}{}".format(
field(urgency_str),
"ID: {}... | ".format(tw_side.get_task_id(task)[:8]),
field(task["status"]),
field(task.get("tags"), "tags"),
field(due, "due"),
)[:-2],
icon=str(icon),
completion=f'{__triggers__}{task["description"]}',
actions=actions,
)
# subcommands ---------------------------------------------------------------------------------
class Subcommand:
def __init__(self, *, name, desc):
self.name = name
self.desc = desc
self.subcommand_prefix = f"{__triggers__}{self.name}"
def get_as_albert_item(self):
return get_as_item(text=self.desc, completion=f"{self.subcommand_prefix} ")
def get_as_albert_items_full(self, query_str):
return [self.get_as_albert_item()]
def __str__(self) -> str:
return f"Name: {self.name} | Description: {self.desc}"
class AddSubcommand(Subcommand):
def __init__(self):
super(AddSubcommand, self).__init__(name="add", desc="Add a new task")
@overrides
def get_as_albert_items_full(self, query_str):
items = []
add_item = self.get_as_albert_item()
add_item.subtext = query_str
add_item.completion = f"{self.subcommand_prefix} {query_str}"
add_item.addAction(
v0.FuncAction(
"Add task",
lambda args_list=["add", *query_str.split()]: run_tw_action(args_list),
)
)
items.append(add_item)
to_reminders = self.get_as_albert_item()
to_reminders = v0.Item(
id=__title__,
text=f"Add +{reminders_tag} tag",
subtext="Add +remindme on [TAB]",
icon=icon_path_y,
completion=f"{self.subcommand_prefix} {query_str} +remindme",
)
items.append(to_reminders)
def item_at_date(date: datetime.date, time_24h: int):
dt_str = f'{date.strftime("%Y%m%d")}T{time_24h}0000'
return v0.Item(
id=__title__,
text=f"Due {date}, at {time_24h}:00",
subtext="Add due:dt_str on [TAB]",
icon=icon_path_c,
completion=f"{self.subcommand_prefix} {query_str} due:{dt_str}",
)
items.append(item_at_date(datetime.date.today(), time_24h=15))
items.append(item_at_date(datetime.date.today(), time_24h=19))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=10))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=15))
items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1),
time_24h=19))
return items
class LogSubcommand(Subcommand):
def __init__(self):
super(LogSubcommand, self).__init__(name="log", desc="Log an already done task")
@overrides
def get_as_albert_items_full(self, query_str):
item = self.get_as_albert_item()
item.subtext = query_str
item.addAction(
v0.FuncAction(
"Log task",
lambda args_list=["log", *query_str.split()]: run_tw_action(args_list),
)
)
return [item]
class ActiveTasks(Subcommand):
def __init__(self):
super(ActiveTasks, self).__init__(name="active", desc="Active tasks")
@overrides
def get_as_albert_items_full(self, query_str):
return [
get_tw_item(t) for t in tw_side.get_all_items(skip_completed=True) if "start" in t
]
def move_tasks_of_date_to_next_day(date: datetime.date):
for t in get_tasks_of_date(date):
tw_side.update_item(item_id=str(t["uuid"]), due=t["due"] + datetime.timedelta(days=1))
class DateTasks(Subcommand):
"""
Common parent to classes like TodayTasks, and YesterdayTasks so as to not repeat ourselves.
"""
def __init__(self, date: datetime.date, *args, **kargs):
super(DateTasks, self).__init__(*args, **kargs)
self.date = date
@overrides
def get_as_albert_item(self):
item = super().get_as_albert_item()
item.addAction(
v0.FuncAction(
"Move tasks to the day after",
lambda date=self.date: move_tasks_of_date_to_next_day(date),
)
)
return item
@overrides
def get_as_albert_items_full(self, query_str):
return [get_tw_item(t) for t in get_tasks_of_date(self.date)]
class TodayTasks(DateTasks):
def __init__(self):
super(TodayTasks, self).__init__(
date=datetime.date.today(), name="today", desc="Today's tasks"
)
class YesterdayTasks(DateTasks):
def __init__(self):
super(YesterdayTasks, self).__init__(
date=datetime.date.today() - datetime.timedelta(days=1),
name="yesterday",
desc="Yesterday's tasks",
)
class TomorrowTasks(DateTasks):
def __init__(self):
super(TomorrowTasks, self).__init__(
date=datetime.date.today() + datetime.timedelta(days=1),
name="tomorrow",
desc="Tomorrow's tasks",
)
class SubcommandQuery:
def __init__(self, subcommand: Subcommand, query: str):
"""
Query for a specific subcommand.
:query: Query text - doesn't include the subcommand itself
"""
self.command = subcommand
self.query = query
def __str__(self) -> str:
return f"Command: {self.command}\nQuery Text: {self.query}"
def create_subcommands():
return [
AddSubcommand(),
LogSubcommand(),
ActiveTasks(),
TodayTasks(),
YesterdayTasks(),
TomorrowTasks(),
]
subcommands = create_subcommands()
def get_subcommand_for_name(name: str) -> Optional[Subcommand]:
"""Get a subcommand with the indicated name."""
matching = [s for s in subcommands if s.name.lower() == name.lower()]
if matching:
return matching[0]
def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:
"""
Determine whether the current query is for a subcommand.
If so, return the corresponding SubcommandQuery object.
"""
if not query_str:
return None
# split:
# "subcommand_name rest of query" -> ["subcommand_name", "rest of query"]
query_parts = query_str.strip().split(None, maxsplit=1)
if len(query_parts) < 2:
query_str = ""
else:
query_str = query_parts[1]
subcommand = get_subcommand_for_name(query_parts[0])
if subcommand:
return SubcommandQuery(subcommand=subcommand, query=query_str)
|
from fastapi import FastAPI, Form
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from typing import Optional
app = FastAPI()
class UssdParams(BaseModel):
session_id: str
service_code: str
phone_number: str
text: str
# dummy acc. data
accounts = {
"A001": {
"bill": "420"
},
"A002": {
"bill": "1111"
}
}
async def process_response(resp: str):
response: str = ""
print(resp)
if len(resp) <= 1:
if resp == "":
response = "CON What would you want to check \n"
response += "1. Query Bill \n"
response += "2. Exit"
elif resp == "1":
response = "CON Enter Account number \n"
response += "eg: A111"
elif resp == "2":
response = "END Bye!"
else:
response = "END Invalid choice selected!"
else:
# split the string and get the acc number
_, account_number = resp.split('*')
# check if account number is valid
if account_number in accounts:
response = f"END Your bill is kes {accounts[account_number]["bill"]}"
else:
response = f"END Invalid account number: {account_number}!"
return response
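# Hedged walk-through (assumes an Africa's Talking-style gateway that appends each new
# input to `text` with "*" separators; the account numbers below are illustrative):
#   text == ""        -> "CON" menu asking what to check
#   text == "1"       -> "CON" prompt for an account number
#   text == "2"       -> "END Bye!"
#   text == "1*A001"  -> "END Your bill is kes 420" (looked up in `accounts`)
#   text == "1*A999"  -> "END Invalid account number: A999!"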
@app.get("/")
def root():
return {"message": "Hello USSD!"}
@app.post("/callback/", response_class=HTMLResponse)
async def ussd_callback(session_id: str = Form(""), service_code: str = Form(""), phone_number: str = Form(""), text: Optional[str] = Form("")):
# print(phone_number)
return await process_response(text)
|
from fastapi import FastAPI, Form
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from typing import Optional
app = FastAPI()
class UssdParams(BaseModel):
session_id: str
service_code: str
phone_number: str
text: str
# dummy acc. data
accounts = {
"A001": {
"bill": "420"
},
"A002": {
"bill": "1111"
}
}
async def process_response(resp: str):
response: str = ""
print(resp)
if len(resp) <= 1:
if resp == "":
response = "CON What would you want to check \n"
response += "1. Query Bill \n"
response += "2. Exit"
elif resp == "1":
response = "CON Enter Account number \n"
response += "eg: A111"
elif resp == "2":
response = "END Bye!"
else:
response = "END Invalid choice selected!"
else:
# split the string and get the acc number
_, account_number = resp.split('*')
# check if account number is valid
if account_number in accounts:
response = f"END Your bill is kes {accounts[account_number]['bill']}"
else:
response = f"END Invalid account number: {account_number}!"
return response
@app.get("/")
def root():
return {"message": "Hello USSD!"}
@app.post("/callback/", response_class=HTMLResponse)
async def ussd_callback(session_id: str = Form(""), service_code: str = Form(""), phone_number: str = Form(""), text: Optional[str] = Form("")):
# print(phone_number)
return await process_response(text)
|
"""Generate mypy config."""
from __future__ import annotations
import configparser
import io
import os
from pathlib import Path
from typing import Final
from .model import Config, Integration
# Modules whose type hints are known to be broken.
# If you are an author of a component listed here, please fix these errors and
# remove your component from this list to enable type checks.
# Do your best to not add anything new here.
IGNORED_MODULES: Final[list[str]] = [
"homeassistant.components.adguard.*",
"homeassistant.components.aemet.*",
"homeassistant.components.alarmdecoder.*",
"homeassistant.components.alexa.*",
"homeassistant.components.almond.*",
"homeassistant.components.amcrest.*",
"homeassistant.components.analytics.*",
"homeassistant.components.asuswrt.*",
"homeassistant.components.atag.*",
"homeassistant.components.aurora.*",
"homeassistant.components.awair.*",
"homeassistant.components.azure_devops.*",
"homeassistant.components.azure_event_hub.*",
"homeassistant.components.blueprint.*",
"homeassistant.components.bluetooth_tracker.*",
"homeassistant.components.bmw_connected_drive.*",
"homeassistant.components.bsblan.*",
"homeassistant.components.camera.*",
"homeassistant.components.canary.*",
"homeassistant.components.cast.*",
"homeassistant.components.cert_expiry.*",
"homeassistant.components.climacell.*",
"homeassistant.components.climate.*",
"homeassistant.components.cloud.*",
"homeassistant.components.cloudflare.*",
"homeassistant.components.config.*",
"homeassistant.components.control4.*",
"homeassistant.components.conversation.*",
"homeassistant.components.deconz.*",
"homeassistant.components.demo.*",
"homeassistant.components.denonavr.*",
"homeassistant.components.device_tracker.*",
"homeassistant.components.devolo_home_control.*",
"homeassistant.components.dhcp.*",
"homeassistant.components.directv.*",
"homeassistant.components.doorbird.*",
"homeassistant.components.dsmr.*",
"homeassistant.components.dynalite.*",
"homeassistant.components.eafm.*",
"homeassistant.components.edl21.*",
"homeassistant.components.elkm1.*",
"homeassistant.components.emonitor.*",
"homeassistant.components.enphase_envoy.*",
"homeassistant.components.entur_public_transport.*",
"homeassistant.components.esphome.*",
"homeassistant.components.evohome.*",
"homeassistant.components.fan.*",
"homeassistant.components.filter.*",
"homeassistant.components.fints.*",
"homeassistant.components.fireservicerota.*",
"homeassistant.components.firmata.*",
"homeassistant.components.fitbit.*",
"homeassistant.components.flo.*",
"homeassistant.components.fortios.*",
"homeassistant.components.foscam.*",
"homeassistant.components.freebox.*",
"homeassistant.components.fritz.*",
"homeassistant.components.fritzbox.*",
"homeassistant.components.garmin_connect.*",
"homeassistant.components.geniushub.*",
"homeassistant.components.gios.*",
"homeassistant.components.glances.*",
"homeassistant.components.gogogate2.*",
"homeassistant.components.google_assistant.*",
"homeassistant.components.google_maps.*",
"homeassistant.components.google_pubsub.*",
"homeassistant.components.gpmdp.*",
"homeassistant.components.gree.*",
"homeassistant.components.growatt_server.*",
"homeassistant.components.gtfs.*",
"homeassistant.components.guardian.*",
"homeassistant.components.habitica.*",
"homeassistant.components.harmony.*",
"homeassistant.components.hassio.*",
"homeassistant.components.hdmi_cec.*",
"homeassistant.components.here_travel_time.*",
"homeassistant.components.hisense_aehw4a1.*",
"homeassistant.components.home_connect.*",
"homeassistant.components.home_plus_control.*",
"homeassistant.components.homeassistant.*",
"homeassistant.components.homekit.*",
"homeassistant.components.homekit_controller.*",
"homeassistant.components.homematicip_cloud.*",
"homeassistant.components.honeywell.*",
"homeassistant.components.hue.*",
"homeassistant.components.huisbaasje.*",
"homeassistant.components.humidifier.*",
"homeassistant.components.iaqualink.*",
"homeassistant.components.icloud.*",
"homeassistant.components.image.*",
"homeassistant.components.incomfort.*",
"homeassistant.components.influxdb.*",
"homeassistant.components.input_boolean.*",
"homeassistant.components.input_datetime.*",
"homeassistant.components.input_number.*",
"homeassistant.components.insteon.*",
"homeassistant.components.ipp.*",
"homeassistant.components.isy994.*",
"homeassistant.components.izone.*",
"homeassistant.components.kaiterra.*",
"homeassistant.components.keenetic_ndms2.*",
"homeassistant.components.kodi.*",
"homeassistant.components.konnected.*",
"homeassistant.components.kostal_plenticore.*",
"homeassistant.components.kulersky.*",
"homeassistant.components.lifx.*",
"homeassistant.components.litejet.*",
"homeassistant.components.litterrobot.*",
"homeassistant.components.lovelace.*",
"homeassistant.components.luftdaten.*",
"homeassistant.components.lutron_caseta.*",
"homeassistant.components.lyric.*",
"homeassistant.components.marytts.*",
"homeassistant.components.media_source.*",
"homeassistant.components.melcloud.*",
"homeassistant.components.meteo_france.*",
"homeassistant.components.metoffice.*",
"homeassistant.components.minecraft_server.*",
"homeassistant.components.mobile_app.*",
"homeassistant.components.motion_blinds.*",
"homeassistant.components.mqtt.*",
"homeassistant.components.mullvad.*",
"homeassistant.components.mysensors.*",
"homeassistant.components.n26.*",
"homeassistant.components.neato.*",
"homeassistant.components.ness_alarm.*",
"homeassistant.components.nest.*",
"homeassistant.components.netatmo.*",
"homeassistant.components.netio.*",
"homeassistant.components.nightscout.*",
"homeassistant.components.nilu.*",
"homeassistant.components.nmap_tracker.*",
"homeassistant.components.norway_air.*",
"homeassistant.components.notion.*",
"homeassistant.components.nsw_fuel_station.*",
"homeassistant.components.nuki.*",
"homeassistant.components.nws.*",
"homeassistant.components.nzbget.*",
"homeassistant.components.omnilogic.*",
"homeassistant.components.onboarding.*",
"homeassistant.components.ondilo_ico.*",
"homeassistant.components.onewire.*",
"homeassistant.components.onvif.*",
"homeassistant.components.ovo_energy.*",
"homeassistant.components.ozw.*",
"homeassistant.components.panasonic_viera.*",
"homeassistant.components.philips_js.*",
"homeassistant.components.pilight.*",
"homeassistant.components.ping.*",
"homeassistant.components.pioneer.*",
"homeassistant.components.plaato.*",
"homeassistant.components.plex.*",
"homeassistant.components.plugwise.*",
"homeassistant.components.plum_lightpad.*",
"homeassistant.components.point.*",
"homeassistant.components.profiler.*",
"homeassistant.components.proxmoxve.*",
"homeassistant.components.rachio.*",
"homeassistant.components.rainmachine.*",
"homeassistant.components.recollect_waste.*",
"homeassistant.components.recorder.*",
"homeassistant.components.reddit.*",
"homeassistant.components.ring.*",
"homeassistant.components.roku.*",
"homeassistant.components.rpi_power.*",
"homeassistant.components.ruckus_unleashed.*",
"homeassistant.components.sabnzbd.*",
"homeassistant.components.screenlogic.*",
"homeassistant.components.script.*",
"homeassistant.components.search.*",
"homeassistant.components.sense.*",
"homeassistant.components.sentry.*",
"homeassistant.components.sesame.*",
"homeassistant.components.sharkiq.*",
"homeassistant.components.shelly.*",
"homeassistant.components.sma.*",
"homeassistant.components.smart_meter_texas.*",
"homeassistant.components.smartthings.*",
"homeassistant.components.smarttub.*",
"homeassistant.components.smarty.*",
"homeassistant.components.smhi.*",
"homeassistant.components.solaredge.*",
"homeassistant.components.solarlog.*",
"homeassistant.components.somfy.*",
"homeassistant.components.somfy_mylink.*",
"homeassistant.components.sonarr.*",
"homeassistant.components.songpal.*",
"homeassistant.components.sonos.*",
"homeassistant.components.spotify.*",
"homeassistant.components.stream.*",
"homeassistant.components.stt.*",
"homeassistant.components.surepetcare.*",
"homeassistant.components.switchbot.*",
"homeassistant.components.switcher_kis.*",
"homeassistant.components.synology_dsm.*",
"homeassistant.components.synology_srm.*",
"homeassistant.components.system_health.*",
"homeassistant.components.system_log.*",
"homeassistant.components.tado.*",
"homeassistant.components.tasmota.*",
"homeassistant.components.tcp.*",
"homeassistant.components.telegram_bot.*",
"homeassistant.components.template.*",
"homeassistant.components.tesla.*",
"homeassistant.components.timer.*",
"homeassistant.components.todoist.*",
"homeassistant.components.toon.*",
"homeassistant.components.tplink.*",
"homeassistant.components.trace.*",
"homeassistant.components.tradfri.*",
"homeassistant.components.tuya.*",
"homeassistant.components.unifi.*",
"homeassistant.components.upcloud.*",
"homeassistant.components.updater.*",
"homeassistant.components.upnp.*",
"homeassistant.components.velbus.*",
"homeassistant.components.vera.*",
"homeassistant.components.verisure.*",
"homeassistant.components.vizio.*",
"homeassistant.components.volumio.*",
"homeassistant.components.webostv.*",
"homeassistant.components.wemo.*",
"homeassistant.components.wink.*",
"homeassistant.components.withings.*",
"homeassistant.components.wled.*",
"homeassistant.components.wunderground.*",
"homeassistant.components.xbox.*",
"homeassistant.components.xiaomi_aqara.*",
"homeassistant.components.xiaomi_miio.*",
"homeassistant.components.yamaha.*",
"homeassistant.components.yeelight.*",
"homeassistant.components.zerproc.*",
"homeassistant.components.zha.*",
"homeassistant.components.zwave.*",
]
HEADER: Final = """
# Automatically generated by hassfest.
#
# To update, run python3 -m script.hassfest
""".lstrip()
GENERAL_SETTINGS: Final[dict[str, str]] = {
"python_version": "3.8",
"show_error_codes": "true",
"follow_imports": "silent",
"ignore_missing_imports": "true",
"warn_incomplete_stub": "true",
"warn_redundant_casts": "true",
"warn_unused_configs": "true",
}
# This is basically the list of checks that are enabled for "strict=true".
# But "strict=true" is applied globally, so we need to list all checks manually.
STRICT_SETTINGS: Final[list[str]] = [
"check_untyped_defs",
"disallow_incomplete_defs",
"disallow_subclassing_any",
"disallow_untyped_calls",
"disallow_untyped_decorators",
"disallow_untyped_defs",
"no_implicit_optional",
"strict_equality",
"warn_return_any",
"warn_unreachable",
"warn_unused_ignores",
# TODO: turn these on, address issues
# "disallow_any_generics",
# "no_implicit_reexport",
]
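# What generate_and_validate() below produces: a mypy.ini where the strict
# checks above are enabled globally, switched off again for
# homeassistant.components.* and tests.*, re-enabled for each module listed in
# .strict-typing, and where every IGNORED_MODULES entry gets ignore_errors=true.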
def generate_and_validate(config: Config) -> str:
"""Validate and generate mypy config."""
config_path = config.root / ".strict-typing"
with config_path.open() as fp:
lines = fp.readlines()
# Filter empty and commented lines.
strict_modules: list[str] = [
line.strip()
for line in lines
if line.strip() != "" and not line.startswith("#")
]
ignored_modules_set: set[str] = set(IGNORED_MODULES)
for module in strict_modules:
if (
not module.startswith("homeassistant.components.")
and module != "homeassistant.components"
):
config.add_error(
"mypy_config", f"Only components should be added: {module}"
)
if module in ignored_modules_set:
config.add_error("mypy_config", f"Module '{module}' is in ignored list")
# Validate that all modules exist.
all_modules = strict_modules + IGNORED_MODULES
for module in all_modules:
if module.endswith(".*"):
module_path = Path(module[:-2].replace(".", os.path.sep))
if not module_path.is_dir():
config.add_error("mypy_config", f"Module '{module} is not a folder")
else:
module = module.replace(".", os.path.sep)
module_path = Path(f"{module}.py")
if module_path.is_file():
continue
module_path = Path(module) / "__init__.py"
if not module_path.is_file():
config.add_error("mypy_config", f"Module '{module} doesn't exist")
mypy_config = configparser.ConfigParser()
general_section = "mypy"
mypy_config.add_section(general_section)
for key, value in GENERAL_SETTINGS.items():
mypy_config.set(general_section, key, value)
for key in STRICT_SETTINGS:
mypy_config.set(general_section, key, "true")
# By default strict checks are disabled for components.
components_section = "mypy-homeassistant.components.*"
mypy_config.add_section(components_section)
for key in STRICT_SETTINGS:
mypy_config.set(components_section, key, "false")
for strict_module in strict_modules:
strict_section = f"mypy-{strict_module}"
mypy_config.add_section(strict_section)
for key in STRICT_SETTINGS:
mypy_config.set(strict_section, key, "true")
# Disable strict checks for tests
tests_section = "mypy-tests.*"
mypy_config.add_section(tests_section)
for key in STRICT_SETTINGS:
mypy_config.set(tests_section, key, "false")
for ignored_module in IGNORED_MODULES:
ignored_section = f"mypy-{ignored_module}"
mypy_config.add_section(ignored_section)
mypy_config.set(ignored_section, "ignore_errors", "true")
with io.StringIO() as fp:
mypy_config.write(fp)
fp.seek(0)
return HEADER + fp.read().strip()
def validate(integrations: dict[str, Integration], config: Config) -> None:
"""Validate mypy config."""
config_path = config.root / "mypy.ini"
config.cache["mypy_config"] = content = generate_and_validate(config)
with open(str(config_path)) as fp:
if fp.read().strip() != content:
config.add_error(
"mypy_config",
"File mypy.ini is not up to date. Run python3 -m script.hassfest",
fixable=True,
)
def generate(integrations: dict[str, Integration], config: Config) -> None:
"""Generate mypy config."""
config_path = config.root / "mypy.ini"
with open(str(config_path), "w") as fp:
fp.write(f"{config.cache["mypy_config"]}\n")
|
"""Generate mypy config."""
from __future__ import annotations
import configparser
import io
import os
from pathlib import Path
from typing import Final
from .model import Config, Integration
# Modules which have type hints which known to be broken.
# If you are an author of component listed here, please fix these errors and
# remove your component from this list to enable type checks.
# Do your best to not add anything new here.
IGNORED_MODULES: Final[list[str]] = [
"homeassistant.components.adguard.*",
"homeassistant.components.aemet.*",
"homeassistant.components.alarmdecoder.*",
"homeassistant.components.alexa.*",
"homeassistant.components.almond.*",
"homeassistant.components.amcrest.*",
"homeassistant.components.analytics.*",
"homeassistant.components.asuswrt.*",
"homeassistant.components.atag.*",
"homeassistant.components.aurora.*",
"homeassistant.components.awair.*",
"homeassistant.components.azure_devops.*",
"homeassistant.components.azure_event_hub.*",
"homeassistant.components.blueprint.*",
"homeassistant.components.bluetooth_tracker.*",
"homeassistant.components.bmw_connected_drive.*",
"homeassistant.components.bsblan.*",
"homeassistant.components.camera.*",
"homeassistant.components.canary.*",
"homeassistant.components.cast.*",
"homeassistant.components.cert_expiry.*",
"homeassistant.components.climacell.*",
"homeassistant.components.climate.*",
"homeassistant.components.cloud.*",
"homeassistant.components.cloudflare.*",
"homeassistant.components.config.*",
"homeassistant.components.control4.*",
"homeassistant.components.conversation.*",
"homeassistant.components.deconz.*",
"homeassistant.components.demo.*",
"homeassistant.components.denonavr.*",
"homeassistant.components.device_tracker.*",
"homeassistant.components.devolo_home_control.*",
"homeassistant.components.dhcp.*",
"homeassistant.components.directv.*",
"homeassistant.components.doorbird.*",
"homeassistant.components.dsmr.*",
"homeassistant.components.dynalite.*",
"homeassistant.components.eafm.*",
"homeassistant.components.edl21.*",
"homeassistant.components.elkm1.*",
"homeassistant.components.emonitor.*",
"homeassistant.components.enphase_envoy.*",
"homeassistant.components.entur_public_transport.*",
"homeassistant.components.esphome.*",
"homeassistant.components.evohome.*",
"homeassistant.components.fan.*",
"homeassistant.components.filter.*",
"homeassistant.components.fints.*",
"homeassistant.components.fireservicerota.*",
"homeassistant.components.firmata.*",
"homeassistant.components.fitbit.*",
"homeassistant.components.flo.*",
"homeassistant.components.fortios.*",
"homeassistant.components.foscam.*",
"homeassistant.components.freebox.*",
"homeassistant.components.fritz.*",
"homeassistant.components.fritzbox.*",
"homeassistant.components.garmin_connect.*",
"homeassistant.components.geniushub.*",
"homeassistant.components.gios.*",
"homeassistant.components.glances.*",
"homeassistant.components.gogogate2.*",
"homeassistant.components.google_assistant.*",
"homeassistant.components.google_maps.*",
"homeassistant.components.google_pubsub.*",
"homeassistant.components.gpmdp.*",
"homeassistant.components.gree.*",
"homeassistant.components.growatt_server.*",
"homeassistant.components.gtfs.*",
"homeassistant.components.guardian.*",
"homeassistant.components.habitica.*",
"homeassistant.components.harmony.*",
"homeassistant.components.hassio.*",
"homeassistant.components.hdmi_cec.*",
"homeassistant.components.here_travel_time.*",
"homeassistant.components.hisense_aehw4a1.*",
"homeassistant.components.home_connect.*",
"homeassistant.components.home_plus_control.*",
"homeassistant.components.homeassistant.*",
"homeassistant.components.homekit.*",
"homeassistant.components.homekit_controller.*",
"homeassistant.components.homematicip_cloud.*",
"homeassistant.components.honeywell.*",
"homeassistant.components.hue.*",
"homeassistant.components.huisbaasje.*",
"homeassistant.components.humidifier.*",
"homeassistant.components.iaqualink.*",
"homeassistant.components.icloud.*",
"homeassistant.components.image.*",
"homeassistant.components.incomfort.*",
"homeassistant.components.influxdb.*",
"homeassistant.components.input_boolean.*",
"homeassistant.components.input_datetime.*",
"homeassistant.components.input_number.*",
"homeassistant.components.insteon.*",
"homeassistant.components.ipp.*",
"homeassistant.components.isy994.*",
"homeassistant.components.izone.*",
"homeassistant.components.kaiterra.*",
"homeassistant.components.keenetic_ndms2.*",
"homeassistant.components.kodi.*",
"homeassistant.components.konnected.*",
"homeassistant.components.kostal_plenticore.*",
"homeassistant.components.kulersky.*",
"homeassistant.components.lifx.*",
"homeassistant.components.litejet.*",
"homeassistant.components.litterrobot.*",
"homeassistant.components.lovelace.*",
"homeassistant.components.luftdaten.*",
"homeassistant.components.lutron_caseta.*",
"homeassistant.components.lyric.*",
"homeassistant.components.marytts.*",
"homeassistant.components.media_source.*",
"homeassistant.components.melcloud.*",
"homeassistant.components.meteo_france.*",
"homeassistant.components.metoffice.*",
"homeassistant.components.minecraft_server.*",
"homeassistant.components.mobile_app.*",
"homeassistant.components.motion_blinds.*",
"homeassistant.components.mqtt.*",
"homeassistant.components.mullvad.*",
"homeassistant.components.mysensors.*",
"homeassistant.components.n26.*",
"homeassistant.components.neato.*",
"homeassistant.components.ness_alarm.*",
"homeassistant.components.nest.*",
"homeassistant.components.netatmo.*",
"homeassistant.components.netio.*",
"homeassistant.components.nightscout.*",
"homeassistant.components.nilu.*",
"homeassistant.components.nmap_tracker.*",
"homeassistant.components.norway_air.*",
"homeassistant.components.notion.*",
"homeassistant.components.nsw_fuel_station.*",
"homeassistant.components.nuki.*",
"homeassistant.components.nws.*",
"homeassistant.components.nzbget.*",
"homeassistant.components.omnilogic.*",
"homeassistant.components.onboarding.*",
"homeassistant.components.ondilo_ico.*",
"homeassistant.components.onewire.*",
"homeassistant.components.onvif.*",
"homeassistant.components.ovo_energy.*",
"homeassistant.components.ozw.*",
"homeassistant.components.panasonic_viera.*",
"homeassistant.components.philips_js.*",
"homeassistant.components.pilight.*",
"homeassistant.components.ping.*",
"homeassistant.components.pioneer.*",
"homeassistant.components.plaato.*",
"homeassistant.components.plex.*",
"homeassistant.components.plugwise.*",
"homeassistant.components.plum_lightpad.*",
"homeassistant.components.point.*",
"homeassistant.components.profiler.*",
"homeassistant.components.proxmoxve.*",
"homeassistant.components.rachio.*",
"homeassistant.components.rainmachine.*",
"homeassistant.components.recollect_waste.*",
"homeassistant.components.recorder.*",
"homeassistant.components.reddit.*",
"homeassistant.components.ring.*",
"homeassistant.components.roku.*",
"homeassistant.components.rpi_power.*",
"homeassistant.components.ruckus_unleashed.*",
"homeassistant.components.sabnzbd.*",
"homeassistant.components.screenlogic.*",
"homeassistant.components.script.*",
"homeassistant.components.search.*",
"homeassistant.components.sense.*",
"homeassistant.components.sentry.*",
"homeassistant.components.sesame.*",
"homeassistant.components.sharkiq.*",
"homeassistant.components.shelly.*",
"homeassistant.components.sma.*",
"homeassistant.components.smart_meter_texas.*",
"homeassistant.components.smartthings.*",
"homeassistant.components.smarttub.*",
"homeassistant.components.smarty.*",
"homeassistant.components.smhi.*",
"homeassistant.components.solaredge.*",
"homeassistant.components.solarlog.*",
"homeassistant.components.somfy.*",
"homeassistant.components.somfy_mylink.*",
"homeassistant.components.sonarr.*",
"homeassistant.components.songpal.*",
"homeassistant.components.sonos.*",
"homeassistant.components.spotify.*",
"homeassistant.components.stream.*",
"homeassistant.components.stt.*",
"homeassistant.components.surepetcare.*",
"homeassistant.components.switchbot.*",
"homeassistant.components.switcher_kis.*",
"homeassistant.components.synology_dsm.*",
"homeassistant.components.synology_srm.*",
"homeassistant.components.system_health.*",
"homeassistant.components.system_log.*",
"homeassistant.components.tado.*",
"homeassistant.components.tasmota.*",
"homeassistant.components.tcp.*",
"homeassistant.components.telegram_bot.*",
"homeassistant.components.template.*",
"homeassistant.components.tesla.*",
"homeassistant.components.timer.*",
"homeassistant.components.todoist.*",
"homeassistant.components.toon.*",
"homeassistant.components.tplink.*",
"homeassistant.components.trace.*",
"homeassistant.components.tradfri.*",
"homeassistant.components.tuya.*",
"homeassistant.components.unifi.*",
"homeassistant.components.upcloud.*",
"homeassistant.components.updater.*",
"homeassistant.components.upnp.*",
"homeassistant.components.velbus.*",
"homeassistant.components.vera.*",
"homeassistant.components.verisure.*",
"homeassistant.components.vizio.*",
"homeassistant.components.volumio.*",
"homeassistant.components.webostv.*",
"homeassistant.components.wemo.*",
"homeassistant.components.wink.*",
"homeassistant.components.withings.*",
"homeassistant.components.wled.*",
"homeassistant.components.wunderground.*",
"homeassistant.components.xbox.*",
"homeassistant.components.xiaomi_aqara.*",
"homeassistant.components.xiaomi_miio.*",
"homeassistant.components.yamaha.*",
"homeassistant.components.yeelight.*",
"homeassistant.components.zerproc.*",
"homeassistant.components.zha.*",
"homeassistant.components.zwave.*",
]
HEADER: Final = """
# Automatically generated by hassfest.
#
# To update, run python3 -m script.hassfest
""".lstrip()
GENERAL_SETTINGS: Final[dict[str, str]] = {
"python_version": "3.8",
"show_error_codes": "true",
"follow_imports": "silent",
"ignore_missing_imports": "true",
"warn_incomplete_stub": "true",
"warn_redundant_casts": "true",
"warn_unused_configs": "true",
}
# This is basically the list of checks that are enabled for "strict=true".
# But "strict=true" is applied globally, so we need to list all checks manually.
STRICT_SETTINGS: Final[list[str]] = [
"check_untyped_defs",
"disallow_incomplete_defs",
"disallow_subclassing_any",
"disallow_untyped_calls",
"disallow_untyped_decorators",
"disallow_untyped_defs",
"no_implicit_optional",
"strict_equality",
"warn_return_any",
"warn_unreachable",
"warn_unused_ignores",
# TODO: turn these on, address issues
# "disallow_any_generics",
# "no_implicit_reexport",
]
def generate_and_validate(config: Config) -> str:
"""Validate and generate mypy config."""
config_path = config.root / ".strict-typing"
with config_path.open() as fp:
lines = fp.readlines()
# Filter empty and commented lines.
strict_modules: list[str] = [
line.strip()
for line in lines
if line.strip() != "" and not line.startswith("#")
]
ignored_modules_set: set[str] = set(IGNORED_MODULES)
for module in strict_modules:
if (
not module.startswith("homeassistant.components.")
and module != "homeassistant.components"
):
config.add_error(
"mypy_config", f"Only components should be added: {module}"
)
if module in ignored_modules_set:
config.add_error("mypy_config", f"Module '{module}' is in ignored list")
# Validate that all modules exist.
all_modules = strict_modules + IGNORED_MODULES
for module in all_modules:
if module.endswith(".*"):
module_path = Path(module[:-2].replace(".", os.path.sep))
if not module_path.is_dir():
config.add_error("mypy_config", f"Module '{module} is not a folder")
else:
module = module.replace(".", os.path.sep)
module_path = Path(f"{module}.py")
if module_path.is_file():
continue
module_path = Path(module) / "__init__.py"
if not module_path.is_file():
config.add_error("mypy_config", f"Module '{module} doesn't exist")
mypy_config = configparser.ConfigParser()
general_section = "mypy"
mypy_config.add_section(general_section)
for key, value in GENERAL_SETTINGS.items():
mypy_config.set(general_section, key, value)
for key in STRICT_SETTINGS:
mypy_config.set(general_section, key, "true")
# By default strict checks are disabled for components.
components_section = "mypy-homeassistant.components.*"
mypy_config.add_section(components_section)
for key in STRICT_SETTINGS:
mypy_config.set(components_section, key, "false")
for strict_module in strict_modules:
strict_section = f"mypy-{strict_module}"
mypy_config.add_section(strict_section)
for key in STRICT_SETTINGS:
mypy_config.set(strict_section, key, "true")
# Disable strict checks for tests
tests_section = "mypy-tests.*"
mypy_config.add_section(tests_section)
for key in STRICT_SETTINGS:
mypy_config.set(tests_section, key, "false")
for ignored_module in IGNORED_MODULES:
ignored_section = f"mypy-{ignored_module}"
mypy_config.add_section(ignored_section)
mypy_config.set(ignored_section, "ignore_errors", "true")
with io.StringIO() as fp:
mypy_config.write(fp)
fp.seek(0)
return HEADER + fp.read().strip()
def validate(integrations: dict[str, Integration], config: Config) -> None:
"""Validate mypy config."""
config_path = config.root / "mypy.ini"
config.cache["mypy_config"] = content = generate_and_validate(config)
with open(str(config_path)) as fp:
if fp.read().strip() != content:
config.add_error(
"mypy_config",
"File mypy.ini is not up to date. Run python3 -m script.hassfest",
fixable=True,
)
def generate(integrations: dict[str, Integration], config: Config) -> None:
"""Generate mypy config."""
config_path = config.root / "mypy.ini"
with open(str(config_path), "w") as fp:
fp.write(f"{config.cache['mypy_config']}\n")
|
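# Solution sketch for the "shortest path with one removable wall" maze problem:
# count_step() runs a BFS from (0, 0), writing each reachable free cell's
# distance back into a copy of the map and then reading the exit cell at the
# bottom-right; solution() first tries the map as-is and otherwise re-runs the
# BFS once per wall cell removed, keeping the minimum path length found.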
def count_step(m, w, h):
m = [[i for i in l] for l in m]
next_pos = [(0, 0)]
while next_pos:
x, y = next_pos.pop(0)
for i, j in ((-1, 0), (1, 0), (0, -1), (0, 1)):
x_, y_ = x + i, y + j
if 0 <= x_ < w and 0 <= y_ < h:
if not m[y_][x_]:
m[y_][x_] = m[y][x] + 1
next_pos.append((x_, y_))
step = m[-1][-1]
return step + 1 if step else float('inf')
def solution(m):
w, h = len(m[0]), len(m)
shortest_possible = w + h - 1
if count_step(m, w, h) == shortest_possible:
return shortest_possible
shortest = float('inf')
for x, y in [(x, y) for x in range(w) for y in range(h) if m[y][x]]:
tmp = [[i for i in l] for l in m]
tmp[y][x] = 0
result = count_step(tmp, w, h)
shortest = min(shortest, result)
if result == shortest_possible:
break
return shortest
if __name__ == '__main__':
from time import perf_counter_ns
basic_tests = (
([
[0, 1, 1, 0],
[0, 0, 0, 1],
[1, 1, 0, 0],
[1, 1, 1, 0]], 7),
([
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]], 11)
)
additional_tests = (
([
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]], 11),
([
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]], 21),
([
[0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0]], 13),
([
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]], float('inf')),
([
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0]], 19),
([
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1],
[0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1],
[0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]], 53),
)
results = {}
num_iters = 1
for func in [func for func in dir() if func.startswith('solution')]:
results[func] = []
print(f'\n{func}() (Number of Iterations {num_iters:,})')
for test in basic_tests + additional_tests:
matrix, expected = test
start = perf_counter_ns()
for i in range(num_iters):
result = globals()[func](matrix)
end = perf_counter_ns()
results[func].append(end - start)
print(f'{func}("{matrix}") returned {result} '
              f'({'correct' if result == expected else f'expected: {expected}'})'
f' in {end - start:,} nanoseconds.')
|
def count_step(m, w, h):
m = [[i for i in l] for l in m]
next_pos = [(0, 0)]
while next_pos:
x, y = next_pos.pop(0)
for i, j in ((-1, 0), (1, 0), (0, -1), (0, 1)):
x_, y_ = x + i, y + j
if 0 <= x_ < w and 0 <= y_ < h:
if not m[y_][x_]:
m[y_][x_] = m[y][x] + 1
next_pos.append((x_, y_))
step = m[-1][-1]
return step + 1 if step else float('inf')
def solution(m):
w, h = len(m[0]), len(m)
shortest_possible = w + h - 1
if count_step(m, w, h) == shortest_possible:
return shortest_possible
shortest = float('inf')
for x, y in [(x, y) for x in range(w) for y in range(h) if m[y][x]]:
tmp = [[i for i in l] for l in m]
tmp[y][x] = 0
result = count_step(tmp, w, h)
shortest = min(shortest, result)
if result == shortest_possible:
break
return shortest
if __name__ == '__main__':
from time import perf_counter_ns
basic_tests = (
([
[0, 1, 1, 0],
[0, 0, 0, 1],
[1, 1, 0, 0],
[1, 1, 1, 0]], 7),
([
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]], 11)
)
additional_tests = (
([
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]], 11),
([
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]], 21),
([
[0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0]], 13),
([
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]], float('inf')),
([
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0]], 19),
([
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1],
[0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1],
[0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]], 53),
)
results = {}
num_iters = 1
for func in [func for func in dir() if func.startswith('solution')]:
results[func] = []
print(f'\n{func}() (Number of Iterations {num_iters:,})')
for test in basic_tests + additional_tests:
matrix, expected = test
start = perf_counter_ns()
for i in range(num_iters):
result = globals()[func](matrix)
end = perf_counter_ns()
results[func].append(end - start)
print(f'{func}("{matrix}") returned {result} '
f'({"correct" if result == expected else f"expected: {expected}"})'
f' in {end - start:,} nanoseconds.')
|
import sqlite3
import threading
import notify2
from datetime import datetime
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
from halo import Halo
from prompt_toolkit import ANSI
from prompt_toolkit.application import Application, get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.formatted_text import to_formatted_text, \
fragment_list_to_text
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.focus import focus_next
from prompt_toolkit.layout import BufferControl
from prompt_toolkit.layout.containers import HSplit, VSplit, Window
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.processors import Processor, Transformation
from prompt_toolkit.styles import Style
from prompt_toolkit.widgets import SearchToolbar, TextArea, Frame, RadioList
import utils
from utils import ansi_bold, ansi_italics, ansi_end
conn = sqlite3.connect(':memory:', check_same_thread=False)
c = conn.cursor()
conn.execute('''CREATE TABLE history
(id integer primary key,
msg_time time, sender text, msg text, channel text)''')
identity = utils.config['user']['identity']
spinner = Halo(spinner="dots", text="starting app ...")
spinner.start()
notify2.init("cchat")
n = notify2.Notification(None)
n.set_urgency(notify2.URGENCY_NORMAL)
n.set_timeout(5000)
cmd_area_text = "type in command/message - ctrl-c to quit"
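# ChatServer receives chat-service webhooks as GET requests whose query string
# carries parameters such as ChannelSid, EventType, From and Body; it echoes the
# parsed params back (debugging only) and hands a formatted message to
# chat_handler() so it shows up in the UI for the matching channel.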
class ChatServer(BaseHTTPRequestHandler, ):
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def _html(self, params):
"""Shows the url params on the browser in html.
Nothing useful. Just for debugging
"""
content = f"<html><body><p>{params}</p></body></html>"
return content.encode("utf8")
def do_GET(self):
self._set_headers()
buffer = Application.current_buffer
params = parse_qs(self.path)
self.wfile.write(self._html(params))
channel = params.get('/?ChannelSid') or params.get('ChannelSid')
if channel:
channel = channel[0]
chat_handler(buffer, process_response(params), channel=channel)
def log_message(self, format, *args):
"""suppress logs"""
return
def chat_server(server_class=HTTPServer,
handler_class=ChatServer,
addr="localhost",
port=8000):
server_address = (addr, port)
httpd = server_class(server_address, handler_class)
httpd.serve_forever()
def process_response(response, from_db=False):
"""receives response from webhook when actions happen on the chat client
processes response and returns a formatted message to show on the client
response might also be from db in the case of fetching chat history"""
try:
if from_db:
processed_response = ''
for line in response:
message_time = line[1]
message_from = line[2]
message_body = line[3]
processed_response += f"{message_time} " \
f"{message_from} " \
f"{message_body}"
else:
if response.get('/?EventType') and response['/?EventType'][0] in (
'onMemberAdded', 'onMemberRemoved',):
processed_response = f"{ansi_italics}{response["Identity"][0]} " \
f"{response["Reason"][0].lower()}{ansi_end}\n"
else:
message_date = datetime.strptime(
response['DateCreated'][0], '%Y-%m-%dT%H:%M:%S.%fZ'
)
message_time = message_date.strftime("%H:%M")
message_from = response['From'][0]
message_body = response['Body'][0]
processed_response = f"{message_time} " \
f"{ansi_bold}{message_from}{ansi_end} " \
f"{message_body}\n"
return f"{processed_response}"
except KeyError as e:
return f"Failed to parse response: {e}\n"
except Exception as e:
return f"An error occurred: {e}"
spinner.start("rendering interface ...")
class FormatText(Processor):
def apply_transformation(self, input_):
fragments = to_formatted_text(
ANSI(fragment_list_to_text(input_.fragments)))
return Transformation(fragments)
# layout.
search_field = SearchToolbar() # for reverse search.
output_field = Buffer()
channels_window = RadioList(utils.get_channels())
general_ch = utils.config['channels']['general']
channels_window.current_value = general_ch
channels_frame = Frame(channels_window, title="channels",
width=23)
output_window = Frame(Window(BufferControl(
buffer=output_field,
focusable=False,
input_processors=[FormatText()]),
wrap_lines=True),
title="#general")
input_field = TextArea(
height=1,
prompt='> ',
multiline=False,
wrap_lines=False,
search_field=search_field,
)
command_window_frame = Frame(input_field, title=cmd_area_text)
upper_container = VSplit([channels_frame, output_window])
container = HSplit(
[
upper_container,
command_window_frame,
search_field,
]
)
def chat_handler(buffer, message, channel=None, from_db=False):
"""from_db=True if showing chat history"""
try:
active_channel_sid = channels_window.current_value
if channel == active_channel_sid: # only show the message if the channel it was sent to is the active one
output = output_field.text + message
output_field.document = Document(
text=output, cursor_position=len(output),
)
except BaseException as e:
output = output_field.text + "{}\n".format(e)
output_field.document = Document(
text=output, cursor_position=len(output),
)
else:
"""When a user switches channels, we want to clear the messages
in the current channel and show the messages from the new channel.
When they come back to a previous channel, they expect to see the
messages they left there (+new unread ones if any). Fetching all
channel messages from the server each time would be expensive,
so save chat in sqlite db and fetch from there."""
if not from_db:
try:
msg_data = message.split(None, 2)
c.execute('INSERT INTO history VALUES (NULL,?,?,?,?)',
(msg_data[0], msg_data[1], msg_data[2],
channel))
conn.commit()
# show notification if user is @mentioned
if f'@{identity}' in msg_data[2].split():
mentioned_channel = [ch[1] for ch in utils.get_channels() if ch[0] == channel][0]
n.update('cchat',
f'You\'ve been mentioned on #{mentioned_channel}')
n.show()
except IndexError:
# not a chat message
pass
except Exception as e:
conn.rollback()
output = output_field.text + "{}\n".format(e)
output_field.document = Document(
text=output, cursor_position=len(output),
)
# key bindings.
bindings = KeyBindings()
@bindings.add("c-c")
@bindings.add("c-q")
def _(event):
""" Pressing Ctrl-Q or Ctrl-C will exit the user interface. """
event.app.exit()
@bindings.add('tab')
def tab_(event):
focus_next(event)
@Condition
def input_buffer_active():
"""Only activate 'enter' key binding if input buffer is not active"""
if not get_app().layout.buffer_has_focus:
active_channel = channels_window.values[channels_window._selected_index][1]
active_channel_sid = channels_window.values[channels_window._selected_index][0]
channels_window.current_value = active_channel_sid
output_window.title = f"#{active_channel}"
c.execute('SELECT * FROM history WHERE channel=?',
(active_channel_sid,))
chat_history = c.fetchall()
output_field.document = Document(
text='', cursor_position=0,
)
buffer = Application.current_buffer
chat_handler(buffer,
process_response(chat_history, True),
active_channel_sid,
True)
@bindings.add('enter', filter=input_buffer_active)
def enter_(event):
pass
# Style.
style = Style(
[
("line", "#004400"),
]
)
# handle commands
def command_handler(buffer):
# input starting with '/' is treated as a command
try:
if input_field.text.startswith('/'): # command
cmd_response = utils.command_handler(input_field.text)
output = f"{cmd_response}\n"
new_text = output_field.text + output
output_field.document = Document(
text=new_text, cursor_position=len(new_text),
)
if cmd_response.find('Error') == -1 and \
input_field.text.find('channel') != -1:
# channel command - refresh channel list
channels_window.values = utils.get_channels()
channels_window.current_value = general_ch
output_window.title = "#general"
c.execute('SELECT * FROM history WHERE channel=?',
(general_ch,))
chat_history = c.fetchall()
output_field.document = Document(
text='', cursor_position=0,
)
buffer = Application.current_buffer
chat_handler(buffer,
process_response(chat_history, True),
general_ch,
True)
elif input_field.text.strip(): # message
utils.send_message(channels_window.current_value,
input_field.text)
except BaseException as e:
output = f"\n\n{e}"
new_text = output_field.text + output
output_field.document = Document(
text=new_text, cursor_position=len(new_text),
)
input_field.accept_handler = command_handler
spinner.succeed("interface rendered")
spinner.start("starting app ...")
# Run application.
application = Application(
layout=Layout(container, focused_element=input_field),
key_bindings=bindings,
style=style,
mouse_support=True,
full_screen=True,
erase_when_done=True,
)
spinner.succeed("all good")
def main():
# start server
daemon = threading.Thread(name='daemon_server',
target=chat_server)
    daemon.daemon = True  # killed once the main thread is dead
daemon.start()
# start app
application.run()
if __name__ == "__main__":
main()
|
import sqlite3
import threading
import notify2
from datetime import datetime
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
from halo import Halo
from prompt_toolkit import ANSI
from prompt_toolkit.application import Application, get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition
from prompt_toolkit.formatted_text import to_formatted_text, \
fragment_list_to_text
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.focus import focus_next
from prompt_toolkit.layout import BufferControl
from prompt_toolkit.layout.containers import HSplit, VSplit, Window
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.processors import Processor, Transformation
from prompt_toolkit.styles import Style
from prompt_toolkit.widgets import SearchToolbar, TextArea, Frame, RadioList
import utils
from utils import ansi_bold, ansi_italics, ansi_end
conn = sqlite3.connect(':memory:', check_same_thread=False)
c = conn.cursor()
conn.execute('''CREATE TABLE history
(id integer primary key,
msg_time time, sender text, msg text, channel text)''')
identity = utils.config['user']['identity']
spinner = Halo(spinner="dots", text="starting app ...")
spinner.start()
notify2.init("cchat")
n = notify2.Notification(None)
n.set_urgency(notify2.URGENCY_NORMAL)
n.set_timeout(5000)
cmd_area_text = "type in command/message - ctrl-c to quit"
class ChatServer(BaseHTTPRequestHandler, ):
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def _html(self, params):
"""Shows the url params on the browser in html.
Nothing useful. Just for debugging
"""
content = f"<html><body><p>{params}</p></body></html>"
return content.encode("utf8")
def do_GET(self):
self._set_headers()
buffer = Application.current_buffer
params = parse_qs(self.path)
self.wfile.write(self._html(params))
channel = params.get('/?ChannelSid') or params.get('ChannelSid')
if channel:
channel = channel[0]
chat_handler(buffer, process_response(params), channel=channel)
def log_message(self, format, *args):
"""suppress logs"""
return
def chat_server(server_class=HTTPServer,
handler_class=ChatServer,
addr="localhost",
port=8000):
server_address = (addr, port)
httpd = server_class(server_address, handler_class)
httpd.serve_forever()
def process_response(response, from_db=False):
"""receives response from webhook when actions happen on the chat client
processes response and returns a formatted message to show on the client
response might also be from db in the case of fetching chat history"""
try:
if from_db:
processed_response = ''
for line in response:
message_time = line[1]
message_from = line[2]
message_body = line[3]
processed_response += f"{message_time} " \
f"{message_from} " \
f"{message_body}"
else:
if response.get('/?EventType') and response['/?EventType'][0] in (
'onMemberAdded', 'onMemberRemoved',):
processed_response = f"{ansi_italics}{response['Identity'][0]} " \
f"{response['Reason'][0].lower()}{ansi_end}\n"
else:
message_date = datetime.strptime(
response['DateCreated'][0], '%Y-%m-%dT%H:%M:%S.%fZ'
)
message_time = message_date.strftime("%H:%M")
message_from = response['From'][0]
message_body = response['Body'][0]
processed_response = f"{message_time} " \
f"{ansi_bold}{message_from}{ansi_end} " \
f"{message_body}\n"
return f"{processed_response}"
except KeyError as e:
return f"Failed to parse response: {e}\n"
except Exception as e:
return f"An error occurred: {e}"
spinner.start("rendering interface ...")
class FormatText(Processor):
def apply_transformation(self, input_):
fragments = to_formatted_text(
ANSI(fragment_list_to_text(input_.fragments)))
return Transformation(fragments)
# layout.
search_field = SearchToolbar() # for reverse search.
output_field = Buffer()
channels_window = RadioList(utils.get_channels())
general_ch = utils.config['channels']['general']
channels_window.current_value = general_ch
channels_frame = Frame(channels_window, title="channels",
width=23)
output_window = Frame(Window(BufferControl(
buffer=output_field,
focusable=False,
input_processors=[FormatText()]),
wrap_lines=True),
title="#general")
input_field = TextArea(
height=1,
prompt='> ',
multiline=False,
wrap_lines=False,
search_field=search_field,
)
command_window_frame = Frame(input_field, title=cmd_area_text)
upper_container = VSplit([channels_frame, output_window])
container = HSplit(
[
upper_container,
command_window_frame,
search_field,
]
)
def chat_handler(buffer, message, channel=None, from_db=False):
"""from_db=True if showing chat history"""
try:
active_channel_sid = channels_window.current_value
if channel == active_channel_sid: # only show the message if the channel it was sent to is the active one
output = output_field.text + message
output_field.document = Document(
text=output, cursor_position=len(output),
)
except BaseException as e:
output = output_field.text + "{}\n".format(e)
output_field.document = Document(
text=output, cursor_position=len(output),
)
else:
"""When a user switches channels, we want to clear the messages
in the current channel and show the messages from the new channel.
When they come back to a previous channel, they expect to see the
messages they left there (+new unread ones if any). Fetching all
channel messages from the server each time would be expensive,
so save chat in sqlite db and fetch from there."""
if not from_db:
try:
msg_data = message.split(None, 2)
c.execute('INSERT INTO history VALUES (NULL,?,?,?,?)',
(msg_data[0], msg_data[1], msg_data[2],
channel))
conn.commit()
# show notification if user is @mentioned
if f'@{identity}' in msg_data[2].split():
mentioned_channel = [ch[1] for ch in utils.get_channels() if ch[0] == channel][0]
n.update('cchat',
f'You\'ve been mentioned on #{mentioned_channel}')
n.show()
except IndexError:
# not a chat message
pass
except Exception as e:
conn.rollback()
output = output_field.text + "{}\n".format(e)
output_field.document = Document(
text=output, cursor_position=len(output),
)
# key bindings.
bindings = KeyBindings()
@bindings.add("c-c")
@bindings.add("c-q")
def _(event):
""" Pressing Ctrl-Q or Ctrl-C will exit the user interface. """
event.app.exit()
@bindings.add('tab')
def tab_(event):
focus_next(event)
@Condition
def input_buffer_active():
"""Only activate 'enter' key binding if input buffer is not active"""
if not get_app().layout.buffer_has_focus:
active_channel = channels_window.values[channels_window._selected_index][1]
active_channel_sid = channels_window.values[channels_window._selected_index][0]
channels_window.current_value = active_channel_sid
output_window.title = f"#{active_channel}"
c.execute('SELECT * FROM history WHERE channel=?',
(active_channel_sid,))
chat_history = c.fetchall()
output_field.document = Document(
text='', cursor_position=0,
)
buffer = Application.current_buffer
chat_handler(buffer,
process_response(chat_history, True),
active_channel_sid,
True)
@bindings.add('enter', filter=input_buffer_active)
def enter_(event):
pass
# Style.
style = Style(
[
("line", "#004400"),
]
)
# handle commands
def command_handler(buffer):
# input starting with '/' is treated as a command
try:
if input_field.text.startswith('/'): # command
cmd_response = utils.command_handler(input_field.text)
output = f"{cmd_response}\n"
new_text = output_field.text + output
output_field.document = Document(
text=new_text, cursor_position=len(new_text),
)
if cmd_response.find('Error') == -1 and \
input_field.text.find('channel') != -1:
# channel command - refresh channel list
channels_window.values = utils.get_channels()
channels_window.current_value = general_ch
output_window.title = "#general"
c.execute('SELECT * FROM history WHERE channel=?',
(general_ch,))
chat_history = c.fetchall()
output_field.document = Document(
text='', cursor_position=0,
)
buffer = Application.current_buffer
chat_handler(buffer,
process_response(chat_history, True),
general_ch,
True)
elif input_field.text.strip(): # message
utils.send_message(channels_window.current_value,
input_field.text)
except BaseException as e:
output = f"\n\n{e}"
new_text = output_field.text + output
output_field.document = Document(
text=new_text, cursor_position=len(new_text),
)
input_field.accept_handler = command_handler
spinner.succeed("interface rendered")
spinner.start("starting app ...")
# Run application.
application = Application(
layout=Layout(container, focused_element=input_field),
key_bindings=bindings,
style=style,
mouse_support=True,
full_screen=True,
erase_when_done=True,
)
spinner.succeed("all good")
def main():
# start server
daemon = threading.Thread(name='daemon_server',
target=chat_server)
    daemon.daemon = True  # killed once the main thread is dead
daemon.start()
# start app
application.run()
if __name__ == "__main__":
main()
|
import logging
import snap7
# To set up the Logo connection, please follow this link:
# http://snap7.sourceforge.net/logo.html
logging.basicConfig(level=logging.INFO)
# Siemens LOGO devices Logo 8 is the default
Logo_7 = True
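# The VM address used below mirrors digital input I1: V923.0 on a Logo 7
# device, V1024.0 on a Logo 8 (see the snap7 Logo notes linked above).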
logger = logging.getLogger(__name__)
plc = snap7.logo.Logo()
plc.connect("192.168.0.41",0x1000,0x2000)
if plc.get_connected():
logger.info("connected")
# read I1 from logo
vm_address = ("V923.0" if Logo_7==True else "V1024.0")
print (f"I1: {str(plc.read(vm_address))}")
# write some values in VM addresses between 0 and 100
value_1 = 0b10110001
value_2 = 480
print("write 0b10110001 to V10")
plc.write("V10", value_1)
print(f"read V10.0 must be 1 - check: {str(plc.read("V10.0"))}")
print(f"read V10.3 must be 0 - check: {str(plc.read("V10.3"))}")
print(f"read V10.7 must be 1 - check: {str(plc.read("V10.7"))}")
print("write 480 analog value to VW20")
plc.write("VW20", value_2)
print(f"read VW20 must be 480 - check: {str(plc.read("VW20"))}")
print("trigger V10.2")
plc.write("V10.2", 0)
plc.write("V10.2", 1)
plc.write("V10.2", 0)
else:
logger.error("Conncetion failed")
plc.disconnect()
logger.info("Disconnected")
plc.destroy()
|
import logging
import snap7
# To set up the Logo connection, please follow this link:
# http://snap7.sourceforge.net/logo.html
logging.basicConfig(level=logging.INFO)
# Siemens LOGO devices Logo 8 is the default
Logo_7 = True
logger = logging.getLogger(__name__)
plc = snap7.logo.Logo()
plc.connect("192.168.0.41",0x1000,0x2000)
if plc.get_connected():
logger.info("connected")
# read I1 from logo
vm_address = ("V923.0" if Logo_7==True else "V1024.0")
print (f"I1: {str(plc.read(vm_address))}")
# write some values in VM addresses between 0 and 100
value_1 = 0b10110001
value_2 = 480
print("write 0b10110001 to V10")
plc.write("V10", value_1)
print(f"read V10.0 must be 1 - check: {str(plc.read('V10.0'))}")
print(f"read V10.3 must be 0 - check: {str(plc.read('V10.3'))}")
print(f"read V10.7 must be 1 - check: {str(plc.read('V10.7'))}")
print("write 480 analog value to VW20")
plc.write("VW20", value_2)
print(f"read VW20 must be 480 - check: {str(plc.read('VW20'))}")
print("trigger V10.2")
plc.write("V10.2", 0)
plc.write("V10.2", 1)
plc.write("V10.2", 0)
else:
logger.error("Conncetion failed")
plc.disconnect()
logger.info("Disconnected")
plc.destroy()
|
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
teams = ("Fortaleza", "Athletico-PR", "Atlético-GO", "Bragantino", "Bahia", "Fluminense", "Palmeiras", "Flamengo",
"Atlético-MG",
"Corinthians", "Ceará", "Santos", "Cuiabá", "Sport", "São Paulo", "Juventude", "Inter", "Grêmio", "América-MG",
"Chapecoense")
print(f"{colors["green"]}The teams are: {colors["clean"]}")
for c in sorted(teams):
print(f"{c}")
print(f"{colors["yellow"]}The 5 first placed are")
for c in teams[0:5]:
print(f"{c}", end=" - ")
print(f"{colors["blue"]}\n\nThe 4 last placed are")
for c in teams[-4:]:
print(f"{c}", end=" - ")
print(f"\n\n{colors["red"]}The Chapecoense team is in {teams.index("Chapecoense")+1} place{colors["clean"]}")
|
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
teams = ("Fortaleza", "Athletico-PR", "Atlético-GO", "Bragantino", "Bahia", "Fluminense", "Palmeiras", "Flamengo",
"Atlético-MG",
"Corinthians", "Ceará", "Santos", "Cuiabá", "Sport", "São Paulo", "Juventude", "Inter", "Grêmio", "América-MG",
"Chapecoense")
print(f"{colors['green']}The teams are: {colors['clean']}")
for c in sorted(teams):
print(f"{c}")
print(f"{colors['yellow']}The 5 first placed are")
for c in teams[0:5]:
print(f"{c}", end=" - ")
print(f"{colors['blue']}\n\nThe 4 last placed are")
for c in teams[-4:]:
print(f"{c}", end=" - ")
print(f"\n\n{colors['red']}The Chapecoense team is in {teams.index('Chapecoense')+1} place{colors['clean']}")
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from textwrap import dedent
from flake8_pantsbuild import PB10, PB11, PB12, PB13, PB20, PB30
def test_pb_10(flake8dir) -> None:
template = dedent(
"""\
import os.path
class Example:
CONSTANT = "foo"
def foo(self, value):
return os.path.join({}.CONSTANT, value)
"""
)
flake8dir.make_py_files(good=template.format("self"), bad=template.format("Example"))
result = flake8dir.run_flake8()
assert [f"./bad.py:8:29: {PB10.format(name="Example", attr="CONSTANT")}"] == result.out_lines
def test_pb_11(flake8dir) -> None:
violating_pairs = itertools.product([None, False, True, 1, "'a'"], ["or", "and"])
violations = {
f"bad{i}": f"x = 0\n{pair[0]} {pair[1]} x" for i, pair in enumerate(violating_pairs)
}
flake8dir.make_py_files(good="x = y = 0\nx or y", **violations)
result = flake8dir.run_flake8()
assert sorted(f"./{fp}.py:2:1: {PB11}" for fp in violations) == sorted(result.out_lines)
def test_pb_12(flake8dir) -> None:
violations = {
f"bad{i}": f"x = 0\nx and {constant}"
for i, constant in enumerate([None, False, True, 1, "'a'"])
}
flake8dir.make_py_files(good="x = y = 0\nx and y", **violations)
result = flake8dir.run_flake8()
assert sorted(f"./{fp}.py:2:7: {PB12}" for fp in violations) == sorted(result.out_lines)
def test_pb_13(flake8dir) -> None:
flake8dir.make_example_py(
dedent(
"""\
foo = open('test.txt')
with open('test.txt'):
pass
bar = open('test.txt')
with open('test.txt') as fp:
fp.read()
"""
)
)
result = flake8dir.run_flake8()
assert [f"./example.py:1:7: {PB13}", f"./example.py:6:7: {PB13}"] == result.out_lines
def test_pb_20(flake8dir) -> None:
flake8dir.make_example_py(
dedent(
"""\
def one():
pass
def four():
pass
def two():
pass
def hanging():
_ = (
"this"
"is"
"ok")
"""
)
)
result = flake8dir.run_flake8(
extra_args=["--enable-extensions", "PB20", "--extend-ignore", "E111"]
)
assert [
f"./example.py:2:1: {PB20.format("1")}",
f"./example.py:6:1: {PB20.format("4")}",
] == result.out_lines
def test_pb_30(flake8dir) -> None:
flake8dir.make_example_py(
dedent(
"""\
from textwrap import dedent
bad = \\
123
also_bad = "hello" \\
"world"
okay = '''
str1 \\
str2 \\
str3
'''
also_okay = dedent(
'''\
str
'''
)
# this is okay too \\
"""
)
)
result = flake8dir.run_flake8(extra_args=["--enable-extensions", "PB30"])
assert [f"./example.py:3:7: {PB30}", f"./example.py:6:20: {PB30}"] == result.out_lines
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from textwrap import dedent
from flake8_pantsbuild import PB10, PB11, PB12, PB13, PB20, PB30
def test_pb_10(flake8dir) -> None:
template = dedent(
"""\
import os.path
class Example:
CONSTANT = "foo"
def foo(self, value):
return os.path.join({}.CONSTANT, value)
"""
)
flake8dir.make_py_files(good=template.format("self"), bad=template.format("Example"))
result = flake8dir.run_flake8()
assert [f"./bad.py:8:29: {PB10.format(name='Example', attr='CONSTANT')}"] == result.out_lines
def test_pb_11(flake8dir) -> None:
violating_pairs = itertools.product([None, False, True, 1, "'a'"], ["or", "and"])
violations = {
f"bad{i}": f"x = 0\n{pair[0]} {pair[1]} x" for i, pair in enumerate(violating_pairs)
}
flake8dir.make_py_files(good="x = y = 0\nx or y", **violations)
result = flake8dir.run_flake8()
assert sorted(f"./{fp}.py:2:1: {PB11}" for fp in violations) == sorted(result.out_lines)
def test_pb_12(flake8dir) -> None:
violations = {
f"bad{i}": f"x = 0\nx and {constant}"
for i, constant in enumerate([None, False, True, 1, "'a'"])
}
flake8dir.make_py_files(good="x = y = 0\nx and y", **violations)
result = flake8dir.run_flake8()
assert sorted(f"./{fp}.py:2:7: {PB12}" for fp in violations) == sorted(result.out_lines)
def test_pb_13(flake8dir) -> None:
flake8dir.make_example_py(
dedent(
"""\
foo = open('test.txt')
with open('test.txt'):
pass
bar = open('test.txt')
with open('test.txt') as fp:
fp.read()
"""
)
)
result = flake8dir.run_flake8()
assert [f"./example.py:1:7: {PB13}", f"./example.py:6:7: {PB13}"] == result.out_lines
def test_pb_20(flake8dir) -> None:
flake8dir.make_example_py(
dedent(
"""\
def one():
pass
def four():
pass
def two():
pass
def hanging():
_ = (
"this"
"is"
"ok")
"""
)
)
result = flake8dir.run_flake8(
extra_args=["--enable-extensions", "PB20", "--extend-ignore", "E111"]
)
assert [
f"./example.py:2:1: {PB20.format('1')}",
f"./example.py:6:1: {PB20.format('4')}",
] == result.out_lines
def test_pb_30(flake8dir) -> None:
flake8dir.make_example_py(
dedent(
"""\
from textwrap import dedent
bad = \\
123
also_bad = "hello" \\
"world"
okay = '''
str1 \\
str2 \\
str3
'''
also_okay = dedent(
'''\
str
'''
)
# this is okay too \\
"""
)
)
result = flake8dir.run_flake8(extra_args=["--enable-extensions", "PB30"])
assert [f"./example.py:3:7: {PB30}", f"./example.py:6:20: {PB30}"] == result.out_lines
|
from discord.ext import commands
import discord
import sys
from pathlib import Path
import motor.motor_asyncio
from config import token, extension_dir
from utils.context import UnnamedContext
from utils.help import PaginatedHelpCommand
class UnnamedBot(commands.Bot):
def __init__(self, command_prefix, **options):
self.db_client = motor.motor_asyncio.AsyncIOMotorClient('localhost', 27017)
self.db = self.db_client['unnamed-bot']
super().__init__(command_prefix, **options)
async def on_ready(self):
print(f"\n{"#" * 40}"
f"\n{self.user.name}"
f"\nPython version: {sys.version}"
f"\nDiscord.py version: {discord.__version__}\n{"#" * 40}"
f"\nLogged in as: {self.user.name} (ID: {self.user.id})")
self.help_command = PaginatedHelpCommand()
def run(self, **kwargs):
if kwargs.get('load_all'):
self.load_all_extensions(self.get_all_extensions_from_dir())
super().run(token)
async def get_context(self, message, *, cls=None):
return await super().get_context(message, cls=UnnamedContext)
@staticmethod
def format_cog(path, replacements=(('/', '.'), ('\\', '.'), ('.py', ''))):
for replacement in replacements:
path = path.replace(*replacement)
return path
def get_all_extensions_from_dir(self, directory=extension_dir):
for cog in Path(directory).glob('**/*.py'):
cog_path = self.format_cog(str(cog))
yield cog_path
yield 'jishaku'
def load_all_extensions(self, extensions):
for extension in extensions:
try:
self.load_extension(extension)
print(f"Loaded {extension}")
except Exception as e:
print(f"Could'nt load {extension}. {e.__class__}: {e}")
|
from discord.ext import commands
import discord
import sys
from pathlib import Path
import motor.motor_asyncio
from config import token, extension_dir
from utils.context import UnnamedContext
from utils.help import PaginatedHelpCommand
class UnnamedBot(commands.Bot):
def __init__(self, command_prefix, **options):
self.db_client = motor.motor_asyncio.AsyncIOMotorClient('localhost', 27017)
self.db = self.db_client['unnamed-bot']
super().__init__(command_prefix, **options)
async def on_ready(self):
print(f"\n{'#' * 40}"
f"\n{self.user.name}"
f"\nPython version: {sys.version}"
f"\nDiscord.py version: {discord.__version__}\n{'#' * 40}"
f"\nLogged in as: {self.user.name} (ID: {self.user.id})")
self.help_command = PaginatedHelpCommand()
def run(self, **kwargs):
if kwargs.get('load_all'):
self.load_all_extensions(self.get_all_extensions_from_dir())
super().run(token)
async def get_context(self, message, *, cls=None):
return await super().get_context(message, cls=UnnamedContext)
@staticmethod
def format_cog(path, replacements=(('/', '.'), ('\\', '.'), ('.py', ''))):
for replacement in replacements:
path = path.replace(*replacement)
return path
def get_all_extensions_from_dir(self, directory=extension_dir):
for cog in Path(directory).glob('**/*.py'):
cog_path = self.format_cog(str(cog))
yield cog_path
yield 'jishaku'
def load_all_extensions(self, extensions):
for extension in extensions:
try:
self.load_extension(extension)
print(f"Loaded {extension}")
except Exception as e:
print(f"Could'nt load {extension}. {e.__class__}: {e}")
|
import re
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import importlib
import torch
import torch.autograd
from torch.utils.data import dataset
import torch.utils.data.dataloader
import ignite.utils
import ignite.handlers.early_stopping
import ignite.engine
import ignite.metrics
import ignite.contrib.handlers
import ignite.contrib.handlers.tensorboard_logger
import ignite.contrib.handlers.tqdm_logger
import ignite.handlers.param_scheduler
from copy import deepcopy
import tensorboardX
import src.models
import src.data.load_dataset
from src.config.load_config import load_config
from src.common import get_constructor, magma_init
import src.torch_extensions
class Training:
def __init__(self, config, *args, **kwargs):
# parse config
self.seed = config['random']['seed']
self.num_epoch = config['training']['num_epoch']
self.dim_input = (1, config['model']['architecture']['num_input'])
self.cfg = config['model']
self.early_stopping = config['training']['early_stopping']
self.reduce_lr_plateau = config['training']['reduce_lr_plateau']
self.setup_cuda()
self.dummy_input = torch.autograd.Variable(
torch.zeros(self.dim_input).to(self.device))
self.setup_tensorboard(
config['experiment']['name'] + config['model']['name'], **config['tensorboard'])
self.setup_model(**self.cfg)
self.setup_ignite()
def setup_cuda(self, cuda_device_id=0):
torch.backends.cuda.fasval = True
torch.cuda.set_device(cuda_device_id)
self.device = torch.device('cuda')
torch.cuda.manual_seed_all(self.seed)
torch.manual_seed(self.seed)
magma_init()
def setup_model(self, architecture, loss, optim, **kwargs):
constructor = get_constructor('src.models', architecture['type'])
self.model = constructor(**architecture).to(self.device)
self.optim = torch.optim.Adam(self.model.parameters(), lr=optim['lr'])
def setup_ignite(self):
ignite.utils.manual_seed(self.seed)
val_metrics = {key: ignite.metrics.Loss(self.model.loggable_losses[key])
for key in self.model.loggable_losses}
def prepare_batch(batch, device=None, non_blocking=False, *args, **kwargs):
converted = ignite.utils.convert_tensor(
batch, device, non_blocking)
return converted, converted
def output_transform(x, y, y_pred, loss=None):
return {'y': y, 'y_pred': y_pred, 'criterion_kwargs': {}, 'loss': loss}
self.trainer = ignite.engine.create_supervised_trainer(
self.model, self.optim, self.model.loss, device=self.device, prepare_batch=prepare_batch, output_transform=output_transform)
self.evaluator = ignite.engine.create_supervised_evaluator(
self.model, val_metrics, device=self.device, prepare_batch=prepare_batch, output_transform=output_transform)
for mtrc in val_metrics:
val_metrics[mtrc].attach(self.trainer, mtrc)
# prevent messages from cluttering the log
self.trainer.logger.setLevel(logging.WARN)
self.evaluator.logger.setLevel(logging.WARN)
# progress bar
pbar = ignite.contrib.handlers.tqdm_logger.ProgressBar()
pbar.attach(self.trainer)
# save graph to tensorboard
self.tb_logger.writer.add_graph(self.model, self.dummy_input)
# attach events - tensorboard loggers
losses = [loss for loss in self.model.loggable_losses]
self.tb_logger.attach_output_handler(
self.trainer, ignite.engine.Events.EPOCH_COMPLETED, tag='training',
metric_names=losses,
global_step_transform=ignite.contrib.handlers.tensorboard_logger.global_step_from_engine(self.trainer))
self.tb_logger.attach_output_handler(
self.evaluator, ignite.engine.Events.EPOCH_COMPLETED, tag='validation',
metric_names=losses,
global_step_transform=ignite.contrib.handlers.tensorboard_logger.global_step_from_engine(self.trainer))
# attach events - early stopping
def score_function(engine):
return -engine.state.metrics['loss']  # negated because ignite's EarlyStopping treats larger scores as better
self.es = ignite.handlers.early_stopping.EarlyStopping(**self.early_stopping, score_function=score_function, trainer=self.trainer)
self.evaluator.add_event_handler(ignite.engine.Events.COMPLETED, self.es)
# attach events - learning rate scheduling
self.ps = src.torch_extensions.ReduceLROnPlateauScheduler(self.optim, metric_name='loss', **self.reduce_lr_plateau)
self.evaluator.add_event_handler(ignite.engine.Events.COMPLETED, self.ps)
@self.trainer.on(ignite.engine.Events.STARTED)
def on_start(engine):
logging.info('Starting training')
@self.trainer.on(ignite.engine.Events.COMPLETED)
def on_complete(engine):
torch.save(self.model.state_dict(), self.model_save_path)
logging.info('Training complete. Saved model to:{}'.format(
self.model_save_path))
@self.evaluator.on(ignite.engine.Events.COMPLETED)
def on_complete(engine):
# print loss etc.
logging.info(
f'Avg validation loss: {engine.state.metrics['loss']}')
def train(self, train_loader, val_loader, model_save_path):
self.model_save_path = model_save_path
self.loss_list = []
@self.trainer.on(ignite.engine.Events.EPOCH_COMPLETED)
def on_epoch_complete(engine):
logging.info(
f'Training epoch {engine.state.epoch} complete. Avg training loss: {engine.state.metrics['loss']}')
self.evaluator.run(val_loader)
self.trainer.run(train_loader, self.num_epoch)
def setup_tensorboard(self, folder_name, save_path, **kwargs):
path = Path(save_path) / folder_name
self.tb_logger = ignite.contrib.handlers.TensorboardLogger(
log_dir=path)
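# Shape of the config dict this class consumes, reconstructed from the lookups above;
# key names are the ones actually read, values are illustrative placeholders (some keys,
# e.g. 'dataset' and the loader kwargs, are consumed by main() below instead):
#
#     {
#         'random':      {'seed': 0},
#         'training':    {'num_epoch': 10,
#                         'early_stopping':    {'patience': 5, ...},      # EarlyStopping kwargs
#                         'reduce_lr_plateau': {...}},                    # scheduler kwargs
#         'model':       {'name': 'mlp', 'path': 'models/',
#                         'architecture': {'type': 'SomeModel', 'num_input': 128, ...},
#                         'loss': {...}, 'optim': {'lr': 1e-3}},
#         'experiment':  {'name': 'exp1'},
#         'tensorboard': {'save_path': 'runs/'},
#         'dataset':     {'name': 'waveforms'},
#         'train_loader': {'batch_size': 64, ...},
#         'val_loader':   {'batch_size': 64, ...},
#     }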
@click.command()
@click.argument('data_path', type=click.Path(exists=True))
@click.argument('experiment_cfg_path', type=click.Path(exists=True))
def main(data_path, experiment_cfg_path):
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
data_path = Path(data_path)
# config loader
cfg = load_config(experiment_cfg_path)
# data loader
dataset_name_prefix = cfg['dataset']['name']
train_dataset = src.data.load_dataset.Waveform_dataset(
data_path, '{}_train.hdf5'.format(dataset_name_prefix))
val_dataset = src.data.load_dataset.Waveform_dataset(
data_path, '{}_val.hdf5'.format(dataset_name_prefix))
train_loader = torch.utils.data.dataloader.DataLoader(
dataset=train_dataset, **cfg['train_loader'])
val_loader = torch.utils.data.dataloader.DataLoader(
dataset=val_dataset, **cfg['val_loader'])
# model
trainer = Training(cfg)
model_save_path = Path(cfg['model']['path']) / cfg['model']['name']
trainer.train(train_loader, val_loader, model_save_path)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
|
import re
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import importlib
import torch
import torch.autograd
from torch.utils.data import dataset
import torch.utils.data.dataloader
import ignite.utils
import ignite.handlers.early_stopping
import ignite.engine
import ignite.metrics
import ignite.contrib.handlers
import ignite.contrib.handlers.tensorboard_logger
import ignite.contrib.handlers.tqdm_logger
import ignite.handlers.param_scheduler
from copy import deepcopy
import tensorboardX
import src.models
import src.data.load_dataset
from src.config.load_config import load_config
from src.common import get_constructor, magma_init
import src.torch_extensions
class Training:
def __init__(self, config, *args, **kwargs):
# parse config
self.seed = config['random']['seed']
self.num_epoch = config['training']['num_epoch']
self.dim_input = (1, config['model']['architecture']['num_input'])
self.cfg = config['model']
self.early_stopping = config['training']['early_stopping']
self.reduce_lr_plateau = config['training']['reduce_lr_plateau']
self.setup_cuda()
self.dummy_input = torch.autograd.Variable(
torch.zeros(self.dim_input).to(self.device))
self.setup_tensorboard(
config['experiment']['name'] + config['model']['name'], **config['tensorboard'])
self.setup_model(**self.cfg)
self.setup_ignite()
def setup_cuda(self, cuda_device_id=0):
torch.backends.cuda.fasval = True
torch.cuda.set_device(cuda_device_id)
self.device = torch.device('cuda')
torch.cuda.manual_seed_all(self.seed)
torch.manual_seed(self.seed)
magma_init()
def setup_model(self, architecture, loss, optim, **kwargs):
constructor = get_constructor('src.models', architecture['type'])
self.model = constructor(**architecture).to(self.device)
self.optim = torch.optim.Adam(self.model.parameters(), lr=optim['lr'])
def setup_ignite(self):
ignite.utils.manual_seed(self.seed)
val_metrics = {key: ignite.metrics.Loss(self.model.loggable_losses[key])
for key in self.model.loggable_losses}
def prepare_batch(batch, device=None, non_blocking=False, *args, **kwargs):
converted = ignite.utils.convert_tensor(
batch, device, non_blocking)
return converted, converted
def output_transform(x, y, y_pred, loss=None):
return {'y': y, 'y_pred': y_pred, 'criterion_kwargs': {}, 'loss': loss}
self.trainer = ignite.engine.create_supervised_trainer(
self.model, self.optim, self.model.loss, device=self.device, prepare_batch=prepare_batch, output_transform=output_transform)
self.evaluator = ignite.engine.create_supervised_evaluator(
self.model, val_metrics, device=self.device, prepare_batch=prepare_batch, output_transform=output_transform)
for mtrc in val_metrics:
val_metrics[mtrc].attach(self.trainer, mtrc)
# prevent messages from cluttering the log
self.trainer.logger.setLevel(logging.WARN)
self.evaluator.logger.setLevel(logging.WARN)
# progress bar
pbar = ignite.contrib.handlers.tqdm_logger.ProgressBar()
pbar.attach(self.trainer)
# save graph to tensorboard
self.tb_logger.writer.add_graph(self.model, self.dummy_input)
# attach events - tensorboard loggers
losses = [loss for loss in self.model.loggable_losses]
self.tb_logger.attach_output_handler(
self.trainer, ignite.engine.Events.EPOCH_COMPLETED, tag='training',
metric_names=losses,
global_step_transform=ignite.contrib.handlers.tensorboard_logger.global_step_from_engine(self.trainer))
self.tb_logger.attach_output_handler(
self.evaluator, ignite.engine.Events.EPOCH_COMPLETED, tag='validation',
metric_names=losses,
global_step_transform=ignite.contrib.handlers.tensorboard_logger.global_step_from_engine(self.trainer))
# attach events - early stopping
def score_function(engine):
return -engine.state.metrics['loss']
self.es = ignite.handlers.early_stopping.EarlyStopping(**self.early_stopping, score_function=score_function, trainer=self.trainer)
self.evaluator.add_event_handler(ignite.engine.Events.COMPLETED, self.es)
# attach events - learning rate scheduling
self.ps = src.torch_extensions.ReduceLROnPlateauScheduler(self.optim, metric_name='loss', **self.reduce_lr_plateau)
self.evaluator.add_event_handler(ignite.engine.Events.COMPLETED, self.ps)
@self.trainer.on(ignite.engine.Events.STARTED)
def on_start(engine):
logging.info('Starting training')
@self.trainer.on(ignite.engine.Events.COMPLETED)
def on_complete(engine):
torch.save(self.model.state_dict(), self.model_save_path)
logging.info('Training complete. Saved model to:{}'.format(
self.model_save_path))
@self.evaluator.on(ignite.engine.Events.COMPLETED)
def on_complete(engine):
# print loss etc.
logging.info(
f'Avg validation loss: {engine.state.metrics["loss"]}')
def train(self, train_loader, val_loader, model_save_path):
self.model_save_path = model_save_path
self.loss_list = []
@self.trainer.on(ignite.engine.Events.EPOCH_COMPLETED)
def on_epoch_complete(engine):
logging.info(
f'Training epoch {engine.state.epoch} complete. Avg training loss: {engine.state.metrics["loss"]}')
self.evaluator.run(val_loader)
self.trainer.run(train_loader, self.num_epoch)
def setup_tensorboard(self, folder_name, save_path, **kwargs):
path = Path(save_path) / folder_name
self.tb_logger = ignite.contrib.handlers.TensorboardLogger(
log_dir=path)
@click.command()
@click.argument('data_path', type=click.Path(exists=True))
@click.argument('experiment_cfg_path', type=click.Path(exists=True))
def main(data_path, experiment_cfg_path):
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
data_path = Path(data_path)
# config loader
cfg = load_config(experiment_cfg_path)
# data loader
dataset_name_prefix = cfg['dataset']['name']
train_dataset = src.data.load_dataset.Waveform_dataset(
data_path, '{}_train.hdf5'.format(dataset_name_prefix))
val_dataset = src.data.load_dataset.Waveform_dataset(
data_path, '{}_val.hdf5'.format(dataset_name_prefix))
train_loader = torch.utils.data.dataloader.DataLoader(
dataset=train_dataset, **cfg['train_loader'])
val_loader = torch.utils.data.dataloader.DataLoader(
dataset=val_dataset, **cfg['val_loader'])
# model
trainer = Training(cfg)
model_save_path = Path(cfg['model']['path']) / cfg['model']['name']
trainer.train(train_loader, val_loader, model_save_path)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
|
from urllib.parse import urlparse
import classyjson as cj
import asyncio
import discord
import math
import time
from util.code import format_exception
from util.ipc import PacketType
def strip_command(ctx): # returns message.clean_content excluding the command used
length = len(ctx.prefix) + len(ctx.invoked_with) + 1
return ctx.message.clean_content[length:]
def dm_check(ctx):
def _dm_check(m):
return ctx.author == m.author and ctx.author.dm_channel == m.channel
return _dm_check
def recursive_update(obj, new):
if isinstance(obj, dict) and isinstance(new, dict):
for k, v in new.items():
obj[k] = recursive_update(obj.get(k, cj.classify({})), v)
elif isinstance(obj, list) and isinstance(new, list):
obj = []  # obj must be reset to an empty list here to avoid weird list-merge issues (see /update command in cogs/cmds/owner.py)
for i, v in enumerate(new):
obj.append(recursive_update(obj[i], v) if i < len(obj) else v)
else:
return new
return obj
def make_health_bar(health: int, max_health: int, full: str, half: str, empty: str):
assert max_health % 2 == 0
return (
(full * (health // 2))
+ (half * (health % 2))
+ (empty * ((max_health // 2) - math.ceil(health / 2)))
+ f" ({health}/{max_health})"
)
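# Worked example (placeholder emoji strings): make_health_bar(5, 10, "X", "x", "-")
# returns "XXx-- (5/10)" -- two full hearts, one half heart, two empty slots, then the count.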
async def lb_logic(bot, lb_list: list, u_entry: object, rank_fstr: str):
# add user entry to leaderboard if it's not there already
if u_entry is not None and u_entry[0] not in [e[0] for e in lb_list]:
lb_list.append(u_entry)
# sort
lb_list = sorted(lb_list, key=(lambda e: e[1]), reverse=True)
# shorten list
lb_list = lb_list[:9] if (u_entry is not None and u_entry[2] > 9) else lb_list[:10]
body = ""
# create base leaderboard
for entry in lb_list:
user = getattr(bot.get_user(entry[0]), "name", None)
if user is None:
res = await bot.ipc.broadcast(
{"type": PacketType.EVAL, "code": f"getattr(bot.get_user({entry[0]}), 'name', None)"}
)
for r in res.responses:
if not r.success:
raise ValueError(r.result)
if r.result:
user = r.result
break
if user is None:
user = "Unknown User"
else:
user = discord.utils.escape_markdown(user)
body += rank_fstr.format(entry[2], entry[1], user)
# add user if user is missing from the leaderboard
if u_entry is not None and u_entry[2] > 9:
body += "\n⋮" + rank_fstr.format(u_entry[2], u_entry[1], discord.utils.escape_markdown(bot.get_user(u_entry[0]).name))
return body + "\uFEFF"
def calc_total_wealth(db_user, u_items):
return (
db_user["emeralds"]
+ db_user["vault_balance"] * 9
+ sum([u_it["sell_price"] * u_it.get("amount", 0) for u_it in u_items if u_it["sell_price"] > 0])
)
def emojify_item(d, item: str):
try:
emoji_key = d.emoji_items[item]
if emoji_key.startswith("fish."):
return d.emojis.fish[emoji_key[5:]]
return d.emojis[emoji_key]
except KeyError:
return d.emojis.air
def format_required(d: object, shop_item: object, amount: int = 1):
base = f" {shop_item.buy_price * amount}{d.emojis.emerald}"
for req_item, req_amount in shop_item.requires.get("items", {}).items():
base += f" + {req_amount * amount}{d.emojis[d.emoji_items[req_item]]}"
return base
async def update_support_member_role(bot, member):
try:
db = bot.get_cog("Database")
support_guild = bot.get_guild(bot.d.support_server_id)
if support_guild is None:
support_guild = await bot.fetch_guild(bot.d.support_server_id)
role_map_values = set(bot.d.role_mappings.values())
roles = []
for role in member.roles: # add non rank roles to roles list
if role.id not in role_map_values and role.id != bot.d.support_server_id:
roles.append(role)
pickaxe_role = bot.d.role_mappings.get(await db.fetch_pickaxe(member.id))
if pickaxe_role is not None:
roles.append(support_guild.get_role(pickaxe_role))
if await db.fetch_item(member.id, "Bane Of Pillagers Amulet") is not None:
roles.append(support_guild.get_role(bot.d.role_mappings.get("BOP")))
if roles != member.roles:
try:
await member.edit(roles=roles)
except (discord.errors.Forbidden, discord.errors.HTTPException):
pass
except Exception as e:
print(format_exception(e))
class TTLPreventDuplicate:
def __init__(self, expire_after: float, max_size: int):
self.expire_after = expire_after
self.max_size = max_size  # stored for reference but not enforced anywhere in this class
self.store = {}
def put(self, obj):
self.store[obj] = time.time()
def check(self, obj):
return obj in self.store
async def run(self):
try:
while True:
for k, v in list(self.store.items()):
if (time.time() - v) > self.expire_after:
del self.store[k]
await asyncio.sleep(1)
except asyncio.CancelledError:
pass
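# Typical wiring, illustrative only: one instance shared by the bot and swept in the
# background, e.g.
#
#     seen = TTLPreventDuplicate(expire_after=5.0, max_size=1000)
#     loop.create_task(seen.run())   # `loop` is whatever event loop the bot runs on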
def fix_giphy_url(url: str) -> str:
return f"https://i.giphy.com/media/{url.split("-")[-1]}/giphy.gif"
|
from urllib.parse import urlparse
import classyjson as cj
import asyncio
import discord
import math
import time
from util.code import format_exception
from util.ipc import PacketType
def strip_command(ctx): # returns message.clean_content excluding the command used
length = len(ctx.prefix) + len(ctx.invoked_with) + 1
return ctx.message.clean_content[length:]
def dm_check(ctx):
def _dm_check(m):
return ctx.author == m.author and ctx.author.dm_channel == m.channel
return _dm_check
def recursive_update(obj, new):
if isinstance(obj, dict) and isinstance(new, dict):
for k, v in new.items():
obj[k] = recursive_update(obj.get(k, cj.classify({})), v)
elif isinstance(obj, list) and isinstance(new, list):
obj = []  # obj must be reset to an empty list here to avoid weird list-merge issues (see /update command in cogs/cmds/owner.py)
for i, v in enumerate(new):
obj.append(recursive_update(obj[i], v) if i < len(obj) else v)
else:
return new
return obj
def make_health_bar(health: int, max_health: int, full: str, half: str, empty: str):
assert max_health % 2 == 0
return (
(full * (health // 2))
+ (half * (health % 2))
+ (empty * ((max_health // 2) - math.ceil(health / 2)))
+ f" ({health}/{max_health})"
)
async def lb_logic(bot, lb_list: list, u_entry: object, rank_fstr: str):
# add user entry to leaderboard if it's not there already
if u_entry is not None and u_entry[0] not in [e[0] for e in lb_list]:
lb_list.append(u_entry)
# sort
lb_list = sorted(lb_list, key=(lambda e: e[1]), reverse=True)
# shorten list
lb_list = lb_list[:9] if (u_entry is not None and u_entry[2] > 9) else lb_list[:10]
body = ""
# create base leaderboard
for entry in lb_list:
user = getattr(bot.get_user(entry[0]), "name", None)
if user is None:
res = await bot.ipc.broadcast(
{"type": PacketType.EVAL, "code": f"getattr(bot.get_user({entry[0]}), 'name', None)"}
)
for r in res.responses:
if not r.success:
raise ValueError(r.result)
if r.result:
user = r.result
break
if user is None:
user = "Unknown User"
else:
user = discord.utils.escape_markdown(user)
body += rank_fstr.format(entry[2], entry[1], user)
# add user if user is missing from the leaderboard
if u_entry is not None and u_entry[2] > 9:
body += "\n⋮" + rank_fstr.format(u_entry[2], u_entry[1], discord.utils.escape_markdown(bot.get_user(u_entry[0]).name))
return body + "\uFEFF"
def calc_total_wealth(db_user, u_items):
return (
db_user["emeralds"]
+ db_user["vault_balance"] * 9
+ sum([u_it["sell_price"] * u_it.get("amount", 0) for u_it in u_items if u_it["sell_price"] > 0])
)
def emojify_item(d, item: str):
try:
emoji_key = d.emoji_items[item]
if emoji_key.startswith("fish."):
return d.emojis.fish[emoji_key[5:]]
return d.emojis[emoji_key]
except KeyError:
return d.emojis.air
def format_required(d: object, shop_item: object, amount: int = 1):
base = f" {shop_item.buy_price * amount}{d.emojis.emerald}"
for req_item, req_amount in shop_item.requires.get("items", {}).items():
base += f" + {req_amount * amount}{d.emojis[d.emoji_items[req_item]]}"
return base
async def update_support_member_role(bot, member):
try:
db = bot.get_cog("Database")
support_guild = bot.get_guild(bot.d.support_server_id)
if support_guild is None:
support_guild = await bot.fetch_guild(bot.d.support_server_id)
role_map_values = set(bot.d.role_mappings.values())
roles = []
for role in member.roles: # add non rank roles to roles list
if role.id not in role_map_values and role.id != bot.d.support_server_id:
roles.append(role)
pickaxe_role = bot.d.role_mappings.get(await db.fetch_pickaxe(member.id))
if pickaxe_role is not None:
roles.append(support_guild.get_role(pickaxe_role))
if await db.fetch_item(member.id, "Bane Of Pillagers Amulet") is not None:
roles.append(support_guild.get_role(bot.d.role_mappings.get("BOP")))
if roles != member.roles:
try:
await member.edit(roles=roles)
except (discord.errors.Forbidden, discord.errors.HTTPException):
pass
except Exception as e:
print(format_exception(e))
class TTLPreventDuplicate:
def __init__(self, expire_after: float, max_size: int):
self.expire_after = expire_after
self.max_size = max_size
self.store = {}
def put(self, obj):
self.store[obj] = time.time()
def check(self, obj):
return obj in self.store
async def run(self):
try:
while True:
for k, v in list(self.store.items()):
if (time.time() - v) > self.expire_after:
del self.store[k]
await asyncio.sleep(1)
except asyncio.CancelledError:
pass
def fix_giphy_url(url: str) -> str:
return f"https://i.giphy.com/media/{url.split('-')[-1]}/giphy.gif"
|
import smtplib
import urllib.parse
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from itsdangerous import URLSafeTimedSerializer
from flask import current_app
from server.services.messaging.template_service import get_template, get_profile_url
class SMTPService:
@staticmethod
def send_verification_email(to_address: str, username: str):
""" Sends a verification email with a unique token so we can verify user owns this email address """
# TODO these could be localised if needed, in the future
html_template = get_template('email_verification_en.html')
text_template = get_template('email_verification_en.txt')
verification_url = SMTPService._generate_email_verification_url(to_address, username)
html_template = html_template.replace('[USERNAME]', username)
html_template = html_template.replace('[VEFIFICATION_LINK]', verification_url)
text_template = text_template.replace('[USERNAME]', username)
text_template = text_template.replace('[VEFIFICATION_LINK]', verification_url)
subject = 'HOT Tasking Manager - Email Verification'
SMTPService._send_mesage(to_address, subject, html_template, text_template)
return True
@staticmethod
def send_email_alert(to_address: str, username: str):
""" Send an email to user to alert them they have a new message"""
current_app.logger.debug(f'Test if email required {to_address}')
if not to_address:
return False # Many users will not have supplied email address so return
# TODO these could be localised if needed, in the future
html_template = get_template('message_alert_en.html')
text_template = get_template('message_alert_en.txt')
inbox_url = f"{current_app.config["APP_BASE_URL"]}/inbox"
html_template = html_template.replace('[USERNAME]', username)
html_template = html_template.replace('[PROFILE_LINK]', inbox_url)
text_template = text_template.replace('[USERNAME]', username)
text_template = text_template.replace('[PROFILE_LINK]', inbox_url)
subject = 'You have a new message on the HOT Tasking Manager'
SMTPService._send_mesage(to_address, subject, html_template, text_template)
return True
@staticmethod
def _send_mesage(to_address: str, subject: str, html_message: str, text_message: str):
""" Helper sends SMTP message """
from_address = current_app.config['EMAIL_FROM_ADDRESS']
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = from_address
msg['To'] = to_address
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text_message, 'plain')
part2 = MIMEText(html_message, 'html')
msg.attach(part1)
msg.attach(part2)
sender = SMTPService._init_smtp_client()
current_app.logger.debug(f'Sending email via SMTP {to_address}')
sender.sendmail(from_address, to_address, msg.as_string())
sender.quit()
current_app.logger.debug(f'Email sent {to_address}')
@staticmethod
def _init_smtp_client():
""" Initialise SMTP client from app settings """
smtp_settings = current_app.config['SMTP_SETTINGS']
sender = smtplib.SMTP(smtp_settings['host'])
sender.starttls()
sender.login(smtp_settings['smtp_user'], smtp_settings['smtp_password'])
return sender
@staticmethod
def _generate_email_verification_url(email_address: str, user_name: str):
""" Generate email verification url with unique token """
entropy = current_app.secret_key if current_app.secret_key else 'un1testingmode'
serializer = URLSafeTimedSerializer(entropy)
token = serializer.dumps(email_address.lower())
base_url = current_app.config['APP_BASE_URL']
verification_params = {'token': token, 'username': user_name}
verification_url = '{0}/api/auth/email?{1}'.format(base_url, urllib.parse.urlencode(verification_params))
return verification_url
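# Configuration this service expects on current_app.config (key names taken from the
# lookups above; values are illustrative placeholders):
#
#     EMAIL_FROM_ADDRESS = 'noreply@example.org'
#     APP_BASE_URL       = 'https://tasks.example.org'
#     SMTP_SETTINGS      = {'host': 'smtp.example.org',
#                           'smtp_user': 'user', 'smtp_password': 'secret'}
#
# plus current_app.secret_key, which signs the email-verification token.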
|
import smtplib
import urllib.parse
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from itsdangerous import URLSafeTimedSerializer
from flask import current_app
from server.services.messaging.template_service import get_template, get_profile_url
class SMTPService:
@staticmethod
def send_verification_email(to_address: str, username: str):
""" Sends a verification email with a unique token so we can verify user owns this email address """
# TODO these could be localised if needed, in the future
html_template = get_template('email_verification_en.html')
text_template = get_template('email_verification_en.txt')
verification_url = SMTPService._generate_email_verification_url(to_address, username)
html_template = html_template.replace('[USERNAME]', username)
html_template = html_template.replace('[VEFIFICATION_LINK]', verification_url)
text_template = text_template.replace('[USERNAME]', username)
text_template = text_template.replace('[VEFIFICATION_LINK]', verification_url)
subject = 'HOT Tasking Manager - Email Verification'
SMTPService._send_mesage(to_address, subject, html_template, text_template)
return True
@staticmethod
def send_email_alert(to_address: str, username: str):
""" Send an email to user to alert them they have a new message"""
current_app.logger.debug(f'Test if email required {to_address}')
if not to_address:
return False # Many users will not have supplied email address so return
# TODO these could be localised if needed, in the future
html_template = get_template('message_alert_en.html')
text_template = get_template('message_alert_en.txt')
inbox_url = f"{current_app.config['APP_BASE_URL']}/inbox"
html_template = html_template.replace('[USERNAME]', username)
html_template = html_template.replace('[PROFILE_LINK]', inbox_url)
text_template = text_template.replace('[USERNAME]', username)
text_template = text_template.replace('[PROFILE_LINK]', inbox_url)
subject = 'You have a new message on the HOT Tasking Manager'
SMTPService._send_mesage(to_address, subject, html_template, text_template)
return True
@staticmethod
def _send_mesage(to_address: str, subject: str, html_message: str, text_message: str):
""" Helper sends SMTP message """
from_address = current_app.config['EMAIL_FROM_ADDRESS']
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = from_address
msg['To'] = to_address
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text_message, 'plain')
part2 = MIMEText(html_message, 'html')
msg.attach(part1)
msg.attach(part2)
sender = SMTPService._init_smtp_client()
current_app.logger.debug(f'Sending email via SMTP {to_address}')
sender.sendmail(from_address, to_address, msg.as_string())
sender.quit()
current_app.logger.debug(f'Email sent {to_address}')
@staticmethod
def _init_smtp_client():
""" Initialise SMTP client from app settings """
smtp_settings = current_app.config['SMTP_SETTINGS']
sender = smtplib.SMTP(smtp_settings['host'])
sender.starttls()
sender.login(smtp_settings['smtp_user'], smtp_settings['smtp_password'])
return sender
@staticmethod
def _generate_email_verification_url(email_address: str, user_name: str):
""" Generate email verification url with unique token """
entropy = current_app.secret_key if current_app.secret_key else 'un1testingmode'
serializer = URLSafeTimedSerializer(entropy)
token = serializer.dumps(email_address.lower())
base_url = current_app.config['APP_BASE_URL']
verification_params = {'token': token, 'username': user_name}
verification_url = '{0}/api/auth/email?{1}'.format(base_url, urllib.parse.urlencode(verification_params))
return verification_url
|
from parsons.notifications.gmail import Gmail
import json
import os
import requests_mock
import unittest
import shutil
import base64
import email
_dir = os.path.dirname(__file__)
class TestGmail(unittest.TestCase):
@requests_mock.Mocker()
def setUp(self, m):
self.tmp_folder = "tmp/"
self.credentials_file = f"{self.tmp_folder}/credentials.json"
self.token_file = f"{self.tmp_folder}/token.json"
os.mkdir(self.tmp_folder)
with open(self.credentials_file, 'w') as f:
f.write(json.dumps({
"installed": {
"client_id": "someclientid.apps.googleusercontent.com",
"project_id": "some-project-id-12345",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://www.googleapis.com/oauth2/v3/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_secret": "someclientsecret",
"redirect_uris": ["urn:ietf:wg:oauth:2.0:oob", "http://localhost"]
}
}))
with open(self.token_file, 'w') as f:
f.write(json.dumps({
"access_token": "someaccesstoken",
"client_id": "some-client-id.apps.googleusercontent.com",
"client_secret": "someclientsecret",
"refresh_token": "1/refreshrate",
"token_expiry": "2030-02-20T23:28:09Z",
"token_uri": "https://www.googleapis.com/oauth2/v3/token",
"user_agent": None,
"revoke_uri": "https://oauth2.googleapis.com/revoke",
"id_token": None,
"id_token_jwt": None,
"token_response": {
"access_token": "someaccesstoken",
"expires_in": 3600000,
"scope": "https://www.googleapis.com/auth/gmail.send",
"token_type": "Bearer"
},
"scopes": ["https://www.googleapis.com/auth/gmail.send"],
"token_info_uri": "https://oauth2.googleapis.com/tokeninfo",
"invalid": False,
"_class": "OAuth2Credentials",
"_module": "oauth2client.client"
}))
self.gmail = Gmail(self.credentials_file, self.token_file)
def tearDown(self):
# Delete tmp folder and files
shutil.rmtree(self.tmp_folder)
def test_create_message_simple(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email"
message_text = "The is the message text of the email"
msg = self.gmail._create_message_simple(
sender, to, subject, message_text)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'text/plain; charset="us-ascii"'),
('MIME-Version', '1.0'),
('Content-Transfer-Encoding', '7bit'),
('to', to),
('from', sender),
('subject', subject)]
# Check the metadata
self.assertListEqual(decoded.items(), expected_items)
# Check the message
self.assertEqual(decoded.get_payload(), message_text)
# Check the number of parts
expected_parts = 1
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_html(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test html email"
message_text = "The is the message text of the email"
message_html = "<p>This is the html message part of the email</p>"
msg = self.gmail._create_message_html(
sender, to, subject, message_text, message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('subject', subject),
('from', sender),
('to', to)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
# Check the number of parts
expected_parts = 3
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_html_no_text(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test html email"
message_html = "<p>This is the html message part of the email</p>"
msg = self.gmail._create_message_html(
sender, to, subject, '', message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('subject', subject),
('from', sender),
('to', to)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_html)
# Check the number of parts
expected_parts = 2
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.txt']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
if os.linesep == '\r\n':
file = f'{_dir}/assets/loremipsum_b64_win_txt.txt'
else:
file = f'{_dir}/assets/loremipsum_b64_txt.txt'
with open(file, 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_type(), 'text/plain')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_jpeg(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.jpeg']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_jpeg.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
expected_id = f"<{attachments[0].split("/")[-1]}>"
self.assertEqual(parts[2].get('Content-ID'), expected_id)
self.assertEqual(parts[2].get_content_type(), 'image/jpeg')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_m4a(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.m4a']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_m4a.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_maintype(), 'audio')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_mp3(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.mp3']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_mp3.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_type(), 'audio/mpeg')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_mp4(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.mp4']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_mp4.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_type(), 'video/mp4')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_pdf(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.pdf']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_pdf.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_type(), 'application/pdf')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test__validate_email_string(self):
emails = [
{"email": "Sender <sender@email.com>", "expected": True},
{"email": "sender@email.com", "expected": True},
{"email": "<sender@email.com>", "expected": True},
{"email": "Sender sender@email.com", "expected": False},
{"email": "Sender <sender2email.com>", "expected": False},
{"email": "Sender <sender@email,com>", "expected": True},
{"email": "Sender <sender+alias@email,com>", "expected": True}
]
for e in emails:
if e['expected']:
self.assertTrue(self.gmail._validate_email_string(e['email']))
else:
self.assertRaises(
ValueError, self.gmail._validate_email_string, e['email'])
# TODO test sending emails
if __name__ == "__main__":
unittest.main()
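# Note: the attachment tests above compare against pre-encoded fixture files in the
# assets/ folder next to this test module, so they only pass when those assets exist
# and the working directory is writable (setUp creates a tmp/ folder there), e.g.:
#
#     python -m unittest <path.to.this.test_module>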
|
from parsons.notifications.gmail import Gmail
import json
import os
import requests_mock
import unittest
import shutil
import base64
import email
_dir = os.path.dirname(__file__)
class TestGmail(unittest.TestCase):
@requests_mock.Mocker()
def setUp(self, m):
self.tmp_folder = "tmp/"
self.credentials_file = f"{self.tmp_folder}/credentials.json"
self.token_file = f"{self.tmp_folder}/token.json"
os.mkdir(self.tmp_folder)
with open(self.credentials_file, 'w') as f:
f.write(json.dumps({
"installed": {
"client_id": "someclientid.apps.googleusercontent.com",
"project_id": "some-project-id-12345",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://www.googleapis.com/oauth2/v3/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_secret": "someclientsecret",
"redirect_uris": ["urn:ietf:wg:oauth:2.0:oob", "http://localhost"]
}
}))
with open(self.token_file, 'w') as f:
f.write(json.dumps({
"access_token": "someaccesstoken",
"client_id": "some-client-id.apps.googleusercontent.com",
"client_secret": "someclientsecret",
"refresh_token": "1/refreshrate",
"token_expiry": "2030-02-20T23:28:09Z",
"token_uri": "https://www.googleapis.com/oauth2/v3/token",
"user_agent": None,
"revoke_uri": "https://oauth2.googleapis.com/revoke",
"id_token": None,
"id_token_jwt": None,
"token_response": {
"access_token": "someaccesstoken",
"expires_in": 3600000,
"scope": "https://www.googleapis.com/auth/gmail.send",
"token_type": "Bearer"
},
"scopes": ["https://www.googleapis.com/auth/gmail.send"],
"token_info_uri": "https://oauth2.googleapis.com/tokeninfo",
"invalid": False,
"_class": "OAuth2Credentials",
"_module": "oauth2client.client"
}))
self.gmail = Gmail(self.credentials_file, self.token_file)
def tearDown(self):
# Delete tmp folder and files
shutil.rmtree(self.tmp_folder)
def test_create_message_simple(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email"
message_text = "The is the message text of the email"
msg = self.gmail._create_message_simple(
sender, to, subject, message_text)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'text/plain; charset="us-ascii"'),
('MIME-Version', '1.0'),
('Content-Transfer-Encoding', '7bit'),
('to', to),
('from', sender),
('subject', subject)]
# Check the metadata
self.assertListEqual(decoded.items(), expected_items)
# Check the message
self.assertEqual(decoded.get_payload(), message_text)
# Check the number of parts
expected_parts = 1
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_html(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test html email"
message_text = "The is the message text of the email"
message_html = "<p>This is the html message part of the email</p>"
msg = self.gmail._create_message_html(
sender, to, subject, message_text, message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('subject', subject),
('from', sender),
('to', to)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
# Check the number of parts
expected_parts = 3
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_html_no_text(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test html email"
message_html = "<p>This is the html message part of the email</p>"
msg = self.gmail._create_message_html(
sender, to, subject, '', message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('subject', subject),
('from', sender),
('to', to)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_html)
# Check the number of parts
expected_parts = 2
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.txt']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
if os.linesep == '\r\n':
file = f'{_dir}/assets/loremipsum_b64_win_txt.txt'
else:
file = f'{_dir}/assets/loremipsum_b64_txt.txt'
with open(file, 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_type(), 'text/plain')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_jpeg(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.jpeg']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_jpeg.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
expected_id = f"<{attachments[0].split('/')[-1]}>"
self.assertEqual(parts[2].get('Content-ID'), expected_id)
self.assertEqual(parts[2].get_content_type(), 'image/jpeg')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_m4a(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.m4a']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_m4a.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_maintype(), 'audio')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_mp3(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.mp3']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_mp3.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_type(), 'audio/mpeg')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_mp4(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.mp4']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
# The boundary id changes every time. Replace it with just the beginning to
# avoid spurious failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_mp4.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_type(), 'video/mp4')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test_create_message_attachments_pdf(self):
sender = "Sender <sender@email.com>"
to = "Recepient <recepient@email.com>"
subject = "This is a test email with attachements"
message_text = "The is the message text of the email with attachments"
message_html = ("<p>This is the html message part of the email "
"with attachments</p>")
attachments = [f'{_dir}/assets/loremipsum.pdf']
msg = self.gmail._create_message_attachments(
sender, to, subject, message_text, attachments,
message_html=message_html)
raw = self.gmail._encode_raw_message(msg)
decoded = email.message_from_bytes(
base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8')))
expected_items = [
('Content-Type', 'multipart/alternative;\n boundary='),
('MIME-Version', '1.0'),
('to', to),
('from', sender),
('subject', subject)]
        # The boundary id changes every time. Replace it with the beginning to
        # avoid failures
updated_items = []
for i in decoded.items():
if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa
updated_items.append(
('Content-Type', 'multipart/alternative;\n boundary='))
else:
updated_items.append((i[0], i[1]))
# Check the metadata
self.assertListEqual(updated_items, expected_items)
# Check the message
# The first part is just a container for the text and html parts
parts = decoded.get_payload()
self.assertEqual(parts[0].get_payload(), message_text)
self.assertEqual(parts[1].get_payload(), message_html)
with open(f'{_dir}/assets/loremipsum_b64_pdf.txt', 'r') as f:
b64_txt = f.read()
self.assertEqual(parts[2].get_payload(), b64_txt)
self.assertEqual(parts[2].get_content_type(), 'application/pdf')
# Check the number of parts
expected_parts = 4
self.assertEqual(sum([1 for i in decoded.walk()]), expected_parts)
def test__validate_email_string(self):
emails = [
{"email": "Sender <sender@email.com>", "expected": True},
{"email": "sender@email.com", "expected": True},
{"email": "<sender@email.com>", "expected": True},
{"email": "Sender sender@email.com", "expected": False},
{"email": "Sender <sender2email.com>", "expected": False},
{"email": "Sender <sender@email,com>", "expected": True},
{"email": "Sender <sender+alias@email,com>", "expected": True}
]
for e in emails:
if e['expected']:
self.assertTrue(self.gmail._validate_email_string(e['email']))
else:
self.assertRaises(
ValueError, self.gmail._validate_email_string, e['email'])
# TODO test sending emails
if __name__ == "__main__":
unittest.main()
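
# --- Illustrative sketch (added commentary, not part of the tested library) ---
# The tests above check that _create_message_attachments() builds a
# multipart/alternative message whose extra part carries the base64-encoded
# attachment with the right content type, and that _encode_raw_message()
# urlsafe-base64-encodes the whole message. A minimal standard-library version
# of that idea could look like the helper below; its name and signature are
# assumptions for illustration, not the API under test.
import base64
import mimetypes
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText


def _example_build_raw_message(sender, to, subject, text, html, attachment_path):
    msg = MIMEMultipart('alternative')
    msg['to'], msg['from'], msg['subject'] = to, sender, subject
    msg.attach(MIMEText(text, 'plain'))
    msg.attach(MIMEText(html, 'html'))
    # Guess the MIME type from the file name, falling back to a generic binary type.
    ctype, _ = mimetypes.guess_type(attachment_path)
    maintype, subtype = (ctype or 'application/octet-stream').split('/', 1)
    part = MIMEBase(maintype, subtype)
    with open(attachment_path, 'rb') as fh:
        part.set_payload(fh.read())
    encoders.encode_base64(part)  # the payload becomes base64 text, as asserted above
    part.add_header('Content-Disposition', 'attachment')
    msg.attach(part)
    # The raw message is urlsafe-base64-encoded, mirroring _encode_raw_message().
    return {'raw': base64.urlsafe_b64encode(msg.as_bytes()).decode('utf-8')}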
|
from pathlib import Path
from pprint import pprint
import keyword
import builtins
import textwrap
from ursina import color, lerp, application
def indentation(line):
return len(line) - len(line.lstrip())
def get_module_attributes(str):
attrs = list()
for l in str.split('\n'):
if len(l) == 0:
continue
if l.startswith(tuple(keyword.kwlist) + tuple(dir(builtins)) + (' ', '#', '\'', '\"', '_')):
continue
attrs.append(l)
return attrs
def get_classes(str):
classes = dict()
for c in str.split('\nclass ')[1:]:
class_name = c.split(':', 1)[0]
if class_name.startswith(('\'', '"')):
continue
# print(class_name)
classes[class_name] = c.split(':', 1)[1]
return classes
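
# Hedged illustration (added for clarity, not part of the original script):
# get_classes() works purely by string-splitting on '\nclass ', mapping each
# class header to its body text. For a small source string:
def _example_get_classes():
    sample = "\nclass Foo(Entity):\n    def __init__(self):\n        self.x = 1\n"
    result = get_classes(sample)
    assert list(result.keys()) == ['Foo(Entity)']
    return result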
def get_class_attributes(str):
attributes = list()
lines = str.split('\n')
start = 0
end = len(lines)
for i, line in enumerate(lines):
if line == '''if __name__ == '__main__':''':
break
found_init = False
if line.strip().startswith('def __init__'):
if found_init:
break
start = i
for j in range(i+1, len(lines)):
if (indentation(lines[j]) == indentation(line)
and not lines[j].strip().startswith('def late_init')
):
end = j
found_init = True
break
init_section = lines[start:end]
# print('init_section:', start, end, init_section)
for i, line in enumerate(init_section):
if line.strip().startswith('self.') and ' = ' in line and line.startswith(' '*8) and not line.startswith(' '*9):
stripped_line = line.split('self.', 1)[1]
if '.' in stripped_line.split(' ')[0] or stripped_line.startswith('_'):
continue
key = stripped_line.split(' = ')[0]
value = stripped_line.split(' = ')[1]
            if i+1 < len(init_section) and indentation(init_section[i+1]) > indentation(line):
# value = 'multiline'
start = i
end = i
indent = indentation(line)
for j in range(i+1, len(init_section)):
if indentation(init_section[j]) <= indent:
end = j
break
for l in init_section[start+1:end]:
value += '\n' + l[4:]
attributes.append(key + ' = ' + value)
    if '@property' in str:
for i, line in enumerate(lines):
if line.strip().startswith('@property'):
name = lines[i+1].split('def ')[1].split('(')[0]
# include comments for properties
if '#' in lines[i+1]:
name += ((20-len(name)) * ' ') + '<gray>#' + lines[i+1].split('#',1)[1] + '</gray>'
if not name in [e.split(' = ')[0] for e in attributes]:
attributes.append(name)
return attributes
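
# Hedged illustration (added, not part of the original script): given a class
# body as returned by get_classes(), get_class_attributes() collects the
# 'self.<name> = <value>' assignments made at the top level of __init__
# (8-space indentation), plus any @property names.
def _example_get_class_attributes():
    body = "\n    def __init__(self):\n        self.health = 100\n        self.speed = 5\n"
    assert get_class_attributes(body) == ['health = 100', 'speed = 5']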
def get_functions(str, is_class=False):
    functions = list()
    lines = str.split('\n')
ignore_functions_for_property_generation = 'generate_properties(' in str
for i, line in enumerate(lines):
if line == '''if __name__ == '__main__':''' or 'docignore' in line:
break
if line.strip().startswith('def '):
if not is_class and line.split('(')[1].startswith('self'):
continue
name = line.split('def ')[1].split('(')[0]
if name.startswith('_') or lines[i-1].strip().startswith('@'):
continue
if ignore_functions_for_property_generation:
if name.startswith('get_') or name.startswith('set_'):
continue
params = line.replace('(self, ', '(')
params = params.replace('(self)', '()')
params = params.split('(', 1)[1].rsplit(')', 1)[0]
comment = ''
if '#' in line:
comment = '#' + line.split('#')[1]
functions.append((name, params, comment))
return functions
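
# Hedged illustration (added): get_functions() yields (name, params, comment)
# tuples for public defs, strips 'self' from method signatures, and stops at an
# "if __name__ == '__main__':" line.
def _example_get_functions():
    sample = "def add(a, b=1):  # sums two values\n    return a + b\n"
    assert get_functions(sample) == [('add', 'a, b=1', '# sums two values')]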
def clear_tags(str):
for tag in ('purple', 'olive', 'yellow', 'blue'):
str = str.replace(f'<{tag}>', '')
str = str.replace(f'</{tag}>', '')
return str
def get_example(str, name=None): # use name to highlight the relevant class
key = '''if __name__ == '__main__':'''
lines = list()
example_started = False
for l in str.split('\n'):
if example_started:
lines.append(l)
if l == key:
example_started = True
example = '\n'.join(lines)
example = textwrap.dedent(example)
example = example.split('# test\n')[0]
ignore = ('app = Ursina()', 'app.run()', 'from ursina import *')
if 'class Ursina' in str: # don't ignore in main.py
ignore = ()
lines = [e for e in example.split('\n') if not e in ignore and not e.strip().startswith('#')]
import re
styled_lines = list()
for line in lines:
line = line.replace('def ', '<purple>def</purple> ')
line = line.replace('from ', '<purple>from</purple> ')
line = line.replace('import ', '<purple>import</purple> ')
line = line.replace('for ', '<purple>for</purple> ')
line = line.replace('elif ', '<purple>elif</purple> ')
line = line.replace('if ', '<purple>if</purple> ')
line = line.replace(' not ', ' <purple>not</purple> ')
line = line.replace('else:', '<purple>else</purple>:')
line = line.replace('Entity', '<olive>Entity</olive>')
for e in ('print', 'range', 'hasattr', 'getattr', 'setattr'):
line = line.replace(f'{e}(' , f'<blue>{e}</blue>(')
# colorize ursina specific params
for e in ('enabled', 'parent', 'world_parent', 'model', 'highlight_color', 'color',
'texture_scale', 'texture', 'visible',
'position', 'z', 'y', 'z',
'rotation', 'rotation_x', 'rotation_y', 'rotation_z',
'scale', 'scale_x', 'scale_y', 'scale_z',
'origin', 'origin_x', 'origin_y', 'origin_z',
'text', 'on_click', 'icon', 'collider', 'shader', 'curve', 'ignore',
'vertices', 'triangles', 'uvs', 'normals', 'colors', 'mode', 'thickness'
):
line = line.replace(f'{e}=' , f'<olive>{e}</olive>=')
# colorize numbers
for i in range(10):
line = line.replace(f'{i}', f'<yellow>{i}</yellow>')
# destyle Vec2 and Vec3
line = line.replace(f'<yellow>3</yellow>(', '3(')
line = line.replace(f'<yellow>2</yellow>(', '2(')
# highlight class name
if name:
if '(' in name:
name = name.split('(')[0]
line = line.replace(f'{name}(', f'<purple><b>{name}</b></purple>(')
line = line.replace(f'={name}(', f'=<purple><b>{name}</b></purple>(')
# line = line.replace(f'.{name}', f'.<font colorK
if ' #' in line:
# remove colored words inside strings
line = clear_tags(line)
line = line.replace(' #', ' <gray>#')
line += '</gray>'
styled_lines.append(line)
lines = styled_lines
example = '\n'.join(lines)
    # find triple quoted strings
if example.count("'''") % 2 == 0 and example.count("'''") > 1:
parts = example.strip().split("'''")
parts = [e for e in parts if e]
is_quote = example.strip().startswith("'''")
for i in range(not is_quote, len(parts), 2):
parts[i] = clear_tags(parts[i])
parts[i] = "<green>'''" + parts[i] + "'''</green>"
example = ''.join(parts)
# find single quoted words
styled_lines = []
for line in example.split('\n'):
quotes = re.findall('\'(.*?)\'', line)
quotes = ['\'' + q + '\'' for q in quotes]
for q in quotes:
line = line.replace(q, '<green>' + clear_tags(q) + '</green>')
styled_lines.append(line)
example = '\n'.join(styled_lines)
return example.strip()
def is_singleton(str):
for l in str.split('\n'):
# if l.startswith('sys.modules['):
if l.startswith('instance = '):
return True
    return False
path = application.package_folder
most_used_info = dict()
module_info = dict()
class_info = dict()
# ignore files that are not committed
ignored_files = list()
for f in ignored_files:
print('ignoring:', f)
ignored_files.append(path / 'gamepad.py')
for f in path.glob('*.py'):
if f in ignored_files:
continue
if f.name.startswith('_') or f.name == 'build.py':
module_info['build'] = (
f,
'python -m ursina.build',
{},
'',
'''open cmd at your project folder and run 'python -m ursina.build' to package your app for windows.'''
)
continue
with open(f, encoding='utf8') as t:
code = t.read()
        code = code.replace('<', '&lt;').replace('>', '&gt;')
if not is_singleton(code):
name = f.stem
attrs, funcs = list(), list()
attrs = get_module_attributes(code)
funcs = get_functions(code)
example = get_example(code, name)
if attrs or funcs:
module_info[name] = (f, '', attrs, funcs, example)
# continue
classes = get_classes(code)
for class_name, class_definition in classes.items():
if 'Enum' in class_name:
class_definition = class_definition.split('def ')[0]
attrs = [l.strip() for l in class_definition.split('\n') if ' = ' in l]
class_info[class_name] = (f, '', attrs, '', '')
continue
if 'def __init__' in class_definition:
# init line
params = '__init__('+ class_definition.split('def __init__(')[1].split('\n')[0][:-1]
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
class_info[class_name] = (f, params, attrs, methods, example)
# singletons
else:
module_name = f.name.split('.')[0]
classes = get_classes(code)
for class_name, class_definition in classes.items():
# print(module_name)
attrs, methods = list(), list()
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
module_info[module_name] = (f, '', attrs, methods, example)
prefab_info = dict()
for f in path.glob('prefabs/*.py'):
if f.name.startswith('_') or f in ignored_files:
continue
with open(f, encoding='utf8') as t:
code = t.read()
        code = code.replace('<', '&lt;').replace('>', '&gt;')
classes = get_classes(code)
for class_name, class_definition in classes.items():
if 'def __init__' in class_definition:
params = '__init__('+ class_definition.split('def __init__(')[1].split('\n')[0][:-1]
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
prefab_info[class_name] = (f, params, attrs, methods, example)
script_info = dict()
for f in path.glob('scripts/*.py'):
if f.name.startswith('_') or f in ignored_files:
continue
# if f.is_file() and f.name.endswith(('.py', )):
with open(f, encoding='utf8') as t:
code = t.read()
if not 'class ' in code:
name = f.name.split('.')[0]
attrs, funcs = list(), list()
attrs = get_module_attributes(code)
funcs = get_functions(code)
example = get_example(code)
if attrs or funcs:
script_info[name] = (f, '', attrs, funcs, example)
classes = get_classes(code)
for class_name, class_definition in classes.items():
if 'def __init__' in class_definition:
params = '__init__('+ class_definition.split('def __init__(')[1].split('\n')[0][:-1]
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
script_info[class_name] = (f, params, attrs, methods, example)
asset_info = dict()
model_names = [f'\'{f.stem}\'' for f in path.glob('models_compressed/*.ursinamesh')]
asset_info['models'] = ('', '', model_names, '', '''e = Entity(model='quad')''')
texture_names = [f'\'{f.stem}\'' for f in path.glob('textures/*.*')]
asset_info['textures'] = ('', '', texture_names, '', '''e = Entity(model='cube', texture='brick')''')
shaders = [f'{f.stem}' for f in path.glob('shaders/*.*')] + [f'{f.stem}' for f in path.glob('shaders/screenspace_shaders/*.*')]
shaders = [e for e in shaders if not e.startswith('_')]
asset_info['shaders'] = ('', '', shaders, '', '''from ursina.shaders import normals_shader\ne = Entity(shader=normals_shader)''')
for f in path.glob('models/procedural/*.py'):
if f.name.startswith('_') or f in ignored_files:
continue
with open(f, encoding='utf8') as t:
code = t.read()
classes = get_classes(code)
for class_name, class_definition in classes.items():
if 'def __init__' in class_definition:
params = '__init__('+ class_definition.split('def __init__(')[1].split('\n')[0][:-1]
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
asset_info[class_name] = (f, params, attrs, methods, example)
most_used_info = dict()
for name in ('Entity(NodePath)', 'Text(Entity)', 'Button(Entity)', 'mouse', 'raycaster',):
for d in (module_info, class_info, prefab_info):
if name in d:
most_used_info[name] = d[name]
del d[name]
def html_color(color):
return f'hsl({color.h}, {int(color.s*100)}%, {int(color.v*100)}%)'
def make_html(style, file_name):
if style == 'light':
base_color = color.color(60, 0, .99)
background_color = lerp(base_color, base_color.invert(), 0)
else:
base_color = color.color(60, 1, .01)
background_color = lerp(base_color, base_color.invert(), .125)
text_color = lerp(background_color, background_color.invert(), .9)
example_color = lerp(background_color, text_color, .1)
scrollbar_color = html_color(lerp(background_color, text_color, .1))
link_color = html_color(color.gray)
init_color = html_color(base_color.invert())
style = f'''
<style>
html {{
scrollbar-face-color: {html_color(text_color)};
scrollbar-base-color: {html_color(text_color)};
scrollbar-3dlight-color: {html_color(text_color)}4;
scrollbar-highlight-color: {html_color(text_color)};
scrollbar-track-color: {html_color(background_color)};
scrollbar-arrow-color: {html_color(background_color)};
scrollbar-shadow-color: {html_color(text_color)};
scrollbar-darkshadow-color: {html_color(text_color)};
}}
::-webkit-scrollbar {{ width: 8px; height: 3px;}}
::-webkit-scrollbar {{ width: 8px; height: 3px;}}
::-webkit-scrollbar-button {{ background-color: {scrollbar_color}; }}
::-webkit-scrollbar-track {{ background-color: {html_color(background_color)};}}
::-webkit-scrollbar-track-piece {{ background-color: {html_color(background_color)};}}
::-webkit-scrollbar-thumb {{ height: 50px; background-color: {scrollbar_color}; border-radius: 3px;}}
::-webkit-scrollbar-corner {{ background-color: {html_color(background_color)};}}
::-webkit-resizer {{ background-color: {html_color(background_color)};}}
body {{
margin: auto;
background-color: {html_color(background_color)};
color: {html_color(text_color)};
font-family: monospace;
position: absolute;
top:0;
left: 24em;
font-size: 1.375em;
font-weight: lighter;
max-width: 100%;
overflow-x: hidden;
white-space: pre-wrap;
}}
a {{
color: {link_color};
}}
purple {{color: hsl(289.0, 50%, 50%);}}
gray {{color: gray;}}
olive {{color: olive;}}
yellow {{color: darkgoldenrod;}}
green {{color: seagreen;}}
blue {{color: hsl(210, 50%, 50%);}}
.example {{
padding-left: 1em;
background-color: {html_color(example_color)};
}}
.params {{
color:{init_color};
font-weight:bold;
}}
</style>
'''
# return style
html = '<title> ursina cheat sheet</title>'
html += '''
<b>Ursina cheat sheet</b>
This document lists most modules and classes in ursina. Each section is structured as follows:
ClassName(BaseClass)
module location
parameters
How to instantiate the class, i.e. Button(text='', **kwargs).
'**kwargs' in this case, means you can give it optional keyword arguments.
For example, Button('Start', scale=.25, color=color.blue, position=(-.1,.25)) also includes
information on how big the button should be, its color and its position.
attributes
Names of values we can get/set, sometimes followed by its starting value and a short explanation.
For example, 'scale', 'color' and 'position' are
attributes we gave the Button above. These are members of Entity, which Button class
inherits from, so the Button class can also access these.
methods/functions
These end with (), which means they are functions that can be called.
Also lists their parameters and default arguments.
For example, Entity has a method called 'look_at()'. You need to give it a
'target' (an Entity or position) to look at and optionally say
which axis will be facing the target.
example
You can search the document with Ctrl+F for instant search results.
'''
sidebar = '''
<div class="sidebar" style="
left: 0px;
position: fixed;
top: 0px;
padding-top:40px;
padding-left:20px;
bottom: 0;
overflow-y: scroll;
width: 15em;
z-index: 1;
">
<a href="cheat_sheet.html">light</a> <a href="cheat_sheet_dark.html">dark</a>
'''
for i, class_dictionary in enumerate((most_used_info, module_info, class_info, prefab_info, script_info, asset_info)):
for name, attrs_and_functions in class_dictionary.items():
print('generating docs for', name)
location, params, attrs, funcs, example = attrs_and_functions
params = params.replace('__init__', name.split('(')[0])
params = params.replace('(self, ', '(')
params = params.replace('(self)', '()')
name = name.replace('ShowBase', '')
name = name.replace('NodePath', '')
for parent_class in ('Entity', 'Button', 'Draggable', 'Text', 'Collider', 'Mesh', 'Prismatoid'):
name = name.replace(f'({parent_class})', f'(<a style="color: gray;" href="#{parent_class}">{parent_class}</a>)')
base_name = name
if '(' in base_name:
base_name = base_name.split('(')[0]
base_name = base_name.split(')')[0]
name = name.replace('(', '<gray>(')
name = name.replace(')', ')</gray>')
v = lerp(text_color.v, background_color.v, .2)
# v = .5
col = color.color(50-(i*30), .9, v)
col = html_color(col)
sidebar += f'''<a style="color:{col};" href="#{base_name}">{base_name}</a>\n'''
html += '\n'
html += f'''<div id="{base_name}"><div id="{base_name}" style="color:{col}; font-size:1.75em; font-weight:normal;">{name}</div>'''
html += '<div style="position:relative; padding:0em 0em 2em 1em; margin:0;">'
# location
location = str(location)
if 'ursina' in location:
location = location.split('ursina')[-1]
github_link = 'https://github.com/pokepetter/ursina/tree/master/ursina' + location.replace('\\', '/')
location = location.replace('\\', '.')[:-3]
html += f'''<a href="{github_link}"><gray>ursina{location}</gray></a><br><br>'''
if params:
params = f'<params class="params">{params}</params>\n'
html += params + '\n'
for e in attrs:
if ' = ' in e:
e = f'''{e.split(' = ')[0]}<gray> = {e.split(' = ')[1]}</gray> '''
html += f'''{e}\n'''
html += '\n'
for e in funcs:
e = f'{e[0]}(<gray>{e[1]}</gray>) <gray>{e[2]}</gray>'
html += e + '\n'
if example:
html += '\n<div class="example">' + example +'\n</div>'
html += '\n</div></div>'
html = html.replace('<gray></gray>', '')
sidebar += '\n'
sidebar += '</div>'
html += '</div>'
html = sidebar + style + '<div id="content">' + html + '</div>' + '</body>'
with open(file_name, 'w', encoding='utf-8') as f:
f.write(html)
make_html('light', 'cheat_sheet.html')
make_html('dark', 'cheat_sheet_dark.html')
|
from pathlib import Path
from pprint import pprint
import keyword
import builtins
import textwrap
from ursina import color, lerp, application
def indentation(line):
return len(line) - len(line.lstrip())
def get_module_attributes(str):
attrs = list()
for l in str.split('\n'):
if len(l) == 0:
continue
if l.startswith(tuple(keyword.kwlist) + tuple(dir(builtins)) + (' ', '#', '\'', '\"', '_')):
continue
attrs.append(l)
return attrs
def get_classes(str):
classes = dict()
for c in str.split('\nclass ')[1:]:
class_name = c.split(':', 1)[0]
if class_name.startswith(('\'', '"')):
continue
# print(class_name)
classes[class_name] = c.split(':', 1)[1]
return classes
def get_class_attributes(str):
attributes = list()
lines = str.split('\n')
start = 0
end = len(lines)
for i, line in enumerate(lines):
if line == '''if __name__ == '__main__':''':
break
found_init = False
if line.strip().startswith('def __init__'):
if found_init:
break
start = i
for j in range(i+1, len(lines)):
if (indentation(lines[j]) == indentation(line)
and not lines[j].strip().startswith('def late_init')
):
end = j
found_init = True
break
init_section = lines[start:end]
# print('init_section:', start, end, init_section)
for i, line in enumerate(init_section):
if line.strip().startswith('self.') and ' = ' in line and line.startswith(' '*8) and not line.startswith(' '*9):
stripped_line = line.split('self.', 1)[1]
if '.' in stripped_line.split(' ')[0] or stripped_line.startswith('_'):
continue
key = stripped_line.split(' = ')[0]
value = stripped_line.split(' = ')[1]
            if i+1 < len(init_section) and indentation(init_section[i+1]) > indentation(line):
# value = 'multiline'
start = i
end = i
indent = indentation(line)
for j in range(i+1, len(init_section)):
if indentation(init_section[j]) <= indent:
end = j
break
for l in init_section[start+1:end]:
value += '\n' + l[4:]
attributes.append(key + ' = ' + value)
    if '@property' in str:
for i, line in enumerate(lines):
if line.strip().startswith('@property'):
name = lines[i+1].split('def ')[1].split('(')[0]
# include comments for properties
if '#' in lines[i+1]:
name += ((20-len(name)) * ' ') + '<gray>#' + lines[i+1].split('#',1)[1] + '</gray>'
if not name in [e.split(' = ')[0] for e in attributes]:
attributes.append(name)
return attributes
def get_functions(str, is_class=False):
    functions = list()
    lines = str.split('\n')
ignore_functions_for_property_generation = 'generate_properties(' in str
for i, line in enumerate(lines):
if line == '''if __name__ == '__main__':''' or 'docignore' in line:
break
if line.strip().startswith('def '):
if not is_class and line.split('(')[1].startswith('self'):
continue
name = line.split('def ')[1].split('(')[0]
if name.startswith('_') or lines[i-1].strip().startswith('@'):
continue
if ignore_functions_for_property_generation:
if name.startswith('get_') or name.startswith('set_'):
continue
params = line.replace('(self, ', '(')
params = params.replace('(self)', '()')
params = params.split('(', 1)[1].rsplit(')', 1)[0]
comment = ''
if '#' in line:
comment = '#' + line.split('#')[1]
functions.append((name, params, comment))
return functions
def clear_tags(str):
for tag in ('purple', 'olive', 'yellow', 'blue'):
str = str.replace(f'<{tag}>', '')
str = str.replace(f'</{tag}>', '')
return str
def get_example(str, name=None): # use name to highlight the relevant class
key = '''if __name__ == '__main__':'''
lines = list()
example_started = False
for l in str.split('\n'):
if example_started:
lines.append(l)
if l == key:
example_started = True
example = '\n'.join(lines)
example = textwrap.dedent(example)
example = example.split('# test\n')[0]
ignore = ('app = Ursina()', 'app.run()', 'from ursina import *')
if 'class Ursina' in str: # don't ignore in main.py
ignore = ()
lines = [e for e in example.split('\n') if not e in ignore and not e.strip().startswith('#')]
import re
styled_lines = list()
for line in lines:
line = line.replace('def ', '<purple>def</purple> ')
line = line.replace('from ', '<purple>from</purple> ')
line = line.replace('import ', '<purple>import</purple> ')
line = line.replace('for ', '<purple>for</purple> ')
line = line.replace('elif ', '<purple>elif</purple> ')
line = line.replace('if ', '<purple>if</purple> ')
line = line.replace(' not ', ' <purple>not</purple> ')
line = line.replace('else:', '<purple>else</purple>:')
line = line.replace('Entity', '<olive>Entity</olive>')
for e in ('print', 'range', 'hasattr', 'getattr', 'setattr'):
line = line.replace(f'{e}(' , f'<blue>{e}</blue>(')
# colorize ursina specific params
for e in ('enabled', 'parent', 'world_parent', 'model', 'highlight_color', 'color',
'texture_scale', 'texture', 'visible',
'position', 'z', 'y', 'z',
'rotation', 'rotation_x', 'rotation_y', 'rotation_z',
'scale', 'scale_x', 'scale_y', 'scale_z',
'origin', 'origin_x', 'origin_y', 'origin_z',
'text', 'on_click', 'icon', 'collider', 'shader', 'curve', 'ignore',
'vertices', 'triangles', 'uvs', 'normals', 'colors', 'mode', 'thickness'
):
line = line.replace(f'{e}=' , f'<olive>{e}</olive>=')
# colorize numbers
for i in range(10):
line = line.replace(f'{i}', f'<yellow>{i}</yellow>')
# destyle Vec2 and Vec3
line = line.replace(f'<yellow>3</yellow>(', '3(')
line = line.replace(f'<yellow>2</yellow>(', '2(')
# highlight class name
if name:
if '(' in name:
name = name.split('(')[0]
line = line.replace(f'{name}(', f'<purple><b>{name}</b></purple>(')
line = line.replace(f'={name}(', f'=<purple><b>{name}</b></purple>(')
# line = line.replace(f'.{name}', f'.<font colorK
if ' #' in line:
# remove colored words inside strings
line = clear_tags(line)
line = line.replace(' #', ' <gray>#')
line += '</gray>'
styled_lines.append(line)
lines = styled_lines
example = '\n'.join(lines)
    # find triple quoted strings
if example.count("'''") % 2 == 0 and example.count("'''") > 1:
parts = example.strip().split("'''")
parts = [e for e in parts if e]
is_quote = example.strip().startswith("'''")
for i in range(not is_quote, len(parts), 2):
parts[i] = clear_tags(parts[i])
parts[i] = "<green>'''" + parts[i] + "'''</green>"
example = ''.join(parts)
# find single quoted words
styled_lines = []
for line in example.split('\n'):
quotes = re.findall('\'(.*?)\'', line)
quotes = ['\'' + q + '\'' for q in quotes]
for q in quotes:
line = line.replace(q, '<green>' + clear_tags(q) + '</green>')
styled_lines.append(line)
example = '\n'.join(styled_lines)
return example.strip()
def is_singleton(str):
for l in str.split('\n'):
# if l.startswith('sys.modules['):
if l.startswith('instance = '):
return True
    return False
path = application.package_folder
most_used_info = dict()
module_info = dict()
class_info = dict()
# ignore files that are not committed
ignored_files = list()
for f in ignored_files:
print('ignoring:', f)
ignored_files.append(path / 'gamepad.py')
for f in path.glob('*.py'):
if f in ignored_files:
continue
if f.name.startswith('_') or f.name == 'build.py':
module_info['build'] = (
f,
'python -m ursina.build',
{},
'',
'''open cmd at your project folder and run 'python -m ursina.build' to package your app for windows.'''
)
continue
with open(f, encoding='utf8') as t:
code = t.read()
        code = code.replace('<', '&lt;').replace('>', '&gt;')
if not is_singleton(code):
name = f.stem
attrs, funcs = list(), list()
attrs = get_module_attributes(code)
funcs = get_functions(code)
example = get_example(code, name)
if attrs or funcs:
module_info[name] = (f, '', attrs, funcs, example)
# continue
classes = get_classes(code)
for class_name, class_definition in classes.items():
if 'Enum' in class_name:
class_definition = class_definition.split('def ')[0]
attrs = [l.strip() for l in class_definition.split('\n') if ' = ' in l]
class_info[class_name] = (f, '', attrs, '', '')
continue
if 'def __init__' in class_definition:
# init line
params = '__init__('+ class_definition.split('def __init__(')[1].split('\n')[0][:-1]
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
class_info[class_name] = (f, params, attrs, methods, example)
# singletons
else:
module_name = f.name.split('.')[0]
classes = get_classes(code)
for class_name, class_definition in classes.items():
# print(module_name)
attrs, methods = list(), list()
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
module_info[module_name] = (f, '', attrs, methods, example)
prefab_info = dict()
for f in path.glob('prefabs/*.py'):
if f.name.startswith('_') or f in ignored_files:
continue
with open(f, encoding='utf8') as t:
code = t.read()
        code = code.replace('<', '&lt;').replace('>', '&gt;')
classes = get_classes(code)
for class_name, class_definition in classes.items():
if 'def __init__' in class_definition:
params = '__init__('+ class_definition.split('def __init__(')[1].split('\n')[0][:-1]
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
prefab_info[class_name] = (f, params, attrs, methods, example)
script_info = dict()
for f in path.glob('scripts/*.py'):
if f.name.startswith('_') or f in ignored_files:
continue
# if f.is_file() and f.name.endswith(('.py', )):
with open(f, encoding='utf8') as t:
code = t.read()
if not 'class ' in code:
name = f.name.split('.')[0]
attrs, funcs = list(), list()
attrs = get_module_attributes(code)
funcs = get_functions(code)
example = get_example(code)
if attrs or funcs:
script_info[name] = (f, '', attrs, funcs, example)
classes = get_classes(code)
for class_name, class_definition in classes.items():
if 'def __init__' in class_definition:
params = '__init__('+ class_definition.split('def __init__(')[1].split('\n')[0][:-1]
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
script_info[class_name] = (f, params, attrs, methods, example)
asset_info = dict()
model_names = [f'\'{f.stem}\'' for f in path.glob('models_compressed/*.ursinamesh')]
asset_info['models'] = ('', '', model_names, '', '''e = Entity(model='quad')''')
texture_names = [f'\'{f.stem}\'' for f in path.glob('textures/*.*')]
asset_info['textures'] = ('', '', texture_names, '', '''e = Entity(model='cube', texture='brick')''')
shaders = [f'{f.stem}' for f in path.glob('shaders/*.*')] + [f'{f.stem}' for f in path.glob('shaders/screenspace_shaders/*.*')]
shaders = [e for e in shaders if not e.startswith('_')]
asset_info['shaders'] = ('', '', shaders, '', '''from ursina.shaders import normals_shader\ne = Entity(shader=normals_shader)''')
for f in path.glob('models/procedural/*.py'):
if f.name.startswith('_') or f in ignored_files:
continue
with open(f, encoding='utf8') as t:
code = t.read()
classes = get_classes(code)
for class_name, class_definition in classes.items():
if 'def __init__' in class_definition:
params = '__init__('+ class_definition.split('def __init__(')[1].split('\n')[0][:-1]
attrs = get_class_attributes(class_definition)
methods = get_functions(class_definition, is_class=True)
example = get_example(code, class_name)
asset_info[class_name] = (f, params, attrs, methods, example)
most_used_info = dict()
for name in ('Entity(NodePath)', 'Text(Entity)', 'Button(Entity)', 'mouse', 'raycaster',):
for d in (module_info, class_info, prefab_info):
if name in d:
most_used_info[name] = d[name]
del d[name]
def html_color(color):
return f'hsl({color.h}, {int(color.s*100)}%, {int(color.v*100)}%)'
def make_html(style, file_name):
if style == 'light':
base_color = color.color(60, 0, .99)
background_color = lerp(base_color, base_color.invert(), 0)
else:
base_color = color.color(60, 1, .01)
background_color = lerp(base_color, base_color.invert(), .125)
text_color = lerp(background_color, background_color.invert(), .9)
example_color = lerp(background_color, text_color, .1)
scrollbar_color = html_color(lerp(background_color, text_color, .1))
link_color = html_color(color.gray)
init_color = html_color(base_color.invert())
style = f'''
<style>
html {{
scrollbar-face-color: {html_color(text_color)};
scrollbar-base-color: {html_color(text_color)};
scrollbar-3dlight-color: {html_color(text_color)}4;
scrollbar-highlight-color: {html_color(text_color)};
scrollbar-track-color: {html_color(background_color)};
scrollbar-arrow-color: {html_color(background_color)};
scrollbar-shadow-color: {html_color(text_color)};
scrollbar-darkshadow-color: {html_color(text_color)};
}}
::-webkit-scrollbar {{ width: 8px; height: 3px;}}
::-webkit-scrollbar {{ width: 8px; height: 3px;}}
::-webkit-scrollbar-button {{ background-color: {scrollbar_color}; }}
::-webkit-scrollbar-track {{ background-color: {html_color(background_color)};}}
::-webkit-scrollbar-track-piece {{ background-color: {html_color(background_color)};}}
::-webkit-scrollbar-thumb {{ height: 50px; background-color: {scrollbar_color}; border-radius: 3px;}}
::-webkit-scrollbar-corner {{ background-color: {html_color(background_color)};}}
::-webkit-resizer {{ background-color: {html_color(background_color)};}}
body {{
margin: auto;
background-color: {html_color(background_color)};
color: {html_color(text_color)};
font-family: monospace;
position: absolute;
top:0;
left: 24em;
font-size: 1.375em;
font-weight: lighter;
max-width: 100%;
overflow-x: hidden;
white-space: pre-wrap;
}}
a {{
color: {link_color};
}}
purple {{color: hsl(289.0, 50%, 50%);}}
gray {{color: gray;}}
olive {{color: olive;}}
yellow {{color: darkgoldenrod;}}
green {{color: seagreen;}}
blue {{color: hsl(210, 50%, 50%);}}
.example {{
padding-left: 1em;
background-color: {html_color(example_color)};
}}
.params {{
color:{init_color};
font-weight:bold;
}}
</style>
'''
# return style
html = '<title> ursina cheat sheet</title>'
html += '''
<b>Ursina cheat sheet</b>
This document lists most modules and classes in ursina. Each section is structured as follows:
ClassName(BaseClass)
module location
parameters
How to instantiate the class, i.e. Button(text='', **kwargs).
'**kwargs' in this case, means you can give it optional keyword arguments.
For example, Button('Start', scale=.25, color=color.blue, position=(-.1,.25)) also includes
information on how big the button should be, its color and its position.
attributes
Names of values we can get/set, sometimes followed by its starting value and a short explanation.
For example, 'scale', 'color' and 'position' are
attributes we gave the Button above. These are members of Entity, which Button class
inherits from, so the Button class can also access these.
methods/functions
These end with (), which means they are functions that can be called.
Also lists their parameters and default arguments.
For example, Entity has a method called 'look_at()'. You need to give it a
'target' (an Entity or position) to look at and optionally say
which axis will be facing the target.
example
You can search the document with Ctrl+F for instant search results.
'''
sidebar = '''
<div class="sidebar" style="
left: 0px;
position: fixed;
top: 0px;
padding-top:40px;
padding-left:20px;
bottom: 0;
overflow-y: scroll;
width: 15em;
z-index: 1;
">
<a href="cheat_sheet.html">light</a> <a href="cheat_sheet_dark.html">dark</a>
'''
for i, class_dictionary in enumerate((most_used_info, module_info, class_info, prefab_info, script_info, asset_info)):
for name, attrs_and_functions in class_dictionary.items():
print('generating docs for', name)
location, params, attrs, funcs, example = attrs_and_functions
params = params.replace('__init__', name.split('(')[0])
params = params.replace('(self, ', '(')
params = params.replace('(self)', '()')
name = name.replace('ShowBase', '')
name = name.replace('NodePath', '')
for parent_class in ('Entity', 'Button', 'Draggable', 'Text', 'Collider', 'Mesh', 'Prismatoid'):
name = name.replace(f'({parent_class})', f'(<a style="color: gray;" href="#{parent_class}">{parent_class}</a>)')
base_name = name
if '(' in base_name:
base_name = base_name.split('(')[0]
base_name = base_name.split(')')[0]
name = name.replace('(', '<gray>(')
name = name.replace(')', ')</gray>')
v = lerp(text_color.v, background_color.v, .2)
# v = .5
col = color.color(50-(i*30), .9, v)
col = html_color(col)
sidebar += f'''<a style="color:{col};" href="#{base_name}">{base_name}</a>\n'''
html += '\n'
html += f'''<div id="{base_name}"><div id="{base_name}" style="color:{col}; font-size:1.75em; font-weight:normal;">{name}</div>'''
html += '<div style="position:relative; padding:0em 0em 2em 1em; margin:0;">'
# location
location = str(location)
if 'ursina' in location:
location = location.split('ursina')[-1]
github_link = 'https://github.com/pokepetter/ursina/tree/master/ursina' + location.replace('\\', '/')
location = location.replace('\\', '.')[:-3]
html += f'''<a href="{github_link}"><gray>ursina{location}</gray></a><br><br>'''
if params:
params = f'<params class="params">{params}</params>\n'
html += params + '\n'
for e in attrs:
if ' = ' in e:
e = f'''{e.split(' = ')[0]}<gray> = {e.split(' = ')[1]}</gray> '''
html += f'''{e}\n'''
html += '\n'
for e in funcs:
e = f'{e[0]}(<gray>{e[1]}</gray>) <gray>{e[2]}</gray>'
html += e + '\n'
if example:
html += '\n<div class="example">' + example +'\n</div>'
html += '\n</div></div>'
html = html.replace('<gray></gray>', '')
sidebar += '\n'
sidebar += '</div>'
html += '</div>'
html = sidebar + style + '<div id="content">' + html + '</div>' + '</body>'
with open(file_name, 'w', encoding='utf-8') as f:
f.write(html)
make_html('light', 'cheat_sheet.html')
make_html('dark', 'cheat_sheet_dark.html')
|
import swagger_client
from swagger_client.rest import ApiException
import maya
import os
import json
import datetime
import pandas as pd
import glob
from loguru import logger
import requests
import socket
import urllib
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
class StravaIO():
def __init__(self, access_token=None):
if access_token is None:
access_token = os.getenv('STRAVA_ACCESS_TOKEN')
self.configuration = swagger_client.Configuration()
self.configuration.access_token = access_token
self._api_client = swagger_client.ApiClient(self.configuration)
self.athletes_api = swagger_client.AthletesApi(self._api_client)
self.activities_api = swagger_client.ActivitiesApi(self._api_client)
self.streams_api = swagger_client.StreamsApi(self._api_client)
def get_logged_in_athlete(self):
"""Get logged in athlete
Returns
-------
athlete: Athlete object
"""
try:
rv = Athlete(self.athletes_api.get_logged_in_athlete())
except ApiException as e:
logger.error(f""""
Error in strava_swagger_client.AthletesApi!
STRAVA_ACCESS_TOKEN is likely out of date!
Check the https://github.com/sladkovm/strava-oauth for help.
Returning None.
Original Error:
{e}""")
rv = None
return rv
def local_athletes(self):
"""List local athletes
Returns
-------
athletes: generator of JSON friendly dicts
"""
for f_name in glob.glob(os.path.join(dir_stravadata(), 'athlete*.json')):
with open(f_name) as f:
yield json.load(f)
def get_activity_by_id(self, id):
"""Get activity by ID
Returns
-------
        activity: Activity object
"""
return Activity(self.activities_api.get_activity_by_id(id))
def get_logged_in_athlete_activities(self, after=0, list_activities=None):
"""List all activities after a given date
Parameters
----------
after: int, str or datetime object
If integer, the time since epoch is assumed
If str, the maya.parse() compatible date string is expected e.g. iso8601 or 2018-01-01 or 20180101
If datetime, the datetime object is expected
Returns
-------
list_activities: list
List of SummaryActivity objects
"""
if list_activities is None:
list_activities = []
after = date_to_epoch(after)
_fetched = self.activities_api.get_logged_in_athlete_activities(after=after)
if len(_fetched) > 0:
print(f"Fetched {len(_fetched)}, the latests is on {_fetched[-1].start_date}")
list_activities.extend(_fetched)
if len(_fetched) == 30:
last_after = list_activities[-1].start_date
return self.get_logged_in_athlete_activities(after=last_after, list_activities=list_activities)
else:
print("empty list")
return list_activities
def local_activities(self, athlete_id):
"""List local activities
Parameters
----------
athlete_id: int
Returns
-------
activities: generator of JSON friendly dicts
"""
dir_activities = os.path.join(dir_stravadata(), f"activities_{athlete_id}")
for f_name in glob.glob(os.path.join(dir_activities, '*.json')):
with open(f_name) as f:
yield json.load(f)
def local_streams(self, athlete_id):
"""List local streams
Parameters
----------
athlete_id: int
Returns
-------
streams: generator of dataframes
"""
dir_streams = os.path.join(dir_stravadata(), f"streams_{athlete_id}")
for f_name in glob.glob(os.path.join(dir_streams, '*.parquet')):
yield pd.read_parquet(f_name)
def get_activity_streams(self, id, athlete_id, local=True):
"""Get activity streams by ID
Parameters
----------
id: int
activity_id
athlete_id: int
athlete_id
local: bool (default=True)
            if the streams are already stored, return the local version
Returns
-------
        streams: Streams object (remote) or pd.DataFrame (local)
"""
if local:
dir_streams = os.path.join(dir_stravadata(), f"streams_{athlete_id}")
f_name = f"streams_{id}.parquet"
f_path = os.path.join(dir_streams, f_name)
if f_path in glob.glob(f_path):
return pd.read_parquet(f_path)
keys = ['time', 'distance', 'latlng', 'altitude', 'velocity_smooth',
'heartrate', 'cadence', 'watts', 'temp', 'moving', 'grade_smooth']
api_response = self.streams_api.get_activity_streams(id, keys, key_by_type=True)
return Streams(api_response, id, athlete_id)
class Athlete():
def __init__(self, api_response):
"""
Parameters
----------
api_response: swagger_client.get...() object
e.g. athletes_api.get_logged_in_athlete()
"""
self.api_response = api_response
self.id = self.api_response.id
def __str__(self):
return self._stringify()
def __repr__(self):
return self._stringify()
def to_dict(self):
_dict = self.api_response.to_dict()
_dict = convert_datetime_to_iso8601(_dict)
return _dict
def store_locally(self):
strava_dir = dir_stravadata()
f_name = f"athlete_{self.api_response.id}.json"
with open(os.path.join(strava_dir, f_name), 'w') as fp:
json.dump(self.to_dict(), fp)
def _stringify(self):
return json.dumps(self.to_dict(), indent=2)
class Activity():
def __init__(self, api_response, client=None):
self.api_response = api_response
self.athlete_id = self.api_response.athlete.id
self.id = self.api_response.id
if client:
self.streams_api = client.streams_api
        else:
            self.streams_api = None
def __repr__(self):
return f"Activity: {self.id}, Date: {self.api_response.start_date}, Name: {self.api_response.name}"
def to_dict(self):
_dict = self.api_response.to_dict()
_dict = convert_datetime_to_iso8601(_dict)
return _dict
def store_locally(self):
strava_dir = dir_stravadata()
athlete_id = self.api_response.athlete.id
activities_dir = os.path.join(strava_dir, f"activities_{athlete_id}")
if not os.path.exists(activities_dir):
os.mkdir(activities_dir)
f_name = f"activity_{self.api_response.id}.json"
with open(os.path.join(activities_dir, f_name), 'w') as fp:
json.dump(self.to_dict(), fp)
class Streams():
ACCEPTED_KEYS = ['time', 'distance', 'altitude', 'velocity_smooth', 'heartrate', 'cadence', 'watts', 'temp', 'moving', 'grade_smooth', 'lat', 'lng']
def __init__(self, api_response, activity_id, athlete_id):
self.api_response = api_response
self.activity_id = activity_id
self.athlete_id = athlete_id
def __repr__(self):
return f"""Streams for {self.activity_id}\nKeys: {list(self.to_dict().keys())}\nAccess: obj.key or obj.to_dict() to load into a pd.DataFrame()"""
def to_dict(self):
_dict = self.api_response.to_dict()
r = {}
for k, v in _dict.items():
if v is not None:
r.update({k: v['data']})
if r.get('latlng', None):
latlng = r.pop('latlng')
_r = list(zip(*latlng))
r.update({'lat': list(_r[0])})
r.update({'lng': list(_r[1])})
return r
def store_locally(self):
_df = pd.DataFrame(self.to_dict())
strava_dir = dir_stravadata()
streams_dir = os.path.join(strava_dir, f"streams_{self.athlete_id}")
if not os.path.exists(streams_dir):
os.mkdir(streams_dir)
f_name = f"streams_{self.activity_id}.parquet"
_df.to_parquet(os.path.join(streams_dir, f_name))
@property
def time(self):
return self._get_stream_by_name('time')
@property
def distance(self):
return self._get_stream_by_name('distance')
@property
def altitude(self):
return self._get_stream_by_name('altitude')
@property
def velocity_smooth(self):
return self._get_stream_by_name('velocity_smooth')
@property
def heartrate(self):
return self._get_stream_by_name('heartrate')
@property
def cadence(self):
return self._get_stream_by_name('cadence')
@property
def watts(self):
return self._get_stream_by_name('watts')
@property
def grade_smooth(self):
return self._get_stream_by_name('grade_smooth')
@property
def moving(self):
return self._get_stream_by_name('moving')
@property
def lat(self):
return self._get_stream_by_name('lat')
@property
def lng(self):
return self._get_stream_by_name('lng')
def _get_stream_by_name(self, key):
if key not in self.ACCEPTED_KEYS:
raise KeyError(f"key must be one of {self.ACCEPTED_KEYS}")
try:
rv = self.to_dict()[key]
except KeyError:
logger.warning(f"Stream does not contain {key}")
rv = None
return rv
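
# Note (added for clarity): Streams.to_dict() flattens the raw API response,
# keeping only each stream's 'data' list and splitting 'latlng' pairs into
# separate 'lat' and 'lng' lists, e.g.
#   {'time': {'data': [0, 1]}, 'latlng': {'data': [[50.1, 4.2], [50.2, 4.3]]}}
# becomes
#   {'time': [0, 1], 'lat': [50.1, 50.2], 'lng': [4.2, 4.3]}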
def strava_oauth2(client_id=None, client_secret=None):
"""Run strava authorization flow. This function will open a default system
browser alongside starting a local webserver. The authorization procedure will be completed in the browser.
The access token will be returned in the browser in the format ready to copy to the .env file.
Parameters:
-----------
    client_id: int, if not provided it will be retrieved from the STRAVA_CLIENT_ID env variable
    client_secret: str, if not provided it will be retrieved from the STRAVA_CLIENT_SECRET env variable
"""
if client_id is None:
client_id = os.getenv('STRAVA_CLIENT_ID', None)
if client_id is None:
raise ValueError('client_id is None')
if client_secret is None:
client_secret = os.getenv('STRAVA_CLIENT_SECRET', None)
if client_secret is None:
raise ValueError('client_secret is None')
port = 8000
_request_strava_authorize(client_id, port)
logger.info(f"serving at port {port}")
token = run_server_and_wait_for_token(
port=port,
client_id=client_id,
client_secret=client_secret
)
return token
def _request_strava_authorize(client_id, port):
params_oauth = {
"client_id": client_id,
"response_type": "code",
"redirect_uri": f"http://localhost:{port}/authorization_successful",
"scope": "read,profile:read_all,activity:read",
"state": 'https://github.com/sladkovm/strava-http',
"approval_prompt": "force"
}
values_url = urllib.parse.urlencode(params_oauth)
base_url = 'https://www.strava.com/oauth/authorize'
rv = base_url + '?' + values_url
webbrowser.get().open(rv)
return None
def run_server_and_wait_for_token(port, client_id, client_secret):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', port))
s.listen()
conn, addr = s.accept()
request_bytes = b''
with conn:
while True:
chunk = conn.recv(512)
request_bytes += chunk
if request_bytes.endswith(b'\r\n\r\n'):
break
conn.sendall(b'HTTP/1.1 200 OK\r\n\r\nsuccess\r\n')
request = request_bytes.decode('utf-8')
status_line = request.split('\n', 1)[0]
method, raw_url, protocol_version = status_line.split(' ')
url = urllib.parse.urlparse(raw_url)
query_string = url.query
query_params = urllib.parse.parse_qs(query_string, keep_blank_values=True)
if url.path == "/authorization_successful":
code = query_params.get('code')[0]
logger.debug(f"code: {code}")
params = {
"client_id": client_id,
"client_secret": client_secret,
"code": code,
"grant_type": "authorization_code"
}
r = requests.post("https://www.strava.com/oauth/token", params)
data = r.json()
logger.debug(f"Authorized athlete: {data.get("access_token", "Oeps something went wrong!")}")
else:
data = url.path.encode()
return data
def convert_datetime_to_iso8601(d):
for k, v in d.items():
if isinstance(v, dict):
convert_datetime_to_iso8601(v)
elif isinstance(v, list):
for i in v:
if isinstance(i, dict):
convert_datetime_to_iso8601(i)
else:
if isinstance(v, datetime.datetime):
d[k] = maya.parse(v).iso8601()
return d
def dir_stravadata():
home_dir = os.path.expanduser('~')
strava_dir = os.path.join(home_dir, '.stravadata')
if not os.path.exists(strava_dir):
os.mkdir(strava_dir)
return strava_dir
def date_to_epoch(date):
"""Convert a date to epoch representation"""
rv = None
if isinstance(date, int):
rv = date
if isinstance(date, datetime.datetime):
_ = maya.parse(date)
rv = _.epoch
if isinstance(date, str):
_ = maya.when(date)
rv = _.epoch
if rv is None:
        raise TypeError('date must be an epoch int, a datetime obj or a string')
return rv
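
# --- Hedged usage sketch (added commentary; assumes a valid access token) ---
# Typical flow with the classes above: create a client, fetch and store the
# athlete, then page through activities and persist them and their streams
# locally. The summary objects come from the Strava swagger models and are
# assumed to expose an .id attribute; the function name below is illustrative.
def _example_sync_strava_data():
    client = StravaIO(access_token=os.getenv('STRAVA_ACCESS_TOKEN'))
    athlete = client.get_logged_in_athlete()
    if athlete is None:  # token missing or expired, see strava_oauth2()
        return
    athlete.store_locally()
    for summary in client.get_logged_in_athlete_activities(after='2018-01-01'):
        activity = client.get_activity_by_id(summary.id)
        activity.store_locally()
        streams = client.get_activity_streams(summary.id, athlete.id)
        if not isinstance(streams, pd.DataFrame):  # remote Streams object
            streams.store_locally()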
|
import swagger_client
from swagger_client.rest import ApiException
import maya
import os
import json
import datetime
import pandas as pd
import glob
from loguru import logger
import requests
import socket
import urllib
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
class StravaIO():
def __init__(self, access_token=None):
if access_token is None:
access_token = os.getenv('STRAVA_ACCESS_TOKEN')
self.configuration = swagger_client.Configuration()
self.configuration.access_token = access_token
self._api_client = swagger_client.ApiClient(self.configuration)
self.athletes_api = swagger_client.AthletesApi(self._api_client)
self.activities_api = swagger_client.ActivitiesApi(self._api_client)
self.streams_api = swagger_client.StreamsApi(self._api_client)
def get_logged_in_athlete(self):
"""Get logged in athlete
Returns
-------
athlete: Athlete object
"""
try:
rv = Athlete(self.athletes_api.get_logged_in_athlete())
except ApiException as e:
logger.error(f""""
Error in strava_swagger_client.AthletesApi!
STRAVA_ACCESS_TOKEN is likely out of date!
Check the https://github.com/sladkovm/strava-oauth for help.
Returning None.
Original Error:
{e}""")
rv = None
return rv
def local_athletes(self):
"""List local athletes
Returns
-------
athletes: generator of JSON friendly dicts
"""
for f_name in glob.glob(os.path.join(dir_stravadata(), 'athlete*.json')):
with open(f_name) as f:
yield json.load(f)
def get_activity_by_id(self, id):
"""Get activity by ID
Returns
-------
        activity: Activity object
"""
return Activity(self.activities_api.get_activity_by_id(id))
def get_logged_in_athlete_activities(self, after=0, list_activities=None):
"""List all activities after a given date
Parameters
----------
after: int, str or datetime object
If integer, the time since epoch is assumed
If str, the maya.parse() compatible date string is expected e.g. iso8601 or 2018-01-01 or 20180101
If datetime, the datetime object is expected
Returns
-------
list_activities: list
List of SummaryActivity objects
"""
if list_activities is None:
list_activities = []
after = date_to_epoch(after)
_fetched = self.activities_api.get_logged_in_athlete_activities(after=after)
if len(_fetched) > 0:
print(f"Fetched {len(_fetched)}, the latests is on {_fetched[-1].start_date}")
list_activities.extend(_fetched)
if len(_fetched) == 30:
last_after = list_activities[-1].start_date
return self.get_logged_in_athlete_activities(after=last_after, list_activities=list_activities)
else:
print("empty list")
return list_activities
def local_activities(self, athlete_id):
"""List local activities
Parameters
----------
athlete_id: int
Returns
-------
activities: generator of JSON friendly dicts
"""
dir_activities = os.path.join(dir_stravadata(), f"activities_{athlete_id}")
for f_name in glob.glob(os.path.join(dir_activities, '*.json')):
with open(f_name) as f:
yield json.load(f)
def local_streams(self, athlete_id):
"""List local streams
Parameters
----------
athlete_id: int
Returns
-------
streams: generator of dataframes
"""
dir_streams = os.path.join(dir_stravadata(), f"streams_{athlete_id}")
for f_name in glob.glob(os.path.join(dir_streams, '*.parquet')):
yield pd.read_parquet(f_name)
def get_activity_streams(self, id, athlete_id, local=True):
"""Get activity streams by ID
Parameters
----------
id: int
activity_id
athlete_id: int
athlete_id
local: bool (default=True)
            if the streams are already stored, return the local version
Returns
-------
        streams: Streams object (remote) or pd.DataFrame (local)
"""
if local:
dir_streams = os.path.join(dir_stravadata(), f"streams_{athlete_id}")
f_name = f"streams_{id}.parquet"
f_path = os.path.join(dir_streams, f_name)
if f_path in glob.glob(f_path):
return pd.read_parquet(f_path)
keys = ['time', 'distance', 'latlng', 'altitude', 'velocity_smooth',
'heartrate', 'cadence', 'watts', 'temp', 'moving', 'grade_smooth']
api_response = self.streams_api.get_activity_streams(id, keys, key_by_type=True)
return Streams(api_response, id, athlete_id)
class Athlete():
def __init__(self, api_response):
"""
Parameters
----------
api_response: swagger_client.get...() object
e.g. athletes_api.get_logged_in_athlete()
"""
self.api_response = api_response
self.id = self.api_response.id
def __str__(self):
return self._stringify()
def __repr__(self):
return self._stringify()
def to_dict(self):
_dict = self.api_response.to_dict()
_dict = convert_datetime_to_iso8601(_dict)
return _dict
def store_locally(self):
strava_dir = dir_stravadata()
f_name = f"athlete_{self.api_response.id}.json"
with open(os.path.join(strava_dir, f_name), 'w') as fp:
json.dump(self.to_dict(), fp)
def _stringify(self):
return json.dumps(self.to_dict(), indent=2)
class Activity():
def __init__(self, api_response, client=None):
self.api_response = api_response
self.athlete_id = self.api_response.athlete.id
self.id = self.api_response.id
if client:
self.streams_api = client.streams_api
        else:
            self.streams_api = None
def __repr__(self):
return f"Activity: {self.id}, Date: {self.api_response.start_date}, Name: {self.api_response.name}"
def to_dict(self):
_dict = self.api_response.to_dict()
_dict = convert_datetime_to_iso8601(_dict)
return _dict
def store_locally(self):
strava_dir = dir_stravadata()
athlete_id = self.api_response.athlete.id
activities_dir = os.path.join(strava_dir, f"activities_{athlete_id}")
if not os.path.exists(activities_dir):
os.mkdir(activities_dir)
f_name = f"activity_{self.api_response.id}.json"
with open(os.path.join(activities_dir, f_name), 'w') as fp:
json.dump(self.to_dict(), fp)
class Streams():
ACCEPTED_KEYS = ['time', 'distance', 'altitude', 'velocity_smooth', 'heartrate', 'cadence', 'watts', 'temp', 'moving', 'grade_smooth', 'lat', 'lng']
def __init__(self, api_response, activity_id, athlete_id):
self.api_response = api_response
self.activity_id = activity_id
self.athlete_id = athlete_id
def __repr__(self):
return f"""Streams for {self.activity_id}\nKeys: {list(self.to_dict().keys())}\nAccess: obj.key or obj.to_dict() to load into a pd.DataFrame()"""
def to_dict(self):
_dict = self.api_response.to_dict()
r = {}
for k, v in _dict.items():
if v is not None:
r.update({k: v['data']})
if r.get('latlng', None):
latlng = r.pop('latlng')
_r = list(zip(*latlng))
r.update({'lat': list(_r[0])})
r.update({'lng': list(_r[1])})
return r
def store_locally(self):
_df = pd.DataFrame(self.to_dict())
strava_dir = dir_stravadata()
streams_dir = os.path.join(strava_dir, f"streams_{self.athlete_id}")
if not os.path.exists(streams_dir):
os.mkdir(streams_dir)
f_name = f"streams_{self.activity_id}.parquet"
_df.to_parquet(os.path.join(streams_dir, f_name))
@property
def time(self):
return self._get_stream_by_name('time')
@property
def distance(self):
return self._get_stream_by_name('distance')
@property
def altitude(self):
return self._get_stream_by_name('altitude')
@property
def velocity_smooth(self):
return self._get_stream_by_name('velocity_smooth')
@property
def heartrate(self):
return self._get_stream_by_name('heartrate')
@property
def cadence(self):
return self._get_stream_by_name('cadence')
@property
def watts(self):
return self._get_stream_by_name('watts')
@property
def grade_smooth(self):
return self._get_stream_by_name('grade_smooth')
@property
def moving(self):
return self._get_stream_by_name('moving')
@property
def lat(self):
return self._get_stream_by_name('lat')
@property
def lng(self):
return self._get_stream_by_name('lng')
def _get_stream_by_name(self, key):
if key not in self.ACCEPTED_KEYS:
raise KeyError(f"key must be one of {self.ACCEPTED_KEYS}")
try:
rv = self.to_dict()[key]
except KeyError:
logger.warning(f"Stream does not contain {key}")
rv = None
return rv
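# --- Illustrative usage sketch (comments only; not part of the original module) ---
# The __repr__ above hints at how streams are meant to be consumed: one key at a
# time via the properties, or all at once via to_dict() loaded into a pandas
# DataFrame. Assuming `client` is an already-authorized API client and `activity`
# is an Activity fetched through it, the flow would look roughly like:
#
#   raw = client.streams_api.get_activity_streams(activity.id, keys=Streams.ACCEPTED_KEYS)
#   streams = Streams(raw, activity_id=activity.id, athlete_id=activity.athlete_id)
#   df = pd.DataFrame(streams.to_dict())   # one column per available stream
#   hr = streams.heartrate                 # or access a single stream directly
#
# The exact client call shown here is an assumption for illustration; only the
# Streams wrapper itself is defined in this module.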
def strava_oauth2(client_id=None, client_secret=None):
"""Run strava authorization flow. This function will open a default system
browser alongside starting a local webserver. The authorization procedure will be completed in the browser.
The access token will be returned in the browser in the format ready to copy to the .env file.
Parameters:
-----------
client_id: int, if not provided will be retrieved from the STRAVA_CLIENT_ID env viriable
client_secret: str, if not provided will be retrieved from the STRAVA_CLIENT_SECRET env viriable
"""
if client_id is None:
client_id = os.getenv('STRAVA_CLIENT_ID', None)
if client_id is None:
raise ValueError('client_id is None')
if client_secret is None:
client_secret = os.getenv('STRAVA_CLIENT_SECRET', None)
if client_secret is None:
raise ValueError('client_secret is None')
port = 8000
_request_strava_authorize(client_id, port)
logger.info(f"serving at port {port}")
token = run_server_and_wait_for_token(
port=port,
client_id=client_id,
client_secret=client_secret
)
return token
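# --- Illustrative usage sketch (comments only; not part of the original module) ---
# Assuming STRAVA_CLIENT_ID and STRAVA_CLIENT_SECRET are set in the environment,
# running the flow end to end would look roughly like:
#
#   token = strava_oauth2()                     # opens a browser, listens on port 8000
#   access_token = token.get('access_token')    # dict mirrors Strava's /oauth/token response
#
# The token payload can then be copied into the .env file as described in the docstring.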
def _request_strava_authorize(client_id, port):
params_oauth = {
"client_id": client_id,
"response_type": "code",
"redirect_uri": f"http://localhost:{port}/authorization_successful",
"scope": "read,profile:read_all,activity:read",
"state": 'https://github.com/sladkovm/strava-http',
"approval_prompt": "force"
}
values_url = urllib.parse.urlencode(params_oauth)
base_url = 'https://www.strava.com/oauth/authorize'
rv = base_url + '?' + values_url
webbrowser.get().open(rv)
return None
def run_server_and_wait_for_token(port, client_id, client_secret):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', port))
s.listen()
conn, addr = s.accept()
request_bytes = b''
with conn:
while True:
chunk = conn.recv(512)
request_bytes += chunk
if request_bytes.endswith(b'\r\n\r\n'):
break
conn.sendall(b'HTTP/1.1 200 OK\r\n\r\nsuccess\r\n')
request = request_bytes.decode('utf-8')
status_line = request.split('\n', 1)[0]
method, raw_url, protocol_version = status_line.split(' ')
url = urllib.parse.urlparse(raw_url)
query_string = url.query
query_params = urllib.parse.parse_qs(query_string, keep_blank_values=True)
if url.path == "/authorization_successful":
code = query_params.get('code')[0]
logger.debug(f"code: {code}")
params = {
"client_id": client_id,
"client_secret": client_secret,
"code": code,
"grant_type": "authorization_code"
}
r = requests.post("https://www.strava.com/oauth/token", params)
data = r.json()
logger.debug(f"Authorized athlete: {data.get('access_token', 'Oeps something went wrong!')}")
else:
data = url.path.encode()
return data
def convert_datetime_to_iso8601(d):
for k, v in d.items():
if isinstance(v, dict):
convert_datetime_to_iso8601(v)
elif isinstance(v, list):
for i in v:
if isinstance(i, dict):
convert_datetime_to_iso8601(i)
else:
if isinstance(v, datetime.datetime):
d[k] = maya.parse(v).iso8601()
return d
def dir_stravadata():
home_dir = os.path.expanduser('~')
strava_dir = os.path.join(home_dir, '.stravadata')
if not os.path.exists(strava_dir):
os.mkdir(strava_dir)
return strava_dir
def date_to_epoch(date):
"""Convert a date to epoch representation"""
rv = None
if isinstance(date, int):
rv = date
if isinstance(date, datetime.datetime):
_ = maya.parse(date)
rv = _.epoch
if isinstance(date, str):
_ = maya.when(date)
rv = _.epoch
if rv is None:
raise TypeError('date must be an epoch int, a datetime object, or a date string')
return rv
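# Worked examples for date_to_epoch (comments only): all three accepted input
# types map to the same epoch representation, e.g.
#   date_to_epoch(1577836800)                      -> 1577836800
#   date_to_epoch(datetime.datetime(2020, 1, 1))   -> epoch seconds via maya.parse
#   date_to_epoch("2020-01-01")                    -> epoch seconds via maya.when
# (the exact numeric value of the last two depends on the timezone maya assumes).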
|
from ...configuration.configuration import Configuration
from ...exceptions.executorexceptions import CommandExecutionFailure
from ...interfaces.batchsystemadapter import BatchSystemAdapter
from ...interfaces.batchsystemadapter import MachineStatus
from ...utilities.utils import async_run_command
from ...utilities.utils import htcondor_cmd_option_formatter
from ...utilities.utils import htcondor_csv_parser
from ...utilities.asynccachemap import AsyncCacheMap
from ...utilities.attributedict import AttributeDict
from functools import partial
from shlex import quote
from typing import Iterable
import logging
async def htcondor_status_updater(
options: AttributeDict, attributes: AttributeDict
) -> dict:
"""
Helper function to call ``condor_status -af`` asynchronously and to translate
the output into a dictionary
:param options: Additional options for the condor_status call. For example
``{'pool': 'htcondor.example'}`` will be translated into
``condor_status -af ... -pool htcondor.example``
:type options: AttributeDict
:param attributes: Additional fields to add to output of the
``condor_status -af`` response.
:type attributes: AttributeDict
:return: Dictionary containing the output of the ``condor_status`` command
:rtype: dict
"""
attributes_string = f'-af:t {" ".join(attributes.values())}'
options_string = htcondor_cmd_option_formatter(options)
cmd = f"condor_status {attributes_string} -constraint PartitionableSlot=?=True"
if options_string:
cmd = f"{cmd} {options_string}"
htcondor_status = {}
try:
logging.debug(f"HTCondor status update is running. Command: {cmd}")
condor_status = await async_run_command(cmd)
for row in htcondor_csv_parser(
htcondor_input=condor_status,
fieldnames=tuple(attributes.keys()),
delimiter="\t",
replacements=dict(undefined=None),
):
status_key = row["TardisDroneUuid"] or row["Machine"].split(".")[0]
htcondor_status[status_key] = row
except CommandExecutionFailure as cef:
logging.error("condor_status could not be executed!")
logging.error(str(cef))
raise
else:
logging.debug("HTCondor status update finished.")
return htcondor_status
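# --- Illustrative example (comments only; added for clarity) ---
# With options = AttributeDict(pool='htcondor.example') and
# attributes = AttributeDict(Machine='Machine', State='State', Activity='Activity',
#                            TardisDroneUuid='TardisDroneUuid'),
# the helper above assembles a command roughly equivalent to
#
#   condor_status -af:t Machine State Activity TardisDroneUuid \
#       -constraint PartitionableSlot=?=True -pool htcondor.example
#
# assuming htcondor_cmd_option_formatter renders {'pool': ...} as "-pool htcondor.example",
# consistent with the docstring above. Each returned row is keyed by
# TardisDroneUuid, falling back to the machine's short host name.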
class HTCondorAdapter(BatchSystemAdapter):
"""
:py:class:`~tardis.adapters.batchsystems.htcondor.HTCondorAdapter` implements
the TARDIS interface to dynamically integrate and manage opportunistic resources
with the HTCondor Batch System.
"""
def __init__(self):
config = Configuration()
self.ratios = config.BatchSystem.ratios
try:
self.htcondor_options = config.BatchSystem.options
except AttributeError:
self.htcondor_options = {}
attributes = dict(
Machine="Machine",
State="State",
Activity="Activity",
TardisDroneUuid="TardisDroneUuid",
)
# Escape htcondor expressions and add them to attributes
attributes.update({key: quote(value) for key, value in self.ratios.items()})
self._htcondor_status = AsyncCacheMap(
update_coroutine=partial(
htcondor_status_updater, self.htcondor_options, attributes
),
max_age=config.BatchSystem.max_age * 60,
)
async def disintegrate_machine(self, drone_uuid: str) -> None:
"""
HTCondor does not require any specific disintegration procedure.
:param drone_uuid: Uuid of the worker node, for some sites corresponding
to the host name of the drone.
:type drone_uuid: str
:return: None
"""
return
async def drain_machine(self, drone_uuid: str) -> None:
"""
Drain a machine in the HTCondor batch system, which means that no new
jobs will be accepted
:param drone_uuid: Uuid of the worker node, for some sites corresponding
to the host name of the drone.
:type drone_uuid: str
:return: None
"""
await self._htcondor_status.update_status()
try:
machine = self._htcondor_status[drone_uuid]["Machine"]
except KeyError:
return
options_string = htcondor_cmd_option_formatter(self.htcondor_options)
if options_string:
cmd = f"condor_drain {options_string} -graceful {machine}"
else:
cmd = f"condor_drain -graceful {machine}"
try:
return await async_run_command(cmd)
except CommandExecutionFailure as cef:
if cef.exit_code == 1:
# exit code 1: HTCondor can't connect to StartD of Drone
# https://github.com/htcondor/htcondor/blob/master/src/condor_tools/drain.cpp # noqa: B950
logging.debug(f"Draining failed with: {str(cef)}")
logging.debug(
f"Probably drone {drone_uuid} is not available or already drained."
)
return
raise cef
async def integrate_machine(self, drone_uuid: str) -> None:
"""
HTCondor does not require any specific integration procedure
:param drone_uuid: Uuid of the worker node, for some sites corresponding
to the host name of the drone.
:type drone_uuid: str
:return: None
"""
return None
async def get_resource_ratios(self, drone_uuid: str) -> Iterable[float]:
"""
Get the ratio of requested over total resources (CPU, Memory, Disk, etc.)
for a worker node in HTCondor according to the HTCondor expressions
defined in the adapter configuration.
:param drone_uuid: Uuid of the worker node, for some sites corresponding
to the host name of the drone.
:type drone_uuid: str
:return: Iterable of float containing the ratios
:rtype: Iterable[float]
"""
await self._htcondor_status.update_status()
try:
htcondor_status = self._htcondor_status[drone_uuid]
except KeyError:
return {}
else:
return (
float(value)
for key, value in htcondor_status.items()
if key in self.ratios.keys()
)
async def get_allocation(self, drone_uuid: str) -> float:
"""
Get the allocation of a worker node in HTCondor, which is defined as maximum
of the ratios of requested over total resources (CPU, Memory, Disk, etc.).
:param drone_uuid: Uuid of the worker node, for some sites corresponding
to the host name of the drone.
:type drone_uuid: str
:return: The allocation of a worker node as described above.
:rtype: float
"""
return max(await self.get_resource_ratios(drone_uuid), default=0.0)
async def get_machine_status(self, drone_uuid: str) -> MachineStatus:
"""
Get the status of a worker node in HTCondor (Available, Draining,
Drained, NotAvailable)
:param drone_uuid: Uuid of the worker node, for some sites corresponding
to the host name of the drone.
:type drone_uuid: str
:return: The machine status in HTCondor (Available, Draining, Drained,
NotAvailable)
:rtype: MachineStatus
"""
status_mapping = {
("Unclaimed", "Idle"): MachineStatus.Available,
("Drained", "Retiring"): MachineStatus.Draining,
("Drained", "Idle"): MachineStatus.Drained,
("Owner", "Idle"): MachineStatus.NotAvailable,
}
await self._htcondor_status.update_status()
try:
machine_status = self._htcondor_status[drone_uuid]
except KeyError:
return MachineStatus.NotAvailable
else:
return status_mapping.get(
(machine_status["State"], machine_status["Activity"]),
MachineStatus.NotAvailable,
)
async def get_utilization(self, drone_uuid: str) -> float:
"""
Get the utilization of a worker node in HTCondor, which is defined as
minimum of the ratios of requested over total resources
(CPU, Memory, Disk, etc.).
:param drone_uuid: Uuid of the worker node, for some sites corresponding
to the host name of the drone.
:type drone_uuid: str
:return: The utilization of a worker node as described above.
:rtype: float
"""
return min(await self.get_resource_ratios(drone_uuid), default=0.0)
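# --- Worked example (comments only; added for clarity) ---
# If get_resource_ratios yields, say, (0.5, 0.2) for the requested/total CPU and
# memory ratios, then get_allocation returns max(...) = 0.5 while get_utilization
# returns min(...) = 0.2; an unknown drone falls back to the default of 0.0.
# The concrete ratio values are hypothetical and only illustrate the max/min
# semantics implemented above.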
|
import uuid
from os import getenv
from boto3utils import s3
from cirruslib import Catalog, get_task_logger
# envvars
CATALOG_BUCKET = getenv('CIRRUS_CATALOG_BUCKET')
def lambda_handler(payload, context):
catalog = Catalog.from_payload(payload)
logger = get_task_logger("task.pre-batch", catalog=catalog)
url = f"s3://{CATALOG_BUCKET}/batch/{catalog["id"]}/{uuid.uuid1()}.json"
try:
# copy payload to s3
s3().upload_json(catalog, url)
logger.debug(f"Uploaded catalog to {url}")
return {
'url': url
}
except Exception as err:
msg = f"pre-batch: failed pre processing batch job for ({err})"
logger.error(msg, exc_info=True)
raise Exception(msg) from err
|
"""
Tasks for maintaining the project.
Execute 'invoke --list' for guidance on using Invoke
"""
import platform
import webbrowser
from pathlib import Path
from invoke import call, task
from invoke.context import Context
from invoke.runners import Result
ROOT_DIR = Path(__file__).parent
DOCS_DIR = ROOT_DIR.joinpath("docs")
DOCS_BUILD_DIR = DOCS_DIR.joinpath("_build")
DOCS_INDEX = DOCS_BUILD_DIR.joinpath("index.html")
TEST_DIR = ROOT_DIR.joinpath("tests")
PYTHON_TARGETS = [
TEST_DIR,
ROOT_DIR.joinpath("hooks"),
DOCS_DIR.joinpath("conf.py"),
ROOT_DIR.joinpath("noxfile.py"),
Path(__file__),
]
PYTHON_TARGETS_STR = " ".join([str(p) for p in PYTHON_TARGETS])
def _run(c: Context, command: str) -> Result:
return c.run(command, pty=platform.system() != "Windows")
@task()
def bake(c, replay=False):
# type: (Context, bool) -> None
"""Bake the cookie."""
bake_options = (
["--replay", "--overwrite-if-exists"]
if replay
else ["--no-input", "--overwrite-if-exists"]
)
_run(c, f"poetry run cookiecutter {" ".join(bake_options)} .")
@task()
def watch(c, replay=False):
# type: (Context, bool) -> None
"""Bake and watch for changes."""
bake(c, replay=replay)
_run(
c,
f"poetry run watchmedo shell-command -p '*.*' "
f"-c 'inv bake {"--replay" if replay else ""}' "
"-W -R -D {{ cookiecutter.project_name }}",
)
@task()
def clean_build(c):
# type: (Context) -> None
"""Clean up files from package building."""
_run(c, "rm -fr build/")
_run(c, "rm -fr dist/")
_run(c, "rm -fr .eggs/")
_run(c, "find . -name '*.egg-info' -exec rm -fr {} +")
_run(c, "find . -name '*.egg' -exec rm -f {} +")
@task()
def clean_python(c):
# type: (Context) -> None
"""Clean up python file artifacts."""
_run(c, "find . -name '*.pyc' -exec rm -f {} +")
_run(c, "find . -name '*.pyo' -exec rm -f {} +")
_run(c, "find . -name '*~' -exec rm -f {} +")
_run(c, "find . -name '__pycache__' -exec rm -fr {} +")
@task()
def clean_tests(c):
# type: (Context) -> None
"""Clean up files from testing."""
_run(c, "rm -fr .pytest_cache")
@task()
def clean_docs(c):
# type: (Context) -> None
"""Clean up files from documentation builds."""
_run(c, f"rm -fr {DOCS_BUILD_DIR}")
@task(pre=[clean_build, clean_python, clean_tests, clean_docs])
def clean(c):
# type: (Context) -> None
"""Run all clean sub-tasks."""
@task()
def install_hooks(c):
# type: (Context) -> None
"""Install pre-commit hooks."""
_run(c, "poetry run pre-commit install")
@task()
def hooks(c):
# type: (Context) -> None
"""Run pre-commit hooks."""
_run(c, "poetry run pre-commit run --all-files")
@task(name="format", help={"check": "Checks if source is formatted without applying changes"})
def format_(c, check=False):
# type: (Context, bool) -> None
"""Format code."""
isort_options = ["--check-only", "--diff"] if check else []
_run(c, f"poetry run isort {" ".join(isort_options)} {PYTHON_TARGETS_STR}")
black_options = ["--diff", "--check"] if check else ["--quiet"]
_run(c, f"poetry run black {" ".join(black_options)} {PYTHON_TARGETS_STR}")
@task()
def flake8(c):
# type: (Context) -> None
"""Run flake8."""
_run(c, f"poetry run flakehell lint {PYTHON_TARGETS_STR}")
@task()
def safety(c):
# type: (Context) -> None
"""Run safety."""
_run(
c,
"poetry export --dev --format=requirements.txt --without-hashes | "
"poetry run safety check --stdin --full-report",
)
@task(pre=[flake8, safety, call(format_, check=True)])
def lint(c):
# type: (Context) -> None
"""Run all linting."""
@task()
def mypy(c):
# type: (Context) -> None
"""Run mypy."""
_run(c, f"poetry run mypy {PYTHON_TARGETS_STR}")
@task()
def tests(c):
# type: (Context) -> None
"""Run tests."""
pytest_options = ["--xdoctest"]
_run(c, f"poetry run pytest {" ".join(pytest_options)} {TEST_DIR}")
@task(
help={
"serve": "Build the docs watching for changes",
"open_browser": "Open the docs in the web browser",
}
)
def docs(c, serve=False, open_browser=False):
# type: (Context, bool, bool) -> None
"""Build documentation."""
build_docs = f"sphinx-build -b html {DOCS_DIR} {DOCS_BUILD_DIR}"
_run(c, build_docs)
if open_browser:
webbrowser.open(DOCS_INDEX.absolute().as_uri())
if serve:
_run(c, f"poetry run watchmedo shell-command -p '*.rst;*.md' -c '{build_docs}' -R -D .")
@task(
help={
"part": "Part of the version to be bumped.",
"dry_run": "Don't write any files, just pretend. (default: False)",
}
)
def version(c, part, dry_run=False):
# type: (Context, str, bool) -> None
"""Bump version."""
bump_options = ["--dry-run"] if dry_run else []
_run(c, f"bump2version {" ".join(bump_options)} {part}")
|
'''
parse country case counts provided by ECDC and write results to TSV
this should be run from the top level of the repo.
Will need to be integrated with other parsers once they become available.
'''
import xlrd
import csv
import json
from urllib.request import urlretrieve
from collections import defaultdict
from datetime import datetime, timedelta
from .utils import sorted_date, parse_countries, stoi, store_data
# -----------------------------------------------------------------------------
# Globals
URL = "https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-"
LOC = 'case-counts'
cols = ['location', 'time', 'cases', 'deaths', 'hospitalized', 'ICU', 'recovered']
# -----------------------------------------------------------------------------
# Functions
def retrieve_case_data():
countries = parse_countries(1)
cases = defaultdict(list)
# For now, always get the data from yesterday. We could add a fancier check for whether today's data is already available
yesterday = datetime.today() - timedelta(days=1)
date = yesterday.strftime("%Y-%m-%d")
file_name, headers = urlretrieve(URL+date+".xlsx")
workbook = xlrd.open_workbook(file_name)
#worksheet = workbook.sheet_by_name('COVID-19-geographic-disbtributi')
worksheet = workbook.sheet_by_index(0) # likely more stable
i = 0
Ix = {}
for c in worksheet.row_values(0):
Ix[c] = i
i += 1
for row_index in range(1, worksheet.nrows):
row = worksheet.row_values(row_index)
country = row[Ix['Countries and territories']].replace("_"," ")
# replace country name if we have the "official" one in country_codes.csv
geoID = row[Ix['GeoId']]
if geoID in countries:
country = countries[geoID]
# date = "-".join([str(int(row[Ix['Year']])), str(int(row[Ix['Month']])), str(int(row[Ix['Day']]))])
date = f"{int(row[Ix["Year"]]):04d}-{int(row[Ix["Month"]]):02d}-{int(row[Ix["Day"]]):02d}"
# note: Cases are per day, not cumulative. We need to aggregate later
cases[country].append({"time": date, "deaths": stoi(row[Ix['Deaths']]), "cases": stoi(row[Ix['Cases']])})
for cntry, data in cases.items():
cases[cntry] = sorted_date(cases[cntry])
# aggregate cases/deaths here after sorting
for cntry, data in cases.items():
total = {}
total['cases'] = 0
total['deaths'] = 0
total['recovered'] = 0
for k in total:
for d in data:
if k in d and d[k]:
total[k] += d[k]
d[k] = total[k]
return dict(cases)
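# Worked example of the aggregation above (comments only): ECDC reports daily
# counts, so a sorted series of {'cases': 2}, {'cases': 3}, {'cases': 5} becomes
# cumulative {'cases': 2}, {'cases': 5}, {'cases': 10} after the loop; 'deaths'
# and 'recovered' are accumulated the same way. The numbers here are made up
# purely to illustrate the running-total logic.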
# -----------------------------------------------------------------------------
# Main point of entry
def parse():
cases = retrieve_case_data()
store_data(cases, {'default': LOC+'/World.tsv'}, 'ecdc')
if __name__ == "__main__":
# for debugging
cases = retrieve_case_data()
cases = flatten(cases)
|
import numpy as np
import scipy.special
import os
import math
import logging
import pandas as pd
import warnings
import time
import json
import pickle
import functools
import tqdm
from typing import Tuple
from autogluon.core.scheduler.scheduler_factory import scheduler_factory
from autogluon.core.utils import set_logger_verbosity
from sklearn.preprocessing import LabelEncoder
import mxnet as mx
from mxnet.util import use_np
from mxnet.lr_scheduler import PolyScheduler, CosineScheduler
from mxnet.gluon.data import DataLoader
from autogluon_contrib_nlp.models import get_backbone
from autogluon_contrib_nlp.lr_scheduler import InverseSquareRootScheduler
from autogluon_contrib_nlp.utils.config import CfgNode
from autogluon_contrib_nlp.utils.misc import grouper, \
count_parameters, repeat, get_mxnet_available_ctx
from autogluon_contrib_nlp.utils.parameter import move_to_ctx, clip_grad_global_norm
from autogluon.core import args, space
from autogluon.core.utils import in_ipynb, verbosity2loglevel
from autogluon.core.utils.utils import get_cpu_count, get_gpu_count
from autogluon.core.utils.loaders import load_pkl, load_pd
from autogluon.core.task.base import compile_scheduler_options_v2
from autogluon.core.task.base.base_task import schedulers
from autogluon.core.metrics import get_metric, Scorer
from autogluon.core.utils.multiprocessing_utils import force_forkserver
from autogluon.core.dataset import TabularDataset
from autogluon.core.decorator import sample_config
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION
from autogluon.core.scheduler.reporter import FakeReporter
from .modules import MultiModalWithPretrainedTextNN
from .preprocessing import MultiModalTextFeatureProcessor, base_preprocess_cfg,\
MultiModalTextBatchify, get_stats_string, auto_shrink_max_length, get_cls_sep_id
from .utils import average_checkpoints, set_seed
from .. import constants as _C
from ..utils import logging_config
from ..presets import ag_text_presets
from ... import version
logger = logging.getLogger(__name__)
@use_np
def get_optimizer(cfg, updates_per_epoch):
"""
Parameters
----------
cfg
Configuration
updates_per_epoch
The number of updates per training epoch
Returns
-------
optimizer
The optimizer
optimizer_params
Optimization parameters
max_update
Maximum update
"""
max_update = max(int(np.ceil(updates_per_epoch * cfg.num_train_epochs)), 3)
warmup_steps = int(np.ceil(updates_per_epoch * cfg.num_train_epochs * cfg.warmup_portion))
if cfg.lr_scheduler == 'triangular':
lr_scheduler = PolyScheduler(max_update=max_update,
base_lr=cfg.lr,
warmup_begin_lr=cfg.begin_lr,
pwr=1,
final_lr=cfg.final_lr,
warmup_steps=warmup_steps,
warmup_mode='linear')
elif cfg.lr_scheduler == 'inv_sqrt':
lr_scheduler = InverseSquareRootScheduler(warmup_steps=warmup_steps,
base_lr=cfg.lr,
warmup_init_lr=cfg.begin_lr)
elif cfg.lr_scheduler == 'constant':
lr_scheduler = None
elif cfg.lr_scheduler == 'cosine':
lr_scheduler = CosineScheduler(max_update=max_update,
base_lr=cfg.lr,
final_lr=cfg.final_lr,
warmup_steps=warmup_steps,
warmup_begin_lr=cfg.begin_lr)
else:
raise ValueError('Unsupported lr_scheduler="{}"'
.format(cfg.lr_scheduler))
optimizer_params = {'learning_rate': cfg.lr,
'wd': cfg.wd,
'lr_scheduler': lr_scheduler}
optimizer = cfg.optimizer
additional_params = {key: value for key, value in cfg.optimizer_params}
optimizer_params.update(additional_params)
return optimizer, optimizer_params, max_update
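# Worked example for the schedule arithmetic above (comments only): with
# updates_per_epoch=100, cfg.num_train_epochs=10 and cfg.warmup_portion=0.1,
# max_update = ceil(100 * 10) = 1000 and warmup_steps = ceil(1000 * 0.1) = 100,
# i.e. the learning rate warms up over the first 10% of all updates and then
# follows the selected scheduler ('triangular', 'inv_sqrt', 'constant' or 'cosine').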
@use_np
def apply_layerwise_decay(model, layerwise_decay, backbone_name, not_included=None):
"""Apply the layer-wise gradient decay
.. math::
lr = lr * layerwise_decay^(max_depth - layer_depth)
Parameters
----------
model
The backbone model
layerwise_decay: float
The layer-wise decay power
backbone_name: str
Name of the backbone, used to pick the right encoder-layer attribute
not_included: list of str
A list of parameter names that are not included in the layer-wise decay
"""
if not_included is None:
not_included = []
# Consider the task-specific fine-tuning layer as the last layer, followed by the pooler.
# In addition, the embedding parameters get the smallest learning rate under this setting.
if 'albert' in backbone_name:
# Skip if it is the ALBERT model.
return
if 'electra' in backbone_name:
# For ELECTRA, it's called all_encoder_layers
all_layers = model.encoder.all_encoder_layers
else:
# For other models, it's called all_layers
all_layers = model.encoder.all_layers
max_depth = len(all_layers) + 2
for key, value in model.collect_params().items():
if 'scores' in key:
value.lr_mult = layerwise_decay ** 0
if 'pool' in key:
value.lr_mult = layerwise_decay ** 1
if 'embed' in key:
value.lr_mult = layerwise_decay ** max_depth
for (layer_depth, layer) in enumerate(all_layers):
layer_params = layer.collect_params()
for key, value in layer_params.items():
# Skip parameters whose name matches any of the excluded patterns
if any(pn in key for pn in not_included):
continue
value.lr_mult = layerwise_decay ** (max_depth - (layer_depth + 1))
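# Worked example (comments only): with layerwise_decay=0.8 and a 12-layer
# encoder, max_depth = 12 + 2 = 14, so the task head ('scores') keeps
# lr_mult = 0.8**0 = 1.0, the pooler gets 0.8**1, the top encoder layer gets
# 0.8**(14 - 12) = 0.8**2, deeper layers decay further, and the embeddings get
# the smallest multiplier, 0.8**14. The layer count here is hypothetical.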
@use_np
def freeze_layers(model, backbone_name, num_trainable_layers):
if 'albert' in backbone_name:
# Skip if it is the ALBERT model.
return
if 'electra' in backbone_name:
# For ELECTRA, it's called all_encoder_layers
all_layers = model.encoder.all_encoder_layers
else:
# For other models, it's called all_layers
all_layers = model.encoder.all_layers
if num_trainable_layers < 0:
return
assert num_trainable_layers <= len(all_layers)
for i in range(len(all_layers) - num_trainable_layers):
for p in all_layers[i].collect_params().values():
p.grad_req = 'null'
return
def base_optimization_config():
"""The basic optimization phase"""
cfg = CfgNode()
cfg.lr_scheduler = 'triangular'
cfg.optimizer = 'adamw'
cfg.early_stopping_patience = 20 # Stop if we cannot find a better checkpoint
cfg.optimizer_params = [('beta1', 0.9),
('beta2', 0.999),
('epsilon', 1e-6),
('correct_bias', False)]
cfg.begin_lr = 0.0
cfg.batch_size = 128
cfg.nbest = 1 # Keep the top-K best-performing models
cfg.per_device_batch_size = 16 # Per-device batch-size
cfg.auto_per_device_batch_size = True # Whether to automatically determine the runnable
# per-device batch_size.
cfg.val_batch_size_mult = 2 # By default, we 2X the batch size for validation
cfg.lr = 1E-4
cfg.final_lr = 0.0
cfg.num_train_epochs = 10
cfg.warmup_portion = 0.1
cfg.layerwise_lr_decay = 0.8 # The layer_wise decay
cfg.wd = 0.01 # Weight Decay
cfg.max_grad_norm = 1.0 # Maximum Gradient Norm
# The validation frequency = validation frequency * num_updates_in_an_epoch
cfg.valid_frequency = 0.2
# Logging frequency = log frequency * num_updates_in_an_epoch
cfg.log_frequency = 0.05
return cfg
def base_model_config():
cfg = CfgNode()
cfg.backbone = CfgNode()
cfg.backbone.name = 'google_electra_base'
cfg.network = MultiModalWithPretrainedTextNN.get_cfg()
cfg.num_trainable_layers = -1 # Use a negative number to indicate that all layers are trainable.
cfg.insert_sep = True # Whether to insert sep tokens between columns
cfg.train_stochastic_chunk = False # Whether to sample a stochastic chunk from the training text
cfg.test_stochastic_chunk = False # Whether to use stochastic chunk in testing
cfg.use_avg_nbest = False # Whether to average the top-performing models and use that as the final model.
# This will usually give us better performance.
cfg._disable_update = False # This is a hack for disabling the update. It should not normally be used.
cfg.inference_num_repeat = 1 # Whether to turn on randomness and repeat the inference multiple times.
return cfg
def base_misc_config():
cfg = CfgNode()
cfg.seed = 123
cfg.exp_dir = './autonlp'
return cfg
def base_cfg():
cfg = CfgNode()
cfg.version = 1
cfg.optimization = base_optimization_config()
cfg.preprocessing = base_preprocess_cfg()
cfg.model = base_model_config()
cfg.misc = base_misc_config()
cfg.freeze()
return cfg
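# --- Illustrative sketch (comments only) ---
# base_cfg() returns a frozen CfgNode; individual hyperparameters are overridden
# via clone/merge_from_list, as train_function does below (defrosting first is
# shown here for safety), e.g.:
#
#   cfg = base_cfg().clone()
#   cfg.defrost()
#   cfg.merge_from_list(['optimization.lr', 5e-5, 'optimization.num_train_epochs', 5])
#   cfg.freeze()
#
# The specific keys and values shown are examples only; any key defined in the
# config tree above can be overridden this way.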
@use_np
def _classification_regression_predict(net, dataloader, problem_type, label_scaler,
has_label=True, extract_embedding=False,
num_repeat=1):
"""
Parameters
----------
net
The network
dataloader
The dataloader
problem_type
Types of the labels
label_scaler
Label scaler. We will reverse the centering process for regression problem
has_label
Whether label is used
extract_embedding
Whether to extract the embedding
num_repeat
The number of repeats to get the prediction.
If it is larger than 1, we will average the predictions.
If it is a regression problem, we will directly average the outputs.
If it is a classification problem, we will average the logits
Returns
-------
predictions
The predictions
"""
import warnings
# Filter mxnet warnings
warnings.filterwarnings('ignore', module='mxnet')
predictions = [[] for _ in range(num_repeat)]
use_logits = num_repeat > 1 and (problem_type == MULTICLASS or problem_type == BINARY)\
and not extract_embedding
if use_logits:
logits = [[] for _ in range(num_repeat)]
ctx_l = net.collect_params().list_ctx()
for i in range(num_repeat):
for sample_l in grouper(dataloader, len(ctx_l)):
iter_pred_l = []
if use_logits:
iter_logits_l = []
for sample, ctx in zip(sample_l, ctx_l):
if sample is None:
continue
if has_label:
batch_feature, batch_label = sample
else:
batch_feature = sample
batch_feature = move_to_ctx(batch_feature, ctx)
if extract_embedding:
_, embeddings = net(batch_feature)
iter_pred_l.append(embeddings)
else:
pred = net(batch_feature)
if problem_type == MULTICLASS or problem_type == BINARY:
if num_repeat > 1:
iter_logits_l.append(pred)
pred = mx.npx.softmax(pred, axis=-1)
iter_pred_l.append(pred)
for pred in iter_pred_l:
predictions[i].append(pred.asnumpy())
if use_logits:
for ele in iter_logits_l:
logits[i].append(ele.asnumpy())
predictions[i] = np.concatenate(predictions[i], axis=0)
if problem_type == REGRESSION and not extract_embedding:
predictions[i] = label_scaler.inverse_transform(predictions[i])[:, 0]
if use_logits:
logits[i] = np.concatenate(logits[i], axis=0)
if num_repeat == 1:
return predictions[0]
else:
if use_logits:
logits = np.stack(logits, axis=0).mean(axis=0)
return scipy.special.softmax(logits, axis=-1)
else:
return np.stack(predictions, axis=0).mean(axis=0)
def calculate_metric(scorer, ground_truth, predictions, problem_type):
if problem_type == BINARY and scorer.name == 'roc_auc':
# For ROC_AUC, we need to feed in the probability of positive class to the scorer.
return scorer._sign * scorer(ground_truth, predictions[:, 1])
else:
return scorer._sign * scorer(ground_truth, predictions)
@use_np
def train_function(args, reporter, train_df_path, tuning_df_path,
time_limit, time_start, base_config,
problem_type, column_types,
feature_columns, label_column,
log_metrics, eval_metric, ngpus_per_trial,
console_log, seed=None, verbosity=2):
"""
Parameters
----------
args
The arguments
reporter
Reporter of the HPO scheduler.
If it is set to None, we won't use the reporter and will just run a single trial.
train_df_path
Path of the training dataframe
tuning_df_path
Path of the tuning dataframe
time_limit
The time limit of calling this function
time_start
The starting timestamp of the experiment
base_config
Basic configuration
problem_type
Type of the problem.
column_types
Type of columns
feature_columns
The feature columns
label_column
Label column
log_metrics
Metrics for logging
eval_metric
The stopping metric
ngpus_per_trial
The number of GPUs to use per each trial
console_log
Whether to log it to console
seed
The random seed
verbosity
The verbosity
"""
import warnings
warnings.filterwarnings('ignore', module='mxnet')
warnings.filterwarnings('ignore', module='sklearn')
set_seed(seed)
is_fake_reporter = isinstance(reporter, FakeReporter)
if time_limit is not None:
start_train_tick = time.time()
time_left = time_limit - (start_train_tick - time_start)
if time_left <= 0:
if not is_fake_reporter:
reporter.terminate()
return
if is_fake_reporter:
search_space = args.rand
task_id = 0
else:
search_space = args['search_space']
task_id = args.task_id
# Get the log metric scorers
if isinstance(log_metrics, str):
log_metrics = [log_metrics]
# Load the training and tuning data from the parquet file
train_data = pd.read_pickle(train_df_path)
tuning_data = pd.read_pickle(tuning_df_path)
log_metric_scorers = [get_metric(ele) for ele in log_metrics]
eval_metric_scorer = get_metric(eval_metric)
greater_is_better = eval_metric_scorer.greater_is_better
cfg = base_config.clone()
specified_values = []
for key in search_space.keys():
specified_values.append(key)
specified_values.append(search_space[key])
cfg.merge_from_list(specified_values)
exp_dir = cfg.misc.exp_dir
exp_dir = os.path.join(exp_dir, 'task{}'.format(task_id))
os.makedirs(exp_dir, exist_ok=True)
cfg.defrost()
cfg.misc.exp_dir = exp_dir
cfg.freeze()
logger = logging.getLogger()
set_logger_verbosity(verbosity, logger)
logging_config(folder=exp_dir, name='training', logger=logger, console=console_log,
level=logging.DEBUG,
console_level=verbosity2loglevel(verbosity))
logger.log(10, cfg)
# Load backbone model
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
# Build Preprocessor + Preprocess the training dataset + Inference problem type
# TODO Dynamically cache the preprocessor that has been fitted.
if problem_type == MULTICLASS or problem_type == BINARY:
label_generator = LabelEncoder()
label_generator.fit(pd.concat([train_data[label_column], tuning_data[label_column]]))
else:
label_generator = None
preprocessor = MultiModalTextFeatureProcessor(column_types=column_types,
label_column=label_column,
tokenizer_name=cfg.model.backbone.name,
label_generator=label_generator,
cfg=cfg.preprocessing)
logger.info('Fitting and transforming the train data...')
train_dataset = preprocessor.fit_transform(train_data[feature_columns],
train_data[label_column])
with open(os.path.join(exp_dir, 'preprocessor.pkl'), 'wb') as of:
pickle.dump(preprocessor, of)
logger.info(f'Done! Preprocessor saved to {os.path.join(exp_dir, "preprocessor.pkl")}')
logger.log(10, 'Train Data')
logger.log(10, get_stats_string(preprocessor, train_dataset, is_train=True))
logger.info('Process dev set...')
tuning_dataset = preprocessor.transform(tuning_data[feature_columns],
tuning_data[label_column])
logger.info('Done!')
# Auto Max Length
if cfg.preprocessing.text.auto_max_length:
max_length = auto_shrink_max_length(
train_dataset,
insert_sep=cfg.model.insert_sep,
num_text_features=len(preprocessor.text_feature_names),
auto_max_length_quantile=cfg.preprocessing.text.auto_max_length_quantile,
round_to=cfg.preprocessing.text.auto_max_length_round_to,
max_length=cfg.preprocessing.text.max_length)
else:
max_length = cfg.preprocessing.text.max_length
train_stochastic_chunk = cfg.model.train_stochastic_chunk
test_stochastic_chunk = cfg.model.test_stochastic_chunk
inference_num_repeat = cfg.model.inference_num_repeat
if max_length < cfg.preprocessing.text.max_length:
inference_num_repeat = 1
cfg.defrost()
cfg.preprocessing.text.max_length = max_length
cfg.model.inference_num_repeat = inference_num_repeat
cfg.freeze()
with open(os.path.join(exp_dir, 'cfg.yml'), 'w') as f:
f.write(str(cfg))
logger.info(f'Max length for chunking text: {max_length}, '
f'Stochastic chunk: Train-{train_stochastic_chunk}/Test-{test_stochastic_chunk}, '
f'Test #repeat: {inference_num_repeat}.')
cls_id, sep_id = get_cls_sep_id(tokenizer)
train_batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(preprocessor.text_feature_names),
num_categorical_inputs=len(preprocessor.categorical_feature_names),
num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,
mode='train', stochastic_chunk=train_stochastic_chunk,
insert_sep=cfg.model.insert_sep)
test_batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(preprocessor.text_feature_names),
num_categorical_inputs=len(preprocessor.categorical_feature_names),
num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,
mode='test', stochastic_chunk=test_stochastic_chunk,
insert_sep=cfg.model.insert_sep)
# Get the ground-truth dev labels
gt_dev_labels = np.array([ele[-1] for ele in tuning_dataset])
if problem_type == REGRESSION:
gt_dev_labels = preprocessor.label_scaler.inverse_transform(np.expand_dims(gt_dev_labels,
axis=-1))[:, 0]
ctx_l = get_mxnet_available_ctx()
if ngpus_per_trial == 0:
ctx_l = [mx.cpu()]
else:
ctx_l = ctx_l[:ngpus_per_trial]
base_batch_size = cfg.optimization.per_device_batch_size
num_accumulated = int(np.ceil(cfg.optimization.batch_size / (base_batch_size * len(ctx_l))))
inference_base_batch_size = base_batch_size * cfg.optimization.val_batch_size_mult
train_dataloader = DataLoader(train_dataset,
batch_size=base_batch_size,
shuffle=True,
batchify_fn=train_batchify_fn)
dev_dataloader = DataLoader(tuning_dataset,
batch_size=inference_base_batch_size,
shuffle=False,
batchify_fn=test_batchify_fn)
if problem_type == REGRESSION:
out_shape = 1
elif problem_type == MULTICLASS:
out_shape = len(label_generator.classes_)
elif problem_type == BINARY:
assert len(label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(preprocessor.categorical_feature_names),
num_numerical_features=len(preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0 else len(preprocessor.numerical_feature_names),
num_categories=preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.initialize_with_pretrained_backbone(backbone_params_path, ctx=ctx_l)
net.hybridize()
num_total_params, num_total_fixed_params = count_parameters(net.collect_params())
logger.info('#Total Params/Fixed Params={}/{}'.format(num_total_params,
num_total_fixed_params))
# Initialize the optimizer
updates_per_epoch = int(np.ceil(len(train_dataloader) / (num_accumulated * len(ctx_l))))
optimizer, optimizer_params, max_update \
= get_optimizer(cfg.optimization,
updates_per_epoch=updates_per_epoch)
valid_interval = int(math.ceil(cfg.optimization.valid_frequency * updates_per_epoch))
train_log_interval = int(math.ceil(cfg.optimization.log_frequency * updates_per_epoch))
if 0 < cfg.optimization.layerwise_lr_decay < 1:
apply_layerwise_decay(net.text_backbone,
cfg.optimization.layerwise_lr_decay,
backbone_name=cfg.model.backbone.name)
freeze_layers(net.text_backbone,
backbone_name=cfg.model.backbone.name,
num_trainable_layers=cfg.model.num_trainable_layers)
# Do not apply weight decay to all the LayerNorm and bias
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
trainer = mx.gluon.Trainer(params,
optimizer, optimizer_params,
update_on_kvstore=False)
# Set grad_req if gradient accumulation is required
if num_accumulated > 1:
logger.log(15, 'Using gradient accumulation.'
' Global batch size = {}'.format(cfg.optimization.batch_size))
for p in params:
p.grad_req = 'add'
net.collect_params().zero_grad()
train_loop_dataloader = grouper(repeat(train_dataloader), len(ctx_l))
log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]
log_num_samples_l = [0 for _ in ctx_l]
logging_start_tick = time.time()
nbest = cfg.optimization.nbest
best_performance_score = [] # Stores the best performing checkpoints
best_performance_update_idx = [] # Stores the update index that reached the best validation performance
best_score = None
mx.npx.waitall()
no_better_rounds = 0
report_idx = 0
start_tick = time.time()
if time_limit is not None:
time_limit -= start_tick - time_start
if time_limit <= 0:
if not is_fake_reporter:
reporter.terminate()
return
best_report_items = None
report_local_jsonl_f = open(os.path.join(exp_dir, 'results_local.jsonl'), 'w')
logger.info(f'Local training results will be saved to '
f'{os.path.join(exp_dir, "results_local.jsonl")}.')
for update_idx in range(max_update):
for accum_idx in range(num_accumulated):
sample_l = next(train_loop_dataloader)
loss_l = []
for i, (sample, ctx) in enumerate(zip(sample_l, ctx_l)):
feature_batch, label_batch = sample
feature_batch = move_to_ctx(feature_batch, ctx)
label_batch = move_to_ctx(label_batch, ctx)
with mx.autograd.record():
pred = net(feature_batch)
if problem_type == MULTICLASS or problem_type == BINARY:
logits = mx.npx.log_softmax(pred, axis=-1)
loss = - mx.npx.pick(logits,
mx.np.expand_dims(label_batch, axis=-1))
elif problem_type == REGRESSION:
loss = mx.np.square(pred - mx.np.expand_dims(label_batch, axis=-1))
loss_l.append(loss.mean() / len(ctx_l) / num_accumulated)
log_loss_l[i] += loss_l[i] * len(ctx_l) * loss.shape[0] * num_accumulated
log_num_samples_l[i] += loss.shape[0]
for loss in loss_l:
loss.backward()
# Begin to update
trainer.allreduce_grads()
total_norm, ratio, is_finite = clip_grad_global_norm(params, cfg.optimization.max_grad_norm)
if not cfg.model._disable_update:
trainer.update(1.0, ignore_stale_grad=True)
# Clear after update
if num_accumulated > 1:
net.collect_params().zero_grad()
if (update_idx + 1) % train_log_interval == 0:
log_loss = sum([ele.as_in_ctx(ctx_l[0]) for ele in log_loss_l]).asnumpy()
log_num_samples = sum(log_num_samples_l)
logger.log(15,
'[Iter {}/{}, Epoch {}] train loss={:0.2e}, gnorm={:0.2e}, lr={:0.2e}, #samples processed={},'
' #sample per second={:.2f}. ETA={:.2f}min'
.format(update_idx + 1, max_update,
int(update_idx / updates_per_epoch),
log_loss / log_num_samples, total_norm, trainer.learning_rate,
log_num_samples,
log_num_samples / (time.time() - logging_start_tick),
(time.time() - start_tick) / (update_idx + 1)
* (max_update - update_idx - 1) / 60))
logging_start_tick = time.time()
log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]
log_num_samples_l = [0 for _ in ctx_l]
if (update_idx + 1) % valid_interval == 0 or (update_idx + 1) == max_update:
valid_start_tick = time.time()
dev_predictions = \
_classification_regression_predict(net,
dataloader=dev_dataloader,
problem_type=problem_type,
label_scaler=preprocessor.label_scaler,
has_label=False,
num_repeat=inference_num_repeat)
log_scores = [calculate_metric(scorer, gt_dev_labels,
dev_predictions,
problem_type)
for scorer in log_metric_scorers]
dev_score = calculate_metric(eval_metric_scorer, gt_dev_labels,
dev_predictions,
problem_type)
valid_time_spent = time.time() - valid_start_tick
find_better = False
find_topn_better = False
if len(best_performance_score) < nbest:
best_performance_score.append(dev_score)
best_performance_update_idx.append(update_idx + 1)
net.save_parameters(
os.path.join(exp_dir,
f'nbest_model{len(best_performance_score) - 1}.params'))
find_topn_better = True
if best_score is None or greater_is_better and dev_score >= best_score\
or (not greater_is_better and dev_score <= best_score):
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
else:
# First try to update the top-K
if greater_is_better:
if dev_score >= min(best_performance_score):
find_topn_better = True
replace_idx = np.argmin(best_performance_score)
best_performance_score[replace_idx] = dev_score
best_performance_update_idx[replace_idx] = update_idx + 1
net.save_parameters(
os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))
if dev_score >= best_score:
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
else:
if dev_score <= max(best_performance_score):
find_topn_better = True
replace_idx = np.argmax(best_performance_score)
best_performance_score[replace_idx] = dev_score
best_performance_update_idx[replace_idx] = update_idx + 1
net.save_parameters(
os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))
if dev_score <= best_score:
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
if not find_better:
no_better_rounds += 1
else:
no_better_rounds = 0
mx.npx.waitall()
loss_string = ', '.join(['{}={:0.4e}'.format(metric.name, score)
for score, metric in zip(log_scores, log_metric_scorers)])
logger.log(25, '[Iter {}/{}, Epoch {}] valid {}, time spent={:.3f}s,'
' total time spent={:.2f}min. Find new best={}, Find new top-{}={}'.format(
update_idx + 1, max_update, int(update_idx / updates_per_epoch),
loss_string, valid_time_spent, (time.time() - start_tick) / 60,
find_better, nbest, find_topn_better))
if reporter is not None:
report_items = [('iteration', update_idx + 1),
('report_idx', report_idx + 1),
('epoch', int(update_idx / updates_per_epoch))] + \
[(metric.name, score)
for score, metric in zip(log_scores, log_metric_scorers)] + \
[('find_better', find_better),
('find_new_topn', find_topn_better),
('nbest_stat', json.dumps([best_performance_score,
best_performance_update_idx])),
('elapsed_time', int(time.time() - start_tick))]
if eval_metric_scorer._sign < 0:
report_items.append(('reward_attr', -dev_score))
else:
report_items.append(('reward_attr', dev_score))
report_items.append(('eval_metric', eval_metric_scorer.name))
report_items.append(('exp_dir', exp_dir))
if find_better:
best_report_items = report_items
reporter(**dict(report_items))
report_local_jsonl_f.write(json.dumps(dict(report_items)) + '\n')
report_local_jsonl_f.flush()
report_idx += 1
if no_better_rounds >= cfg.optimization.early_stopping_patience:
logger.info('Early stopping patience reached!')
break
total_time_spent = time.time() - start_tick
if time_limit is not None and total_time_spent > time_limit:
break
# Average checkpoints
best_report_items_dict = dict(best_report_items)
best_report_items_dict['report_idx'] = report_idx + 1
reporter(**best_report_items_dict)
report_local_jsonl_f.write(json.dumps(best_report_items_dict) + '\n')
report_local_jsonl_f.close()
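# The validation block above keeps the `nbest` best checkpoints by replacing the worst entry of
# `best_performance_score` whenever a new dev score beats it. The sketch below isolates that
# bookkeeping as a pure function so the replacement rule is easy to follow; the helper name is
# illustrative only and is never called in this module.
def _illustrative_topk_insert(scores, update_ids, new_score, new_update_id,
                              nbest, greater_is_better):
    """Illustrative sketch of the top-K checkpoint bookkeeping used in the training loop above.

    Returns the index of the slot that was filled/replaced, or None if the new score
    does not enter the top-K.
    """
    import numpy as np
    if len(scores) < nbest:
        # Still room: simply append the new checkpoint.
        scores.append(new_score)
        update_ids.append(new_update_id)
        return len(scores) - 1
    if greater_is_better and new_score >= min(scores):
        replace_idx = int(np.argmin(scores))
    elif not greater_is_better and new_score <= max(scores):
        replace_idx = int(np.argmax(scores))
    else:
        return None
    scores[replace_idx] = new_score
    update_ids[replace_idx] = new_update_id
    return replace_idx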
def get_recommended_resource(nthreads_per_trial=None,
ngpus_per_trial=None) -> Tuple[int, int]:
"""Get the recommended resources.
Internally, we will try to use the GPU whenever it is available. That means we will use
a single GPU for finetuning.
Parameters
----------
nthreads_per_trial
The number of threads per trial provided by the user.
ngpus_per_trial
The number of GPUs per trial provided by the user.
Returns
-------
nthreads_per_trial
The recommended number of threads per trial.
ngpus_per_trial
The recommended number of GPUs per trial.
"""
if nthreads_per_trial is None and ngpus_per_trial is None:
nthreads_per_trial = get_cpu_count()
ngpus_per_trial = 1
elif nthreads_per_trial is not None and ngpus_per_trial is None:
ngpus_per_trial = 1
elif nthreads_per_trial is None and ngpus_per_trial is not None:
if ngpus_per_trial != 0:
num_parallel_jobs = get_gpu_count() // ngpus_per_trial
nthreads_per_trial = max(get_cpu_count() // num_parallel_jobs, 1)
else:
nthreads_per_trial = get_cpu_count()
nthreads_per_trial = min(nthreads_per_trial, get_cpu_count())
ngpus_per_trial = min(ngpus_per_trial, get_gpu_count())
assert nthreads_per_trial > 0 and ngpus_per_trial >= 0,\
'Invalid number of threads and number of GPUs.'
return nthreads_per_trial, ngpus_per_trial
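# A quick usage sketch for get_recommended_resource. The concrete numbers in the comments assume
# a machine with 8 CPU cores and 1 GPU; on other machines get_cpu_count()/get_gpu_count() will
# change the outputs. The wrapper below is illustrative only and is not used by this module.
def _illustrative_resource_examples():
    """Illustrative calls to get_recommended_resource (assuming an 8-CPU, 1-GPU machine).

    * get_recommended_resource()                     -> (8, 1)  # all CPUs, one GPU
    * get_recommended_resource(nthreads_per_trial=4) -> (4, 1)  # keep user CPUs, default to one GPU
    * get_recommended_resource(ngpus_per_trial=0)    -> (8, 0)  # CPU-only trial gets all CPUs
    """
    return [get_recommended_resource(),
            get_recommended_resource(nthreads_per_trial=4),
            get_recommended_resource(ngpus_per_trial=0)]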
@use_np
class MultiModalTextModel:
"""Learner of the multimodal text data.
It will be called when the user calls `fit()` in TextPredictor.
It is used for making predictions on new data and viewing information about
models trained during `fit()`.
"""
def __init__(self, column_types,
feature_columns,
label_columns,
problem_type,
eval_metric,
log_metrics,
output_directory=None):
"""Creates model object.
Parameters
----------
column_types
The column types.
feature_columns
Name of the feature columns
label_columns
Name of the label columns.
problem_type
Type of the problem
eval_metric
The evaluation metric
log_metrics
The metrics for logging
output_directory
The output directory to save the model
"""
super(MultiModalTextModel, self).__init__()
self._base_config = base_cfg()
self._base_config.defrost()
if output_directory is not None:
self._output_directory = self._base_config.misc.exp_dir = output_directory
self._base_config.misc.exp_dir = os.path.abspath(self._base_config.misc.exp_dir)
self._base_config.freeze()
self._output_directory = self._base_config.misc.exp_dir
self._column_types = column_types
self._eval_metric = eval_metric
self._log_metrics = log_metrics
self._label_columns = label_columns
self._feature_columns = feature_columns
self._problem_type = problem_type
# Need to be set in the train call
self._net = None # Network for training and inference
self._embed_net = None # Network for extracting the embedding
self._config = None
self._results = None
self._preprocessor = None
@property
def preprocessor(self):
return self._preprocessor
@property
def output_directory(self):
""" Get the output directory. The trained model and the training logs
will be saved to this folder """
return self._output_directory
@property
def label_columns(self):
"""Name of the label columns"""
return self._label_columns
@property
def problem_type(self):
"""Types of the problem"""
return self._problem_type
@property
def feature_columns(self):
"""Name of the features"""
return self._feature_columns
@property
def base_config(self):
"""The basic configuration. Internally, we will fill values in the base config by values
in the search space."""
return self._base_config
@property
def results(self):
"""Results of the final model"""
return self._results
@property
def config(self):
"""The configuration of the final trained model."""
return self._config
@property
def net(self):
return self._net
def train(self, train_data, tuning_data,
num_cpus=None,
num_gpus=None,
time_limit=None,
tune_kwargs=None,
search_space=None,
plot_results=False,
console_log=True,
seed=None,
verbosity=2):
"""The train function.
Parameters
----------
train_data
The training data
tuning_data
The tuning data
num_cpus
Number of CPUs for each trial
num_gpus
Number of GPUs for each trial
time_limit
The time limits
tune_kwargs
Parameters of the HPO algorithms. For example, the scheduling
algorithm, scheduling backend, HPO algorithm.
search_space
The search space options
plot_results
Whether to plot results or not
console_log
Whether to log into the console
seed
The seed
verbosity
Verbosity
"""
set_seed(seed)
set_logger_verbosity(verbosity, logger)
start_tick = time.time()
assert len(self._label_columns) == 1, 'Currently, we only support single label.'
# TODO(sxjscience) Try to support S3
os.makedirs(self._output_directory, exist_ok=True)
if search_space is None:
search_space = \
ag_text_presets.create('default')['models']['MultimodalTextModel']['search_space']
search_space_reg = args(search_space=space.Dict(**search_space))
# Scheduler and searcher for HPO
if tune_kwargs is None:
tune_kwargs = ag_text_presets.create('default')['tune_kwargs']
scheduler_options = tune_kwargs['scheduler_options']
num_cpus, num_gpus = get_recommended_resource(num_cpus, num_gpus)
if num_gpus == 0:
if 'AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU' in os.environ:
use_warning = int(os.environ['AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU'])
else:
use_warning = False
if use_warning:
warnings.warn('No GPU is detected in this machine. We recommend running '
'TextPredictor on a GPU-enabled instance instead, since '
'training on CPU is slow.')
else:
raise RuntimeError('No GPU is detected in this machine and we will '
'not proceed to run TextPredictor because it would train '
'too slowly with only the CPU. You may try to set `ngpus_per_trial` '
'to a number larger than 0 when calling `.fit()`. '
'Also, you can set the environment variable '
'"AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU=1" to force the model to '
'use CPU for training.')
logger.info(f"The GluonNLP V0 backend is used. "
f"We will use {num_cpus} cpus and "
f"{num_gpus} gpus to train each trial.")
if scheduler_options is None:
scheduler_options = dict()
if plot_results is None:
if in_ipynb():
plot_results = True
else:
plot_results = False
scheduler_options = compile_scheduler_options_v2(
scheduler_options=scheduler_options,
scheduler=tune_kwargs['search_strategy'],
search_strategy=tune_kwargs['searcher'],
search_options=tune_kwargs['search_options'],
nthreads_per_trial=num_cpus,
ngpus_per_trial=num_gpus,
checkpoint=os.path.join(self._output_directory, 'checkpoint.ag'),
num_trials=tune_kwargs['num_trials'],
time_out=time_limit,
resume=False,
visualizer=scheduler_options.get('visualizer'),
time_attr='report_idx',
reward_attr='reward_attr',
dist_ip_addrs=scheduler_options.get('dist_ip_addrs'))
# Create a temporary cache file. The internal train function will load the
# temporary cache.
os.makedirs(os.path.join(self._output_directory, 'data_cache'), exist_ok=True)
train_df_path = os.path.join(self._output_directory, 'data_cache',
'cache_train_dataframe.pd.pkl')
tuning_df_path = os.path.join(self._output_directory, 'data_cache',
'cache_tuning_dataframe.pd.pkl')
train_data.to_pickle(train_df_path)
tuning_data.to_pickle(tuning_df_path)
train_fn = search_space_reg(functools.partial(train_function,
train_df_path=train_df_path,
time_limit=time_limit,
time_start=start_tick,
tuning_df_path=tuning_df_path,
base_config=self.base_config,
problem_type=self.problem_type,
column_types=self._column_types,
feature_columns=self._feature_columns,
label_column=self._label_columns[0],
log_metrics=self._log_metrics,
eval_metric=self._eval_metric,
ngpus_per_trial=scheduler_options['resource']['num_gpus'],
console_log=console_log,
verbosity=verbosity))
no_job_finished_err_msg =\
'No training job has been completed! '\
'There are two possibilities: '\
'1) The time_limit is too small, '\
'or 2) There are some internal errors in AutoGluon. '\
'For the first case, you can increase the time_limit or set it to '\
'None, e.g., setting "predictor.fit(..., time_limit=None). To '\
'further investigate the root cause, you can also try to set the '\
'"verbosity=3" and try again, i.e., predictor.set_verbosity(3).'
if scheduler_options['num_trials'] == 1:
train_fn(train_fn.args['search_space'],
train_fn.args['_default_config'])
best_model_saved_dir_path = os.path.join(self._output_directory, 'task0')
cfg_path = os.path.join(self._output_directory, 'task0', 'cfg.yml')
# Check whether the job has finished
if not os.path.exists(cfg_path)\
or not os.path.exists(os.path.join(self._output_directory,
'task0', 'best_model.params')):
raise RuntimeError(no_job_finished_err_msg)
cfg = self.base_config.clone_merge(cfg_path)
local_results = pd.read_json(os.path.join(self._output_directory, 'task0',
'results_local.jsonl'), lines=True)
if plot_results:
plot_training_curves = os.path.join(self._output_directory,
'plot_training_curves.png')
import matplotlib.pyplot as plt
plt.ylabel(self._eval_metric)
plt.xlabel('report_idx')
plt.title("Performance vs Training-Time")
plt.plot(local_results['report_idx'].iloc[:-1],
local_results[local_results['eval_metric'][0]].iloc[:-1], label=f'task0')
plt.legend(loc='best')
plt.savefig(plot_training_curves)
plt.show()
self._results = local_results
else:
if tune_kwargs['search_strategy'] != 'local':
# Force forkserver if it's not using the local sequential HPO
force_forkserver()
scheduler_cls, scheduler_params = scheduler_factory(scheduler_options)
# Create scheduler, run HPO experiment
scheduler = scheduler_cls(train_fn, **scheduler_options)
scheduler.run()
scheduler.join_jobs()
if len(scheduler.config_history) == 0:
raise RuntimeError(no_job_finished_err_msg)
best_config = scheduler.get_best_config()
logger.info('Results={}'.format(scheduler.searcher._results))
logger.info('Best_config={}'.format(best_config))
best_task_id = scheduler.get_best_task_id()
best_model_saved_dir_path = os.path.join(self._output_directory,
'task{}'.format(best_task_id))
best_cfg_path = os.path.join(best_model_saved_dir_path, 'cfg.yml')
cfg = self.base_config.clone_merge(best_cfg_path)
if plot_results:
plot_training_curves = os.path.join(self._output_directory,
'plot_training_curves.png')
scheduler.get_training_curves(filename=plot_training_curves,
plot=plot_results,
use_legend=True)
self._results = dict()
self._results.update(best_reward=scheduler.get_best_reward(),
best_config=scheduler.get_best_config(),
total_time=time.time() - start_tick,
metadata=scheduler.metadata,
training_history=scheduler.training_history,
config_history=scheduler.config_history,
reward_attr=scheduler._reward_attr,
config=cfg)
# Consider moving this to a separate predictor
self._config = cfg
# Average parameters
# TODO(sxjscience) Clean up the temporary spaces used to store the intermediate checkpoints.
if cfg.model.use_avg_nbest:
nbest_path_l = []
for best_id in range(cfg.optimization.nbest):
nbest_path = os.path.join(best_model_saved_dir_path, f'nbest_model{best_id}.params')
if os.path.exists(nbest_path):
nbest_path_l.append(nbest_path)
avg_nbest_path = os.path.join(best_model_saved_dir_path, 'nbest_model_avg.params')
average_checkpoints(nbest_path_l, avg_nbest_path)
with open(os.path.join(best_model_saved_dir_path, 'preprocessor.pkl'), 'rb') as in_f:
self._preprocessor = pickle.load(in_f)
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
if self._problem_type == REGRESSION:
out_shape = 1
elif self._problem_type == MULTICLASS:
out_shape = len(self._preprocessor.label_generator.classes_)
elif self._problem_type == BINARY:
assert len(self._preprocessor.label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(self._preprocessor.categorical_feature_names),
num_numerical_features=len(self._preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(self._preprocessor.numerical_feature_names) == 0 else len(
self._preprocessor.numerical_feature_names),
num_categories=self._preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.hybridize()
if cfg.model.use_avg_nbest:
net.load_parameters(avg_nbest_path, ctx=mx.cpu())
else:
net.load_parameters(os.path.join(best_model_saved_dir_path, 'best_model.params'),
ctx=mx.cpu())
self._net = net
mx.npx.waitall()
def evaluate(self, data, metrics=None, stochastic_chunk=None, num_repeat=None):
""" Report the predictive performance evaluated for a given dataset.
Parameters
----------
data : str or :class:`TabularDataset` or `pandas.DataFrame`
This Dataset must also contain the label-column with the same column-name as specified during `fit()`.
If a str is passed, `data` will be loaded using the str value as the file path.
metrics : str or List[str] or None
Name of metric or a list of names of metrics to report.
If it is not given, we will return the score of the stored eval_metric.
stochastic_chunk
Whether to use stochastic chunk
num_repeat
The number of repeats
Returns
-------
ret : single score or dict mapping metric name -> score
A single score if one metric is requested, otherwise a dict with one entry per requested metric.
"""
if isinstance(metrics, str):
metrics = [metrics]
elif metrics is None:
metrics = [self._eval_metric]
assert self.net is not None
# We will always use all resources that are available for evaluation
ctx_l = get_mxnet_available_ctx()
self.net.collect_params().reset_ctx(ctx_l)
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
data = data[self._feature_columns + self._label_columns]
if self._problem_type == MULTICLASS or self._problem_type == BINARY:
ground_truth = self.preprocessor.label_generator.transform(
data[self._label_columns[0]])
predictions = self.predict_proba(data,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
else:
ground_truth = pd.to_numeric(data[self._label_columns[0]]).to_numpy().astype(np.float32)
predictions = self.predict(data,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
metric_scores = [calculate_metric(get_metric(metric),
ground_truth, predictions, self._problem_type)
for metric in metrics]
# Once the inference is completed, we will cache all parameters back
# to CPU to avoid memory overflow.
self.net.collect_params().reset_ctx(mx.cpu())
if len(metric_scores) == 1:
return metric_scores[0]
else:
return {metric: score for metric, score in zip(metrics, metric_scores)}
def _internal_predict(self, data, get_original_labels=True, get_probabilities=False,
stochastic_chunk=None, num_repeat=None):
assert self.net is not None
assert self.config is not None
# We will always use all resources that are available for evaluation
ctx_l = get_mxnet_available_ctx()
self.net.collect_params().reset_ctx(ctx_l)
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
dataset = self.preprocessor.transform(data[self._feature_columns])
inference_batch_size = self.config.optimization.per_device_batch_size \
* self.config.optimization.val_batch_size_mult
cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)
if stochastic_chunk is None:
stochastic_chunk = self.config.model.test_stochastic_chunk
batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(self.preprocessor.text_feature_names),
num_categorical_inputs=len(self.preprocessor.categorical_feature_names),
num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id,
max_length=self.config.preprocessing.text.max_length,
mode='test',
stochastic_chunk=stochastic_chunk,
insert_sep=self.config.model.insert_sep)
dataloader = DataLoader(dataset,
batch_size=inference_batch_size,
shuffle=False,
batchify_fn=batchify_fn)
if num_repeat is None:
num_repeat = self.config.model.inference_num_repeat
test_predictions = _classification_regression_predict(
self._net,
dataloader=dataloader,
problem_type=self._problem_type,
label_scaler=self.preprocessor.label_scaler,
has_label=False,
num_repeat=num_repeat)
# Once the inference is completed, we will cache all parameters back
# to CPU to avoid memory overflow.
self.net.collect_params().reset_ctx(mx.cpu())
if self._problem_type == MULTICLASS or self._problem_type == BINARY:
if get_probabilities:
return test_predictions
else:
test_predictions = test_predictions.argmax(axis=-1)
if get_original_labels:
test_predictions = np.array(
self.preprocessor.label_generator.inverse_transform(test_predictions))
return test_predictions
@property
def class_labels(self):
"""The original name of the class labels.
For example, the tabular data may contain classes equal to
"entailment", "contradiction", "neutral". Internally, these will be converted to
0, 1, 2, ...
This function returns the original names of these raw labels.
Returns
-------
ret
List that contains the class names. It will be None if it's not a classification problem.
"""
if self.problem_type == MULTICLASS or self.problem_type == BINARY:
return self._preprocessor.label_generator.classes_
else:
warnings.warn('Accessing class names for a non-classification problem. Returning None.')
return None
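# The sketch below shows how the probabilities returned by predict_proba line up with
# class_labels: column i of the probability matrix corresponds to class_labels[i]. It is a
# standalone NumPy illustration with made-up numbers, not a method of this class.
def _illustrative_proba_to_labels():
    """Standalone sketch: turning predict_proba-style output into label strings."""
    import numpy as np
    class_labels = np.array(['entailment', 'contradiction', 'neutral'])  # made-up label names
    probabilities = np.array([[0.7, 0.2, 0.1],   # each row sums to 1; column i matches class_labels[i]
                              [0.1, 0.1, 0.8]])
    predicted_indices = probabilities.argmax(axis=-1)   # -> array([0, 2])
    return class_labels[predicted_indices]              # -> array(['entailment', 'neutral'])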
def predict_proba(self, test_data, stochastic_chunk=None, num_repeat=None):
"""Predict class probabilities instead of class labels (for classification tasks).
Parameters
----------
test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str
The test data to get predictions for. Can be DataFrame/Dataset or a file that can
be loaded into DataFrame/Dataset.
stochastic_chunk : bool
Whether to enable stochastic chunk
num_repeat : int or None
The number of repeats for running the inference model.
Returns
-------
probabilities : array
The predicted class probabilities for each sample.
Shape of this array is (#Samples, num_class).
Here, the i-th number means the probability of belonging to the i-th class.
You can access the class names via `self.class_labels`.
"""
assert self.problem_type == MULTICLASS or self.problem_type == BINARY
return self._internal_predict(test_data,
get_original_labels=False,
get_probabilities=True,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
def predict(self, test_data, get_original_labels=True, stochastic_chunk=None, num_repeat=None):
"""Make predictions on new data.
Parameters
----------
test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str
The test data to get predictions for. Can be DataFrame/Dataset or a file that can be loaded into DataFrame/Dataset.
get_original_labels : bool, default = True
Whether or not predictions should be formatted in terms of the original labels.
For example, the labels might be "entailment" or "not_entailment" and predictions could either be of this form (if `True`) or integer-indices corresponding to these classes (if `False`).
stochastic_chunk : bool or None, default = None
Whether to turn on stochastic chunk
num_repeat : int or None
The number of repeats
Returns
-------
predictions : array
The predictions for each sample. Shape of this array is (#Samples,).
"""
return self._internal_predict(test_data,
get_original_labels=get_original_labels,
get_probabilities=False,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
def save(self, dir_path):
"""Save this model to disk.
Parameters
----------
dir_path : str
Directory where the model should be saved.
"""
os.makedirs(dir_path, exist_ok=True)
self.net.save_parameters(os.path.join(dir_path, 'net.params'))
with open(os.path.join(dir_path, 'cfg.yml'), 'w') as of:
of.write(self.config.dump())
# Save preprocessor
with open(os.path.join(dir_path, 'preprocessor.pkl'), 'wb') as of:
pickle.dump(self.preprocessor, of)
if not isinstance(self._eval_metric, str):
eval_metric = self._eval_metric.name
else:
eval_metric = self._eval_metric
log_metrics = []
for metric in self._log_metrics:
if not isinstance(metric, str):
log_metrics.append(metric.name)
else:
log_metrics.append(metric)
# Save additional assets about the parsed dataset information
with open(os.path.join(dir_path, 'assets.json'), 'w') as of:
json.dump(
{
'problem_type': self._problem_type,
'label_columns': self._label_columns,
'eval_metric': eval_metric,
'log_metrics': log_metrics,
'feature_columns': self._feature_columns,
'column_types': self._column_types,
'version': version.__version__,
}, of, ensure_ascii=True)
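# save() writes net.params, cfg.yml, preprocessor.pkl, and the assets.json metadata shown above
# into dir_path. The sketch below only illustrates reading assets.json back with the json module;
# the directory path is hypothetical and the helper is not used by this class.
def _illustrative_assets_roundtrip(dir_path='./saved_model_example'):
    """Sketch of reading back the assets.json written by save() (hypothetical path)."""
    import json
    import os
    with open(os.path.join(dir_path, 'assets.json'), 'r') as f:
        assets = json.load(f)
    # The keys mirror exactly what save() dumps above.
    return assets['problem_type'], assets['label_columns'], assets['feature_columns']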
@classmethod
def load(cls, dir_path: str):
"""Load a model object previously produced by `fit()` from disk and return this object.
It is highly recommended the predictor be loaded with the exact AutoGluon version
it was fit with.
Parameters
----------
dir_path
Path to directory where this model was previously saved.
Returns
-------
model
A `MultiModalTextModel` object that can be used for making predictions on new data.
"""
cfg = base_cfg().clone_merge(os.path.join(dir_path, 'cfg.yml'))
with open(os.path.join(dir_path, 'preprocessor.pkl'), 'rb') as in_f:
preprocessor = pickle.load(in_f)
with open(os.path.join(dir_path, 'assets.json'), 'r') as f:
assets = json.load(f)
label_columns = assets['label_columns']
feature_columns = assets['feature_columns']
eval_metric = assets['eval_metric']
log_metrics = assets['log_metrics']
problem_type = assets['problem_type']
column_types = assets['column_types']
# TODO(sxjscience) Post 0.1. In general, we will need to support compatible version check
version = assets['version']
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
if problem_type == REGRESSION:
out_shape = 1
elif problem_type == MULTICLASS:
out_shape = len(preprocessor.label_generator.classes_)
elif problem_type == BINARY:
assert len(preprocessor.label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(preprocessor.categorical_feature_names),
num_numerical_features=len(preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0
else len(preprocessor.numerical_feature_names),
num_categories=preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.hybridize()
ctx_l = mx.cpu()
net.load_parameters(os.path.join(dir_path, 'net.params'), ctx=ctx_l)
model = cls(column_types=column_types,
label_columns=label_columns,
feature_columns=feature_columns,
problem_type=problem_type,
eval_metric=eval_metric,
log_metrics=log_metrics)
model._net = net
model._config = cfg
model._preprocessor = preprocessor
return model
def extract_embedding(self, data, stochastic_chunk=None, num_repeat=None):
"""Extract the embedding from the pretrained model.
Parameters
----------
data
Data that can be parsed to pandas dataframe
stochastic_chunk
Whether to use stochastic chunk
num_repeat
The number of repeats
Returns
-------
embeddings
The output embeddings will have shape
(#samples, embedding_dim)
"""
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
dataset = self.preprocessor.transform(data[self.feature_columns])
inference_batch_size = self.config.optimization.per_device_batch_size \
* self.config.optimization.val_batch_size_mult
cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)
if stochastic_chunk is None:
stochastic_chunk = self.config.model.test_stochastic_chunk
batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(self.preprocessor.text_feature_names),
num_categorical_inputs=len(self.preprocessor.categorical_feature_names),
num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id,
max_length=self.config.preprocessing.text.max_length,
mode='test',
stochastic_chunk=stochastic_chunk,
insert_sep=self.config.model.insert_sep)
dataloader = DataLoader(dataset,
batch_size=inference_batch_size,
shuffle=False,
batchify_fn=batchify_fn)
if self._embed_net is None:
embed_net = MultiModalWithPretrainedTextNN(
text_backbone=self.net.text_backbone,
num_text_features=1,
num_categorical_features=len(self.preprocessor.categorical_feature_names),
num_numerical_features=len(self.preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(self.preprocessor.numerical_feature_names) == 0
else len(self.preprocessor.numerical_feature_names),
num_categories=self.preprocessor.categorical_num_categories,
get_embedding=True,
cfg=self.config.model.network,
out_shape=self.net.out_shape,
params=self.net.collect_params(),
prefix='embed_net_')
embed_net.hybridize()
self._embed_net = embed_net
if num_repeat is None:
num_repeat = self.config.model.inference_num_repeat
ctx_l = get_mxnet_available_ctx()
self._embed_net.collect_params().reset_ctx(ctx_l)
embeddings = _classification_regression_predict(self._embed_net,
dataloader=dataloader,
problem_type=self._problem_type,
label_scaler=self.preprocessor.label_scaler,
has_label=False,
extract_embedding=True,
num_repeat=num_repeat)
self._embed_net.collect_params().reset_ctx(mx.cpu())
return embeddings
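# A hedged end-to-end sketch of how this learner is typically driven: train, predict,
# predict_proba, save, load, and extract_embedding. The dataframes, the column_types values,
# the 'acc' metric name, and the output paths below are all assumptions made for illustration;
# real values come from TextPredictor and the presets. This helper is never called here.
def _illustrative_learner_workflow(train_df, tuning_df, test_df):
    """Hypothetical usage sketch of MultiModalTextModel (not used by this module)."""
    model = MultiModalTextModel(column_types={'text': 'text', 'label': 'categorical'},  # assumed types
                                feature_columns=['text'],
                                label_columns=['label'],
                                problem_type=MULTICLASS,
                                eval_metric='acc',          # assumed metric name
                                log_metrics=['acc'],
                                output_directory='./example_output')
    model.train(train_data=train_df, tuning_data=tuning_df, num_gpus=1)
    predictions = model.predict(test_df)
    probabilities = model.predict_proba(test_df)
    model.save('./example_output/saved')
    restored = MultiModalTextModel.load('./example_output/saved')
    embeddings = restored.extract_embedding(test_df)
    return predictions, probabilities, embeddings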
|
import numpy as np
import scipy.special
import os
import math
import logging
import pandas as pd
import warnings
import time
import json
import pickle
import functools
import tqdm
from typing import Tuple
from autogluon.core.scheduler.scheduler_factory import scheduler_factory
from autogluon.core.utils import set_logger_verbosity
from sklearn.preprocessing import LabelEncoder
import mxnet as mx
from mxnet.util import use_np
from mxnet.lr_scheduler import PolyScheduler, CosineScheduler
from mxnet.gluon.data import DataLoader
from autogluon_contrib_nlp.models import get_backbone
from autogluon_contrib_nlp.lr_scheduler import InverseSquareRootScheduler
from autogluon_contrib_nlp.utils.config import CfgNode
from autogluon_contrib_nlp.utils.misc import grouper, \
count_parameters, repeat, get_mxnet_available_ctx
from autogluon_contrib_nlp.utils.parameter import move_to_ctx, clip_grad_global_norm
from autogluon.core import args, space
from autogluon.core.utils import in_ipynb, verbosity2loglevel
from autogluon.core.utils.utils import get_cpu_count, get_gpu_count
from autogluon.core.utils.loaders import load_pkl, load_pd
from autogluon.core.task.base import compile_scheduler_options_v2
from autogluon.core.task.base.base_task import schedulers
from autogluon.core.metrics import get_metric, Scorer
from autogluon.core.utils.multiprocessing_utils import force_forkserver
from autogluon.core.dataset import TabularDataset
from autogluon.core.decorator import sample_config
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION
from autogluon.core.scheduler.reporter import FakeReporter
from .modules import MultiModalWithPretrainedTextNN
from .preprocessing import MultiModalTextFeatureProcessor, base_preprocess_cfg,\
MultiModalTextBatchify, get_stats_string, auto_shrink_max_length, get_cls_sep_id
from .utils import average_checkpoints, set_seed
from .. import constants as _C
from ..utils import logging_config
from ..presets import ag_text_presets
from ... import version
logger = logging.getLogger(__name__)  # module-level logger
@use_np
def get_optimizer(cfg, updates_per_epoch):
"""
Parameters
----------
cfg
Configuration
updates_per_epoch
The number of updates per training epoch
Returns
-------
optimizer
The optimizer
optimizer_params
Optimization parameters
max_update
Maximum update
"""
max_update = max(int(np.ceil(updates_per_epoch * cfg.num_train_epochs)), 3)
warmup_steps = int(np.ceil(updates_per_epoch * cfg.num_train_epochs * cfg.warmup_portion))
if cfg.lr_scheduler == 'triangular':
lr_scheduler = PolyScheduler(max_update=max_update,
base_lr=cfg.lr,
warmup_begin_lr=cfg.begin_lr,
pwr=1,
final_lr=cfg.final_lr,
warmup_steps=warmup_steps,
warmup_mode='linear')
elif cfg.lr_scheduler == 'inv_sqrt':
lr_scheduler = InverseSquareRootScheduler(warmup_steps=warmup_steps,
base_lr=cfg.lr,
warmup_init_lr=cfg.begin_lr)
elif cfg.lr_scheduler == 'constant':
lr_scheduler = None
elif cfg.lr_scheduler == 'cosine':
lr_scheduler = CosineScheduler(max_update=max_update,
base_lr=cfg.lr,
final_lr=cfg.final_lr,
warmup_steps=warmup_steps,
warmup_begin_lr=cfg.begin_lr)
else:
raise ValueError('Unsupported lr_scheduler="{}"'
.format(cfg.lr_scheduler))
optimizer_params = {'learning_rate': cfg.lr,
'wd': cfg.wd,
'lr_scheduler': lr_scheduler}
optimizer = cfg.optimizer
additional_params = {key: value for key, value in cfg.optimizer_params}
optimizer_params.update(additional_params)
return optimizer, optimizer_params, max_update
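# get_optimizer wires the 'triangular' option to a PolyScheduler with pwr=1, i.e. a linear warmup
# followed by a linear decay towards final_lr. The pure-Python sketch below reproduces that shape
# so the schedule can be inspected without MXNet; it is illustrative and is not the scheduler
# object actually passed to the trainer.
def _illustrative_triangular_lr(step, max_update, base_lr=1e-4, final_lr=0.0,
                                warmup_steps=100, warmup_begin_lr=0.0):
    """Pure-Python sketch of the linear-warmup + linear-decay ('triangular') schedule."""
    if step < warmup_steps:
        # Linear warmup from warmup_begin_lr up to base_lr.
        return warmup_begin_lr + (base_lr - warmup_begin_lr) * step / max(warmup_steps, 1)
    # Linear decay from base_lr down to final_lr over the remaining updates.
    progress = (step - warmup_steps) / max(max_update - warmup_steps, 1)
    return final_lr + (base_lr - final_lr) * max(1.0 - progress, 0.0)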
@use_np
def apply_layerwise_decay(model, layerwise_decay, backbone_name, not_included=None):
"""Apply the layer-wise gradient decay
.. math::
lr = lr * layerwise_decay^(max_depth - layer_depth)
Parameters:
----------
model
The backbone model
layerwise_decay: int
layer-wise decay power
not_included: list of str
A list or parameter names that not included in the layer-wise decay
"""
if not_included is None:
not_included = []
# consider the task specific fine-tuning layer as the last layer, following with pooler
# In addition, the embedding parameters have the smallest learning rate under this setting.
if 'albert' in backbone_name:
# Skip if it is the ALBERT model.
return
if 'electra' in backbone_name:
# For ELECTRA, it's called all_encoder_layers
all_layers = model.encoder.all_encoder_layers
else:
# For other models, it's called all_layers
all_layers = model.encoder.all_layers
max_depth = len(all_layers) + 2
for key, value in model.collect_params().items():
if 'scores' in key:
value.lr_mult = layerwise_decay ** 0
if 'pool' in key:
value.lr_mult = layerwise_decay ** 1
if 'embed' in key:
value.lr_mult = layerwise_decay ** max_depth
for (layer_depth, layer) in enumerate(all_layers):
layer_params = layer.collect_params()
for key, value in layer_params.items():
if any(pn in key for pn in not_included):
# Skip parameters that are explicitly excluded from the layer-wise decay.
continue
value.lr_mult = layerwise_decay ** (max_depth - (layer_depth + 1))
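# The sketch below tabulates the lr_mult values that apply_layerwise_decay assigns for a small
# hypothetical backbone, so the exponent pattern is visible at a glance: the task head keeps the
# full learning rate, deeper encoder layers keep more of it than shallow ones, and the embeddings
# get the smallest multiplier. The helper is illustrative only.
def _illustrative_layerwise_multipliers(num_layers=4, layerwise_decay=0.8):
    """Sketch of the lr_mult pattern assigned by apply_layerwise_decay for a toy backbone."""
    max_depth = num_layers + 2
    multipliers = {
        'task_head (scores)': layerwise_decay ** 0,
        'pooler': layerwise_decay ** 1,
        'embeddings': layerwise_decay ** max_depth,
    }
    for layer_depth in range(num_layers):
        # Later (deeper) encoder layers keep a learning rate closer to the base lr.
        multipliers['encoder_layer_{}'.format(layer_depth)] = \
            layerwise_decay ** (max_depth - (layer_depth + 1))
    return multipliers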
@use_np
def freeze_layers(model, backbone_name, num_trainable_layers):
if 'albert' in backbone_name:
# Skip if it is the ALBERT model.
return
if 'electra' in backbone_name:
# For ELECTRA, it's called all_encoder_layers
all_layers = model.encoder.all_encoder_layers
else:
# For other models, it's called all_layers
all_layers = model.encoder.all_layers
if num_trainable_layers < 0:
return
assert num_trainable_layers <= len(all_layers)
for i in range(len(all_layers) - num_trainable_layers):
for p in all_layers[i].collect_params().values():
p.grad_req = 'null'
return
def base_optimization_config():
"""The basic optimization phase"""
cfg = CfgNode()
cfg.lr_scheduler = 'triangular'
cfg.optimizer = 'adamw'
cfg.early_stopping_patience = 20 # Stop if no better checkpoint is found within this many validation rounds
cfg.optimizer_params = [('beta1', 0.9),
('beta2', 0.999),
('epsilon', 1e-6),
('correct_bias', False)]
cfg.begin_lr = 0.0
cfg.batch_size = 128
cfg.nbest = 1 # Number of top-performing checkpoints to keep
cfg.per_device_batch_size = 16 # Per-device batch-size
cfg.auto_per_device_batch_size = True # Whether to automatically determine the runnable
# per-device batch_size.
cfg.val_batch_size_mult = 2 # By default, we 2X the batch size for validation
cfg.lr = 1E-4
cfg.final_lr = 0.0
cfg.num_train_epochs = 10
cfg.warmup_portion = 0.1
cfg.layerwise_lr_decay = 0.8 # The layer_wise decay
cfg.wd = 0.01 # Weight Decay
cfg.max_grad_norm = 1.0 # Maximum Gradient Norm
# The validation frequency = validation frequency * num_updates_in_an_epoch
cfg.valid_frequency = 0.2
# Logging frequency = log frequency * num_updates_in_an_epoch
cfg.log_frequency = 0.05
return cfg
def base_model_config():
cfg = CfgNode()
cfg.backbone = CfgNode()
cfg.backbone.name = 'google_electra_base'
cfg.network = MultiModalWithPretrainedTextNN.get_cfg()
cfg.num_trainable_layers = -1 # Use a negative number to indicate that all layers are trainable.
cfg.insert_sep = True # Whether to insert sep tokens between columns
cfg.train_stochastic_chunk = False # Whether to sample a stochastic chunk from the training text
cfg.test_stochastic_chunk = False # Whether to use stochastic chunk in testing
cfg.use_avg_nbest = False # Whether to average the top-performing checkpoints and use the average as the final model.
# This will usually give us better performance.
cfg._disable_update = False # Debugging flag that disables parameter updates. Not intended for normal use.
cfg.inference_num_repeat = 1 # Number of times to repeat inference with stochastic chunking; predictions are averaged.
return cfg
def base_misc_config():
cfg = CfgNode()
cfg.seed = 123
cfg.exp_dir = './autonlp'
return cfg
def base_cfg():
cfg = CfgNode()
cfg.version = 1
cfg.optimization = base_optimization_config()
cfg.preprocessing = base_preprocess_cfg()
cfg.model = base_model_config()
cfg.misc = base_misc_config()
cfg.freeze()
return cfg
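# train_function below flattens the sampled search space into an alternating
# [key, value, key, value, ...] list and feeds it to cfg.merge_from_list so that dotted keys such
# as 'optimization.lr' override the corresponding entries of base_cfg(). The sketch shows that
# flattening on a made-up search-space dict; the helper and the example keys are illustrative only.
def _illustrative_flatten_search_space(search_space=None):
    """Sketch of the key/value flattening used before cfg.merge_from_list."""
    if search_space is None:
        # Hypothetical sampled values; real keys come from ag_text_presets.
        search_space = {'optimization.lr': 5e-5,
                        'optimization.num_train_epochs': 5}
    specified_values = []
    for key, value in search_space.items():
        specified_values.append(key)
        specified_values.append(value)
    # e.g. ['optimization.lr', 5e-05, 'optimization.num_train_epochs', 5]
    return specified_values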
@use_np
def _classification_regression_predict(net, dataloader, problem_type, label_scaler,
has_label=True, extract_embedding=False,
num_repeat=1):
"""
Parameters
----------
net
The network
dataloader
The dataloader
problem_type
Types of the labels
label_scaler
Label scaler. We will reverse the centering process for regression problems.
has_label
Whether label is used
extract_embedding
Whether to extract the embedding
num_repeat
The number of repeats to get the prediction.
If it is larger than 1, we will average the predictions.
If it is a regression problem, we will directly average the outputs.
If it is a classification problem, we will average the logits
Returns
-------
predictions
The predictions
"""
import warnings
# Filter mxnet warnings
warnings.filterwarnings('ignore', module='mxnet')
predictions = [[] for _ in range(num_repeat)]
use_logits = num_repeat > 1 and (problem_type == MULTICLASS or problem_type == BINARY)\
and not extract_embedding
if use_logits:
logits = [[] for _ in range(num_repeat)]
ctx_l = net.collect_params().list_ctx()
for i in range(num_repeat):
for sample_l in grouper(dataloader, len(ctx_l)):
iter_pred_l = []
if use_logits:
iter_logits_l = []
for sample, ctx in zip(sample_l, ctx_l):
if sample is None:
continue
if has_label:
batch_feature, batch_label = sample
else:
batch_feature = sample
batch_feature = move_to_ctx(batch_feature, ctx)
if extract_embedding:
_, embeddings = net(batch_feature)
iter_pred_l.append(embeddings)
else:
pred = net(batch_feature)
if problem_type == MULTICLASS or problem_type == BINARY:
if num_repeat > 1:
iter_logits_l.append(pred)
pred = mx.npx.softmax(pred, axis=-1)
iter_pred_l.append(pred)
for pred in iter_pred_l:
predictions[i].append(pred.asnumpy())
if use_logits:
for ele in iter_logits_l:
logits[i].append(ele.asnumpy())
predictions[i] = np.concatenate(predictions[i], axis=0)
if problem_type == REGRESSION and not extract_embedding:
predictions[i] = label_scaler.inverse_transform(predictions[i])[:, 0]
if use_logits:
logits[i] = np.concatenate(logits[i], axis=0)
if num_repeat == 1:
return predictions[0]
else:
if use_logits:
logits = np.stack(logits, axis=0).mean(axis=0)
return scipy.special.softmax(logits, axis=-1)
else:
return np.stack(predictions, axis=0).mean(axis=0)
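# When num_repeat > 1, the function above averages logits across repeats for classification and
# re-normalizes with a softmax, while regression outputs are averaged directly. The NumPy/SciPy
# sketch below reproduces that aggregation on made-up arrays; it is not called anywhere in this
# module.
def _illustrative_repeat_aggregation(repeat_logits=None):
    """Sketch of the num_repeat > 1 aggregation for classification outputs."""
    import numpy as np
    import scipy.special
    if repeat_logits is None:
        # Shape (num_repeat=2, num_samples=2, num_classes=2) with made-up values.
        repeat_logits = [[[2.0, 0.5], [0.1, 1.5]],
                         [[1.8, 0.7], [0.3, 1.2]]]
    mean_logits = np.asarray(repeat_logits).mean(axis=0)
    # Averaging the logits first and applying softmax afterwards mirrors the code above.
    return scipy.special.softmax(mean_logits, axis=-1)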
def calculate_metric(scorer, ground_truth, predictions, problem_type):
if problem_type == BINARY and scorer.name == 'roc_auc':
# For ROC_AUC, we need to feed in the probability of positive class to the scorer.
return scorer._sign * scorer(ground_truth, predictions[:, 1])
else:
return scorer._sign * scorer(ground_truth, predictions)
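# calculate_metric multiplies by scorer._sign so that every reported score is 'higher is better',
# which lets the training loop keep a single comparison direction for early stopping and top-K
# tracking. The sketch uses a tiny stand-in scorer object (not the autogluon Scorer API) to show
# the effect on an error-style metric.
def _illustrative_sign_convention():
    """Sketch of the _sign convention with a stand-in error-style scorer."""
    import numpy as np

    class _FakeRmseScorer:
        # Stand-in only: real scorers come from autogluon.core.metrics.get_metric.
        name = 'fake_rmse'
        _sign = -1  # error metrics are minimized, so their sign is negative

        def __call__(self, ground_truth, predictions):
            diff = np.asarray(ground_truth) - np.asarray(predictions)
            return float(np.sqrt(np.mean(diff ** 2)))

    scorer = _FakeRmseScorer()
    # After multiplying by _sign, a larger value always means a better model: -0.5 here.
    return scorer._sign * scorer([0.0, 1.0], [0.5, 1.5])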
@use_np
def train_function(args, reporter, train_df_path, tuning_df_path,
time_limit, time_start, base_config,
problem_type, column_types,
feature_columns, label_column,
log_metrics, eval_metric, ngpus_per_trial,
console_log, seed=None, verbosity=2):
"""
Parameters
----------
args
The arguments
reporter
Reporter of the HPO scheduler.
If it is set to None, we won't use the reporter and will just run a single trial.
train_df_path
Path of the training dataframe
tuning_df_path
Path of the tuning dataframe
time_limit
The time limit of calling this function
time_start
The starting timestamp of the experiment
base_config
Basic configuration
problem_type
Type of the problem.
column_types
Type of columns
feature_columns
The feature columns
label_column
Label column
log_metrics
Metrics for logging
eval_metric
The stopping metric
ngpus_per_trial
The number of GPUs to use per each trial
console_log
Whether to log it to console
seed
The random seed
verbosity
The verbosity
"""
import warnings
warnings.filterwarnings('ignore', module='mxnet')
warnings.filterwarnings('ignore', module='sklearn')
set_seed(seed)
is_fake_reporter = isinstance(reporter, FakeReporter)
if time_limit is not None:
start_train_tick = time.time()
time_left = time_limit - (start_train_tick - time_start)
if time_left <= 0:
if not is_fake_reporter:
reporter.terminate()
return
if is_fake_reporter:
search_space = args.rand
task_id = 0
else:
search_space = args['search_space']
task_id = args.task_id
# Get the log metric scorers
if isinstance(log_metrics, str):
log_metrics = [log_metrics]
# Load the cached training and tuning dataframes from the pickled files
train_data = pd.read_pickle(train_df_path)
tuning_data = pd.read_pickle(tuning_df_path)
log_metric_scorers = [get_metric(ele) for ele in log_metrics]
eval_metric_scorer = get_metric(eval_metric)
greater_is_better = eval_metric_scorer.greater_is_better
cfg = base_config.clone()
specified_values = []
for key in search_space.keys():
specified_values.append(key)
specified_values.append(search_space[key])
cfg.merge_from_list(specified_values)
exp_dir = cfg.misc.exp_dir
exp_dir = os.path.join(exp_dir, 'task{}'.format(task_id))
os.makedirs(exp_dir, exist_ok=True)
cfg.defrost()
cfg.misc.exp_dir = exp_dir
cfg.freeze()
logger = logging.getLogger()
set_logger_verbosity(verbosity, logger)
logging_config(folder=exp_dir, name='training', logger=logger, console=console_log,
level=logging.DEBUG,
console_level=verbosity2loglevel(verbosity))
logger.log(10, cfg)
# Load backbone model
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
# Build Preprocessor + Preprocess the training dataset + Inference problem type
# TODO Dynamically cache the preprocessor that has been fitted.
if problem_type == MULTICLASS or problem_type == BINARY:
label_generator = LabelEncoder()
label_generator.fit(pd.concat([train_data[label_column], tuning_data[label_column]]))
else:
label_generator = None
preprocessor = MultiModalTextFeatureProcessor(column_types=column_types,
label_column=label_column,
tokenizer_name=cfg.model.backbone.name,
label_generator=label_generator,
cfg=cfg.preprocessing)
logger.info('Fitting and transforming the train data...')
train_dataset = preprocessor.fit_transform(train_data[feature_columns],
train_data[label_column])
with open(os.path.join(exp_dir, 'preprocessor.pkl'), 'wb') as of:
pickle.dump(preprocessor, of)
logger.info(f'Done! Preprocessor saved to {os.path.join(exp_dir, "preprocessor.pkl")}')
logger.log(10, 'Train Data')
logger.log(10, get_stats_string(preprocessor, train_dataset, is_train=True))
logger.info('Process dev set...')
tuning_dataset = preprocessor.transform(tuning_data[feature_columns],
tuning_data[label_column])
logger.info('Done!')
# Auto Max Length
if cfg.preprocessing.text.auto_max_length:
max_length = auto_shrink_max_length(
train_dataset,
insert_sep=cfg.model.insert_sep,
num_text_features=len(preprocessor.text_feature_names),
auto_max_length_quantile=cfg.preprocessing.text.auto_max_length_quantile,
round_to=cfg.preprocessing.text.auto_max_length_round_to,
max_length=cfg.preprocessing.text.max_length)
else:
max_length = cfg.preprocessing.text.max_length
train_stochastic_chunk = cfg.model.train_stochastic_chunk
test_stochastic_chunk = cfg.model.test_stochastic_chunk
inference_num_repeat = cfg.model.inference_num_repeat
if max_length < cfg.preprocessing.text.max_length:
inference_num_repeat = 1
cfg.defrost()
cfg.preprocessing.text.max_length = max_length
cfg.model.inference_num_repeat = inference_num_repeat
cfg.freeze()
with open(os.path.join(exp_dir, 'cfg.yml'), 'w') as f:
f.write(str(cfg))
logger.info(f'Max length for chunking text: {max_length}, '
f'Stochastic chunk: Train-{train_stochastic_chunk}/Test-{test_stochastic_chunk}, '
f'Test #repeat: {inference_num_repeat}.')
cls_id, sep_id = get_cls_sep_id(tokenizer)
train_batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(preprocessor.text_feature_names),
num_categorical_inputs=len(preprocessor.categorical_feature_names),
num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,
mode='train', stochastic_chunk=train_stochastic_chunk,
insert_sep=cfg.model.insert_sep)
test_batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(preprocessor.text_feature_names),
num_categorical_inputs=len(preprocessor.categorical_feature_names),
num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,
mode='test', stochastic_chunk=test_stochastic_chunk,
insert_sep=cfg.model.insert_sep)
# Get the ground-truth dev labels
gt_dev_labels = np.array([ele[-1] for ele in tuning_dataset])
if problem_type == REGRESSION:
gt_dev_labels = preprocessor.label_scaler.inverse_transform(np.expand_dims(gt_dev_labels,
axis=-1))[:, 0]
ctx_l = get_mxnet_available_ctx()
if ngpus_per_trial == 0:
ctx_l = [mx.cpu()]
else:
ctx_l = ctx_l[:ngpus_per_trial]
base_batch_size = cfg.optimization.per_device_batch_size
num_accumulated = int(np.ceil(cfg.optimization.batch_size / (base_batch_size * len(ctx_l))))
inference_base_batch_size = base_batch_size * cfg.optimization.val_batch_size_mult
train_dataloader = DataLoader(train_dataset,
batch_size=base_batch_size,
shuffle=True,
batchify_fn=train_batchify_fn)
dev_dataloader = DataLoader(tuning_dataset,
batch_size=inference_base_batch_size,
shuffle=False,
batchify_fn=test_batchify_fn)
if problem_type == REGRESSION:
out_shape = 1
elif problem_type == MULTICLASS:
out_shape = len(label_generator.classes_)
elif problem_type == BINARY:
assert len(label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(preprocessor.categorical_feature_names),
num_numerical_features=len(preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0 else len(preprocessor.numerical_feature_names),
num_categories=preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.initialize_with_pretrained_backbone(backbone_params_path, ctx=ctx_l)
net.hybridize()
num_total_params, num_total_fixed_params = count_parameters(net.collect_params())
logger.info('#Total Params/Fixed Params={}/{}'.format(num_total_params,
num_total_fixed_params))
# Initialize the optimizer
updates_per_epoch = int(np.ceil(len(train_dataloader) / (num_accumulated * len(ctx_l))))
optimizer, optimizer_params, max_update \
= get_optimizer(cfg.optimization,
updates_per_epoch=updates_per_epoch)
valid_interval = int(math.ceil(cfg.optimization.valid_frequency * updates_per_epoch))
train_log_interval = int(math.ceil(cfg.optimization.log_frequency * updates_per_epoch))
if 0 < cfg.optimization.layerwise_lr_decay < 1:
apply_layerwise_decay(net.text_backbone,
cfg.optimization.layerwise_lr_decay,
backbone_name=cfg.model.backbone.name)
freeze_layers(net.text_backbone,
backbone_name=cfg.model.backbone.name,
num_trainable_layers=cfg.model.num_trainable_layers)
# Do not apply weight decay to all the LayerNorm and bias
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
trainer = mx.gluon.Trainer(params,
optimizer, optimizer_params,
update_on_kvstore=False)
# Set grad_req if gradient accumulation is required
if num_accumulated > 1:
logger.log(15, 'Using gradient accumulation.'
' Global batch size = {}'.format(cfg.optimization.batch_size))
for p in params:
p.grad_req = 'add'
net.collect_params().zero_grad()
train_loop_dataloader = grouper(repeat(train_dataloader), len(ctx_l))
log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]
log_num_samples_l = [0 for _ in ctx_l]
logging_start_tick = time.time()
nbest = cfg.optimization.nbest
best_performance_score = [] # Stores the best performing checkpoints
best_performance_update_idx = [] # Stores the update index that reached the best validation performance
best_score = None
mx.npx.waitall()
no_better_rounds = 0
report_idx = 0
start_tick = time.time()
if time_limit is not None:
time_limit -= start_tick - time_start
if time_limit <= 0:
if not is_fake_reporter:
reporter.terminate()
return
best_report_items = None
report_local_jsonl_f = open(os.path.join(exp_dir, 'results_local.jsonl'), 'w')
logger.info(f'Local training results will be saved to '
f'{os.path.join(exp_dir, "results_local.jsonl")}.')
for update_idx in range(max_update):
for accum_idx in range(num_accumulated):
sample_l = next(train_loop_dataloader)
loss_l = []
for i, (sample, ctx) in enumerate(zip(sample_l, ctx_l)):
feature_batch, label_batch = sample
feature_batch = move_to_ctx(feature_batch, ctx)
label_batch = move_to_ctx(label_batch, ctx)
with mx.autograd.record():
pred = net(feature_batch)
if problem_type == MULTICLASS or problem_type == BINARY:
logits = mx.npx.log_softmax(pred, axis=-1)
loss = - mx.npx.pick(logits,
mx.np.expand_dims(label_batch, axis=-1))
elif problem_type == REGRESSION:
loss = mx.np.square(pred - mx.np.expand_dims(label_batch, axis=-1))
loss_l.append(loss.mean() / len(ctx_l) / num_accumulated)
log_loss_l[i] += loss_l[i] * len(ctx_l) * loss.shape[0] * num_accumulated
log_num_samples_l[i] += loss.shape[0]
for loss in loss_l:
loss.backward()
# Begin to update
trainer.allreduce_grads()
total_norm, ratio, is_finite = clip_grad_global_norm(params, cfg.optimization.max_grad_norm)
if not cfg.model._disable_update:
trainer.update(1.0, ignore_stale_grad=True)
# Clear after update
if num_accumulated > 1:
net.collect_params().zero_grad()
if (update_idx + 1) % train_log_interval == 0:
log_loss = sum([ele.as_in_ctx(ctx_l[0]) for ele in log_loss_l]).asnumpy()
log_num_samples = sum(log_num_samples_l)
logger.log(15,
'[Iter {}/{}, Epoch {}] train loss={:0.2e}, gnorm={:0.2e}, lr={:0.2e}, #samples processed={},'
' #sample per second={:.2f}. ETA={:.2f}min'
.format(update_idx + 1, max_update,
int(update_idx / updates_per_epoch),
log_loss / log_num_samples, total_norm, trainer.learning_rate,
log_num_samples,
log_num_samples / (time.time() - logging_start_tick),
(time.time() - start_tick) / (update_idx + 1)
* (max_update - update_idx - 1) / 60))
logging_start_tick = time.time()
log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]
log_num_samples_l = [0 for _ in ctx_l]
if (update_idx + 1) % valid_interval == 0 or (update_idx + 1) == max_update:
valid_start_tick = time.time()
dev_predictions = \
_classification_regression_predict(net,
dataloader=dev_dataloader,
problem_type=problem_type,
label_scaler=preprocessor.label_scaler,
has_label=False,
num_repeat=inference_num_repeat)
log_scores = [calculate_metric(scorer, gt_dev_labels,
dev_predictions,
problem_type)
for scorer in log_metric_scorers]
dev_score = calculate_metric(eval_metric_scorer, gt_dev_labels,
dev_predictions,
problem_type)
valid_time_spent = time.time() - valid_start_tick
find_better = False
find_topn_better = False
if len(best_performance_score) < nbest:
best_performance_score.append(dev_score)
best_performance_update_idx.append(update_idx + 1)
net.save_parameters(
os.path.join(exp_dir,
f'nbest_model{len(best_performance_score) - 1}.params'))
find_topn_better = True
if best_score is None or (greater_is_better and dev_score >= best_score)\
or (not greater_is_better and dev_score <= best_score):
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
else:
# First try to update the top-K
if greater_is_better:
if dev_score >= min(best_performance_score):
find_topn_better = True
replace_idx = np.argmin(best_performance_score)
best_performance_score[replace_idx] = dev_score
best_performance_update_idx[replace_idx] = update_idx + 1
net.save_parameters(
os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))
if dev_score >= best_score:
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
else:
if dev_score <= max(best_performance_score):
find_topn_better = True
replace_idx = np.argmax(best_performance_score)
best_performance_score[replace_idx] = dev_score
best_performance_update_idx[replace_idx] = update_idx + 1
net.save_parameters(
os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))
if dev_score <= best_score:
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
if not find_better:
no_better_rounds += 1
else:
no_better_rounds = 0
mx.npx.waitall()
loss_string = ', '.join(['{}={:0.4e}'.format(metric.name, score)
for score, metric in zip(log_scores, log_metric_scorers)])
logger.log(25, '[Iter {}/{}, Epoch {}] valid {}, time spent={:.3f}s,'
' total time spent={:.2f}min. Find new best={}, Find new top-{}={}'.format(
update_idx + 1, max_update, int(update_idx / updates_per_epoch),
loss_string, valid_time_spent, (time.time() - start_tick) / 60,
find_better, nbest, find_topn_better))
if reporter is not None:
report_items = [('iteration', update_idx + 1),
('report_idx', report_idx + 1),
('epoch', int(update_idx / updates_per_epoch))] + \
[(metric.name, score)
for score, metric in zip(log_scores, log_metric_scorers)] + \
[('find_better', find_better),
('find_new_topn', find_topn_better),
('nbest_stat', json.dumps([best_performance_score,
best_performance_update_idx])),
('elapsed_time', int(time.time() - start_tick))]
if eval_metric_scorer._sign < 0:
report_items.append(('reward_attr', -dev_score))
else:
report_items.append(('reward_attr', dev_score))
report_items.append(('eval_metric', eval_metric_scorer.name))
report_items.append(('exp_dir', exp_dir))
if find_better:
best_report_items = report_items
reporter(**dict(report_items))
report_local_jsonl_f.write(json.dumps(dict(report_items)) + '\n')
report_local_jsonl_f.flush()
report_idx += 1
if no_better_rounds >= cfg.optimization.early_stopping_patience:
logger.info('Early stopping patience reached!')
break
total_time_spent = time.time() - start_tick
if time_limit is not None and total_time_spent > time_limit:
break
# Average checkpoints
best_report_items_dict = dict(best_report_items)
best_report_items_dict['report_idx'] = report_idx + 1
reporter(**best_report_items_dict)
report_local_jsonl_f.write(json.dumps(best_report_items_dict) + '\n')
report_local_jsonl_f.close()
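# The trainer above reaches cfg.optimization.batch_size by accumulating gradients over
# num_accumulated forward/backward passes on each of the len(ctx_l) devices before calling
# trainer.update. The arithmetic sketch below reproduces how num_accumulated and
# updates_per_epoch are derived; the example numbers are hypothetical defaults.
def _illustrative_accumulation_math(global_batch_size=128, per_device_batch_size=16,
                                    num_devices=2, num_train_batches=1000):
    """Sketch of the num_accumulated / updates_per_epoch arithmetic used in train_function."""
    import numpy as np
    num_accumulated = int(np.ceil(global_batch_size / (per_device_batch_size * num_devices)))
    # Each parameter update consumes num_accumulated batches on every device.
    updates_per_epoch = int(np.ceil(num_train_batches / (num_accumulated * num_devices)))
    return num_accumulated, updates_per_epoch   # -> (4, 125) for the defaults above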
def get_recommended_resource(nthreads_per_trial=None,
ngpus_per_trial=None) -> Tuple[int, int]:
"""Get the recommended resources.
Internally, we will try to use the GPU whenever it is available. That means we will use
a single GPU for finetuning.
Parameters
----------
nthreads_per_trial
The number of threads per trial provided by the user.
ngpus_per_trial
The number of GPUs per trial provided by the user.
Returns
-------
nthreads_per_trial
The recommended number of threads per trial.
ngpus_per_trial
The recommended number of GPUs per trial.
"""
if nthreads_per_trial is None and ngpus_per_trial is None:
nthreads_per_trial = get_cpu_count()
ngpus_per_trial = 1
elif nthreads_per_trial is not None and ngpus_per_trial is None:
ngpus_per_trial = 1
elif nthreads_per_trial is None and ngpus_per_trial is not None:
if ngpus_per_trial != 0:
num_parallel_jobs = get_gpu_count() // ngpus_per_trial
nthreads_per_trial = max(get_cpu_count() // num_parallel_jobs, 1)
else:
nthreads_per_trial = get_cpu_count()
nthreads_per_trial = min(nthreads_per_trial, get_cpu_count())
ngpus_per_trial = min(ngpus_per_trial, get_gpu_count())
assert nthreads_per_trial > 0 and ngpus_per_trial >= 0,\
'Invalid number of threads and number of GPUs.'
return nthreads_per_trial, ngpus_per_trial
@use_np
class MultiModalTextModel:
"""Learner of the multimodal text data.
It will be called when the user calls `fit()` in TextPredictor.
It is used for making predictions on new data and viewing information about
models trained during `fit()`.
"""
def __init__(self, column_types,
feature_columns,
label_columns,
problem_type,
eval_metric,
log_metrics,
output_directory=None):
"""Creates model object.
Parameters
----------
column_types
The column types.
feature_columns
Name of the feature columns
label_columns
Name of the label columns.
problem_type
Type of the problem
eval_metric
The evaluation metric
log_metrics
The metrics for logging
output_directory
The output directory to save the model
"""
super(MultiModalTextModel, self).__init__()
self._base_config = base_cfg()
self._base_config.defrost()
if output_directory is not None:
self._output_directory = self._base_config.misc.exp_dir = output_directory
self._base_config.misc.exp_dir = os.path.abspath(self._base_config.misc.exp_dir)
self._base_config.freeze()
self._output_directory = self._base_config.misc.exp_dir
self._column_types = column_types
self._eval_metric = eval_metric
self._log_metrics = log_metrics
self._label_columns = label_columns
self._feature_columns = feature_columns
self._problem_type = problem_type
# Need to be set in the train call
self._net = None # Network for training and inference
self._embed_net = None # Network for extracting the embedding
self._config = None
self._results = None
self._preprocessor = None
@property
def preprocessor(self):
return self._preprocessor
@property
def output_directory(self):
""" Get the output directory. The trained model and the training logs
will be saved to this folder """
return self._output_directory
@property
def label_columns(self):
"""Name of the label columns"""
return self._label_columns
@property
def problem_type(self):
"""Types of the problem"""
return self._problem_type
@property
def feature_columns(self):
"""Name of the features"""
return self._feature_columns
@property
def base_config(self):
"""The basic configuration. Internally, we will fill values in the base config by values
in the search space."""
return self._base_config
@property
def results(self):
"""Results of the final model"""
return self._results
@property
def config(self):
"""The configuration of the final trained model."""
return self._config
@property
def net(self):
return self._net
def train(self, train_data, tuning_data,
num_cpus=None,
num_gpus=None,
time_limit=None,
tune_kwargs=None,
search_space=None,
plot_results=False,
console_log=True,
seed=None,
verbosity=2):
"""The train function.
Parameters
----------
train_data
The training data
tuning_data
The tuning data
num_cpus
Number of CPUs for each trial
num_gpus
Number of GPUs for each trial
time_limit
            The time limit in seconds
tune_kwargs
Parameters of the HPO algorithms. For example, the scheduling
algorithm, scheduling backend, HPO algorithm.
search_space
The search space options
plot_results
Whether to plot results or not
console_log
Whether to log into the console
seed
The seed
verbosity
Verbosity
"""
set_seed(seed)
set_logger_verbosity(verbosity, logger)
start_tick = time.time()
assert len(self._label_columns) == 1, 'Currently, we only support single label.'
# TODO(sxjscience) Try to support S3
os.makedirs(self._output_directory, exist_ok=True)
if search_space is None:
search_space = \
ag_text_presets.create('default')['models']['MultimodalTextModel']['search_space']
search_space_reg = args(search_space=space.Dict(**search_space))
# Scheduler and searcher for HPO
if tune_kwargs is None:
tune_kwargs = ag_text_presets.create('default')['tune_kwargs']
scheduler_options = tune_kwargs['scheduler_options']
num_cpus, num_gpus = get_recommended_resource(num_cpus, num_gpus)
if num_gpus == 0:
if 'AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU' in os.environ:
use_warning = int(os.environ['AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU'])
else:
use_warning = False
if use_warning:
                warnings.warn('No GPU is detected in the machine. We recommend running '
                              'TextPredictor on a GPU-enabled instance because training '
                              'on CPU is currently slow.')
            else:
                raise RuntimeError('No GPU is detected in the machine and we will not '
                                   'proceed to run TextPredictor because it would train '
                                   'too slowly with only CPU. You may try to set `ngpus_per_trial` '
                                   'to a number larger than 0 when calling `.fit()`. '
                                   'Also, you can set the environment variable '
                                   '"AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU=1" to force the model to '
                                   'use CPU for training.')
logger.info(f"The GluonNLP V0 backend is used. "
f"We will use {num_cpus} cpus and "
f"{num_gpus} gpus to train each trial.")
if scheduler_options is None:
scheduler_options = dict()
if plot_results is None:
if in_ipynb():
plot_results = True
else:
plot_results = False
scheduler_options = compile_scheduler_options_v2(
scheduler_options=scheduler_options,
scheduler=tune_kwargs['search_strategy'],
search_strategy=tune_kwargs['searcher'],
search_options=tune_kwargs['search_options'],
nthreads_per_trial=num_cpus,
ngpus_per_trial=num_gpus,
checkpoint=os.path.join(self._output_directory, 'checkpoint.ag'),
num_trials=tune_kwargs['num_trials'],
time_out=time_limit,
resume=False,
visualizer=scheduler_options.get('visualizer'),
time_attr='report_idx',
reward_attr='reward_attr',
dist_ip_addrs=scheduler_options.get('dist_ip_addrs'))
# Create a temporary cache file. The internal train function will load the
# temporary cache.
os.makedirs(os.path.join(self._output_directory, 'data_cache'), exist_ok=True)
train_df_path = os.path.join(self._output_directory, 'data_cache',
'cache_train_dataframe.pd.pkl')
tuning_df_path = os.path.join(self._output_directory, 'data_cache',
'cache_tuning_dataframe.pd.pkl')
train_data.to_pickle(train_df_path)
tuning_data.to_pickle(tuning_df_path)
train_fn = search_space_reg(functools.partial(train_function,
train_df_path=train_df_path,
time_limit=time_limit,
time_start=start_tick,
tuning_df_path=tuning_df_path,
base_config=self.base_config,
problem_type=self.problem_type,
column_types=self._column_types,
feature_columns=self._feature_columns,
label_column=self._label_columns[0],
log_metrics=self._log_metrics,
eval_metric=self._eval_metric,
ngpus_per_trial=scheduler_options['resource']['num_gpus'],
console_log=console_log,
verbosity=verbosity))
no_job_finished_err_msg =\
'No training job has been completed! '\
'There are two possibilities: '\
'1) The time_limit is too small, '\
'or 2) There are some internal errors in AutoGluon. '\
'For the first case, you can increase the time_limit or set it to '\
            'None, e.g., setting "predictor.fit(..., time_limit=None)". To '\
'further investigate the root cause, you can also try to set the '\
'"verbosity=3" and try again, i.e., predictor.set_verbosity(3).'
if scheduler_options['num_trials'] == 1:
train_fn(train_fn.args['search_space'],
train_fn.args['_default_config'])
best_model_saved_dir_path = os.path.join(self._output_directory, 'task0')
cfg_path = os.path.join(self._output_directory, 'task0', 'cfg.yml')
# Check whether the job has finished
if not os.path.exists(cfg_path)\
or not os.path.exists(os.path.join(self._output_directory,
'task0', 'best_model.params')):
raise RuntimeError(no_job_finished_err_msg)
cfg = self.base_config.clone_merge(cfg_path)
local_results = pd.read_json(os.path.join(self._output_directory, 'task0',
'results_local.jsonl'), lines=True)
if plot_results:
plot_training_curves = os.path.join(self._output_directory,
'plot_training_curves.png')
import matplotlib.pyplot as plt
plt.ylabel(self._eval_metric)
plt.xlabel('report_idx')
plt.title("Performance vs Training-Time")
plt.plot(local_results['report_idx'].iloc[:-1],
local_results[local_results['eval_metric'][0]].iloc[:-1], label=f'task0')
plt.legend(loc='best')
plt.savefig(plot_training_curves)
plt.show()
self._results = local_results
else:
if tune_kwargs['search_strategy'] != 'local':
# Force forkserver if it's not using the local sequential HPO
force_forkserver()
scheduler_cls, scheduler_params = scheduler_factory(scheduler_options)
# Create scheduler, run HPO experiment
scheduler = scheduler_cls(train_fn, **scheduler_options)
scheduler.run()
scheduler.join_jobs()
if len(scheduler.config_history) == 0:
raise RuntimeError(no_job_finished_err_msg)
best_config = scheduler.get_best_config()
            logger.info('Results={}'.format(scheduler.searcher._results))
logger.info('Best_config={}'.format(best_config))
best_task_id = scheduler.get_best_task_id()
best_model_saved_dir_path = os.path.join(self._output_directory,
'task{}'.format(best_task_id))
best_cfg_path = os.path.join(best_model_saved_dir_path, 'cfg.yml')
cfg = self.base_config.clone_merge(best_cfg_path)
if plot_results:
plot_training_curves = os.path.join(self._output_directory,
'plot_training_curves.png')
scheduler.get_training_curves(filename=plot_training_curves,
plot=plot_results,
use_legend=True)
self._results = dict()
self._results.update(best_reward=scheduler.get_best_reward(),
best_config=scheduler.get_best_config(),
total_time=time.time() - start_tick,
metadata=scheduler.metadata,
training_history=scheduler.training_history,
config_history=scheduler.config_history,
reward_attr=scheduler._reward_attr,
config=cfg)
        # Consider moving this to a separate predictor
self._config = cfg
# Average parameters
# TODO(sxjscience) Clean up the temporary spaces used to store the intermediate checkpoints.
if cfg.model.use_avg_nbest:
nbest_path_l = []
for best_id in range(cfg.optimization.nbest):
nbest_path = os.path.join(best_model_saved_dir_path, f'nbest_model{best_id}.params')
if os.path.exists(nbest_path):
nbest_path_l.append(nbest_path)
avg_nbest_path = os.path.join(best_model_saved_dir_path, 'nbest_model_avg.params')
average_checkpoints(nbest_path_l, avg_nbest_path)
with open(os.path.join(best_model_saved_dir_path, 'preprocessor.pkl'), 'rb') as in_f:
self._preprocessor = pickle.load(in_f)
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
if self._problem_type == REGRESSION:
out_shape = 1
elif self._problem_type == MULTICLASS:
out_shape = len(self._preprocessor.label_generator.classes_)
elif self._problem_type == BINARY:
assert len(self._preprocessor.label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(self._preprocessor.categorical_feature_names),
num_numerical_features=len(self._preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(self._preprocessor.numerical_feature_names) == 0 else len(
self._preprocessor.numerical_feature_names),
num_categories=self._preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.hybridize()
if cfg.model.use_avg_nbest:
net.load_parameters(avg_nbest_path, ctx=mx.cpu())
else:
net.load_parameters(os.path.join(best_model_saved_dir_path, 'best_model.params'),
ctx=mx.cpu())
self._net = net
mx.npx.waitall()
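    # Invocation sketch (hypothetical data and settings, not part of the original file):
    #   model = MultiModalTextModel(column_types, feature_columns, ['label'],
    #                               problem_type, eval_metric, log_metrics,
    #                               output_directory='ag_text_output')
    #   model.train(train_df, tuning_df, num_cpus=8, num_gpus=1, time_limit=3600)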
def evaluate(self, data, metrics=None, stochastic_chunk=None, num_repeat=None):
""" Report the predictive performance evaluated for a given dataset.
Parameters
----------
data : str or :class:`TabularDataset` or `pandas.DataFrame`
This Dataset must also contain the label-column with the same column-name as specified during `fit()`.
If str is passed, `valid_data` will be loaded using the str value as the file path.
metrics : str or List[str] or None
Name of metric or a list of names of metrics to report.
If it is not given, we will return the score of the stored eval_metric.
stochastic_chunk
Whether to use stochastic chunk
num_repeat
The number of repeats
Returns
-------
        ret : single number or a dict of metric --> metric scores
            A single score if one metric is specified, otherwise a dict mapping
            each metric name to its score.
"""
if isinstance(metrics, str):
metrics = [metrics]
elif metrics is None:
metrics = [self._eval_metric]
assert self.net is not None
# We will always use all resources that are available for evaluation
ctx_l = get_mxnet_available_ctx()
self.net.collect_params().reset_ctx(ctx_l)
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
data = data[self._feature_columns + self._label_columns]
if self._problem_type == MULTICLASS or self._problem_type == BINARY:
ground_truth = self.preprocessor.label_generator.transform(
data[self._label_columns[0]])
predictions = self.predict_proba(data,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
else:
ground_truth = pd.to_numeric(data[self._label_columns[0]]).to_numpy().astype(np.float32)
predictions = self.predict(data,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
metric_scores = [calculate_metric(get_metric(metric),
ground_truth, predictions, self._problem_type)
for metric in metrics]
# Once the inference is completed, we will cache all parameters back
# to CPU to avoid memory overflow.
self.net.collect_params().reset_ctx(mx.cpu())
if len(metric_scores) == 1:
return metric_scores[0]
else:
return {metric: score for metric, score in zip(metrics, metric_scores)}
def _internal_predict(self, data, get_original_labels=True, get_probabilities=False,
stochastic_chunk=None, num_repeat=None):
assert self.net is not None
assert self.config is not None
# We will always use all resources that are available for evaluation
ctx_l = get_mxnet_available_ctx()
self.net.collect_params().reset_ctx(ctx_l)
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
dataset = self.preprocessor.transform(data[self._feature_columns])
inference_batch_size = self.config.optimization.per_device_batch_size \
* self.config.optimization.val_batch_size_mult
cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)
if stochastic_chunk is None:
stochastic_chunk = self.config.model.test_stochastic_chunk
batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(self.preprocessor.text_feature_names),
num_categorical_inputs=len(self.preprocessor.categorical_feature_names),
num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id,
max_length=self.config.preprocessing.text.max_length,
mode='test',
stochastic_chunk=stochastic_chunk,
insert_sep=self.config.model.insert_sep)
dataloader = DataLoader(dataset,
batch_size=inference_batch_size,
shuffle=False,
batchify_fn=batchify_fn)
if num_repeat is None:
num_repeat = self.config.model.inference_num_repeat
test_predictions = _classification_regression_predict(
self._net,
dataloader=dataloader,
problem_type=self._problem_type,
label_scaler=self.preprocessor.label_scaler,
has_label=False,
num_repeat=num_repeat)
# Once the inference is completed, we will cache all parameters back
# to CPU to avoid memory overflow.
self.net.collect_params().reset_ctx(mx.cpu())
if self._problem_type == MULTICLASS or self._problem_type == BINARY:
if get_probabilities:
return test_predictions
else:
test_predictions = test_predictions.argmax(axis=-1)
if get_original_labels:
test_predictions = np.array(
self.preprocessor.label_generator.inverse_transform(test_predictions))
return test_predictions
@property
def class_labels(self):
"""The original name of the class labels.
For example, the tabular data may contain classes equal to
"entailment", "contradiction", "neutral". Internally, these will be converted to
0, 1, 2, ...
This function returns the original names of these raw labels.
Returns
-------
ret
            List that contains the class names. It will be None if it's not a classification problem.
"""
if self.problem_type == MULTICLASS or self.problem_type == BINARY:
return self._preprocessor.label_generator.classes_
else:
warnings.warn('Accessing class names for a non-classification problem. Return None.')
return None
def predict_proba(self, test_data, stochastic_chunk=None, num_repeat=None):
"""Predict class probabilities instead of class labels (for classification tasks).
Parameters
----------
test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str
The test data to get predictions for. Can be DataFrame/Dataset or a file that can
be loaded into DataFrame/Dataset.
stochastic_chunk : bool
Whether to enable stochastic chunk
num_repeat : int or None
The number of repeats for running the inference model.
Returns
-------
probabilities : array
The predicted class probabilities for each sample.
Shape of this array is (#Samples, num_class).
Here, the i-th number means the probability of belonging to the i-th class.
            You can access the class names by calling `self.class_labels`.
"""
assert self.problem_type == MULTICLASS or self.problem_type == BINARY
return self._internal_predict(test_data,
get_original_labels=False,
get_probabilities=True,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
def predict(self, test_data, get_original_labels=True, stochastic_chunk=None, num_repeat=None):
"""Make predictions on new data.
Parameters
----------
test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str
The test data to get predictions for. Can be DataFrame/Dataset or a file that can be loaded into DataFrame/Dataset.
get_original_labels : bool, default = True
Whether or not predictions should be formatted in terms of the original labels.
For example, the labels might be "entailment" or "not_entailment" and predictions could either be of this form (if `True`) or integer-indices corresponding to these classes (if `False`).
stochastic_chunk : bool or None, default = None
Whether to turn on stochastic chunk
num_repeat : int or None
The number of repeats
Returns
-------
predictions : array
The predictions for each sample. Shape of this array is (#Samples,).
"""
return self._internal_predict(test_data,
get_original_labels=get_original_labels,
get_probabilities=False,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
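    # Usage sketch (hypothetical test dataframe `test_df` for a fitted classifier):
    #   proba = model.predict_proba(test_df)    # shape (#samples, num_class)
    #   labels = model.predict(test_df)         # original label values
    #   score = model.evaluate(test_df)         # score of the stored eval_metric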
def save(self, dir_path):
"""Save this model to disk.
Parameters
----------
dir_path : str
Directory where the model should be saved.
"""
os.makedirs(dir_path, exist_ok=True)
self.net.save_parameters(os.path.join(dir_path, 'net.params'))
with open(os.path.join(dir_path, 'cfg.yml'), 'w') as of:
of.write(self.config.dump())
# Save preprocessor
with open(os.path.join(dir_path, 'preprocessor.pkl'), 'wb') as of:
pickle.dump(self.preprocessor, of)
if not isinstance(self._eval_metric, str):
eval_metric = self._eval_metric.name
else:
eval_metric = self._eval_metric
log_metrics = []
for metric in self._log_metrics:
if not isinstance(metric, str):
log_metrics.append(metric.name)
else:
log_metrics.append(metric)
# Save additional assets about the parsed dataset information
with open(os.path.join(dir_path, 'assets.json'), 'w') as of:
json.dump(
{
'problem_type': self._problem_type,
'label_columns': self._label_columns,
'eval_metric': eval_metric,
'log_metrics': log_metrics,
'feature_columns': self._feature_columns,
'column_types': self._column_types,
'version': version.__version__,
}, of, ensure_ascii=True)
@classmethod
def load(cls, dir_path: str):
"""Load a model object previously produced by `fit()` from disk and return this object.
It is highly recommended the predictor be loaded with the exact AutoGluon version
it was fit with.
Parameters
----------
dir_path
Path to directory where this model was previously saved.
Returns
-------
model
            A `MultiModalTextModel` object that can be used for making predictions on new data.
"""
cfg = base_cfg().clone_merge(os.path.join(dir_path, 'cfg.yml'))
with open(os.path.join(dir_path, 'preprocessor.pkl'), 'rb') as in_f:
preprocessor = pickle.load(in_f)
with open(os.path.join(dir_path, 'assets.json'), 'r') as f:
assets = json.load(f)
label_columns = assets['label_columns']
feature_columns = assets['feature_columns']
eval_metric = assets['eval_metric']
log_metrics = assets['log_metrics']
problem_type = assets['problem_type']
column_types = assets['column_types']
# TODO(sxjscience) Post 0.1. In general, we will need to support compatible version check
version = assets['version']
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
if problem_type == REGRESSION:
out_shape = 1
elif problem_type == MULTICLASS:
out_shape = len(preprocessor.label_generator.classes_)
elif problem_type == BINARY:
assert len(preprocessor.label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(preprocessor.categorical_feature_names),
num_numerical_features=len(preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0
else len(preprocessor.numerical_feature_names),
num_categories=preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.hybridize()
ctx_l = mx.cpu()
net.load_parameters(os.path.join(dir_path, 'net.params'), ctx=ctx_l)
model = cls(column_types=column_types,
label_columns=label_columns,
feature_columns=feature_columns,
problem_type=problem_type,
eval_metric=eval_metric,
log_metrics=log_metrics)
model._net = net
model._config = cfg
model._preprocessor = preprocessor
return model
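    # Round-trip sketch (hypothetical directory path):
    #   model.save('saved_text_model')
    #   restored = MultiModalTextModel.load('saved_text_model')
    #   restored.predict(test_df)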
def extract_embedding(self, data, stochastic_chunk=None, num_repeat=None):
"""Extract the embedding from the pretrained model.
Parameters
----------
data
Data that can be parsed to pandas dataframe
stochastic_chunk
Whether to use stochastic chunk
num_repeat
The number of repeats
Returns
-------
embeddings
The output embeddings will have shape
(#samples, embedding_dim)
"""
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
dataset = self.preprocessor.transform(data[self.feature_columns])
inference_batch_size = self.config.optimization.per_device_batch_size \
* self.config.optimization.val_batch_size_mult
cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)
if stochastic_chunk is None:
stochastic_chunk = self.config.model.test_stochastic_chunk
batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(self.preprocessor.text_feature_names),
num_categorical_inputs=len(self.preprocessor.categorical_feature_names),
num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id,
max_length=self.config.preprocessing.text.max_length,
mode='test',
stochastic_chunk=stochastic_chunk,
insert_sep=self.config.model.insert_sep)
dataloader = DataLoader(dataset,
batch_size=inference_batch_size,
shuffle=False,
batchify_fn=batchify_fn)
if self._embed_net is None:
embed_net = MultiModalWithPretrainedTextNN(
text_backbone=self.net.text_backbone,
num_text_features=1,
num_categorical_features=len(self.preprocessor.categorical_feature_names),
num_numerical_features=len(self.preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(self.preprocessor.numerical_feature_names) == 0
else len(self.preprocessor.numerical_feature_names),
num_categories=self.preprocessor.categorical_num_categories,
get_embedding=True,
cfg=self.config.model.network,
out_shape=self.net.out_shape,
params=self.net.collect_params(),
prefix='embed_net_')
embed_net.hybridize()
self._embed_net = embed_net
if num_repeat is None:
num_repeat = self.config.model.inference_num_repeat
ctx_l = get_mxnet_available_ctx()
self._embed_net.collect_params().reset_ctx(ctx_l)
embeddings = _classification_regression_predict(self._embed_net,
dataloader=dataloader,
problem_type=self._problem_type,
label_scaler=self.preprocessor.label_scaler,
has_label=False,
extract_embedding=True,
num_repeat=num_repeat)
self._embed_net.collect_params().reset_ctx(mx.cpu())
return embeddings
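    # Embedding-extraction sketch (hypothetical dataframe `df` containing the fitted feature columns):
    #   embeddings = model.extract_embedding(df)   # numpy array of shape (#samples, embedding_dim)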
|
import pymysql
import time
import os
import socket
import threading
from time import sleep
from copy import deepcopy
from contextlib import closing
from json import loads
from queue import Queue
from datetime import datetime
from binascii import hexlify
from src.retranslators import Wialon, EGTS, WialonIPS, GalileoSky, EGTSNoAuth
from src.logs.log_config import logger
from db_connect import *
RETRANSLATORS_ALL = (
'WialonIPS',
'Egts',
'Wialon',
'GalileoSky',
'EGTSNoAuth'
)
RETRANSLATORS = {
'egts' : EGTS(),
'wialon' : Wialon(),
'wialonips' : WialonIPS(),
'galileosky': GalileoSky(),
'egtsnoauth': EGTSNoAuth()
}
RETRANSLATOR_IDS = {ret.lower():n for ret, n in zip(RETRANSLATORS_ALL, range(1, len(RETRANSLATORS_ALL)+1))}
RETRANSLATOR_NAMES = {n:ret.lower() for ret, n in zip(RETRANSLATORS_ALL, range(1, len(RETRANSLATORS_ALL)+1))}
class Tracker(threading.Thread):
CONN_DELAY = 5
def __init__(self, imei, retranslator, ip, port):
super().__init__()
self.ip = ip
self.port = port
self.imei = imei
self.dbconn = pymysql.connect(**CONN)
self.retranslator = retranslator
self.retranslator_id = RETRANSLATOR_IDS[self.retranslator.protocol_name.lower()]
self.settings = self.get_settings()
self.queue = Queue()
self.socket = -1
def get_settings(self):
query = f"SELECT `settings` FROM `retranslate_settings` WHERE `protocol`={self.retranslator_id} AND `imei`={int(self.imei)}"
with self.dbconn.cursor() as cursor:
cursor.execute(query)
if cursor.rowcount!=0:
settings = loads(cursor.fetchone()['settings'])
else:
settings = {}
return settings
def connect(self):
sock = socket.socket()
sock.settimeout(0.1)
sock.setblocking(0)
try:
sock.connect((self.ip, int(self.port)))
return sock
except Exception as e:
            logger.debug(f'Failed to establish connection ({e})' + f"\n{self.imei} [{self.ip}:{self.port}] ")
return -1
def fill_queue(self):
with self.dbconn.cursor() as cursor:
query = f"SELECT MAX(`id`) FROM `sent_id` WHERE `ip`='{self.ip}' AND `port`={self.port} AND `imei`={self.imei}"
cursor.execute(query)
last_id = cursor.fetchone()['MAX(`id`)']
            if last_id is None:
query = f"SELECT MAX(`id`) FROM `{RECORDS_TBL}`"
cursor.execute(query)
last_id = cursor.fetchone()['MAX(`id`)']
query = f"INSERT INTO `sent_id` VALUES ({last_id}, '{self.ip}', {self.port}, {self.imei})"
cursor.execute(query)
self.dbconn.commit()
query = f"SELECT * FROM {RECORDS_TBL} WHERE `id`>{last_id} AND `imei`={self.imei}"
cursor.execute(query)
rows = cursor.fetchall()
notemp = 0
for row in rows:
if (row.get('lat', None) is not None) and (row['datetime'].timestamp()>0):
self.queue.put(row)
notemp += 1
        logger.debug(f'Found {notemp} records for {self.imei} [{self.ip}:{self.port}]\n')
def run(self):
logger.info(f"START {self.retranslator.protocol_name} {self.imei} [{self.ip}:{self.port}]")
while True:
while self.socket==-1:
self.socket = self.connect()
sleep(Tracker.CONN_DELAY)
while self.queue.qsize()==0:
self.fill_queue()
row = self.queue.get()
if row.get('reserve', None):
row['imei'] = str(row['imei'])
row['reserve'] = loads('{'+row['reserve']+'}')
row.update(row['reserve'])
del(row['reserve'])
if not row.get('sat_num', ''):
row.update({"sat_num":0})
row['lon'] = float(row['lon'])
row['lat'] = float(row['lat'])
sended, status = self.retranslator.send(self.send, row, self.settings, self.ip, int(self.port))
            if sended:
                msg = "Record SENT\n"
            else:
                msg = "Record NOT SENT\n"
            msg += "Server".ljust(26, '-')+f"{self.ip}:"+f"{self.port}\n"
            msg += "Retranslator".ljust(26, '-')+f"{self.retranslator.protocol_name}\n"
            msg += "Record ID".ljust(26, '-')+f"{row['id']}\n"
            msg += "imei".ljust(26, '-')+f"{row['imei']}\n"
            msg += "Point time".ljust(26, '-')+f"{datetime.fromtimestamp(row['datetime'])}\n"
            msg += "Send status".ljust(26, '-')+f"{status}\n"
            msg += f"Records for {self.imei}".ljust(30, '-')+f"{self.queue.qsize()}\n"
if not sended:
logger.error(msg)
else:
logger.info(msg)
condition = f" WHERE `ip`='{self.ip}' AND `port`={self.port} AND `imei`={row['imei']}"
query = f"SELECT * FROM `sent_id`" + condition
with self.dbconn.cursor() as cursor:
cursor.execute(query)
if cursor.rowcount==0:
query = f"INSERT INTO `sent_id` VALUES ({row['id']}, '{self.ip}', {self.port}, {row['imei']})"
else:
query = f"UPDATE `sent_id` SET `id`={row['id']}"+condition
cursor.execute(query)
self.dbconn.commit()
self.queue.task_done()
def send(self, bmsg):
try:
msglen = len(bmsg)
self.socket.send(bmsg)
server_answer = self.socket.recv(1024)
            logger.debug(f'Data packet sent successfully (size {msglen} bytes)\n{hexlify(bmsg)}\n'+f'Server response (size {len(server_answer)} bytes)\n{hexlify(server_answer)}'+ f"\n{self.imei} [{self.ip}:{self.port}] ")
return hexlify(server_answer)
except Exception as e:
self.socket.close()
self.socket = -1
logger.debug(f"Ошибка при отправке данных ({e})"+ f"\n{self.imei} [{self.ip}:{self.port}] ")
return -1
def get_trackers(connection):
with connection.cursor() as cursor:
query = f"SELECT * FROM `retranslate_servers`"
cursor.execute(query)
trackers = cursor.fetchall()
return trackers
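# Wiring sketch (assumptions: CONN and RECORDS_TBL come from db_connect, and each row of
# `retranslate_servers` carries 'imei', 'protocol', 'ip' and 'port' columns):
#   conn = pymysql.connect(**CONN)
#   for srv in get_trackers(conn):
#       proto = RETRANSLATOR_NAMES[srv['protocol']]
#       Tracker(srv['imei'], RETRANSLATORS[proto], srv['ip'], srv['port']).start()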
|
"""
Credits:
This file was adopted from: https://github.com/pydata/xarray # noqa
Source file: https://github.com/pydata/xarray/blob/1d7bcbdc75b6d556c04e2c7d7a042e4379e15303/xarray/backends/rasterio_.py # noqa
"""
import contextlib
import os
import re
import threading
import warnings
import numpy as np
import rasterio
from packaging import version
from rasterio.errors import NotGeoreferencedWarning
from rasterio.vrt import WarpedVRT
from xarray import Dataset, IndexVariable
from xarray.backends.common import BackendArray
from xarray.backends.file_manager import CachingFileManager, FileManager
from xarray.backends.locks import SerializableLock
from xarray.coding import times, variables
from xarray.core import indexing
from xarray.core.dataarray import DataArray
from xarray.core.dtypes import maybe_promote
from xarray.core.utils import is_scalar
from xarray.core.variable import as_variable
from rioxarray.exceptions import RioXarrayError
from rioxarray.rioxarray import _generate_spatial_coords
# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()
NO_LOCK = contextlib.nullcontext()
class FileHandleLocal(threading.local):
"""
This contains the thread local ThreadURIManager
"""
def __init__(self): # pylint: disable=super-init-not-called
self.thread_manager = None # Initialises in each thread
class ThreadURIManager:
"""
This handles opening & closing file handles in each thread.
"""
def __init__(
self,
opener,
*args,
mode="r",
kwargs=None,
):
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._file_handle = None
@property
def file_handle(self):
"""
File handle returned by the opener.
"""
if self._file_handle is not None:
return self._file_handle
self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)
return self._file_handle
def close(self):
"""
Close file handle.
"""
if self._file_handle is not None:
self._file_handle.close()
self._file_handle = None
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
class URIManager(FileManager):
"""
The URI manager is used for lockless reading
"""
def __init__(
self,
opener,
*args,
mode="r",
kwargs=None,
):
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._local = FileHandleLocal()
def acquire(self, needs_lock=True):
if self._local.thread_manager is None:
self._local.thread_manager = ThreadURIManager(
self._opener, *self._args, mode=self._mode, kwargs=self._kwargs
)
return self._local.thread_manager.file_handle
@contextlib.contextmanager
def acquire_context(self, needs_lock=True):
try:
yield self.acquire(needs_lock=needs_lock)
except Exception:
self.close(needs_lock=needs_lock)
raise
def close(self, needs_lock=True):
if self._local.thread_manager is not None:
self._local.thread_manager.close()
self._local.thread_manager = None
def __del__(self):
self.close(needs_lock=False)
def __getstate__(self):
"""State for pickling."""
return (self._opener, self._args, self._mode, self._kwargs)
def __setstate__(self, state):
"""Restore from a pickle."""
opener, args, mode, kwargs = state
self.__init__(opener, *args, mode=mode, kwargs=kwargs)
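# Note: unlike xarray's CachingFileManager, URIManager keeps one file handle per thread
# (via FileHandleLocal), so acquire() never blocks on a shared lock; this is what enables
# the lock-free read path selected with lock=False in open_rasterio below.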
class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
manager,
lock,
name,
vrt_params=None,
masked=False,
mask_and_scale=False,
unsigned=False,
):
self.manager = manager
self.lock = lock
self.masked = masked or mask_and_scale
self.mask_and_scale = mask_and_scale
# cannot save riods as an attribute: this would break pickleability
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
self.vrt_params = vrt_params
self._shape = (riods.count, riods.height, riods.width)
self._dtype = None
dtypes = riods.dtypes
if not np.all(np.asarray(dtypes) == dtypes[0]):
raise ValueError("All bands should have the same dtype")
dtype = _rasterio_to_numpy_dtype(dtypes)
# handle unsigned case
if mask_and_scale and unsigned and dtype.kind == "i":
self._dtype = np.dtype(f"u{dtype.itemsize}")
elif mask_and_scale and unsigned:
warnings.warn(
f"variable {name!r} has _Unsigned attribute but is not "
"of integer type. Ignoring attribute.",
variables.SerializationWarning,
stacklevel=3,
)
self._fill_value = riods.nodata
if self._dtype is None:
if self.masked:
self._dtype, self._fill_value = maybe_promote(dtype)
else:
self._dtype = dtype
@property
def dtype(self):
"""
Data type of the array
"""
return self._dtype
@property
def fill_value(self):
"""
Fill value of the array
"""
return self._fill_value
@property
def shape(self):
"""
Shape of the array
"""
return self._shape
def _get_indexer(self, key):
"""Get indexer for rasterio array.
        Parameters
        ----------
key: tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer
"""
if len(key) != 3:
raise RioXarrayError("rasterio datasets should always be 3D")
# bands cannot be windowed but they can be listed
band_key = key[0]
np_inds = []
# bands (axis=0) cannot be windowed but they can be listed
if isinstance(band_key, slice):
start, stop, step = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
# be sure we give out a list
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list): # if band_key is not a scalar
np_inds.append(slice(None))
# but other dims can only be windowed
window = []
squeeze_axis = []
for iii, (ikey, size) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(ikey, slice):
# step is always positive. see indexing.decompose_indexer
start, stop, step = ikey.indices(size)
np_inds.append(slice(None, None, step))
elif is_scalar(ikey):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(-(2 - iii))
start = ikey
stop = ikey + 1
else:
start, stop = np.min(ikey), np.max(ikey) + 1
np_inds.append(ikey - start)
window.append((start, stop))
if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
# do outer-style indexing
np_inds[-2:] = np.ix_(*np_inds[-2:])
return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)
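    # Illustrative example (not from the original source): for a 3-band raster and
    # key = (0, slice(2, 5), 4), the helper above yields band_key=1 (rasterio bands are
    # 1-based), window=((2, 5), (4, 5)), squeeze_axis=(-1,) and np_inds=(slice(None, None, 1),).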
def _getitem(self, key):
band_key, window, squeeze_axis, np_inds = self._get_indexer(key)
if not band_key or any(start == stop for (start, stop) in window):
# no need to do IO
shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)
out = np.zeros(shape, dtype=self.dtype)
else:
with self.lock:
riods = self.manager.acquire(needs_lock=False)
if self.vrt_params is not None:
riods = WarpedVRT(riods, **self.vrt_params)
out = riods.read(band_key, window=window, masked=self.masked)
if self.masked:
out = np.ma.filled(out.astype(self.dtype), self.fill_value)
if self.mask_and_scale:
for iii, band_iii in enumerate(np.atleast_1d(band_key) - 1):
out[iii] = (
out[iii] * riods.scales[band_iii] + riods.offsets[band_iii]
)
if squeeze_axis:
out = np.squeeze(out, axis=squeeze_axis)
return out[np_inds]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(value):
return np.fromstring(value.strip("{}"), dtype="float", sep=",")
def default(value):
return value.strip("{}")
parse = {"wavelength": parsevec, "fwhm": parsevec}
parsed_meta = {key: parse.get(key, default)(value) for key, value in meta.items()}
return parsed_meta
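# Example (illustrative): _parse_envi({"wavelength": "{450, 550}", "description": "{sample}"})
# returns {"wavelength": array([450., 550.]), "description": "sample"}.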
def _rasterio_to_numpy_dtype(dtypes):
"""Numpy dtype from first entry of rasterio dataset.dtypes"""
# rasterio has some special dtype names (complex_int16 -> np.complex64)
if dtypes[0] == "complex_int16":
dtype = np.dtype("complex64")
else:
dtype = np.dtype(dtypes[0])
return dtype
def _to_numeric(value):
"""
Convert the value to a number
"""
try:
value = int(value)
except (TypeError, ValueError):
try:
value = float(value)
except (TypeError, ValueError):
pass
return value
def _parse_tag(key, value):
# NC_GLOBAL is appended to tags with netcdf driver and is not really needed
key = key.split("NC_GLOBAL#")[-1]
if value.startswith("{") and value.endswith("}"):
try:
new_val = np.fromstring(value.strip("{}"), dtype="float", sep=",")
# pylint: disable=len-as-condition
value = new_val if len(new_val) else _to_numeric(value)
except ValueError:
value = _to_numeric(value)
else:
value = _to_numeric(value)
return key, value
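# Example (illustrative): _parse_tag("NC_GLOBAL#title", "my data") -> ("title", "my data"),
# while _parse_tag("scales", "{0.1,0.2}") -> ("scales", array([0.1, 0.2])).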
def _parse_tags(tags):
parsed_tags = {}
for key, value in tags.items():
key, value = _parse_tag(key, value)
parsed_tags[key] = value
return parsed_tags
NETCDF_DTYPE_MAP = {
0: object, # NC_NAT
1: np.byte, # NC_BYTE
2: np.char, # NC_CHAR
3: np.short, # NC_SHORT
4: np.int_, # NC_INT, NC_LONG
5: float, # NC_FLOAT
6: np.double, # NC_DOUBLE
7: np.ubyte, # NC_UBYTE
8: np.ushort, # NC_USHORT
9: np.uint, # NC_UINT
10: np.int64, # NC_INT64
11: np.uint64, # NC_UINT64
12: object, # NC_STRING
}
def _load_netcdf_attrs(tags, data_array):
"""
Loads the netCDF attributes into the data array
Attributes stored in this format:
- variable_name#attr_name: attr_value
"""
for key, value in tags.items():
key, value = _parse_tag(key, value)
key_split = key.split("#")
if len(key_split) != 2:
continue
variable_name, attr_name = key_split
if variable_name in data_array.coords:
data_array.coords[variable_name].attrs.update({attr_name: value})
def _load_netcdf_1d_coords(tags):
"""
Dimension information:
- NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)
- NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)
- NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)
"""
dim_names = tags.get("NETCDF_DIM_EXTRA")
if not dim_names:
return {}
dim_names = dim_names.strip("{}").split(",")
coords = {}
for dim_name in dim_names:
dim_def = tags.get(f"NETCDF_DIM_{dim_name}_DEF")
if not dim_def:
continue
# pylint: disable=unused-variable
dim_size, dim_dtype = dim_def.strip("{}").split(",")
dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)
dim_values = tags[f"NETCDF_DIM_{dim_name}_VALUES"].strip("{}")
coords[dim_name] = IndexVariable(
dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=",")
)
return coords
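# Example (illustrative): tags of the form shown in the docstring, e.g.
#   {"NETCDF_DIM_EXTRA": "{time}", "NETCDF_DIM_time_DEF": "{2,6}",
#    "NETCDF_DIM_time_VALUES": "{0,872712.659688}"}
# produce {"time": IndexVariable("time", array([0., 872712.659688]))} with a float64 dtype
# (NETCDF_DTYPE_MAP[6] == np.double).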
def build_subdataset_filter(group_names, variable_names):
"""
Example::
'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":
MODIS_Grid_2D:sur_refl_b01_1'
Parameters
----------
group_names: str or list or tuple
Name or names of netCDF groups to filter by.
variable_names: str or list or tuple
Name or names of netCDF variables to filter by.
Returns
-------
re.SRE_Pattern: output of re.compile()
"""
variable_query = r"\w+"
if variable_names is not None:
if not isinstance(variable_names, (tuple, list)):
variable_names = [variable_names]
variable_names = [re.escape(variable_name) for variable_name in variable_names]
variable_query = rf"(?:{"|".join(variable_names)})"
if group_names is not None:
if not isinstance(group_names, (tuple, list)):
group_names = [group_names]
group_names = [re.escape(group_name) for group_name in group_names]
group_query = rf"(?:{"|".join(group_names)})"
else:
return re.compile(r"".join([r".*(?:\:/|\:)(/+)?", variable_query, r"$"]))
return re.compile(
r"".join(
[r".*(?:\:/|\:)(/+)?", group_query, r"[:/](/+)?", variable_query, r"$"]
)
)
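# Example (illustrative): both
#   build_subdataset_filter("MODIS_Grid_2D", "sur_refl_b01_1")
#   build_subdataset_filter(None, "sur_refl_b01_1")
# match the HDF4 subdataset string shown in the docstring above, while
# build_subdataset_filter("other_group", "sur_refl_b01_1") does not.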
def _rio_transform(riods):
"""
Get the transform from a rasterio dataset
    regardless of rasterio version.
"""
try:
return riods.transform
except AttributeError:
return riods.affine # rasterio < 1.0
def _get_rasterio_attrs(riods):
"""
Get rasterio specific attributes
"""
# pylint: disable=too-many-branches
# Add rasterio attributes
attrs = _parse_tags(riods.tags(1))
if hasattr(riods, "nodata") and riods.nodata is not None:
# The nodata values for the raster bands
attrs["_FillValue"] = riods.nodata
if hasattr(riods, "scales"):
# The scale values for the raster bands
if len(set(riods.scales)) > 1:
attrs["scales"] = riods.scales
warnings.warn(
"Offsets differ across bands. The 'scale_factor' attribute will "
"not be added. See the 'scales' attribute."
)
else:
attrs["scale_factor"] = riods.scales[0]
if hasattr(riods, "offsets"):
# The offset values for the raster bands
if len(set(riods.offsets)) > 1:
attrs["offsets"] = riods.offsets
warnings.warn(
"Offsets differ across bands. The 'add_offset' attribute will "
"not be added. See the 'offsets' attribute."
)
else:
attrs["add_offset"] = riods.offsets[0]
if hasattr(riods, "descriptions") and any(riods.descriptions):
if len(set(riods.descriptions)) == 1:
attrs["long_name"] = riods.descriptions[0]
else:
# Descriptions for each dataset band
attrs["long_name"] = riods.descriptions
if hasattr(riods, "units") and any(riods.units):
# A list of units string for each dataset band
if len(riods.units) == 1:
attrs["units"] = riods.units[0]
else:
attrs["units"] = riods.units
return attrs
def _decode_datetime_cf(data_array, decode_times, decode_timedelta):
"""
Decide the datetime based on CF conventions
"""
if decode_timedelta is None:
decode_timedelta = decode_times
for coord in data_array.coords:
time_var = None
if decode_times and "since" in data_array[coord].attrs.get("units", ""):
time_var = times.CFDatetimeCoder(use_cftime=True).decode(
as_variable(data_array[coord]), name=coord
)
elif (
decode_timedelta
and data_array[coord].attrs.get("units") in times.TIME_UNITS
):
time_var = times.CFTimedeltaCoder().decode(
as_variable(data_array[coord]), name=coord
)
if time_var is not None:
dimensions, data, attributes, encoding = variables.unpack_for_decoding(
time_var
)
data_array = data_array.assign_coords(
{
coord: IndexVariable(
dims=dimensions,
data=data,
attrs=attributes,
encoding=encoding,
)
}
)
return data_array
def _parse_driver_tags(riods, attrs, coords):
# Parse extra metadata from tags, if supported
parsers = {"ENVI": _parse_envi}
driver = riods.driver
if driver in parsers:
meta = parsers[driver](riods.tags(ns=driver))
for key, value in meta.items():
# Add values as coordinates if they match the band count,
# as attributes otherwise
if isinstance(value, (list, np.ndarray)) and len(value) == riods.count:
coords[key] = ("band", np.asarray(value))
else:
attrs[key] = value
def _load_subdatasets(
riods,
group,
variable,
parse_coordinates,
chunks,
cache,
lock,
masked,
mask_and_scale,
decode_times,
decode_timedelta,
**open_kwargs,
):
"""
Load in rasterio subdatasets
"""
base_tags = _parse_tags(riods.tags())
dim_groups = {}
subdataset_filter = None
if any((group, variable)):
subdataset_filter = build_subdataset_filter(group, variable)
for subdataset in riods.subdatasets:
if subdataset_filter is not None and not subdataset_filter.match(subdataset):
continue
with rasterio.open(subdataset) as rds:
shape = rds.shape
rioda = open_rasterio(
subdataset,
parse_coordinates=shape not in dim_groups and parse_coordinates,
chunks=chunks,
cache=cache,
lock=lock,
masked=masked,
mask_and_scale=mask_and_scale,
default_name=subdataset.split(":")[-1].lstrip("/").replace("/", "_"),
decode_times=decode_times,
decode_timedelta=decode_timedelta,
**open_kwargs,
)
if shape not in dim_groups:
dim_groups[shape] = {rioda.name: rioda}
else:
dim_groups[shape][rioda.name] = rioda
if len(dim_groups) > 1:
dataset = [
Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()
]
elif not dim_groups:
dataset = Dataset(attrs=base_tags)
else:
dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)
return dataset
def _prepare_dask(result, riods, filename, chunks):
"""
Prepare the data for dask computations
"""
# pylint: disable=import-outside-toplevel
from dask.base import tokenize
# augment the token with the file modification time
try:
mtime = os.path.getmtime(filename)
except OSError:
# the filename is probably an s3 bucket rather than a regular file
mtime = None
if chunks in (True, "auto"):
import dask
from dask.array.core import normalize_chunks
if version.parse(dask.__version__) < version.parse("0.18.0"):
msg = (
"Automatic chunking requires dask.__version__ >= 0.18.0 . "
f"You currently have version {dask.__version__}"
)
raise NotImplementedError(msg)
block_shape = (1,) + riods.block_shapes[0]
chunks = normalize_chunks(
chunks=(1, "auto", "auto"),
shape=(riods.count, riods.height, riods.width),
dtype=riods.dtypes[0],
previous_chunks=tuple((c,) for c in block_shape),
)
token = tokenize(filename, mtime, chunks)
name_prefix = f"open_rasterio-{token}"
return result.chunk(chunks, name_prefix=name_prefix, token=token)
def _handle_encoding(result, mask_and_scale, masked, da_name):
"""
Make sure encoding handled properly
"""
if "grid_mapping" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "grid_mapping", name=da_name)
if mask_and_scale:
if "scale_factor" in result.attrs:
variables.pop_to(
result.attrs, result.encoding, "scale_factor", name=da_name
)
if "add_offset" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "add_offset", name=da_name)
if masked:
if "_FillValue" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "_FillValue", name=da_name)
if "missing_value" in result.attrs:
variables.pop_to(
result.attrs, result.encoding, "missing_value", name=da_name
)
def open_rasterio(
filename,
parse_coordinates=None,
chunks=None,
cache=None,
lock=None,
masked=False,
mask_and_scale=False,
variable=None,
group=None,
default_name=None,
decode_times=True,
decode_timedelta=None,
**open_kwargs,
):
# pylint: disable=too-many-statements,too-many-locals,too-many-branches
"""Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
Parameters
----------
filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates: bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks: int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new
DataArray into a dask array. Chunks can also be set to
``True`` or ``"auto"`` to choose sensible chunk sizes according to
``dask.config.get("array.chunk-size")``.
cache: bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock: bool or dask.utils.SerializableLock, optional
If chunks is provided, this argument is used to ensure that only one
thread per process is reading from a rasterio file object at a time.
By default and when a lock instance is provided,
a :class:`xarray.backends.CachingFileManager` is used to cache File objects.
Since rasterio also caches some data, this will make repeated reads from the
same object fast.
When ``lock=False``, no lock is used, allowing for completely parallel reads
from multiple threads or processes. However, a new file handle is opened on
each request.
masked: bool, optional
If True, read the mask and set values to NaN. Defaults to False.
mask_and_scale: bool, optional
Lazily scale (using the `scales` and `offsets` from rasterio) and mask.
If the _Unsigned attribute is present treat integer arrays as unsigned.
variable: str or list or tuple, optional
Variable name or names to use to filter loading.
group: str or list or tuple, optional
Group name or names to use to filter loading.
default_name: str, optional
The name of the data array if none exists. Default is None.
decode_times: bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
decode_timedelta: bool, optional
If True, decode variables and coordinates with time units in
{“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
**open_kwargs: kwargs, optional
Optional keyword arguments to pass into rasterio.open().
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:
The newly created dataset(s).
"""
parse_coordinates = True if parse_coordinates is None else parse_coordinates
masked = masked or mask_and_scale
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(
src_crs=vrt.src_crs.to_string() if vrt.src_crs else None,
crs=vrt.crs.to_string() if vrt.crs else None,
resampling=vrt.resampling,
tolerance=vrt.tolerance,
src_nodata=vrt.src_nodata,
nodata=vrt.nodata,
width=vrt.width,
height=vrt.height,
src_transform=vrt.src_transform,
transform=vrt.transform,
dtype=vrt.working_dtype,
warp_extras=vrt.warp_extras,
)
if lock in (True, None):
lock = RASTERIO_LOCK
elif lock is False:
lock = NO_LOCK
# ensure default for sharing is False
# ref https://github.com/mapbox/rasterio/issues/1504
open_kwargs["sharing"] = open_kwargs.get("sharing", False)
with warnings.catch_warnings(record=True) as rio_warnings:
if lock is not NO_LOCK:
manager = CachingFileManager(
rasterio.open, filename, lock=lock, mode="r", kwargs=open_kwargs
)
else:
manager = URIManager(rasterio.open, filename, mode="r", kwargs=open_kwargs)
riods = manager.acquire()
captured_warnings = rio_warnings.copy()
# raise the NotGeoreferencedWarning if applicable
for rio_warning in captured_warnings:
if not riods.subdatasets or not isinstance(
rio_warning.message, NotGeoreferencedWarning
):
warnings.warn(str(rio_warning.message), type(rio_warning.message))
# open the subdatasets if they exist
if riods.subdatasets:
return _load_subdatasets(
riods=riods,
group=group,
variable=variable,
parse_coordinates=parse_coordinates,
chunks=chunks,
cache=cache,
lock=lock,
masked=masked,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
**open_kwargs,
)
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
if cache is None:
cache = chunks is None
# Get bands
if riods.count < 1:
raise ValueError("Unknown dims")
# parse tags & load alternate coords
attrs = _get_rasterio_attrs(riods=riods)
coords = _load_netcdf_1d_coords(riods.tags())
_parse_driver_tags(riods=riods, attrs=attrs, coords=coords)
for coord in coords:
if f"NETCDF_DIM_{coord}" in attrs:
coord_name = coord
attrs.pop(f"NETCDF_DIM_{coord}")
break
else:
coord_name = "band"
coords[coord_name] = np.asarray(riods.indexes)
# Get geospatial coordinates
if parse_coordinates:
coords.update(
_generate_spatial_coords(_rio_transform(riods), riods.width, riods.height)
)
unsigned = False
encoding = {}
if mask_and_scale and "_Unsigned" in attrs:
unsigned = variables.pop_to(attrs, encoding, "_Unsigned") == "true"
if masked:
encoding["dtype"] = str(_rasterio_to_numpy_dtype(riods.dtypes))
da_name = attrs.pop("NETCDF_VARNAME", default_name)
data = indexing.LazilyOuterIndexedArray(
RasterioArrayWrapper(
manager,
lock,
name=da_name,
vrt_params=vrt_params,
masked=masked,
mask_and_scale=mask_and_scale,
unsigned=unsigned,
)
)
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
if cache and chunks is None:
data = indexing.MemoryCachedArray(data)
result = DataArray(
data=data, dims=(coord_name, "y", "x"), coords=coords, attrs=attrs, name=da_name
)
result.encoding = encoding
    # update attributes from NetCDF attributes
_load_netcdf_attrs(riods.tags(), result)
result = _decode_datetime_cf(
result, decode_times=decode_times, decode_timedelta=decode_timedelta
)
# make sure the _FillValue is correct dtype
if "_FillValue" in attrs:
attrs["_FillValue"] = result.dtype.type(attrs["_FillValue"])
# handle encoding
_handle_encoding(result, mask_and_scale, masked, da_name)
# Affine transformation matrix (always available)
# This describes coefficients mapping pixel coordinates to CRS
# For serialization store as tuple of 6 floats, the last row being
# always (0, 0, 1) per definition (see
# https://github.com/sgillies/affine)
result.rio.write_transform(_rio_transform(riods), inplace=True)
if hasattr(riods, "crs") and riods.crs:
result.rio.write_crs(riods.crs, inplace=True)
if chunks is not None:
result = _prepare_dask(result, riods, filename, chunks)
# Make the file closeable
result.set_close(manager.close)
result.rio._manager = manager
# add file path to encoding
result.encoding["source"] = riods.name
result.encoding["rasterio_dtype"] = str(riods.dtypes[0])
return result
|
"""
Credits:
This file was adopted from: https://github.com/pydata/xarray # noqa
Source file: https://github.com/pydata/xarray/blob/1d7bcbdc75b6d556c04e2c7d7a042e4379e15303/xarray/backends/rasterio_.py # noqa
"""
import contextlib
import os
import re
import threading
import warnings
import numpy as np
import rasterio
from packaging import version
from rasterio.errors import NotGeoreferencedWarning
from rasterio.vrt import WarpedVRT
from xarray import Dataset, IndexVariable
from xarray.backends.common import BackendArray
from xarray.backends.file_manager import CachingFileManager, FileManager
from xarray.backends.locks import SerializableLock
from xarray.coding import times, variables
from xarray.core import indexing
from xarray.core.dataarray import DataArray
from xarray.core.dtypes import maybe_promote
from xarray.core.utils import is_scalar
from xarray.core.variable import as_variable
from rioxarray.exceptions import RioXarrayError
from rioxarray.rioxarray import _generate_spatial_coords
# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()
NO_LOCK = contextlib.nullcontext()
class FileHandleLocal(threading.local):
"""
This contains the thread local ThreadURIManager
"""
def __init__(self): # pylint: disable=super-init-not-called
self.thread_manager = None # Initialises in each thread
class ThreadURIManager:
"""
This handles opening & closing file handles in each thread.
"""
def __init__(
self,
opener,
*args,
mode="r",
kwargs=None,
):
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._file_handle = None
@property
def file_handle(self):
"""
File handle returned by the opener.
"""
if self._file_handle is not None:
return self._file_handle
self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)
return self._file_handle
def close(self):
"""
Close file handle.
"""
if self._file_handle is not None:
self._file_handle.close()
self._file_handle = None
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
class URIManager(FileManager):
"""
The URI manager is used for lockless reading
"""
def __init__(
self,
opener,
*args,
mode="r",
kwargs=None,
):
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._local = FileHandleLocal()
def acquire(self, needs_lock=True):
if self._local.thread_manager is None:
self._local.thread_manager = ThreadURIManager(
self._opener, *self._args, mode=self._mode, kwargs=self._kwargs
)
return self._local.thread_manager.file_handle
@contextlib.contextmanager
def acquire_context(self, needs_lock=True):
try:
yield self.acquire(needs_lock=needs_lock)
except Exception:
self.close(needs_lock=needs_lock)
raise
def close(self, needs_lock=True):
if self._local.thread_manager is not None:
self._local.thread_manager.close()
self._local.thread_manager = None
def __del__(self):
self.close(needs_lock=False)
def __getstate__(self):
"""State for pickling."""
return (self._opener, self._args, self._mode, self._kwargs)
def __setstate__(self, state):
"""Restore from a pickle."""
opener, args, mode, kwargs = state
self.__init__(opener, *args, mode=mode, kwargs=kwargs)
class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
manager,
lock,
name,
vrt_params=None,
masked=False,
mask_and_scale=False,
unsigned=False,
):
self.manager = manager
self.lock = lock
self.masked = masked or mask_and_scale
self.mask_and_scale = mask_and_scale
# cannot save riods as an attribute: this would break pickleability
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
self.vrt_params = vrt_params
self._shape = (riods.count, riods.height, riods.width)
self._dtype = None
dtypes = riods.dtypes
if not np.all(np.asarray(dtypes) == dtypes[0]):
raise ValueError("All bands should have the same dtype")
dtype = _rasterio_to_numpy_dtype(dtypes)
# handle unsigned case
if mask_and_scale and unsigned and dtype.kind == "i":
self._dtype = np.dtype(f"u{dtype.itemsize}")
elif mask_and_scale and unsigned:
warnings.warn(
f"variable {name!r} has _Unsigned attribute but is not "
"of integer type. Ignoring attribute.",
variables.SerializationWarning,
stacklevel=3,
)
self._fill_value = riods.nodata
if self._dtype is None:
if self.masked:
self._dtype, self._fill_value = maybe_promote(dtype)
else:
self._dtype = dtype
@property
def dtype(self):
"""
Data type of the array
"""
return self._dtype
@property
def fill_value(self):
"""
Fill value of the array
"""
return self._fill_value
@property
def shape(self):
"""
Shape of the array
"""
return self._shape
def _get_indexer(self, key):
"""Get indexer for rasterio array.
Parameter
---------
key: tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer
"""
if len(key) != 3:
raise RioXarrayError("rasterio datasets should always be 3D")
# bands cannot be windowed but they can be listed
band_key = key[0]
np_inds = []
# bands (axis=0) cannot be windowed but they can be listed
if isinstance(band_key, slice):
start, stop, step = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
# be sure we give out a list
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list): # if band_key is not a scalar
np_inds.append(slice(None))
# but other dims can only be windowed
window = []
squeeze_axis = []
for iii, (ikey, size) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(ikey, slice):
# step is always positive. see indexing.decompose_indexer
start, stop, step = ikey.indices(size)
np_inds.append(slice(None, None, step))
elif is_scalar(ikey):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(-(2 - iii))
start = ikey
stop = ikey + 1
else:
start, stop = np.min(ikey), np.max(ikey) + 1
np_inds.append(ikey - start)
window.append((start, stop))
if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
# do outer-style indexing
np_inds[-2:] = np.ix_(*np_inds[-2:])
return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)
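    # Illustration (not part of the original module): for a 3-band, 100 x 200
    # dataset, a full-slice key decomposes as
    #   _get_indexer((slice(None), slice(None), slice(None)))
    #   -> band_key     = [1, 2, 3]             (rasterio bands are 1-based)
    #      window       = ((0, 100), (0, 200))  ((row, col) read ranges)
    #      squeeze_axis = ()                    (no scalar axes to drop)
    #      np_ind       = (slice(None), slice(None, None, 1), slice(None, None, 1))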
def _getitem(self, key):
band_key, window, squeeze_axis, np_inds = self._get_indexer(key)
if not band_key or any(start == stop for (start, stop) in window):
# no need to do IO
shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)
out = np.zeros(shape, dtype=self.dtype)
else:
with self.lock:
riods = self.manager.acquire(needs_lock=False)
if self.vrt_params is not None:
riods = WarpedVRT(riods, **self.vrt_params)
out = riods.read(band_key, window=window, masked=self.masked)
if self.masked:
out = np.ma.filled(out.astype(self.dtype), self.fill_value)
if self.mask_and_scale:
for iii, band_iii in enumerate(np.atleast_1d(band_key) - 1):
out[iii] = (
out[iii] * riods.scales[band_iii] + riods.offsets[band_iii]
)
if squeeze_axis:
out = np.squeeze(out, axis=squeeze_axis)
return out[np_inds]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(value):
return np.fromstring(value.strip("{}"), dtype="float", sep=",")
def default(value):
return value.strip("{}")
parse = {"wavelength": parsevec, "fwhm": parsevec}
parsed_meta = {key: parse.get(key, default)(value) for key, value in meta.items()}
return parsed_meta
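# Illustration (hypothetical tag values, not part of the original module):
# vector-valued ENVI keys become float arrays, everything else is passed
# through with the surrounding braces stripped.
#   _parse_envi({"wavelength": "{450, 550, 650}", "interleave": "bil"})
#   -> {"wavelength": array([450., 550., 650.]), "interleave": "bil"}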
def _rasterio_to_numpy_dtype(dtypes):
"""Numpy dtype from first entry of rasterio dataset.dtypes"""
# rasterio has some special dtype names (complex_int16 -> np.complex64)
if dtypes[0] == "complex_int16":
dtype = np.dtype("complex64")
else:
dtype = np.dtype(dtypes[0])
return dtype
def _to_numeric(value):
"""
Convert the value to a number
"""
try:
value = int(value)
except (TypeError, ValueError):
try:
value = float(value)
except (TypeError, ValueError):
pass
return value
def _parse_tag(key, value):
# NC_GLOBAL is appended to tags with netcdf driver and is not really needed
key = key.split("NC_GLOBAL#")[-1]
if value.startswith("{") and value.endswith("}"):
try:
new_val = np.fromstring(value.strip("{}"), dtype="float", sep=",")
# pylint: disable=len-as-condition
value = new_val if len(new_val) else _to_numeric(value)
except ValueError:
value = _to_numeric(value)
else:
value = _to_numeric(value)
return key, value
def _parse_tags(tags):
parsed_tags = {}
for key, value in tags.items():
key, value = _parse_tag(key, value)
parsed_tags[key] = value
return parsed_tags
NETCDF_DTYPE_MAP = {
0: object, # NC_NAT
1: np.byte, # NC_BYTE
2: np.char, # NC_CHAR
3: np.short, # NC_SHORT
4: np.int_, # NC_INT, NC_LONG
5: float, # NC_FLOAT
6: np.double, # NC_DOUBLE
7: np.ubyte, # NC_UBYTE
8: np.ushort, # NC_USHORT
9: np.uint, # NC_UINT
10: np.int64, # NC_INT64
11: np.uint64, # NC_UINT64
12: object, # NC_STRING
}
def _load_netcdf_attrs(tags, data_array):
"""
Loads the netCDF attributes into the data array
Attributes stored in this format:
- variable_name#attr_name: attr_value
"""
for key, value in tags.items():
key, value = _parse_tag(key, value)
key_split = key.split("#")
if len(key_split) != 2:
continue
variable_name, attr_name = key_split
if variable_name in data_array.coords:
data_array.coords[variable_name].attrs.update({attr_name: value})
def _load_netcdf_1d_coords(tags):
"""
Dimension information:
- NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)
- NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)
- NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)
"""
dim_names = tags.get("NETCDF_DIM_EXTRA")
if not dim_names:
return {}
dim_names = dim_names.strip("{}").split(",")
coords = {}
for dim_name in dim_names:
dim_def = tags.get(f"NETCDF_DIM_{dim_name}_DEF")
if not dim_def:
continue
# pylint: disable=unused-variable
dim_size, dim_dtype = dim_def.strip("{}").split(",")
dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)
dim_values = tags[f"NETCDF_DIM_{dim_name}_VALUES"].strip("{}")
coords[dim_name] = IndexVariable(
dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=",")
)
return coords
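# Illustration using the tag layout documented above (values hypothetical):
#   _load_netcdf_1d_coords({
#       "NETCDF_DIM_EXTRA": "{time}",
#       "NETCDF_DIM_time_DEF": "{2,6}",
#       "NETCDF_DIM_time_VALUES": "{0,872712.659688}",
#   })
#   -> {"time": IndexVariable("time", array([0., 872712.659688]))}   # NC_DOUBLE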
def build_subdataset_filter(group_names, variable_names):
"""
Example::
'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":
MODIS_Grid_2D:sur_refl_b01_1'
Parameters
----------
group_names: str or list or tuple
Name or names of netCDF groups to filter by.
variable_names: str or list or tuple
Name or names of netCDF variables to filter by.
Returns
-------
re.SRE_Pattern: output of re.compile()
"""
variable_query = r"\w+"
if variable_names is not None:
if not isinstance(variable_names, (tuple, list)):
variable_names = [variable_names]
variable_names = [re.escape(variable_name) for variable_name in variable_names]
variable_query = rf"(?:{'|'.join(variable_names)})"
if group_names is not None:
if not isinstance(group_names, (tuple, list)):
group_names = [group_names]
group_names = [re.escape(group_name) for group_name in group_names]
group_query = rf"(?:{'|'.join(group_names)})"
else:
return re.compile(r"".join([r".*(?:\:/|\:)(/+)?", variable_query, r"$"]))
return re.compile(
r"".join(
[r".*(?:\:/|\:)(/+)?", group_query, r"[:/](/+)?", variable_query, r"$"]
)
)
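# Illustration (hypothetical names, not part of the original module): the
# compiled pattern matches on the tail of a GDAL subdataset string.
#   pattern = build_subdataset_filter("MODIS_Grid_2D", "sur_refl_b01_1")
#   pattern.match(
#       'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.hdf":MODIS_Grid_2D:sur_refl_b01_1'
#   )  # -> match
#   pattern.match(
#       'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.hdf":MODIS_Grid_2D:other_var'
#   )  # -> None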
def _rio_transform(riods):
"""
Get the transform from a rasterio dataset
    regardless of rasterio version.
"""
try:
return riods.transform
except AttributeError:
return riods.affine # rasterio < 1.0
def _get_rasterio_attrs(riods):
"""
Get rasterio specific attributes
"""
# pylint: disable=too-many-branches
# Add rasterio attributes
attrs = _parse_tags(riods.tags(1))
if hasattr(riods, "nodata") and riods.nodata is not None:
# The nodata values for the raster bands
attrs["_FillValue"] = riods.nodata
if hasattr(riods, "scales"):
# The scale values for the raster bands
if len(set(riods.scales)) > 1:
attrs["scales"] = riods.scales
warnings.warn(
"Offsets differ across bands. The 'scale_factor' attribute will "
"not be added. See the 'scales' attribute."
)
else:
attrs["scale_factor"] = riods.scales[0]
if hasattr(riods, "offsets"):
# The offset values for the raster bands
if len(set(riods.offsets)) > 1:
attrs["offsets"] = riods.offsets
warnings.warn(
"Offsets differ across bands. The 'add_offset' attribute will "
"not be added. See the 'offsets' attribute."
)
else:
attrs["add_offset"] = riods.offsets[0]
if hasattr(riods, "descriptions") and any(riods.descriptions):
if len(set(riods.descriptions)) == 1:
attrs["long_name"] = riods.descriptions[0]
else:
# Descriptions for each dataset band
attrs["long_name"] = riods.descriptions
if hasattr(riods, "units") and any(riods.units):
# A list of units string for each dataset band
if len(riods.units) == 1:
attrs["units"] = riods.units[0]
else:
attrs["units"] = riods.units
return attrs
def _decode_datetime_cf(data_array, decode_times, decode_timedelta):
"""
    Decode the datetime based on CF conventions
"""
if decode_timedelta is None:
decode_timedelta = decode_times
for coord in data_array.coords:
time_var = None
if decode_times and "since" in data_array[coord].attrs.get("units", ""):
time_var = times.CFDatetimeCoder(use_cftime=True).decode(
as_variable(data_array[coord]), name=coord
)
elif (
decode_timedelta
and data_array[coord].attrs.get("units") in times.TIME_UNITS
):
time_var = times.CFTimedeltaCoder().decode(
as_variable(data_array[coord]), name=coord
)
if time_var is not None:
dimensions, data, attributes, encoding = variables.unpack_for_decoding(
time_var
)
data_array = data_array.assign_coords(
{
coord: IndexVariable(
dims=dimensions,
data=data,
attrs=attributes,
encoding=encoding,
)
}
)
return data_array
def _parse_driver_tags(riods, attrs, coords):
# Parse extra metadata from tags, if supported
parsers = {"ENVI": _parse_envi}
driver = riods.driver
if driver in parsers:
meta = parsers[driver](riods.tags(ns=driver))
for key, value in meta.items():
# Add values as coordinates if they match the band count,
# as attributes otherwise
if isinstance(value, (list, np.ndarray)) and len(value) == riods.count:
coords[key] = ("band", np.asarray(value))
else:
attrs[key] = value
def _load_subdatasets(
riods,
group,
variable,
parse_coordinates,
chunks,
cache,
lock,
masked,
mask_and_scale,
decode_times,
decode_timedelta,
**open_kwargs,
):
"""
Load in rasterio subdatasets
"""
base_tags = _parse_tags(riods.tags())
dim_groups = {}
subdataset_filter = None
if any((group, variable)):
subdataset_filter = build_subdataset_filter(group, variable)
for subdataset in riods.subdatasets:
if subdataset_filter is not None and not subdataset_filter.match(subdataset):
continue
with rasterio.open(subdataset) as rds:
shape = rds.shape
rioda = open_rasterio(
subdataset,
parse_coordinates=shape not in dim_groups and parse_coordinates,
chunks=chunks,
cache=cache,
lock=lock,
masked=masked,
mask_and_scale=mask_and_scale,
default_name=subdataset.split(":")[-1].lstrip("/").replace("/", "_"),
decode_times=decode_times,
decode_timedelta=decode_timedelta,
**open_kwargs,
)
if shape not in dim_groups:
dim_groups[shape] = {rioda.name: rioda}
else:
dim_groups[shape][rioda.name] = rioda
if len(dim_groups) > 1:
dataset = [
Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()
]
elif not dim_groups:
dataset = Dataset(attrs=base_tags)
else:
dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)
return dataset
def _prepare_dask(result, riods, filename, chunks):
"""
Prepare the data for dask computations
"""
# pylint: disable=import-outside-toplevel
from dask.base import tokenize
# augment the token with the file modification time
try:
mtime = os.path.getmtime(filename)
except OSError:
# the filename is probably an s3 bucket rather than a regular file
mtime = None
if chunks in (True, "auto"):
import dask
from dask.array.core import normalize_chunks
if version.parse(dask.__version__) < version.parse("0.18.0"):
msg = (
"Automatic chunking requires dask.__version__ >= 0.18.0 . "
f"You currently have version {dask.__version__}"
)
raise NotImplementedError(msg)
block_shape = (1,) + riods.block_shapes[0]
chunks = normalize_chunks(
chunks=(1, "auto", "auto"),
shape=(riods.count, riods.height, riods.width),
dtype=riods.dtypes[0],
previous_chunks=tuple((c,) for c in block_shape),
)
token = tokenize(filename, mtime, chunks)
name_prefix = f"open_rasterio-{token}"
return result.chunk(chunks, name_prefix=name_prefix, token=token)
def _handle_encoding(result, mask_and_scale, masked, da_name):
"""
    Make sure encoding is handled properly
"""
if "grid_mapping" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "grid_mapping", name=da_name)
if mask_and_scale:
if "scale_factor" in result.attrs:
variables.pop_to(
result.attrs, result.encoding, "scale_factor", name=da_name
)
if "add_offset" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "add_offset", name=da_name)
if masked:
if "_FillValue" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "_FillValue", name=da_name)
if "missing_value" in result.attrs:
variables.pop_to(
result.attrs, result.encoding, "missing_value", name=da_name
)
def open_rasterio(
filename,
parse_coordinates=None,
chunks=None,
cache=None,
lock=None,
masked=False,
mask_and_scale=False,
variable=None,
group=None,
default_name=None,
decode_times=True,
decode_timedelta=None,
**open_kwargs,
):
# pylint: disable=too-many-statements,too-many-locals,too-many-branches
"""Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
Parameters
----------
filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates: bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks: int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new
DataArray into a dask array. Chunks can also be set to
``True`` or ``"auto"`` to choose sensible chunk sizes according to
``dask.config.get("array.chunk-size")``.
cache: bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock: bool or dask.utils.SerializableLock, optional
If chunks is provided, this argument is used to ensure that only one
thread per process is reading from a rasterio file object at a time.
By default and when a lock instance is provided,
a :class:`xarray.backends.CachingFileManager` is used to cache File objects.
Since rasterio also caches some data, this will make repeated reads from the
same object fast.
When ``lock=False``, no lock is used, allowing for completely parallel reads
from multiple threads or processes. However, a new file handle is opened on
each request.
masked: bool, optional
If True, read the mask and set values to NaN. Defaults to False.
mask_and_scale: bool, optional
Lazily scale (using the `scales` and `offsets` from rasterio) and mask.
If the _Unsigned attribute is present treat integer arrays as unsigned.
variable: str or list or tuple, optional
Variable name or names to use to filter loading.
group: str or list or tuple, optional
Group name or names to use to filter loading.
default_name: str, optional
The name of the data array if none exists. Default is None.
decode_times: bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
decode_timedelta: bool, optional
If True, decode variables and coordinates with time units in
{“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}
into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value as ``decode_times``.
**open_kwargs: kwargs, optional
Optional keyword arguments to pass into rasterio.open().
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:
The newly created dataset(s).
"""
parse_coordinates = True if parse_coordinates is None else parse_coordinates
masked = masked or mask_and_scale
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(
src_crs=vrt.src_crs.to_string() if vrt.src_crs else None,
crs=vrt.crs.to_string() if vrt.crs else None,
resampling=vrt.resampling,
tolerance=vrt.tolerance,
src_nodata=vrt.src_nodata,
nodata=vrt.nodata,
width=vrt.width,
height=vrt.height,
src_transform=vrt.src_transform,
transform=vrt.transform,
dtype=vrt.working_dtype,
warp_extras=vrt.warp_extras,
)
if lock in (True, None):
lock = RASTERIO_LOCK
elif lock is False:
lock = NO_LOCK
# ensure default for sharing is False
# ref https://github.com/mapbox/rasterio/issues/1504
open_kwargs["sharing"] = open_kwargs.get("sharing", False)
with warnings.catch_warnings(record=True) as rio_warnings:
if lock is not NO_LOCK:
manager = CachingFileManager(
rasterio.open, filename, lock=lock, mode="r", kwargs=open_kwargs
)
else:
manager = URIManager(rasterio.open, filename, mode="r", kwargs=open_kwargs)
riods = manager.acquire()
captured_warnings = rio_warnings.copy()
# raise the NotGeoreferencedWarning if applicable
for rio_warning in captured_warnings:
if not riods.subdatasets or not isinstance(
rio_warning.message, NotGeoreferencedWarning
):
warnings.warn(str(rio_warning.message), type(rio_warning.message))
# open the subdatasets if they exist
if riods.subdatasets:
return _load_subdatasets(
riods=riods,
group=group,
variable=variable,
parse_coordinates=parse_coordinates,
chunks=chunks,
cache=cache,
lock=lock,
masked=masked,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
**open_kwargs,
)
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
if cache is None:
cache = chunks is None
# Get bands
if riods.count < 1:
raise ValueError("Unknown dims")
# parse tags & load alternate coords
attrs = _get_rasterio_attrs(riods=riods)
coords = _load_netcdf_1d_coords(riods.tags())
_parse_driver_tags(riods=riods, attrs=attrs, coords=coords)
for coord in coords:
if f"NETCDF_DIM_{coord}" in attrs:
coord_name = coord
attrs.pop(f"NETCDF_DIM_{coord}")
break
else:
coord_name = "band"
coords[coord_name] = np.asarray(riods.indexes)
# Get geospatial coordinates
if parse_coordinates:
coords.update(
_generate_spatial_coords(_rio_transform(riods), riods.width, riods.height)
)
unsigned = False
encoding = {}
if mask_and_scale and "_Unsigned" in attrs:
unsigned = variables.pop_to(attrs, encoding, "_Unsigned") == "true"
if masked:
encoding["dtype"] = str(_rasterio_to_numpy_dtype(riods.dtypes))
da_name = attrs.pop("NETCDF_VARNAME", default_name)
data = indexing.LazilyOuterIndexedArray(
RasterioArrayWrapper(
manager,
lock,
name=da_name,
vrt_params=vrt_params,
masked=masked,
mask_and_scale=mask_and_scale,
unsigned=unsigned,
)
)
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
if cache and chunks is None:
data = indexing.MemoryCachedArray(data)
result = DataArray(
data=data, dims=(coord_name, "y", "x"), coords=coords, attrs=attrs, name=da_name
)
result.encoding = encoding
    # update attributes from NetCDF attributes
_load_netcdf_attrs(riods.tags(), result)
result = _decode_datetime_cf(
result, decode_times=decode_times, decode_timedelta=decode_timedelta
)
# make sure the _FillValue is correct dtype
if "_FillValue" in attrs:
attrs["_FillValue"] = result.dtype.type(attrs["_FillValue"])
# handle encoding
_handle_encoding(result, mask_and_scale, masked, da_name)
# Affine transformation matrix (always available)
# This describes coefficients mapping pixel coordinates to CRS
# For serialization store as tuple of 6 floats, the last row being
# always (0, 0, 1) per definition (see
# https://github.com/sgillies/affine)
result.rio.write_transform(_rio_transform(riods), inplace=True)
if hasattr(riods, "crs") and riods.crs:
result.rio.write_crs(riods.crs, inplace=True)
if chunks is not None:
result = _prepare_dask(result, riods, filename, chunks)
# Make the file closeable
result.set_close(manager.close)
result.rio._manager = manager
# add file path to encoding
result.encoding["source"] = riods.name
result.encoding["rasterio_dtype"] = str(riods.dtypes[0])
return result
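# Minimal usage sketch (hypothetical file path, not part of the original
# module). open_rasterio returns a DataArray for a plain raster, a Dataset
# (or a list of Datasets) when subdatasets are present, and a dask-backed
# result when ``chunks`` is given.
if __name__ == "__main__":
    example = open_rasterio(
        "example.tif",  # hypothetical path
        masked=True,  # nodata values become NaN
        chunks=(1, 512, 512),  # lazy, dask-backed reads
    )
    print(example.dims)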
|
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import datetime
from dateutil import tz
from lib.account import *
from lib.common import *
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
FUNC_PATH = "lambda/function"
LAYER_PATH = "lambda/layer"
def lambda_handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Received message: " + json.dumps(message, sort_keys=True))
try:
target_account = AWSAccount(message['account_id'])
for r in target_account.get_regions():
discover_lambdas(target_account, r)
discover_lambda_layer(target_account, r)
except AntiopeAssumeRoleError as e:
logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
return()
except ClientError as e:
logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
raise
except Exception as e:
logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
raise
def discover_lambdas(target_account, region):
    '''Discover Lambda functions in the given region'''
lambdas = []
client = target_account.get_client('lambda', region=region)
response = client.list_functions()
while 'NextMarker' in response: # Gotta Catch 'em all!
lambdas += response['Functions']
response = client.list_functions(Marker=response['NextMarker'])
lambdas += response['Functions']
for l in lambdas:
process_lambda(client, l, target_account, region)
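# Alternative sketch (not part of the original script): the manual NextMarker
# loop above can also be written with boto3's built-in paginator, which walks
# every page for you:
#
#   paginator = client.get_paginator('list_functions')
#   for page in paginator.paginate():
#       for function in page['Functions']:
#           process_lambda(client, function, target_account, region)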
def process_lambda(client, mylambda, target_account, region):
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = "AWS::Lambda::Function"
resource_item['source'] = "Antiope"
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['awsRegion'] = region
resource_item['configuration'] = mylambda
if 'tags' in mylambda:
resource_item['tags'] = parse_tags(mylambda['tags'])
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = "{}-{}-{}".format(target_account.account_id, region, mylambda['FunctionName'].replace("/", "-"))
resource_item['resourceName'] = mylambda['FunctionName']
resource_item['ARN'] = mylambda['FunctionArn']
resource_item['errors'] = {}
try:
response = client.get_policy(FunctionName=mylambda['FunctionArn'])
if 'Policy' in response:
resource_item['supplementaryConfiguration']['Policy'] = json.loads(response['Policy'])
except ClientError as e:
message = f"Error getting the Policy for function {mylambda["FunctionName"]} in {region} for {target_account.account_name}: {e}"
resource_item['errors']['Policy'] = message
logger.warning(message)
save_resource_to_s3(FUNC_PATH, resource_item['resourceId'], resource_item)
def discover_lambda_layer(target_account, region):
    '''Discover Lambda Layers in the given region'''
try:
layers = []
client = target_account.get_client('lambda', region=region)
response = client.list_layers()
while 'NextMarker' in response: # Gotta Catch 'em all!
layers += response['Layers']
response = client.list_layers(Marker=response['NextMarker'])
layers += response['Layers']
for l in layers:
process_layer(client, l, target_account, region)
except AttributeError as e:
import botocore
logger.error(f"Unable to inventory Lambda Layers - Lambda Boto3 doesn't support yet. Boto3: {boto3.__version__} botocore: {botocore.__version__}")
return()
def process_layer(client, layer, target_account, region):
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = "AWS::Lambda::Layer"
resource_item['source'] = "Antiope"
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['awsRegion'] = region
resource_item['configuration'] = layer
if 'tags' in layer:
resource_item['tags'] = parse_tags(layer['tags'])
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = "{}-{}-{}".format(target_account.account_id, region, layer['LayerName'].replace("/", "-"))
resource_item['resourceName'] = layer['LayerName']
resource_item['ARN'] = layer['LayerArn']
resource_item['errors'] = {}
try:
resource_item['supplementaryConfiguration']['LayerVersions'] = []
response = client.list_layer_versions(LayerName=layer['LayerName'], MaxItems=50)
for version in response['LayerVersions']:
version['Policy'] = client.get_layer_version_policy(LayerName=layer['LayerName'], VersionNumber=version['Version'])
resource_item['supplementaryConfiguration']['LayerVersions'].append(version)
except ClientError as e:
message = f"Error getting the Policy for layer {layer["LayerName"]} in {region} for {target_account.account_name}: {e}"
resource_item['errors']['LayerVersions'] = message
logger.warning(message)
save_resource_to_s3(LAYER_PATH, resource_item['resourceId'], resource_item)
|
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import datetime
from dateutil import tz
from lib.account import *
from lib.common import *
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
FUNC_PATH = "lambda/function"
LAYER_PATH = "lambda/layer"
def lambda_handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Received message: " + json.dumps(message, sort_keys=True))
try:
target_account = AWSAccount(message['account_id'])
for r in target_account.get_regions():
discover_lambdas(target_account, r)
discover_lambda_layer(target_account, r)
except AntiopeAssumeRoleError as e:
logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
return()
except ClientError as e:
logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
raise
except Exception as e:
logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
raise
def discover_lambdas(target_account, region):
    '''Discover Lambda functions in the given region'''
lambdas = []
client = target_account.get_client('lambda', region=region)
response = client.list_functions()
while 'NextMarker' in response: # Gotta Catch 'em all!
lambdas += response['Functions']
response = client.list_functions(Marker=response['NextMarker'])
lambdas += response['Functions']
for l in lambdas:
process_lambda(client, l, target_account, region)
def process_lambda(client, mylambda, target_account, region):
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = "AWS::Lambda::Function"
resource_item['source'] = "Antiope"
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['awsRegion'] = region
resource_item['configuration'] = mylambda
if 'tags' in mylambda:
resource_item['tags'] = parse_tags(mylambda['tags'])
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = "{}-{}-{}".format(target_account.account_id, region, mylambda['FunctionName'].replace("/", "-"))
resource_item['resourceName'] = mylambda['FunctionName']
resource_item['ARN'] = mylambda['FunctionArn']
resource_item['errors'] = {}
try:
response = client.get_policy(FunctionName=mylambda['FunctionArn'])
if 'Policy' in response:
resource_item['supplementaryConfiguration']['Policy'] = json.loads(response['Policy'])
except ClientError as e:
message = f"Error getting the Policy for function {mylambda['FunctionName']} in {region} for {target_account.account_name}: {e}"
resource_item['errors']['Policy'] = message
logger.warning(message)
save_resource_to_s3(FUNC_PATH, resource_item['resourceId'], resource_item)
def discover_lambda_layer(target_account, region):
    '''Discover Lambda Layers in the given region'''
try:
layers = []
client = target_account.get_client('lambda', region=region)
response = client.list_layers()
while 'NextMarker' in response: # Gotta Catch 'em all!
layers += response['Layers']
response = client.list_layers(Marker=response['NextMarker'])
layers += response['Layers']
for l in layers:
process_layer(client, l, target_account, region)
except AttributeError as e:
import botocore
logger.error(f"Unable to inventory Lambda Layers - Lambda Boto3 doesn't support yet. Boto3: {boto3.__version__} botocore: {botocore.__version__}")
return()
def process_layer(client, layer, target_account, region):
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = "AWS::Lambda::Layer"
resource_item['source'] = "Antiope"
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['awsRegion'] = region
resource_item['configuration'] = layer
if 'tags' in layer:
resource_item['tags'] = parse_tags(layer['tags'])
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = "{}-{}-{}".format(target_account.account_id, region, layer['LayerName'].replace("/", "-"))
resource_item['resourceName'] = layer['LayerName']
resource_item['ARN'] = layer['LayerArn']
resource_item['errors'] = {}
try:
resource_item['supplementaryConfiguration']['LayerVersions'] = []
response = client.list_layer_versions(LayerName=layer['LayerName'], MaxItems=50)
for version in response['LayerVersions']:
version['Policy'] = client.get_layer_version_policy(LayerName=layer['LayerName'], VersionNumber=version['Version'])
resource_item['supplementaryConfiguration']['LayerVersions'].append(version)
except ClientError as e:
message = f"Error getting the Policy for layer {layer['LayerName']} in {region} for {target_account.account_name}: {e}"
resource_item['errors']['LayerVersions'] = message
logger.warning(message)
save_resource_to_s3(LAYER_PATH, resource_item['resourceId'], resource_item)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .ga import GeneticAlgorithm
from . import objectives as ga_objectives
import deap
import warnings
class AutoGeneS:
PLOT_PARAMS = {
'small': {
'figsize': (10,5),
'all_ms': 8,
'sel_ms': 10
},
'large': {
'figsize': (15,10),
'all_ms': 5,
'sel_ms': 10
}
}
PLOT_THRESHOLD = 50
def __init__(self, data):
self.data = data
if len(self.data.shape) != 2:
raise ValueError("data is expected to have two dimensions")
if self.data.shape[0] < 2:
raise ValueError("At least two rows (cell types) expected")
if self.data.shape[1] < self.data.shape[0]:
raise ValueError("Number of columns (genes) must be >= number of rows (cell types)")
if not np.isfinite(self.data).all():
raise ValueError("Some entries in data are not scalars")
self.__has_run = False
self.selection = None
self.selection_index = None
def run(self, ngen=2, mode='standard', nfeatures=None, weights=None, objectives=None, seed=0, verbose=True, **kwargs):
# Check modes
if mode == 'standard':
if nfeatures is not None:
raise ValueError("nfeatures doesn't apply to standard mode (did you mean mode='fixed'?)")
elif mode == 'fixed':
if nfeatures is None:
raise ValueError("You need to supply nfeatures")
if nfeatures > self.data.shape[1]:
raise ValueError("nfeatures must be <= the number of columns (genes)")
if nfeatures < self.data.shape[0]:
raise ValueError("nfeatures must be >= the number of rows (cell types)")
else:
raise ValueError("Invalid mode")
# Check weights and objectives
if weights is None:
if objectives is None:
weights = (-1.0,1.0)
objectives = ('correlation','distance')
else:
raise Exception("Need weights for objectives")
else:
if objectives is not None:
if len(weights) != len(objectives):
raise ValueError("Number of weights does not match number of objectives")
weights_l = []
objectives_l = []
for i,w in enumerate(weights):
if w == 0:
warnings.warn(f"Ignoring objective '{str(objectives[i])}'")
else:
weights_l.append(w)
objectives_l.append(objectives[i])
weights=tuple(weights_l)
objectives=tuple(objectives_l)
else:
raise Exception("Need objectives for weights")
# Store objectives
self.objectives_func = []
self.objectives_names = []
for f in objectives:
if callable(f):
self.objectives_func.append(f)
self.objectives_names.append(f.__name__)
elif isinstance(f,str):
if not hasattr(ga_objectives,f):
raise ValueError(f"No such objective: {f}")
else:
self.objectives_names.append(f)
self.objectives_func.append(getattr(ga_objectives,f))
else:
raise ValueError("Invalid objective")
self.objectives_num = len(self.objectives_func)
self.weights = weights
self.ga = GeneticAlgorithm(
data=self.data,
ngen=ngen,
mode=mode,
weights=weights,
objectives_names=self.objectives_names,
objectives_func=self.objectives_func,
seed=seed,
verbose=verbose,
nfeatures=nfeatures,
**kwargs
)
self.hof = self.ga.run()
self.__has_run = True
def resume(self):
self.ga.resume()
@property
def pareto(self):
self.__assert_run()
return self.hof.items
@property
def fitness_matrix(self):
self.__assert_run()
all = []
for i in range(self.objectives_num):
vals = np.array(list(map(lambda x: x.fitness.values[i], self.hof.items)))
all.append(vals)
return np.array(all).T
#
# Plot results
#
def plot(self,objectives=(0,1), **kwargs):
self.__assert_run()
if self.objectives_num == 1:
raise Exception("Cannot plot for a single objective")
obj = objectives
if len(obj) != 2:
raise ValueError("Must supply two objectives per plot")
if not all(map(lambda x: x in range(self.objectives_num), obj)):
raise ValueError(f"Invalid objectives, must be 0 <= x <= {self.objectives_num-1}")
if not kwargs:
return self.plot(weights=self.weights)
i,desc = self.__from_pareto(**kwargs)
if desc == 'index': legend = f'By index'
    if desc == 'weights': legend = f"Using weights {kwargs['weights']}"
    if desc == 'close_to': legend = f"Close to {kwargs['close_to'][1]}"
if 'size' in kwargs:
if kwargs['size'] not in ['small','large']:
raise ValueError("Invalid size")
size = kwargs['size']
else:
if len(self.pareto) < AutoGeneS.PLOT_THRESHOLD:
size = 'small'
else:
size = 'large'
df = pd.DataFrame(self.fitness_matrix).sort_values(by=obj[0])
df_0 = df[obj[0]]
df_1 = df[obj[1]]
params = AutoGeneS.PLOT_PARAMS[size]
plt.figure(figsize=params['figsize'])
line = plt.plot(df_0,df_1)
plt_all, = plt.plot(df_0.drop(i),df_1.drop(i),'bo',ms=params['all_ms'])
plt_sel, = plt.plot(df_0[i],df_1[i],'r^',ms=params['sel_ms'])
plt.xlabel(self.objectives_names[obj[0]])
plt.ylabel(self.objectives_names[obj[1]])
plt.legend([plt_all, plt_sel], ["Option", legend],bbox_to_anchor=(1, 1), loc='upper left')
plt.show()
#
# Select individual
#
def select(self, **kwargs):
self.__assert_run()
if not kwargs:
return self.select(weights=self.weights)
i,desc = self.__from_pareto(**kwargs)
self.selection = self.hof[i]
self.selection_index = i
return self.selection
#
# Helper
#
def __from_pareto(self,**kwargs):
if sum([ x in kwargs for x in ["weights","index","close_to"]]) != 1:
raise Exception("You need to provide exactly one criterion.")
if 'weights' in kwargs:
weights = kwargs['weights']
i_max = self.__index_by_weights(weights)
return i_max,'weights'
if 'index' in kwargs:
index = kwargs['index']
if isinstance(index,int):
if index not in range(len(self.pareto)):
raise ValueError("Invalid index")
return index,'index'
else:
obj,i = index
fit = pd.DataFrame(data=self.fitness_matrix).sort_values(by=obj)
return fit.index.values[i],'index'
if 'close_to' in kwargs:
obj,num = kwargs['close_to']
fit = self.fitness_matrix[:,obj]
i = np.argmin(np.abs(fit-num))
return i,'close_to'
def __index_by_weights(self,weights):
self.__assert_run()
if len(weights) != self.objectives_num:
raise ValueError(f"Number of weights does not match number of objectives")
fitness = self.fitness_matrix
for i in range(self.objectives_num):
max = np.max(fitness[:,i])
if max:
fitness[:,i] *= 1/max
wfitness = fitness.dot(np.array(weights))
return np.argmax(wfitness)
def __assert_run(self):
if not self.__has_run:
raise Exception("AutoGeneS did not run yet")
def __setstate__(self,dict):
deap.creator.FitnessGA.weights = dict['weights']
self.__dict__.update(dict)
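# Minimal usage sketch (hypothetical data, not part of the original module).
# ``data`` is a (cell types x genes) matrix; run() evolves gene subsets,
# plot() shows the pareto front and select() picks one individual, by default
# using the configured objective weights:
#
#   ag = AutoGeneS(centroids)                  # centroids: ndarray, e.g. shape (5, 20000)
#   ag.run(ngen=100, mode='fixed', nfeatures=400, seed=0)
#   ag.plot(weights=(-1, 1))                   # minimize correlation, maximize distance
#   selection = ag.select(close_to=(1, 2000))  # distance objective closest to 2000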
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .ga import GeneticAlgorithm
from . import objectives as ga_objectives
import deap
import warnings
class AutoGeneS:
PLOT_PARAMS = {
'small': {
'figsize': (10,5),
'all_ms': 8,
'sel_ms': 10
},
'large': {
'figsize': (15,10),
'all_ms': 5,
'sel_ms': 10
}
}
PLOT_THRESHOLD = 50
def __init__(self, data):
self.data = data
if len(self.data.shape) != 2:
raise ValueError("data is expected to have two dimensions")
if self.data.shape[0] < 2:
raise ValueError("At least two rows (cell types) expected")
if self.data.shape[1] < self.data.shape[0]:
raise ValueError("Number of columns (genes) must be >= number of rows (cell types)")
if not np.isfinite(self.data).all():
raise ValueError("Some entries in data are not scalars")
self.__has_run = False
self.selection = None
self.selection_index = None
def run(self, ngen=2, mode='standard', nfeatures=None, weights=None, objectives=None, seed=0, verbose=True, **kwargs):
# Check modes
if mode == 'standard':
if nfeatures is not None:
raise ValueError("nfeatures doesn't apply to standard mode (did you mean mode='fixed'?)")
elif mode == 'fixed':
if nfeatures is None:
raise ValueError("You need to supply nfeatures")
if nfeatures > self.data.shape[1]:
raise ValueError("nfeatures must be <= the number of columns (genes)")
if nfeatures < self.data.shape[0]:
raise ValueError("nfeatures must be >= the number of rows (cell types)")
else:
raise ValueError("Invalid mode")
# Check weights and objectives
if weights is None:
if objectives is None:
weights = (-1.0,1.0)
objectives = ('correlation','distance')
else:
raise Exception("Need weights for objectives")
else:
if objectives is not None:
if len(weights) != len(objectives):
raise ValueError("Number of weights does not match number of objectives")
weights_l = []
objectives_l = []
for i,w in enumerate(weights):
if w == 0:
warnings.warn(f"Ignoring objective '{str(objectives[i])}'")
else:
weights_l.append(w)
objectives_l.append(objectives[i])
weights=tuple(weights_l)
objectives=tuple(objectives_l)
else:
raise Exception("Need objectives for weights")
# Store objectives
self.objectives_func = []
self.objectives_names = []
for f in objectives:
if callable(f):
self.objectives_func.append(f)
self.objectives_names.append(f.__name__)
elif isinstance(f,str):
if not hasattr(ga_objectives,f):
raise ValueError(f"No such objective: {f}")
else:
self.objectives_names.append(f)
self.objectives_func.append(getattr(ga_objectives,f))
else:
raise ValueError("Invalid objective")
self.objectives_num = len(self.objectives_func)
self.weights = weights
self.ga = GeneticAlgorithm(
data=self.data,
ngen=ngen,
mode=mode,
weights=weights,
objectives_names=self.objectives_names,
objectives_func=self.objectives_func,
seed=seed,
verbose=verbose,
nfeatures=nfeatures,
**kwargs
)
self.hof = self.ga.run()
self.__has_run = True
def resume(self):
self.ga.resume()
@property
def pareto(self):
self.__assert_run()
return self.hof.items
@property
def fitness_matrix(self):
self.__assert_run()
all = []
for i in range(self.objectives_num):
vals = np.array(list(map(lambda x: x.fitness.values[i], self.hof.items)))
all.append(vals)
return np.array(all).T
#
# Plot results
#
def plot(self,objectives=(0,1), **kwargs):
self.__assert_run()
if self.objectives_num == 1:
raise Exception("Cannot plot for a single objective")
obj = objectives
if len(obj) != 2:
raise ValueError("Must supply two objectives per plot")
if not all(map(lambda x: x in range(self.objectives_num), obj)):
raise ValueError(f"Invalid objectives, must be 0 <= x <= {self.objectives_num-1}")
if not kwargs:
return self.plot(weights=self.weights)
i,desc = self.__from_pareto(**kwargs)
if desc == 'index': legend = f'By index'
if desc == 'weights': legend = f"Using weights {kwargs['weights']}"
if desc == 'close_to': legend = f"Close to {kwargs['close_to'][1]}"
if 'size' in kwargs:
if kwargs['size'] not in ['small','large']:
raise ValueError("Invalid size")
size = kwargs['size']
else:
if len(self.pareto) < AutoGeneS.PLOT_THRESHOLD:
size = 'small'
else:
size = 'large'
df = pd.DataFrame(self.fitness_matrix).sort_values(by=obj[0])
df_0 = df[obj[0]]
df_1 = df[obj[1]]
params = AutoGeneS.PLOT_PARAMS[size]
plt.figure(figsize=params['figsize'])
line = plt.plot(df_0,df_1)
plt_all, = plt.plot(df_0.drop(i),df_1.drop(i),'bo',ms=params['all_ms'])
plt_sel, = plt.plot(df_0[i],df_1[i],'r^',ms=params['sel_ms'])
plt.xlabel(self.objectives_names[obj[0]])
plt.ylabel(self.objectives_names[obj[1]])
plt.legend([plt_all, plt_sel], ["Option", legend],bbox_to_anchor=(1, 1), loc='upper left')
plt.show()
#
# Select individual
#
def select(self, **kwargs):
self.__assert_run()
if not kwargs:
return self.select(weights=self.weights)
i,desc = self.__from_pareto(**kwargs)
self.selection = self.hof[i]
self.selection_index = i
return self.selection
#
# Helper
#
def __from_pareto(self,**kwargs):
if sum([ x in kwargs for x in ["weights","index","close_to"]]) != 1:
raise Exception("You need to provide exactly one criterion.")
if 'weights' in kwargs:
weights = kwargs['weights']
i_max = self.__index_by_weights(weights)
return i_max,'weights'
if 'index' in kwargs:
index = kwargs['index']
if isinstance(index,int):
if index not in range(len(self.pareto)):
raise ValueError("Invalid index")
return index,'index'
else:
obj,i = index
fit = pd.DataFrame(data=self.fitness_matrix).sort_values(by=obj)
return fit.index.values[i],'index'
if 'close_to' in kwargs:
obj,num = kwargs['close_to']
fit = self.fitness_matrix[:,obj]
i = np.argmin(np.abs(fit-num))
return i,'close_to'
def __index_by_weights(self,weights):
self.__assert_run()
if len(weights) != self.objectives_num:
raise ValueError(f"Number of weights does not match number of objectives")
fitness = self.fitness_matrix
for i in range(self.objectives_num):
max = np.max(fitness[:,i])
if max:
fitness[:,i] *= 1/max
wfitness = fitness.dot(np.array(weights))
return np.argmax(wfitness)
def __assert_run(self):
if not self.__has_run:
raise Exception("AutoGeneS did not run yet")
def __setstate__(self,dict):
deap.creator.FitnessGA.weights = dict['weights']
self.__dict__.update(dict)
|
import discord
from discord.ext import commands
from discord.ext.commands import Bot, Context
from models.command import CommandInfo
import config
from util.discord.channel import ChannelUtil
from util.discord.messages import Messages
from util.env import Env
from db.models.favorite import Favorite
from db.models.user import User
from db.redis import RedisDB
from util.discord.paginator import Entry, Page, Paginator
from util.regex import RegexUtil, AmountAmbiguousException, AmountMissingException
from util.validators import Validators
from models.constants import Constants
from db.models.transaction import Transaction
from util.util import Utils
import asyncio
from tasks.transaction_queue import TransactionQueue
## Command documentation
ADD_FAVORITE_INFO = CommandInfo(
triggers = ["addfavorite"],
overview = "Add a user to your favorites list",
details = f"Add a user to your favorites list. You can have up to **25 favorites**. Example: `{config.Config.instance().command_prefix}addfavorite @bbedward`"
)
REMOVE_FAVORITE_INFO = CommandInfo(
triggers = ["unfavorite", "removefavorite"],
overview = "Remove a user from your favorites list",
details = f"Remove a user from your favorites list Example: `{config.Config.instance().command_prefix}removefavorite 419483863115366410`"
)
FAVORITES_INFO = CommandInfo(
triggers = ["favorites"],
overview = "View list of users you have favorited",
details = f"View the list of every user you have favorited. You can tip all of them using `{config.Config.instance().command_prefix}{"banfavorites" if Env.banano() else ("pawfavorites" if Env.paw() else "ntipfavorites")} <amount>`"
)
TIPFAVORITES_INFO = CommandInfo(
triggers = ["banfavorites" if Env.banano() else ("pawfavorites" if Env.paw() else "ntipfavorites")],
overview = "Tip all the favorites",
details = f"Split a tip among all of the users in your favorites list - similar to a tipsplit. (**minimum tip is {Constants.TIP_MINIMUM} {Constants.TIP_UNIT}**)" +
f"\nExample: `{config.Config.instance().command_prefix}{"banfavorites" if Env.banano() else ("pawfavorites" if Env.paw() else "ntipfavorites")} <amount>`"
)
class FavoriteCog(commands.Cog):
"""Commands for admins only"""
def __init__(self, bot: Bot):
self.bot = bot
async def cog_before_invoke(self, ctx: Context):
ctx.error = False
msg = ctx.message
# See if user exists in DB
user = await User.get_user(msg.author)
if user is None:
ctx.error = True
await Messages.send_error_dm(msg.author, f"You should create an account with me first, send me `{config.Config.instance().command_prefix}help` to get started.")
return
elif user.frozen:
ctx.error = True
await Messages.send_error_dm(msg.author, f"Your account is frozen. Contact an admin if you need further assistance.")
return
ctx.user = user
# Update name if applicable
await user.update_name(msg.author.name)
# Special checks for tipfavorites
if ctx.command.name == 'tipfavorites_cmd':
# Check admins
ctx.god = msg.author.id in config.Config.instance().get_admin_ids()
ctx.admin = False
author: discord.Member = msg.author
for role in author.roles:
if role.id in config.Config.instance().get_admin_roles():
ctx.admin = True
break
# Check paused
if await RedisDB.instance().is_paused():
ctx.error = True
await Messages.send_error_dm(msg.author, f"Transaction activity is currently suspended. I'll be back online soon!")
return
# See if amount meets tip_minimum requirement
try:
send_amount = RegexUtil.find_float(msg.content)
if send_amount < Constants.TIP_MINIMUM:
raise AmountMissingException(f"Tip amount is too low, minimum is {Constants.TIP_MINIMUM}")
elif Validators.too_many_decimals(send_amount):
await Messages.send_error_dm(ctx.message.author, f"You are only allowed to use {Env.precision_digits()} digits after the decimal.")
ctx.error = True
return
except AmountMissingException:
ctx.error = True
await Messages.send_usage_dm(msg.author, TIPFAVORITES_INFO)
ctx.send_amount = send_amount
@commands.command(aliases=ADD_FAVORITE_INFO.triggers)
async def addfavorite_cmd(self, ctx: Context):
if ctx.error:
return
msg = ctx.message
user = ctx.user
to_add = []
for u in msg.mentions:
if u.id == msg.author.id:
continue
to_add.append(u)
for u in msg.content.split():
try:
u_id = int(u.strip())
if u_id == msg.author.id:
continue
else:
                    # skip IDs that were already queued (e.g. from a mention)
                    if any(added.id == u_id for added in to_add):
                        continue
discord_user = self.bot.get_user(u_id)
if discord_user is not None:
to_add.append(discord_user)
except Exception:
pass
if len(to_add) < 1:
await Messages.send_usage_dm(msg.author, ADD_FAVORITE_INFO)
return
fav_count = await Favorite.filter(user=ctx.user).count()
if (fav_count + len(to_add)) > 25:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, f"You can only have up to **25 favorites**. With this, you would have **{fav_count + len(to_add)}**.")
return
        # Add the favorites
added_count = 0
for u in to_add:
try:
target_user = await User.get_user(u)
if target_user is not None:
await Favorite.add_favorite(user, target_user)
added_count += 1
except Exception:
pass
if added_count < 1:
await Messages.send_error_dm(msg.author, "I was unable to favorite any users you mentioned.")
return
await msg.add_reaction("\u2764")
await Messages.send_success_dm(msg.author, f"Successfully added {added_count} user(s) to your favorites")
@commands.command(aliases=REMOVE_FAVORITE_INFO.triggers)
async def removefavorite_cmd(self, ctx: Context):
if ctx.error:
return
msg = ctx.message
user = ctx.user
to_remove = []
for u in msg.mentions:
to_remove.append(u)
for u in msg.content.split():
try:
u_id = int(u.strip())
if u_id == msg.author.id:
continue
else:
                    # skip IDs that were already queued (e.g. from a mention)
                    if any(added.id == u_id for added in to_remove):
                        continue
discord_user = await User.get_user_id(u_id)
if discord_user is not None:
to_remove.append(discord_user)
except Exception:
pass
if len(to_remove) < 1:
await Messages.send_usage_dm(msg.author, REMOVE_FAVORITE_INFO)
return
        # Remove the favorites
removed_count = 0
for u in to_remove:
try:
await Favorite.delete_favorite(user, u)
removed_count += 1
except Exception:
pass
if removed_count < 1:
await Messages.send_error_dm(msg.author, "I was unable to remove any users you mentioned from your favorites.")
return
await msg.add_reaction("\U0001F494")
await Messages.send_success_dm(msg.author, f"Successfully removed {removed_count} user(s) from your favorites")
@commands.command(aliases=FAVORITES_INFO.triggers)
async def favorites_cmd(self, ctx: Context):
if ctx.error:
return
msg = ctx.message
user = ctx.user
favorited_list = await Favorite.filter(user=ctx.user).prefetch_related('favorited_user').all()
if len(favorited_list) < 1:
await msg.author.send("You don't have any users in your favorites list.")
return
# Build user list
entries = []
for u in favorited_list:
entries.append(Entry(f"{u.favorited_user.name}", f"Remove with `{config.Config.instance().command_prefix}unfavorite {u.favorited_user.id}`"))
# Build pages
pages = []
# Overview
author=f"Your Favorites"
description = f"Use `{config.Config.instance().command_prefix}unfavorite <user_id>` to remove a user from your favorites"
i = 0
entry_subset = []
for e in entries:
entry_subset.append(e)
if i == 14:
pages.append(Page(entries=entry_subset, author=author, description=description))
i = 0
entry_subset = []
else:
i += 1
if len(entry_subset) > 0:
pages.append(Page(entries=entry_subset, author=author, description=description))
# Add a bonus page
entries = [Entry("Remove all favorites", "Copy and paste the command to remove everybody from your favorites list")]
author=f"Remove everybody"
description = f"```{config.Config.instance().command_prefix}unfavorite"
for u in favorited_list:
description += f" {u.favorited_user.id}"
description += "```"
        pages.append(Page(entries=entries, author=author, description=description))
        # Start pagination
        pages = Paginator(self.bot, message=msg, page_list=pages, as_dm=True)
await pages.paginate(start_page=1)
@commands.command(aliases=TIPFAVORITES_INFO.triggers)
async def tipfavorites_cmd(self, ctx: Context):
if ctx.error:
await Messages.add_x_reaction(ctx.message)
return
msg = ctx.message
user = ctx.user
send_amount = ctx.send_amount
# Check anti-spam
if not ctx.god and await RedisDB.instance().exists(f"tipfavoritesspam{msg.author.id}"):
await Messages.add_timer_reaction(msg)
await Messages.send_basic_dm(msg.author, "You can only tipfavorites once every 5 minutes")
return
# Get their favorites
favorites = await Favorite.filter(user=user).prefetch_related('favorited_user').all()
if len(favorites) < 1:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, "You don't have any favorites, add some first.")
return
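        # Split the tip evenly among favorites, truncated to the configured precision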
individual_send_amount = Env.truncate_digits(send_amount / len(favorites), max_digits=Env.precision_digits())
if individual_send_amount < Constants.TIP_MINIMUM:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, f"Tip amount too small, each user needs to receive at least {Constants.TIP_MINIMUM}. With your tip they'd only be getting {individual_send_amount}")
return
# See how much they need to make this tip.
amount_needed = individual_send_amount * len(favorites)
available_balance = Env.raw_to_amount(await user.get_available_balance())
if amount_needed > available_balance:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, f"Your balance isn't high enough to complete this tip. You have **{available_balance} {Env.currency_symbol()}**, but this tip would cost you **{amount_needed} {Env.currency_symbol()}**")
return
# Make the transactions in the database
tx_list = []
task_list = []
for u in favorites:
tx = await Transaction.create_transaction_internal_dbuser(
sending_user=user,
amount=individual_send_amount,
receiving_user=u.favorited_user
)
if tx is not None:
tx_list.append(tx)
if not await user.is_muted_by(u.favorited_user.id):
task_list.append(
Messages.send_basic_dm(
member=self.bot.get_user(u.favorited_user.id),
message=f"You were tipped **{individual_send_amount} {Env.currency_symbol()}** by {msg.author.name.replace("`", "")}.\nUse `{config.Config.instance().command_prefix}mute {msg.author.id}` to disable notifications for this user."
)
)
if len(tx_list) < 1:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, f"No users you mentioned are eligible to receive tips.")
return
# Send DMs
asyncio.ensure_future(Utils.run_task_list(task_list))
# Add reactions
await Messages.add_tip_reaction(msg, amount_needed)
# Queue the actual sends
for tx in tx_list:
await TransactionQueue.instance().put(tx)
# anti spam
await RedisDB.instance().set(f"tipfavoritesspam{msg.author.id}", "as", expires=300)
# Update stats
stats: Stats = await user.get_stats(server_id=msg.guild.id)
if msg.channel.id not in config.Config.instance().get_no_stats_channels():
await stats.update_tip_stats(amount_needed)
|
import discord  # used for the discord.Member annotation in cog_before_invoke
from discord.ext import commands
from discord.ext.commands import Bot, Context
from models.command import CommandInfo
import config
from util.discord.channel import ChannelUtil
from util.discord.messages import Messages
from util.env import Env
from db.models.favorite import Favorite
from db.models.user import User
from db.redis import RedisDB
from util.discord.paginator import Entry, Page, Paginator
from util.regex import RegexUtil, AmountAmbiguousException, AmountMissingException
from util.validators import Validators
from models.constants import Constants
from db.models.transaction import Transaction
from util.util import Utils
import asyncio
from tasks.transaction_queue import TransactionQueue
## Command documentation
ADD_FAVORITE_INFO = CommandInfo(
triggers = ["addfavorite"],
overview = "Add a user to your favorites list",
details = f"Add a user to your favorites list. You can have up to **25 favorites**. Example: `{config.Config.instance().command_prefix}addfavorite @bbedward`"
)
REMOVE_FAVORITE_INFO = CommandInfo(
triggers = ["unfavorite", "removefavorite"],
overview = "Remove a user from your favorites list",
details = f"Remove a user from your favorites list Example: `{config.Config.instance().command_prefix}removefavorite 419483863115366410`"
)
FAVORITES_INFO = CommandInfo(
triggers = ["favorites"],
overview = "View list of users you have favorited",
details = f"View the list of every user you have favorited. You can tip all of them using `{config.Config.instance().command_prefix}{'banfavorites' if Env.banano() else ('pawfavorites' if Env.paw() else 'ntipfavorites')} <amount>`"
)
TIPFAVORITES_INFO = CommandInfo(
triggers = ["banfavorites" if Env.banano() else ("pawfavorites" if Env.paw() else "ntipfavorites")],
overview = "Tip all the favorites",
details = f"Split a tip among all of the users in your favorites list - similar to a tipsplit. (**minimum tip is {Constants.TIP_MINIMUM} {Constants.TIP_UNIT}**)" +
f"\nExample: `{config.Config.instance().command_prefix}{'banfavorites' if Env.banano() else ('pawfavorites' if Env.paw() else 'ntipfavorites')} <amount>`"
)
class FavoriteCog(commands.Cog):
"""Commands for admins only"""
def __init__(self, bot: Bot):
self.bot = bot
async def cog_before_invoke(self, ctx: Context):
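        """Shared pre-checks for every favorites command.

        Sets ctx.error and ctx.user; for the tipfavorites command it also checks
        admin status and pause state, and parses the tip amount into ctx.send_amount.
        """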
ctx.error = False
msg = ctx.message
# See if user exists in DB
user = await User.get_user(msg.author)
if user is None:
ctx.error = True
await Messages.send_error_dm(msg.author, f"You should create an account with me first, send me `{config.Config.instance().command_prefix}help` to get started.")
return
elif user.frozen:
ctx.error = True
await Messages.send_error_dm(msg.author, f"Your account is frozen. Contact an admin if you need further assistance.")
return
ctx.user = user
# Update name if applicable
await user.update_name(msg.author.name)
# Special checks for tipfavorites
if ctx.command.name == 'tipfavorites_cmd':
# Check admins
ctx.god = msg.author.id in config.Config.instance().get_admin_ids()
ctx.admin = False
author: discord.Member = msg.author
for role in author.roles:
if role.id in config.Config.instance().get_admin_roles():
ctx.admin = True
break
# Check paused
if await RedisDB.instance().is_paused():
ctx.error = True
await Messages.send_error_dm(msg.author, f"Transaction activity is currently suspended. I'll be back online soon!")
return
# See if amount meets tip_minimum requirement
try:
send_amount = RegexUtil.find_float(msg.content)
if send_amount < Constants.TIP_MINIMUM:
raise AmountMissingException(f"Tip amount is too low, minimum is {Constants.TIP_MINIMUM}")
elif Validators.too_many_decimals(send_amount):
await Messages.send_error_dm(ctx.message.author, f"You are only allowed to use {Env.precision_digits()} digits after the decimal.")
ctx.error = True
return
            except AmountMissingException:
                ctx.error = True
                await Messages.send_usage_dm(msg.author, TIPFAVORITES_INFO)
                return
            ctx.send_amount = send_amount
@commands.command(aliases=ADD_FAVORITE_INFO.triggers)
async def addfavorite_cmd(self, ctx: Context):
if ctx.error:
return
msg = ctx.message
user = ctx.user
to_add = []
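        # Collect users to favorite from both @mentions and raw Discord IDs in the message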
for u in msg.mentions:
if u.id == msg.author.id:
continue
to_add.append(u)
for u in msg.content.split():
try:
u_id = int(u.strip())
if u_id == msg.author.id:
continue
else:
                    # Skip IDs already collected from mentions or earlier tokens
                    if any(added.id == u_id for added in to_add):
                        continue
discord_user = self.bot.get_user(u_id)
if discord_user is not None:
to_add.append(discord_user)
except Exception:
pass
if len(to_add) < 1:
await Messages.send_usage_dm(msg.author, ADD_FAVORITE_INFO)
return
fav_count = await Favorite.filter(user=ctx.user).count()
if (fav_count + len(to_add)) > 25:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, f"You can only have up to **25 favorites**. With this, you would have **{fav_count + len(to_add)}**.")
return
        # Add each collected user to favorites
added_count = 0
for u in to_add:
try:
target_user = await User.get_user(u)
if target_user is not None:
await Favorite.add_favorite(user, target_user)
added_count += 1
except Exception:
pass
if added_count < 1:
await Messages.send_error_dm(msg.author, "I was unable to favorite any users you mentioned.")
return
await msg.add_reaction("\u2764")
await Messages.send_success_dm(msg.author, f"Successfully added {added_count} user(s) to your favorites")
@commands.command(aliases=REMOVE_FAVORITE_INFO.triggers)
async def removefavorite_cmd(self, ctx: Context):
if ctx.error:
return
msg = ctx.message
user = ctx.user
to_remove = []
for u in msg.mentions:
to_remove.append(u)
for u in msg.content.split():
try:
u_id = int(u.strip())
if u_id == msg.author.id:
continue
else:
                    # Skip IDs already collected from mentions or earlier tokens
                    if any(added.id == u_id for added in to_remove):
                        continue
discord_user = await User.get_user_id(u_id)
if discord_user is not None:
to_remove.append(discord_user)
except Exception:
pass
if len(to_remove) < 1:
await Messages.send_usage_dm(msg.author, REMOVE_FAVORITE_INFO)
return
        # Remove each collected user from favorites
removed_count = 0
for u in to_remove:
try:
await Favorite.delete_favorite(user, u)
removed_count += 1
except Exception:
pass
if removed_count < 1:
await Messages.send_error_dm(msg.author, "I was unable to remove any users you mentioned from your favorites.")
return
await msg.add_reaction("\U0001F494")
await Messages.send_success_dm(msg.author, f"Successfully removed {removed_count} user(s) from your favorites")
@commands.command(aliases=FAVORITES_INFO.triggers)
async def favorites_cmd(self, ctx: Context):
if ctx.error:
return
msg = ctx.message
user = ctx.user
favorited_list = await Favorite.filter(user=ctx.user).prefetch_related('favorited_user').all()
if len(favorited_list) < 1:
await msg.author.send("You don't have any users in your favorites list.")
return
# Build user list
entries = []
for u in favorited_list:
entries.append(Entry(f"{u.favorited_user.name}", f"Remove with `{config.Config.instance().command_prefix}unfavorite {u.favorited_user.id}`"))
# Build pages
pages = []
# Overview
author=f"Your Favorites"
description = f"Use `{config.Config.instance().command_prefix}unfavorite <user_id>` to remove a user from your favorites"
i = 0
entry_subset = []
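        # Chunk entries into pages of 15 for the paginator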
for e in entries:
entry_subset.append(e)
if i == 14:
pages.append(Page(entries=entry_subset, author=author, description=description))
i = 0
entry_subset = []
else:
i += 1
if len(entry_subset) > 0:
pages.append(Page(entries=entry_subset, author=author, description=description))
# Add a bonus page
entries = [Entry("Remove all favorites", "Copy and paste the command to remove everybody from your favorites list")]
author=f"Remove everybody"
description = f"```{config.Config.instance().command_prefix}unfavorite"
for u in favorited_list:
description += f" {u.favorited_user.id}"
description += "```"
        pages.append(Page(entries=entries, author=author, description=description))
        # Start pagination
        pages = Paginator(self.bot, message=msg, page_list=pages, as_dm=True)
await pages.paginate(start_page=1)
@commands.command(aliases=TIPFAVORITES_INFO.triggers)
async def tipfavorites_cmd(self, ctx: Context):
if ctx.error:
await Messages.add_x_reaction(ctx.message)
return
msg = ctx.message
user = ctx.user
send_amount = ctx.send_amount
# Check anti-spam
if not ctx.god and await RedisDB.instance().exists(f"tipfavoritesspam{msg.author.id}"):
await Messages.add_timer_reaction(msg)
await Messages.send_basic_dm(msg.author, "You can only tipfavorites once every 5 minutes")
return
# Get their favorites
favorites = await Favorite.filter(user=user).prefetch_related('favorited_user').all()
if len(favorites) < 1:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, "You don't have any favorites, add some first.")
return
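        # Split the tip evenly among favorites, truncated to the configured precision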
individual_send_amount = Env.truncate_digits(send_amount / len(favorites), max_digits=Env.precision_digits())
if individual_send_amount < Constants.TIP_MINIMUM:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, f"Tip amount too small, each user needs to receive at least {Constants.TIP_MINIMUM}. With your tip they'd only be getting {individual_send_amount}")
return
# See how much they need to make this tip.
amount_needed = individual_send_amount * len(favorites)
available_balance = Env.raw_to_amount(await user.get_available_balance())
if amount_needed > available_balance:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, f"Your balance isn't high enough to complete this tip. You have **{available_balance} {Env.currency_symbol()}**, but this tip would cost you **{amount_needed} {Env.currency_symbol()}**")
return
# Make the transactions in the database
tx_list = []
task_list = []
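        # Create one internal DB transaction per favorite; DM recipients who have not muted the sender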
for u in favorites:
tx = await Transaction.create_transaction_internal_dbuser(
sending_user=user,
amount=individual_send_amount,
receiving_user=u.favorited_user
)
if tx is not None:
tx_list.append(tx)
if not await user.is_muted_by(u.favorited_user.id):
task_list.append(
Messages.send_basic_dm(
member=self.bot.get_user(u.favorited_user.id),
message=f"You were tipped **{individual_send_amount} {Env.currency_symbol()}** by {msg.author.name.replace('`', '')}.\nUse `{config.Config.instance().command_prefix}mute {msg.author.id}` to disable notifications for this user."
)
)
if len(tx_list) < 1:
await Messages.add_x_reaction(msg)
await Messages.send_error_dm(msg.author, f"No users you mentioned are eligible to receive tips.")
return
# Send DMs
asyncio.ensure_future(Utils.run_task_list(task_list))
# Add reactions
await Messages.add_tip_reaction(msg, amount_needed)
# Queue the actual sends
for tx in tx_list:
await TransactionQueue.instance().put(tx)
# anti spam
await RedisDB.instance().set(f"tipfavoritesspam{msg.author.id}", "as", expires=300)
# Update stats
stats: Stats = await user.get_stats(server_id=msg.guild.id)
if msg.channel.id not in config.Config.instance().get_no_stats_channels():
await stats.update_tip_stats(amount_needed)
|
"""
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import requests
import sites
import urllib3
from gluon import current  # assumed: web2py's thread-local context that provides SITES and environment
urllib3.disable_warnings()
current.environment = "test"
# ==============================================================================
class RetrievalTest:
# --------------------------------------------------------------------------
def __init__(self):
self.profile_site = {}
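        # Map each supported site name to its Profile class from the sites package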
for site in current.SITES:
self.profile_site[site] = getattr(sites, site.lower()).Profile
# --------------------------------------------------------------------------
def test_tag_retrieval(self):
sites_with_tags_functionality = ["CodeChef", "CodeForces", "Spoj", "HackerEarth", "HackerRank", "Timus"]
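        # "with_tags": a problem link with a known tag list per site
        # "without_tags": a problem link expected to have no tags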
assertion_hash = {
"with_tags": {
"CodeChef": {
"plink": "https://www.codechef.com/PRACTICE/problems/FNCS",
"tags": [u'data-structure', u'devuy11', u'fenwick', u'medium-hard', u'nov14', u'segment-tree', u'sqrt-decomp']
},
"CodeForces": {
"plink": "http://www.codeforces.com/problemset/problem/323/A",
"tags": [u'combinatorics', u'constructive algorithms']
},
"Spoj": {
"plink": "https://www.spoj.com/problems/YODANESS/",
"tags": [u'graph-theory', u'number-theory', u'shortest-path', u'sorting', u'tree', u'bitmasks']
},
"HackerEarth": {
"plink": "https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/candy-distribution/",
"tags": [u'Dynamic Programming', u'Mathematics', u'Number Theory']
},
"HackerRank": {
"plink": "https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list",
"tags": [u'Linked Lists']
},
"Timus": {
"plink": "http://acm.timus.ru/problem.aspx?space=1&num=1954&locale=en",
"tags": [u'hardest problem', u'palindromes', u'string algorithms']
}
},
"without_tags": {
"CodeChef": "https://www.codechef.com/ZCOPRAC/problems/ZCO14004",
"CodeForces": "http://www.codeforces.com/problemset/gymProblem/100570/C",
"Spoj": "https://www.spoj.com/problems/TOUR/",
"HackerEarth": "https://www.hackerearth.com/problem/algorithm/find-pairs-1/",
"Timus": "http://acm.timus.ru/problem.aspx?space=1&num=1559&locale=en"
}
}
for site in sites_with_tags_functionality:
P = self.profile_site[site]
if P.is_website_down():
# Don't test for websites which are acked to be down
continue
tags_func = P.get_problem_details
tags_val = tags_func(problem_link=assertion_hash["with_tags"][site]["plink"],
update_things=["tags"])["tags"]
if set(tags_val) != set(assertion_hash["with_tags"][site]["tags"]):
raise RuntimeError(site + " with tags failure")
if site in assertion_hash["without_tags"]:
tags_val = tags_func(problem_link=assertion_hash["without_tags"][site],
update_things=["tags"])["tags"]
if tags_val not in ([u"-"], []):
raise RuntimeError(site + " without tags failure")
# --------------------------------------------------------------------------
def test_editorial_retrieval(self):
sites_with_editorial_functionality = ["CodeChef", "CodeForces", "HackerEarth", "HackerRank"]
assertion_hash = {
"with_editorial": {
"CodeChef": {
"plink": "https://www.codechef.com/LTIME27/problems/INVERT",
"editorial_link": "https://discuss.codechef.com/problems/INVERT"
},
"CodeForces": {
"plink": "http://www.codeforces.com/problemset/problem/102/B",
"editorial_link": "http://www.codeforces.com/blog/entry/2393"
},
"HackerEarth": {
"plink": "https://www.hackerearth.com/problem/approximate/lots-of-circles/",
"editorial_link": "https://www.hackerearth.com/problem/approximate/lots-of-circles/editorial/"
},
"HackerRank": {
"plink": "https://www.hackerrank.com/challenges/candles-2",
"editorial_link": "https://www.hackerrank.com/challenges/candles-2/editorial/"
},
"AtCoder": {
"plink": "https://atcoder.jp/contests/agc035/tasks/agc035_c",
"editorial_link": "https://img.atcoder.jp/agc035/editorial.pdf"
}
},
"without_editorial": {
"CodeChef": "https://www.codechef.com/PRACTICE/problems/PG",
"CodeForces": "http://www.codeforces.com/problemset/problem/234/D",
"HackerEarth": "https://www.hackerearth.com/problem/algorithm/level-selections/"
}
}
for site in sites_with_editorial_functionality:
P = self.profile_site[site]
if P.is_website_down():
# Don't test for websites which are acked to be down
continue
editorial_func = P.get_problem_details
editorial_link = editorial_func(problem_link=assertion_hash["with_editorial"][site]["plink"],
update_things=["editorial_link"])["editorial_link"]
if editorial_link != assertion_hash["with_editorial"][site]["editorial_link"]:
raise RuntimeError(site + " with editorial failure")
if site in assertion_hash["without_editorial"]:
editorial_link = editorial_func(problem_link=assertion_hash["without_editorial"][site],
update_things=["editorial_link"])["editorial_link"]
if editorial_link is not None:
raise RuntimeError(site + " without editorial failure")
# --------------------------------------------------------------------------
def test_problem_setters_retrieval(self):
sites_with_problem_setters = ["CodeChef", "CodeForces", "HackerEarth", "HackerRank", "Spoj", "Timus"]
assertion_hash = {
"with_problem_setters": {
"CodeChef": {
"plink": "https://www.codechef.com/LTIME27/problems/INVERT",
"problem_setters": ["ma5termind"]
},
"CodeForces": {
"plink": "http://www.codeforces.com/problemset/problem/1200/B",
"problem_setters": ["djm03178", "nong"]
},
"HackerEarth": {
"plink": "https://www.hackerearth.com/problem/algorithm/level-selections/",
"problem_setters": ["akileshreddy40950"]
},
"HackerRank": {
"plink": "https://www.hackerrank.com/challenges/candles-2",
"problem_setters": ["gdisastery"]
},
"Timus": {
"plink": "https://acm.timus.ru/problem.aspx?space=1&num=1954&locale=en",
"problem_setters": ["Mikhail Rubinchik (prepared by Kirill Borozdin)"]
},
"Spoj": {
"plink": "https://www.spoj.com/problems/CONNECT2/",
"problem_setters": ["nikola_borisof"]
}
},
"without_problem_setters": {
"CodeForces": "http://www.codeforces.com/problemset/problem/1212/C",
"HackerEarth": "https://www.hackerearth.com/challenges/college/engineers-day-nit-silchar-challenge/algorithm/valentines-day/"
}
}
for site in sites_with_problem_setters:
P = self.profile_site[site]
if P.is_website_down():
# Don't test for websites which are acked to be down
continue
pd_func = P.get_problem_details
current_setters = pd_func(problem_link=assertion_hash["with_problem_setters"][site]["plink"],
update_things=["problem_setters"])["problem_setters"]
if current_setters != assertion_hash["with_problem_setters"][site]["problem_setters"]:
raise RuntimeError(site + " with problem_setters failure")
if site in assertion_hash["without_problem_setters"]:
current_setters = pd_func(problem_link=assertion_hash["without_problem_setters"][site],
update_things=["problem_setters"])["problem_setters"]
if current_setters is not None:
raise RuntimeError(site + " without problem_setters failure")
return
# --------------------------------------------------------------------------
def test_invalid_handle(self):
handle = "thisreallycantbeahandle308"
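        # Every site that is currently up should report this non-existent handle as invalid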
result = map(lambda site: (site, self.profile_site[site].is_invalid_handle(handle)),
filter(lambda site: self.profile_site[site].is_website_down() == False,
current.SITES.keys()))
failure_sites = []
for site, res in result:
if not res:
failure_sites.append(site)
if len(failure_sites) > 0:
raise RuntimeError(", ".join(failure_sites) + " " + "invalid handle failure")
# --------------------------------------------------------------------------
def test_download_submission(self):
import requests
from bs4 import BeautifulSoup
sites_with_download_functionality = ["CodeChef", "CodeForces"]
assertion_hash = {
"CodeChef": {
"view_link": "https://www.codechef.com/viewsolution/27348746",
"submission": '#include<bits/stdc++.h>\r\nusing namespace std;\r\nint main(){\r\n\tint t;\r\n\tcin>>t;\r\n\twhile(t--){\r\n\t\tint n,m,u,v;\r\n\t\tcin>>n>>m;\r\n\t\tif(m%2==0){\r\n\t\t\tint temp;\r\n\t\t\tfor(auto i=0;i<m;i++){\r\n\t\t\t\tcin>>temp>>temp;\r\n\t\t\t}\t\r\n\t\t\tcout<<1<<endl;\r\n\t\t\tfor(auto i=0;i<n;i++)\r\n\t\t\t{\r\n\t\t\t\tcout<<1<<" ";\r\n\t\t\t}\r\n\t\t\tcout<<endl;\r\n\t\t\tcontinue;\r\n\t\t}\r\n\r\n\t\t// m is odd\r\n\t\tvector<vector<int>> g(n);\r\n\t\tvector<int> d(n);\r\n\t\tfor(auto i=0;i<m;i++){\r\n\t\t\tcin>>u>>v;\r\n\t\t\td[u-1]++;\r\n\t\t\td[v-1]++;\r\n\t\t\tg[u-1].push_back(v-1);\r\n\t\t\tg[v-1].push_back(u-1);\r\n\t\t}\r\n\r\n\t\t// m is odd and we find an odd vertice\r\n\t\tint idx=-1;\r\n\t\tfor(auto i=0;i<n;i++){\r\n\t\t\tif(d[i]%2==1) {idx=i;break;}\r\n\t\t}\r\n\t\tif(idx!=-1){\r\n\t\t\tcout<<2<<endl;\r\n\t\t\tfor(auto i=0;i<n;i++)\r\n\t\t\t{\r\n\t\t\t\tcout<<((i==idx)?1:2)<<" ";\r\n\t\t\t}\r\n\t\t\tcout<<endl;\r\n\t\t\tcontinue;\r\n\r\n\t\t}\r\n\r\n\t\t// m is odd and all degrees are even\r\n\t\t// idx is 3 idx1 is 2 rest is 1\r\n\t\tidx=-1;\r\n\t\tint idx1=-1;\r\n\t\t// find a vertex removing which we get odd vertices\r\n\t\tfor(auto i=0;i<n;i++){\r\n\t\t\tif(d[i]>0){idx=i;break;}\r\n\t\t}\r\n\t\t// idx will be 3\r\n\t\t// change all degrees\r\n\t\tfor(auto i:g[idx]){\r\n\t\t\td[i]--;\r\n\t\t\tidx1=i;\r\n\t\t}\r\n\t\tcout<<3<<endl;\r\n\t\td[idx]=0;\r\n\t\tg[idx]=vector<int>();\r\n\t\tfor(auto i=0;i<n;i++)\r\n\t\t{\r\n\t\t\tif(i==idx){ \r\n\t\t\t\tcout<<1<<" ";\r\n\t\t\t}\r\n\t\t\telse if(i==idx1){\r\n\t\t\t\tcout<<2<<" ";\r\n\t\t\t}\r\n\t\t\telse{\r\n\t\t\t\tcout<<3<<" ";\r\n\t\t\t}\r\n\t\t}\r\n\t\tcout<<endl;\r\n\t}\r\n}\r\n'
},
"CodeForces": {
"view_link": "http://www.codeforces.com/contest/454/submission/7375767",
"submission": '#include<stdio.h>\nint main()\n{\n\tint n,i,j,k;\n\tscanf("%d",&n);\n\tint h=n/2+1;\n\tfor(i=0;i<h;i++)\n\t{\n\t\tfor(k=0;k<n/2-i;k++)\n\t\t\tprintf("*");\n\t\tfor(j=0;j<2*i+1;j++)\n\t\t\tprintf("D");\n\t\tfor(j=n/2+i+1;j<n;j++)\n\t\t\tprintf("*");\n\t\tprintf("\\n");\n\t}\n\tfor(i=0;i<n/2;i++)\n\t{\n\t\tfor(k=0;k<=i;k++)\n\t\t printf("*");\n\t\tfor(j=n-2*i;j>=3;j--)\n\t\t\tprintf("D");\n\t\tfor(j=0;j<=i;j++)\n\t\t\tprintf("*");\n\t\tprintf("\\n");\n\t}\n\treturn 0;\n}\n'
},
"AtCoder": {
"view_link": "https://atcoder.jp/contests/agc039/submissions/7869333",
"submission": "/**\r\n * author: tourist\r\n * created: 05.10.2019 16:12:28 \r\n**/\r\n#include <bits/stdc++.h>\r\n\r\nusing namespace std;\r\n\r\nint main() {\r\n ios::sync_with_stdio(false);\r\n cin.tie(0);\r\n int n;\r\n cin >> n;\r\n n *= 2;\r\n vector<string> g(n);\r\n for (int i = 0; i < n; i++) {\r\n cin >> g[i];\r\n }\r\n vector<vector<vector<long long>>> dp(2 * n, vector<vector<long long>>(2 * n, vector<long long>(2 * n)));\r\n for (int i = n - 1; i >= 1; i--) {\r\n for (int j = i; j < n; j++) {\r\n for (int k = j; k < n; k++) {\r\n if (i == j && j == k) {\r\n dp[i][j][k] = 1;\r\n continue;\r\n }\r\n if (i == j || j == k) {\r\n dp[i][j][k] = 0;\r\n continue;\r\n }\r\n dp[i][j][k] = 0;\r\n for (int x = i; x < j; x++) {\r\n for (int y = j + 1; y <= k; y++) {\r\n for (int u = i; u <= x; u++) {\r\n for (int v = y; v <= k; v++) {\r\n if (g[u][v] == '1') {\r\n dp[i][j][k] += dp[i][u][x] * dp[y][v][k] * dp[x + 1][j][y - 1];\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n long long ans = 0;\r\n for (int j = 1; j < n; j++) {\r\n if (g[0][j] == '1') {\r\n ans += dp[1][j][n - 1];\r\n }\r\n }\r\n cout << ans << '\\n';\r\n return 0;\r\n}\r\n"
}
}
for site in sites_with_download_functionality:
P = self.profile_site[site]
if P.is_website_down():
# Don't test for websites which are acked to be down
continue
submission_content = P.download_submission(assertion_hash[site]["view_link"])
if submission_content != assertion_hash[site]["submission"]:
raise RuntimeError(site + " download submission failed")
# --------------------------------------------------------------------------
def test_rating_graph(self):
sites_with_rating_graph_functionality = ["CodeChef", "CodeForces", "HackerRank", "HackerEarth"]
handles = {
"CodeChef": "tryingtocode",
"CodeForces": "raj454raj",
"HackerRank": "tryingtocode",
"HackerEarth": "karanaggarwal",
"AtCoder": "imanudeep111"
}
expected_list = {
"CodeChef": [{'data': {'2015-06-15 15:00:00': {'url': 'https://www.codechef.com/JUNE15', 'rating': '1605', 'name': 'June Challenge 2015', 'rank': '1913'}, '2016-06-15 15:00:00': {'url': 'https://www.codechef.com/JUNE16', 'rating': '1641', 'name': 'June Challenge 2016', 'rank': '5083'}, '2014-07-14 15:00:00': {'url': 'https://www.codechef.com/JULY14', 'rating': '1518', 'name': 'July Challenge 2014', 'rank': '2769'}, '2015-08-17 15:00:00': {'url': 'https://www.codechef.com/AUG15', 'rating': '1704', 'name': 'August Challenge 2015', 'rank': '1244'}, '2014-01-13 15:00:00': {'url': 'https://www.codechef.com/JAN14', 'rating': '1462', 'name': 'January Challenge 2014', 'rank': '3548'}, '2014-12-15 17:00:00': {'url': 'https://www.codechef.com/DEC14', 'rating': '1609', 'name': 'December Challenge 2014', 'rank': '2218'}, '2015-01-12 15:00:00': {'url': 'https://www.codechef.com/JAN15', 'rating': '1617', 'name': 'January Challenge 2015', 'rank': '3105'}, '2015-09-14 15:00:00': {'url': 'https://www.codechef.com/SEPT15', 'rating': '1829', 'name': 'September Challenge 2015', 'rank': '1417'}, '2014-11-17 15:00:00': {'url': 'https://www.codechef.com/NOV14', 'rating': '1717', 'name': 'November Challenge 2014', 'rank': '1751'}, '2015-03-16 15:00:00': {'url': 'https://www.codechef.com/MARCH15', 'rating': '1553', 'name': 'March Challenge 2015', 'rank': '2489'}, '2014-06-16 15:00:00': {'url': 'https://www.codechef.com/JUNE14', 'rating': '1455', 'name': 'June Challenge 2014', 'rank': '4382'}, '2014-02-17 15:00:00': {'url': 'https://www.codechef.com/FEB14', 'rating': '1509', 'name': 'February Challenge 2014', 'rank': '2007'}, '2015-05-18 15:00:00': {'url': 'https://www.codechef.com/MAY15', 'rating': '1519', 'name': 'May Challenge 2015', 'rank': '2946'}, '2015-07-13 15:00:00': {'url': 'https://www.codechef.com/JULY15', 'rating': '1635', 'name': 'July Challenge 2015', 'rank': '1554'}, '2014-08-11 15:00:00': {'url': 'https://www.codechef.com/AUG14', 'rating': '1633', 'name': 'August Challenge 2014', 'rank': '1293'}, '2014-10-13 15:00:00': {'url': 'https://www.codechef.com/OCT14', 'rating': '1730', 'name': 'October Challenge 2014', 'rank': '900'}}, 'title': 'CodeChef Long'}, {'data': {'2015-09-21 00:00:00': {'url': 'https://www.codechef.com/COOK62', 'rating': '1807', 'name': 'September Mega Cook-Off 2015', 'rank': '751'}, '2015-08-24 00:50:00': {'url': 'https://www.codechef.com/COOK61', 'rating': '1881', 'name': 'August Cook-Off 2015', 'rank': '221'}}, 'title': 'CodeChef Cook-off'}, {'data': {}, 'title': 'CodeChef Lunchtime'}],
"CodeForces": [{'data': {'2015-09-28 14:30:00': {'rating': '1295', 'name': u'Codeforces Round #322 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/581', 'rank': 1836, 'ratingChange': -84}, '2014-09-28 21:05:00': {'rating': '1279', 'name': u'Codeforces Round #270', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/472', 'rank': 3520, 'ratingChange': -124}, '2015-09-10 22:00:00': {'rating': '1422', 'name': u'Codeforces Round #319 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/577', 'rank': 940, 'ratingChange': 134}, '2016-01-14 22:05:00': {'rating': '1228', 'name': u'Codeforces Round #339 (Div. 2)', 'solvedCount': 0, 'url': 'http://www.codeforces.com/contest/614', 'rank': 1929, 'ratingChange': -81}, '2016-08-20 18:35:00': {'rating': '1298', 'name': u'Codeforces Round #368 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/707', 'rank': 1919, 'ratingChange': 82}, '2015-10-31 22:00:00': {'rating': '1284', 'name': u'Codeforces Round #328 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/592', 'rank': 2075, 'ratingChange': 11}, '2015-10-25 14:30:00': {'rating': '1273', 'name': u'Codeforces Round #327 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/591', 'rank': 2259, 'ratingChange': -25}, '2015-09-22 22:00:00': {'rating': '1379', 'name': u'Codeforces Round #321 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/580', 'rank': 2018, 'ratingChange': -43}, '2014-08-08 21:00:00': {'rating': '1403', 'name': u'Codeforces Round #260 (Div. 2)', 'solvedCount': 0, 'url': 'http://www.codeforces.com/contest/456', 'rank': 2152, 'ratingChange': -97}, '2015-12-01 21:05:00': {'rating': '1351', 'name': u'Codeforces Round #334 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/604', 'rank': 1079, 'ratingChange': 67}, '2016-08-29 17:35:00': {'rating': '1309', 'name': u'Codeforces Round #369 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/711', 'rank': 2332, 'ratingChange': 11}, '2015-12-09 21:35:00': {'rating': '1309', 'name': u'Codeforces Round #335 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/606', 'rank': 2249, 'ratingChange': -42}, '2016-08-11 22:05:00': {'rating': '1216', 'name': u'Codeforces Round #367 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/706', 'rank': 2989, 'ratingChange': -12}, '2015-08-29 22:00:00': {'rating': '1288', 'name': u'Codeforces Round #318 [RussianCodeCup Thanks-Round] (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/574', 'rank': 2009, 'ratingChange': -70}, '2015-10-03 22:15:00': {'rating': '1285', 'name': u'Codeforces Round #323 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/583', 'rank': 2912, 'ratingChange': -10}, '2015-10-06 22:00:00': {'rating': '1298', 'name': u'Codeforces Round #324 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/584', 'rank': 2062, 'ratingChange': 13}, '2014-10-06 21:00:00': {'rating': '1227', 'name': u'Codeforces Round #271 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/474', 'rank': 1654, 'ratingChange': -52}, '2015-08-22 22:00:00': {'rating': '1358', 'name': u'Codeforces Round #317 [AimFund Thanks-Round] (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/572', 'rank': 1114, 'ratingChange': 131}, '2016-09-23 18:35:00': {'rating': '1377', 'name': u'Codeforces Round #373 (Div. 
2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/719', 'rank': 1593, 'ratingChange': 68}}, 'title': 'Codeforces'}],
"HackerRank": [{'data': {'2014-07-21 21:30:00': {'url': u'https://www.hackerrank.com/w7', 'rating': '1554.46', 'name': u'Weekly Challenges - Week 7', 'rank': 499}, '2015-10-30 21:30:00': {'url': u'https://www.hackerrank.com/codestorm', 'rating': '1276.05', 'name': u'CodeStorm 2015', 'rank': 3743}, '2015-08-02 21:30:00': {'url': u'https://www.hackerrank.com/countercode', 'rating': '1287.0', 'name': u'CounterCode 2015', 'rank': 3605}, '2014-08-11 21:30:00': {'url': u'https://www.hackerrank.com/w8', 'rating': '1276.88', 'name': u'Weekly Challenges - Week 8', 'rank': 1204}}, 'title': u'HackerRank - Algorithms'}],
"HackerEarth": [{'data': {'2016-05-21 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/may-circuits/', 'rating': 1493, 'name': 'May Circuits', 'rank': 714}, '2017-10-21 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/october-circuits-17/', 'rating': 1491, 'name': "October Circuits '17", 'rank': 1225}, '2017-09-22 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/september-circuits-17/', 'rating': 1569, 'name': "September Circuits '17", 'rank': 291}, '2020-05-16 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/may-circuits-20/', 'rating': 1415, 'name': "May Circuits '20", 'rank': 647}, '2018-03-17 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/march-circuits-18/', 'rating': 1461, 'name': "March Circuits '18", 'rank': 523}, '2019-01-18 09:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/january-circuits-19/', 'rating': 1337, 'name': "January Circuits '19", 'rank': 3420}, '2017-07-28 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/july-circuits-17/', 'rating': 1462, 'name': "July Circuits '17", 'rank': 1326}}, 'title': 'HackerEarth'}],
"AtCoder": [{'data': {'2020-01-10 19:10:00': {'url': 'https://atcoder.jp/contests/abc150', 'rating': '-', 'ratingChange': '-', 'name': u'AtCoder Beginner Contest 150', 'rank': u'2640'}, '2020-03-14 19:10:00': {'url': 'https://atcoder.jp/contests/panasonic2020', 'rating': '33', 'ratingChange': '+31', 'name': u'Panasonic Programming Contest 2020', 'rank': u'3897'}, '2020-05-02 19:20:00': {'url': 'https://atcoder.jp/contests/abc165', 'rating': '192', 'ratingChange': '+51', 'name': u'AtCoder Beginner Contest 165', 'rank': u'6343'}, '2020-03-01 19:10:00': {'url': 'https://atcoder.jp/contests/abc157', 'rating': '2', 'ratingChange': '-', 'name': u'AtCoder Beginner Contest 157', 'rank': u'6327'}, '2020-04-26 19:10:00': {'url': 'https://atcoder.jp/contests/abc164', 'rating': '141', 'ratingChange': '+108', 'name': u'AtCoder Beginner Contest 164', 'rank': u'3184'}, '2020-04-19 19:10:00': {'url': 'https://atcoder.jp/contests/abc163', 'rating': '-', 'ratingChange': '-', 'name': u'AtCoder Beginner Contest 163', 'rank': u'4042'}}, 'title': 'AtCoder'}]
}
result = {}
for site in sites_with_rating_graph_functionality:
P = self.profile_site[site]
if P.is_website_down():
# Don't test for websites which are acked to be down
continue
get_rating_func = P.rating_graph_data
res = get_rating_func(handles[site])
if expected_list[site] != res:
raise RuntimeError("Rating graph dict does not match for " + site)
# --------------------------------------------------------------------------
def test_submissions(self):
handles = {
"CodeChef": "tryingtocode",
"CodeForces": "raj454raj",
"HackerRank": "tryingtocode",
"HackerEarth": "raj454raj",
"Spoj": "raj454raj",
"UVa": "raj454raj",
"Timus": "222187",
"AtCoder": "raj454raj"
}
expected_result = {
"CodeChef": [(u'2013-12-02 18:52:13', u'https://www.codechef.com/PRACTICE/problems/TEST', u'TEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017060'), (u'2013-12-02 19:02:07', u'https://www.codechef.com/PRACTICE/problems/TEST', u'TEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017069'), (u'2013-12-02 19:13:59', u'https://www.codechef.com/PRACTICE/problems/HS08TEST', u'HS08TEST', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3017092'), (u'2013-12-02 19:16:51', u'https://www.codechef.com/PRACTICE/problems/HS08TEST', u'HS08TEST', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3017097'), (u'2013-12-02 19:20:42', u'https://www.codechef.com/PRACTICE/problems/HS08TEST', u'HS08TEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017102'), (u'2013-12-02 19:31:26', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017121'), (u'2013-12-03 01:15:08', u'https://www.codechef.com/PRACTICE/problems/FCTRL', u'FCTRL', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3017614'), (u'2013-12-03 01:15:44', u'https://www.codechef.com/PRACTICE/problems/FCTRL', u'FCTRL', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3017615'), (u'2013-12-03 01:18:21', u'https://www.codechef.com/PRACTICE/problems/FCTRL', u'FCTRL', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3017619'), (u'2013-12-03 01:23:05', u'https://www.codechef.com/PRACTICE/problems/FCTRL', u'FCTRL', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017629'), (u'2013-12-03 01:33:10', u'https://www.codechef.com/PRACTICE/problems/FCTRL2', u'FCTRL2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3017639'), (u'2013-12-06 13:51:02', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3023114'), (u'2013-12-06 13:59:27', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3023128'), (u'2013-12-06 14:26:23', u'https://www.codechef.com/PRACTICE/problems/NUMPATH', u'NUMPATH', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3023162'), (u'2013-12-06 14:34:44', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3023172'), (u'2013-12-06 14:40:45', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3023183'), (u'2013-12-06 14:58:49', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3023209'), (u'2013-12-06 15:22:57', u'https://www.codechef.com/PRACTICE/problems/HOLES', u'HOLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3023522'), (u'2013-12-12 15:04:32', u'https://www.codechef.com/PRACTICE/problems/NAME2', u'NAME2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3076899'), (u'2013-12-12 15:22:56', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3077003'), (u'2013-12-12 15:24:57', u'https://www.codechef.com/PRACTICE/problems/MAXCOUNT', u'MAXCOUNT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3077013'), (u'2013-12-12 17:41:44', u'https://www.codechef.com/PRACTICE/problems/DECSTR', u'DECSTR', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3077862'), (u'2013-12-12 18:04:39', 
u'https://www.codechef.com/PRACTICE/problems/DECSTR', u'DECSTR', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3078001'), (u'2013-12-12 18:53:41', u'https://www.codechef.com/PRACTICE/problems/DECSTR', u'DECSTR', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3078284'), (u'2013-12-12 19:26:47', u'https://www.codechef.com/PRACTICE/problems/DECSTR', u'DECSTR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3078484'), (u'2013-12-12 19:39:23', u'https://www.codechef.com/PRACTICE/problems/NAME2', u'NAME2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3078558'), (u'2013-12-13 15:04:16', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3083547'), (u'2013-12-13 15:09:42', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3083574'), (u'2013-12-13 15:13:40', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3083602'), (u'2013-12-13 19:30:02', u'https://www.codechef.com/PRACTICE/problems/NAME2', u'NAME2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3085115'), (u'2013-12-14 13:37:45', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3089188'), (u'2013-12-14 13:40:39', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3089199'), (u'2013-12-14 13:45:29', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3089226'), (u'2013-12-14 19:29:31', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3091091'), (u'2013-12-18 00:17:52', u'https://www.codechef.com/PRACTICE/problems/ONP', u'ONP', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108217'), (u'2013-12-18 00:29:10', u'https://www.codechef.com/PRACTICE/problems/ONP', u'ONP', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108251'), (u'2013-12-18 00:58:37', u'https://www.codechef.com/PRACTICE/problems/ONP', u'ONP', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3108323'), (u'2013-12-18 01:04:19', u'https://www.codechef.com/PRACTICE/problems/ONP', u'ONP', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3108336'), (u'2013-12-18 01:46:49', u'https://www.codechef.com/PRACTICE/problems/SUMTRIAN', u'SUMTRIAN', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3108432'), (u'2013-12-18 02:02:45', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108454'), (u'2013-12-18 02:09:53', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108466'), (u'2013-12-18 02:19:38', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108479'), (u'2013-12-18 02:36:47', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3108489'), (u'2013-12-18 02:38:40', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3108491'), (u'2013-12-18 02:40:21', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', u'AC', u'0', u'C', 
'https://www.codechef.com/viewsolution/3108493'), (u'2013-12-19 23:56:23', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113518'), (u'2013-12-19 23:58:35', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113525'), (u'2013-12-20 00:00:56', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113535'), (u'2013-12-20 02:45:48', u'https://www.codechef.com/PRACTICE/problems/FCTRL2', u'FCTRL2', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113821'), (u'2013-12-20 02:48:52', u'https://www.codechef.com/PRACTICE/problems/FCTRL2', u'FCTRL2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3113825'), (u'2013-12-20 03:10:47', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113849'), (u'2013-12-20 03:27:48', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113865'), (u'2013-12-20 03:43:53', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113877'), (u'2013-12-20 15:47:52', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3114663'), (u'2013-12-20 15:49:13', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3114664'), (u'2013-12-20 15:52:15', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3114671'), (u'2013-12-20 15:58:50', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3114683'), (u'2014-01-01 22:25:19', u'https://www.codechef.com/PRACTICE/problems/MSTICK', u'MSTICK', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3148896'), (u'2014-01-02 22:42:07', u'https://www.codechef.com/PRACTICE/problems/RESIST', u'RESIST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3150795'), (u'2014-01-02 22:54:14', u'https://www.codechef.com/PRACTICE/problems/RESIST', u'RESIST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3150836'), (u'2014-01-02 22:56:42', u'https://www.codechef.com/PRACTICE/problems/RESIST', u'RESIST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3150842'), (u'2014-01-02 22:58:50', u'https://www.codechef.com/PRACTICE/problems/RESIST', u'RESIST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3150846'), (u'2014-01-02 23:18:24', u'https://www.codechef.com/PRACTICE/problems/MSTICK', u'MSTICK', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3150913'), (u'2014-01-05 16:58:47', u'https://www.codechef.com/PRACTICE/problems/TWTCLOSE', u'TWTCLOSE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3188137'), (u'2014-01-06 21:24:27', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3200011'), (u'2014-01-06 21:29:23', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3200056'), (u'2014-01-06 21:58:37', u'https://www.codechef.com/PRACTICE/problems/FLIPCOIN', u'FLIPCOIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3200313'), (u'2014-01-06 22:50:32', 
u'https://www.codechef.com/PRACTICE/problems/FLIPCOIN', u'FLIPCOIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3200883'), (u'2014-01-07 15:19:35', u'https://www.codechef.com/PRACTICE/problems/LEVY', u'LEVY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3205638'), (u'2014-01-07 15:23:13', u'https://www.codechef.com/PRACTICE/problems/LEVY', u'LEVY', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3205664'), (u'2014-01-07 15:38:53', u'https://www.codechef.com/PRACTICE/problems/LEVY', u'LEVY', 'CE', u'0', u'C++ 4.3.2', 'https://www.codechef.com/viewsolution/3205784'), (u'2014-01-08 17:18:58', u'https://www.codechef.com/JAN14/problems/ERROR', u'ERROR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3215076'), (u'2014-01-08 17:32:16', u'https://www.codechef.com/JAN14/problems/ERROR', u'ERROR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3215197'), (u'2014-01-08 17:34:26', u'https://www.codechef.com/JAN14/problems/PLZLYKME', u'PLZLYKME', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3215217'), (u'2014-01-08 17:50:31', u'https://www.codechef.com/JAN14/problems/PLZLYKME', u'PLZLYKME', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3215325'), (u'2014-01-08 23:01:50', u'https://www.codechef.com/JAN14/problems/FGFS', u'FGFS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3217930'), (u'2014-01-09 18:42:17', u'https://www.codechef.com/PRACTICE/problems/TSORT', u'TSORT', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3223261'), (u'2014-01-09 18:49:03', u'https://www.codechef.com/PRACTICE/problems/TSORT', u'TSORT', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3223313'), (u'2014-01-09 18:57:00', u'https://www.codechef.com/PRACTICE/problems/TSORT', u'TSORT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3223384'), (u'2014-01-09 19:26:01', u'https://www.codechef.com/PRACTICE/problems/PERMUT2', u'PERMUT2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3223635'), (u'2014-01-09 19:28:32', u'https://www.codechef.com/PRACTICE/problems/PERMUT2', u'PERMUT2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3223652'), (u'2014-01-09 19:47:04', u'https://www.codechef.com/PRACTICE/problems/TLG', u'TLG', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3223799'), (u'2014-01-09 20:32:49', u'https://www.codechef.com/PRACTICE/problems/TLG', u'TLG', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3224190'), (u'2014-01-09 20:35:41', u'https://www.codechef.com/PRACTICE/problems/TLG', u'TLG', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3224222'), (u'2014-01-09 23:53:53', u'https://www.codechef.com/PRACTICE/problems/TLG', u'TLG', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3225832'), (u'2014-01-10 00:14:05', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3226019'), (u'2014-01-10 23:16:53', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3231942'), (u'2014-01-10 23:25:05', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3232000'), (u'2014-01-10 23:32:09', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3232061'), (u'2014-01-10 23:37:08', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 
'https://www.codechef.com/viewsolution/3232115'), (u'2014-01-10 23:46:15', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3232189'), (u'2014-01-12 16:08:22', u'https://www.codechef.com/PRACTICE/problems/D1', u'D1', u'TLE', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/3242893'), (u'2014-01-12 16:41:33', u'https://www.codechef.com/PRACTICE/problems/ASTRGAME', u'ASTRGAME', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3243146'), (u'2014-01-12 16:43:25', u'https://www.codechef.com/PRACTICE/problems/ASTRGAME', u'ASTRGAME', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3243158'), (u'2014-01-12 19:38:52', u'https://www.codechef.com/PRACTICE/problems/KPRIME', u'KPRIME', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3244328'), (u'2014-01-12 20:04:49', u'https://www.codechef.com/PRACTICE/problems/KPRIME', u'KPRIME', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3244480'), (u'2014-01-13 10:34:13', u'https://www.codechef.com/PRACTICE/problems/BUY1GET1', u'BUY1GET1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3248580'), (u'2014-01-13 10:41:26', u'https://www.codechef.com/PRACTICE/problems/BUY1GET1', u'BUY1GET1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3248611'), (u'2014-01-13 10:52:51', u'https://www.codechef.com/PRACTICE/problems/BUY1GET1', u'BUY1GET1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3248674'), (u'2014-01-13 11:53:09', u'https://www.codechef.com/PRACTICE/problems/HORSES', u'HORSES', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3249017'), (u'2014-01-13 12:01:58', u'https://www.codechef.com/PRACTICE/problems/HORSES', u'HORSES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3249080'), (u'2014-01-13 12:13:20', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3249157'), (u'2014-01-13 12:30:50', u'https://www.codechef.com/PRACTICE/problems/BUY1GET1', u'BUY1GET1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3249302'), (u'2014-01-13 13:14:27', u'https://www.codechef.com/PRACTICE/problems/TWSTR', u'TWSTR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3249663'), (u'2014-01-13 20:23:37', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3251908'), (u'2014-01-13 21:07:57', u'https://www.codechef.com/PRACTICE/problems/DIGROT', u'DIGROT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252038'), (u'2014-01-13 21:46:16', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252146'), (u'2014-01-13 22:06:21', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252214'), (u'2014-01-13 22:13:24', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252242'), (u'2014-01-13 22:15:40', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252253'), (u'2014-01-13 22:21:15', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3252279'), (u'2014-01-14 00:21:02', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 
'https://www.codechef.com/viewsolution/3252851'), (u'2014-01-14 01:05:42', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3253032'), (u'2014-01-14 01:08:04', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3253049'), (u'2014-01-14 01:11:18', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3253069'), (u'2014-01-14 14:06:41', u'https://www.codechef.com/PRACTICE/problems/PPXOR', u'PPXOR', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3254264'), (u'2014-01-14 19:12:48', u'https://www.codechef.com/PRACTICE/problems/CHEFTEAM', u'CHEFTEAM', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3255054'), (u'2014-01-14 19:36:22', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3255134'), (u'2014-01-14 21:11:50', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3255392'), (u'2014-01-14 21:41:46', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3255474'), (u'2014-01-16 18:39:17', u'https://www.codechef.com/PRACTICE/problems/TACHSTCK', u'TACHSTCK', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3260781'), (u'2014-01-16 19:08:18', u'https://www.codechef.com/PRACTICE/problems/TACHSTCK', u'TACHSTCK', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3260885'), (u'2014-01-16 19:36:52', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3261016'), (u'2014-01-18 18:40:00', u'https://www.codechef.com/PRACTICE/problems/RRMATRIX', u'RRMATRIX', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3266986'), (u'2014-01-18 19:16:39', u'https://www.codechef.com/PRACTICE/problems/GRANAMA', u'GRANAMA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3267092'), (u'2014-01-18 19:25:40', u'https://www.codechef.com/PRACTICE/problems/GRANAMA', u'GRANAMA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3267123'), (u'2014-01-18 20:29:27', u'https://www.codechef.com/PRACTICE/problems/GRANAMA', u'GRANAMA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3267298'), (u'2014-01-18 20:35:24', u'https://www.codechef.com/PRACTICE/problems/GRANAMA', u'GRANAMA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3267306'), (u'2014-01-23 10:03:37', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3283319'), (u'2014-01-23 10:04:57', u'https://www.codechef.com/PRACTICE/problems/JOHNY', u'JOHNY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3283321'), (u'2014-01-23 10:06:21', u'https://www.codechef.com/PRACTICE/problems/RIGHTRI', u'RIGHTRI', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3283322'), (u'2014-01-23 10:07:29', u'https://www.codechef.com/PRACTICE/problems/RIGHTRI', u'RIGHTRI', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3283325'), (u'2014-01-23 10:19:28', u'https://www.codechef.com/PRACTICE/problems/RIGHTRI', u'RIGHTRI', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3283340'), (u'2014-01-23 10:22:56', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', u'WA', u'0', u'C', 
'https://www.codechef.com/viewsolution/3283347'), (u'2014-01-23 10:27:39', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3283353'), (u'2014-01-23 10:30:21', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3283357'), (u'2014-01-23 10:42:45', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3283378'), (u'2014-01-23 10:50:27', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3283389'), (u'2014-01-23 10:58:07', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3283393'), (u'2014-02-07 13:56:26', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3339806'), (u'2014-02-07 14:04:43', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3339834'), (u'2014-02-07 14:07:56', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3339845'), (u'2014-02-07 14:12:05', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3339853'), (u'2014-02-07 14:43:35', u'https://www.codechef.com/PRACTICE/problems/CIELRCPT', u'CIELRCPT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3339922'), (u'2014-02-08 18:56:14', u'https://www.codechef.com/FEB14/problems/LCPESY', u'LCPESY', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3359518'), (u'2014-02-08 19:12:55', u'https://www.codechef.com/FEB14/problems/LCPESY', u'LCPESY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3359744'), (u'2014-02-08 19:39:00', u'https://www.codechef.com/FEB14/problems/SUBMIN', u'SUBMIN', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3360100'), (u'2014-02-11 15:14:10', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387212'), (u'2014-02-11 15:20:54', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3387257'), (u'2014-02-11 15:30:00', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387312'), (u'2014-02-11 16:35:28', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387693'), (u'2014-02-11 16:51:49', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387801'), (u'2014-02-11 16:55:47', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387826'), (u'2014-02-13 15:27:31', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3401986'), (u'2014-02-13 16:24:34', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3402304'), (u'2014-02-13 16:52:47', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3402476'), (u'2014-02-22 
21:12:12', u'https://www.codechef.com/CDMT2014/problems/MIRRORS', u'MIRRORS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3455971'), (u'2014-02-22 21:14:12', u'https://www.codechef.com/CDMT2014/problems/MIRRORS', u'MIRRORS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3456012'), (u'2014-02-22 21:21:11', u'https://www.codechef.com/CDMT2014/problems/MIRRORS', u'MIRRORS', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3456160'), (u'2014-02-23 00:04:09', u'https://www.codechef.com/CDMT2014/problems/TILE', u'TILE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3460835'), (u'2014-02-23 00:07:15', u'https://www.codechef.com/CDMT2014/problems/TILE0', u'TILE0', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3460874'), (u'2014-02-23 00:23:39', u'https://www.codechef.com/CDNCTR14/problems/QUEST', u'QUEST', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3461126'), (u'2014-02-23 00:35:48', u'https://www.codechef.com/CDNCTR14/problems/QUEST', u'QUEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3461310'), (u'2014-02-23 01:13:51', u'https://www.codechef.com/CDNCTR14/problems/ARRAY', u'ARRAY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3461817'), (u'2014-02-23 01:53:29', u'https://www.codechef.com/CDNCTR14/problems/GOT', u'GOT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3462204'), (u'2014-02-23 02:37:48', u'https://www.codechef.com/CDNCTR14/problems/JADEJA', u'JADEJA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3462594'), (u'2014-02-23 02:42:04', u'https://www.codechef.com/CDNCTR14/problems/JADEJA', u'JADEJA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3462619'), (u'2014-02-26 23:33:32', u'https://www.codechef.com/PRACTICE/problems/WCOUNT', u'WCOUNT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3477325'), (u'2014-03-04 16:51:10', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3497768'), (u'2014-03-04 17:08:05', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3497791'), (u'2014-03-04 17:11:05', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3497796'), (u'2014-05-25 02:14:27', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/3938402'), (u'2014-05-25 02:16:35', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3938403'), (u'2014-05-25 02:19:23', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3938407'), (u'2014-05-25 02:28:54', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3938415'), (u'2014-06-08 15:50:16', u'https://www.codechef.com/JUNE14/problems/CHEFZOT', u'CHEFZOT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4019362'), (u'2014-06-08 15:52:51', u'https://www.codechef.com/JUNE14/problems/CHEFZOT', u'CHEFZOT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4019398'), (u'2014-06-08 15:57:49', u'https://www.codechef.com/JUNE14/problems/CHEFZOT', u'CHEFZOT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4019468'), (u'2014-06-08 16:11:10', 
u'https://www.codechef.com/JUNE14/problems/GUESS', u'GUESS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4019668'), (u'2014-06-08 16:13:49', u'https://www.codechef.com/JUNE14/problems/GUESS', u'GUESS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4019713'), (u'2014-06-08 17:28:24', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4020749'), (u'2014-06-09 20:48:17', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4036865'), (u'2014-06-09 20:51:39', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4036902'), (u'2014-06-09 20:56:28', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4036949'), (u'2014-06-11 07:33:23', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053523'), (u'2014-06-11 07:54:41', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053566'), (u'2014-06-11 07:57:12', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053571'), (u'2014-06-11 07:59:02', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053576'), (u'2014-06-11 08:04:58', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053599'), (u'2014-06-11 08:08:47', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4053611'), (u'2014-06-11 08:20:27', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4053646'), (u'2014-06-11 08:21:52', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/4053653'), (u'2014-06-11 08:22:42', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4053659'), (u'2014-06-11 08:35:28', u'https://www.codechef.com/PRACTICE/problems/MAXDIFF', u'MAXDIFF', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053715'), (u'2014-06-11 08:41:38', u'https://www.codechef.com/PRACTICE/problems/MAXDIFF', u'MAXDIFF', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4053747'), (u'2014-06-11 09:20:41', u'https://www.codechef.com/PRACTICE/problems/STONES', u'STONES', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053906'), (u'2014-06-11 09:23:05', u'https://www.codechef.com/PRACTICE/problems/STONES', u'STONES', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053914'), (u'2014-06-11 09:28:01', u'https://www.codechef.com/PRACTICE/problems/STONES', u'STONES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4053935'), (u'2014-06-11 09:46:27', u'https://www.codechef.com/PRACTICE/problems/SPCANDY', u'SPCANDY', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4054028'), (u'2014-06-11 09:49:08', u'https://www.codechef.com/PRACTICE/problems/SPCANDY', u'SPCANDY', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4054050'), (u'2014-06-11 09:50:14', 
u'https://www.codechef.com/PRACTICE/problems/SPCANDY', u'SPCANDY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4054056'), (u'2014-06-11 10:13:17', u'https://www.codechef.com/PRACTICE/problems/DIVIDING', u'DIVIDING', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4054186'), (u'2014-06-11 10:17:20', u'https://www.codechef.com/PRACTICE/problems/DIVIDING', u'DIVIDING', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4054200'), (u'2014-06-11 10:21:20', u'https://www.codechef.com/PRACTICE/problems/DIVIDING', u'DIVIDING', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4054222'), (u'2014-06-11 10:46:57', u'https://www.codechef.com/PRACTICE/problems/APPROX', u'APPROX', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4054403'), (u'2014-06-11 11:11:10', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', 'CE', u'0', u'ADA', 'https://www.codechef.com/viewsolution/4054561'), (u'2014-06-11 11:11:59', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4054571'), (u'2014-06-11 16:59:23', u'https://www.codechef.com/PRACTICE/problems/AMSGAME1', u'AMSGAME1', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4057988'), (u'2014-06-11 17:05:35', u'https://www.codechef.com/PRACTICE/problems/AMSGAME1', u'AMSGAME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4058067'), (u'2014-06-29 01:44:47', u'https://www.codechef.com/PRACTICE/problems/TREEROOT', u'TREEROOT', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4152751'), (u'2014-06-29 02:02:26', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', u'TLE', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/4152798'), (u'2014-07-04 20:23:15', u'https://www.codechef.com/JULY14/problems/CSUB', u'CSUB', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4188769'), (u'2014-07-04 20:35:55', u'https://www.codechef.com/JULY14/problems/CSUB', u'CSUB', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4189092'), (u'2014-07-04 20:42:22', u'https://www.codechef.com/JULY14/problems/CSUB', u'CSUB', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4189260'), (u'2014-07-04 20:56:59', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4189643'), (u'2014-07-04 20:58:35', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4189684'), (u'2014-07-04 21:29:16', u'https://www.codechef.com/JULY14/problems/CSUB', u'CSUB', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4190477'), (u'2014-07-05 03:32:13', u'https://www.codechef.com/PRACTICE/problems/SPOTWO', u'SPOTWO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4198760'), (u'2014-07-05 04:31:23', u'https://www.codechef.com/PRACTICE/problems/REMISS', u'REMISS', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4199244'), (u'2014-07-05 04:48:17', u'https://www.codechef.com/PRACTICE/problems/POTATOES', u'POTATOES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4199368'), (u'2014-07-05 04:58:55', u'https://www.codechef.com/PRACTICE/problems/SDSQUARE', u'SDSQUARE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4199453'), (u'2014-07-05 05:05:28', u'https://www.codechef.com/PRACTICE/problems/SDSQUARE', u'SDSQUARE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4199504'), (u'2014-07-05 05:14:54', 
u'https://www.codechef.com/PRACTICE/problems/SDSQUARE', u'SDSQUARE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4199569'), (u'2014-07-05 05:19:30', u'https://www.codechef.com/PRACTICE/problems/SDSQUARE', u'SDSQUARE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4199592'), (u'2014-07-05 05:44:04', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4199717'), (u'2014-07-12 02:26:44', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303371'), (u'2014-07-12 03:17:04', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303603'), (u'2014-07-12 03:17:04', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303608'), (u'2014-07-12 03:17:04', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303611'), (u'2014-07-12 03:17:45', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303624'), (u'2014-07-12 03:22:54', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4303651'), (u'2014-07-12 03:25:18', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4303661'), (u'2014-07-12 03:28:45', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4303679'), (u'2014-07-12 15:12:46', u'https://www.codechef.com/JULY14/problems/FROGV', u'FROGV', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4307292'), (u'2014-07-13 01:07:50', u'https://www.codechef.com/JULY14/problems/FROGV', u'FROGV', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4312732'), (u'2014-07-17 02:00:29', u'https://www.codechef.com/PRACTICE/problems/BINTREE', u'BINTREE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4337506'), (u'2014-07-17 02:02:30', u'https://www.codechef.com/PRACTICE/problems/BINTREE', u'BINTREE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4337509'), (u'2014-07-17 21:02:13', u'https://www.codechef.com/PRACTICE/problems/LUCKYSTR', u'LUCKYSTR', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/4339419'), (u'2014-07-17 21:03:35', u'https://www.codechef.com/PRACTICE/problems/LUCKYSTR', u'LUCKYSTR', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339420'), (u'2014-07-17 21:49:38', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339533'), (u'2014-07-17 21:54:01', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339548'), (u'2014-07-17 21:55:43', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339554'), (u'2014-07-17 21:58:37', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/4339563'), (u'2014-07-17 21:59:31', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339567'), (u'2014-07-18 00:42:33', 
u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340137'), (u'2014-07-18 01:15:31', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340237'), (u'2014-07-18 01:17:19', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340243'), (u'2014-07-18 01:21:53', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340259'), (u'2014-07-18 01:24:29', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340266'), (u'2014-07-18 01:38:21', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340317'), (u'2014-07-18 01:41:49', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340328'), (u'2014-07-18 02:11:22', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4340405'), (u'2014-07-18 02:13:00', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4340412'), (u'2014-07-18 02:15:57', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340421'), (u'2014-07-18 03:08:59', u'https://www.codechef.com/PRACTICE/problems/WSTRING', u'WSTRING', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340523'), (u'2014-07-18 03:18:59', u'https://www.codechef.com/PRACTICE/problems/WSTRING', u'WSTRING', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340535'), (u'2014-07-18 04:45:18', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/4340638'), (u'2014-07-18 04:46:15', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340641'), (u'2014-07-18 04:50:29', u'https://www.codechef.com/PRACTICE/problems/BINTREE', u'BINTREE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340644'), (u'2014-07-18 04:55:56', u'https://www.codechef.com/PRACTICE/problems/RETPO', u'RETPO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340648'), (u'2014-07-18 04:58:27', u'https://www.codechef.com/PRACTICE/problems/BINTREE', u'BINTREE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340649'), (u'2014-07-18 05:04:58', u'https://www.codechef.com/PRACTICE/problems/RRMATRIX', u'RRMATRIX', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4340655'), (u'2014-07-18 05:05:52', u'https://www.codechef.com/PRACTICE/problems/RRMATRIX', u'RRMATRIX', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340657'), (u'2014-07-21 18:05:27', u'https://www.codechef.com/PRACTICE/problems/RRCOPY', u'RRCOPY', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4362844'), (u'2014-07-21 18:24:11', u'https://www.codechef.com/PRACTICE/problems/RRCOPY', u'RRCOPY', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4362928'), (u'2014-07-21 18:25:05', u'https://www.codechef.com/PRACTICE/problems/RRCOPY', u'RRCOPY', u'AC', u'0', u'C++ 4.8.1', 
'https://www.codechef.com/viewsolution/4362933'), (u'2014-07-21 18:45:33', u'https://www.codechef.com/PRACTICE/problems/RRSUM', u'RRSUM', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4363040'), (u'2014-07-21 18:49:18', u'https://www.codechef.com/PRACTICE/problems/RRSUM', u'RRSUM', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4363058'), (u'2014-07-21 18:50:51', u'https://www.codechef.com/PRACTICE/problems/RRSUM', u'RRSUM', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4363066'), (u'2014-07-23 00:10:48', u'https://www.codechef.com/PRACTICE/problems/RECTQUER', u'RECTQUER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4367826'), (u'2014-07-23 01:00:49', u'https://www.codechef.com/PRACTICE/problems/RECTQUER', u'RECTQUER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4368006'), (u'2014-07-23 01:03:50', u'https://www.codechef.com/PRACTICE/problems/RECTQUER', u'RECTQUER', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4368015'), (u'2014-07-23 01:32:36', u'https://www.codechef.com/PRACTICE/problems/RECTQUER', u'RECTQUER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4368102'), (u'2014-07-26 00:16:20', u'https://www.codechef.com/PRACTICE/problems/DOUBLE', u'DOUBLE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4377912'), (u'2014-07-26 00:18:23', u'https://www.codechef.com/PRACTICE/problems/DOUBLE', u'DOUBLE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4377917'), (u'2014-07-26 00:44:31', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4377999'), (u'2014-07-27 02:46:17', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'TLE', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/4382136'), (u'2014-07-27 02:52:14', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4382143'), (u'2014-07-27 02:55:35', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382152'), (u'2014-07-27 02:56:53', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382155'), (u'2014-07-27 02:58:43', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382159'), (u'2014-07-27 02:59:30', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382160'), (u'2014-07-27 03:01:22', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382164'), (u'2014-07-27 03:13:49', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4382175'), (u'2014-07-31 22:31:14', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4410407'), (u'2014-07-31 22:32:41', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4410421'), (u'2014-07-31 22:36:40', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4410455'), (u'2014-07-31 22:37:34', 
u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4410461'), (u'2014-08-01 16:03:33', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4418584'), (u'2014-08-01 16:10:06', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4418854'), (u'2014-08-01 16:16:14', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4419068'), (u'2014-08-01 16:28:32', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4419429'), (u'2014-08-01 21:14:20', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4427549'), (u'2014-08-01 22:22:40', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4428946'), (u'2014-08-01 22:24:47', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4428994'), (u'2014-08-01 22:25:57', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4429019'), (u'2014-08-01 22:26:55', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4429047'), (u'2014-08-02 21:41:49', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4448115'), (u'2014-08-02 21:43:44', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4448136'), (u'2014-08-02 21:51:09', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4448237'), (u'2014-08-02 21:58:27', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4448341'), (u'2014-08-02 23:04:07', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4449507'), (u'2014-08-06 14:47:12', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4494226'), (u'2014-08-07 22:22:52', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4508709'), (u'2014-08-07 22:57:57', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4509134'), (u'2014-08-07 23:22:17', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4509429'), (u'2014-08-07 23:31:23', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4509535'), (u'2014-08-10 02:57:09', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4530125'), (u'2014-08-10 03:03:19', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4530154'), (u'2014-08-10 03:14:11', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'WA', u'0', u'C', 
'https://www.codechef.com/viewsolution/4530189'), (u'2014-08-10 03:17:14', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4530195'), (u'2014-08-10 14:56:08', u'https://www.codechef.com/AUG14/problems/REVERSE', u'REVERSE', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4533200'), (u'2014-08-10 15:14:30', u'https://www.codechef.com/AUG14/problems/REVERSE', u'REVERSE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4533367'), (u'2014-08-10 17:29:15', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4535341'), (u'2014-08-10 17:30:22', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4535393'), (u'2014-08-10 17:33:44', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4535586'), (u'2014-08-10 17:34:51', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4535650'), (u'2014-08-10 17:37:42', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4535810'), (u'2014-08-10 17:39:14', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4535898'), (u'2014-08-10 17:40:19', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4535965'), (u'2014-08-10 17:47:23', u'https://www.codechef.com/PRCNSR14/problems/HLPSUG', u'HLPSUG', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4536336'), (u'2014-08-10 18:03:45', u'https://www.codechef.com/PRCNSR14/problems/HPYBDAY', u'HPYBDAY', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4537126'), (u'2014-08-10 18:25:49', u'https://www.codechef.com/PRCNSR14/problems/HPYBDAY', u'HPYBDAY', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4538160'), (u'2014-08-10 18:27:37', u'https://www.codechef.com/PRCNSR14/problems/HPYBDAY', u'HPYBDAY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4538244'), (u'2014-08-10 19:11:26', u'https://www.codechef.com/PRCNSR14/problems/PLTGRP', u'PLTGRP', u'TLE', u'0', u'C++11', 'https://www.codechef.com/viewsolution/4539947'), (u'2014-10-03 19:51:34', u'https://www.codechef.com/OCT14/problems/CHEFGR', u'CHEFGR', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4962359'), (u'2014-10-03 19:55:30', u'https://www.codechef.com/OCT14/problems/CHEFGR', u'CHEFGR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4962494'), (u'2014-10-04 01:01:28', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4970823'), (u'2014-10-04 02:02:38', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4972114'), (u'2014-10-04 02:05:31', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4972172'), (u'2014-10-04 02:08:04', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4972219'), (u'2014-10-04 02:10:59', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'AC', u'0', u'C', 
'https://www.codechef.com/viewsolution/4972279'), (u'2014-10-05 19:11:22', u'https://www.codechef.com/OCT14/problems/FATCHEF', u'FATCHEF', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5008560'), (u'2014-10-05 19:46:59', u'https://www.codechef.com/OCT14/problems/PRPOTION', u'PRPOTION', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5009210'), (u'2014-10-05 20:09:50', u'https://www.codechef.com/OCT14/problems/PRPOTION', u'PRPOTION', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5009564'), (u'2014-10-08 01:48:44', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5046189'), (u'2014-10-08 19:42:52', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5056254'), (u'2014-10-08 20:45:51', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5057583'), (u'2014-10-08 20:47:41', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5057620'), (u'2014-10-08 20:49:47', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5057673'), (u'2014-11-07 22:42:18', u'https://www.codechef.com/NOV14/problems/DISCHAR', u'DISCHAR', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5286888'), (u'2014-11-08 15:04:37', u'https://www.codechef.com/NOV14/problems/PRPALN', u'PRPALN', 'PS', u'35', u'C', 'https://www.codechef.com/viewsolution/5300598'), (u'2014-11-08 16:15:45', u'https://www.codechef.com/NOV14/problems/PRPALN', u'PRPALN', 'PS', u'35', u'C', 'https://www.codechef.com/viewsolution/5302106'), (u'2014-11-08 16:24:02', u'https://www.codechef.com/NOV14/problems/PRPALN', u'PRPALN', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5302275'), (u'2014-11-08 16:28:35', u'https://www.codechef.com/NOV14/problems/PRPALN', u'PRPALN', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5302355'), (u'2014-11-08 17:36:31', u'https://www.codechef.com/NOV14/problems/CHEFSEG', u'CHEFSEG', 'PS', u'40', u'C', 'https://www.codechef.com/viewsolution/5303576'), (u'2014-11-08 17:49:57', u'https://www.codechef.com/NOV14/problems/CHEFSEG', u'CHEFSEG', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5303832'), (u'2014-11-08 23:45:46', u'https://www.codechef.com/NOV14/problems/RBTREE', u'RBTREE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5310161'), (u'2014-11-09 00:16:54', u'https://www.codechef.com/NOV14/problems/RBTREE', u'RBTREE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5310716'), (u'2014-11-09 00:22:33', u'https://www.codechef.com/NOV14/problems/RBTREE', u'RBTREE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5310827'), (u'2014-11-09 20:55:47', u'https://www.codechef.com/NOV14/problems/CHEFWORD', u'CHEFWORD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5322719'), (u'2014-11-09 21:00:47', u'https://www.codechef.com/NOV14/problems/CHEFWORD', u'CHEFWORD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5322778'), (u'2014-11-17 01:56:38', u'https://www.codechef.com/CDSM2014/problems/CHFMAX', u'CHFMAX', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5414098'), (u'2014-11-17 02:10:10', u'https://www.codechef.com/CDSM2014/problems/CHEFTR', u'CHEFTR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5414268'), 
(u'2014-12-06 02:22:06', u'https://www.codechef.com/DEC14/problems/CAPPLE', u'CAPPLE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5499111'), (u'2014-12-06 02:27:09', u'https://www.codechef.com/DEC14/problems/CAPPLE', u'CAPPLE', 'PS', u'52', u'C', 'https://www.codechef.com/viewsolution/5499146'), (u'2014-12-06 02:28:40', u'https://www.codechef.com/DEC14/problems/CAPPLE', u'CAPPLE', 'PS', u'52', u'C', 'https://www.codechef.com/viewsolution/5499158'), (u'2014-12-06 02:30:42', u'https://www.codechef.com/DEC14/problems/CAPPLE', u'CAPPLE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5499166'), (u'2015-01-02 15:18:34', u'https://www.codechef.com/JAN15/problems/GCDQ', u'GCDQ', 'PS', u'40', u'C', 'https://www.codechef.com/viewsolution/5679296'), (u'2015-01-02 15:20:33', u'https://www.codechef.com/JAN15/problems/GCDQ', u'GCDQ', 'PS', u'40', u'C', 'https://www.codechef.com/viewsolution/5679371'), (u'2015-01-02 15:37:03', u'https://www.codechef.com/JAN15/problems/CHEFSTON', u'CHEFSTON', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5679960'), (u'2015-01-02 16:16:32', u'https://www.codechef.com/JAN15/problems/GCDQ', u'GCDQ', 'PS', u'40', u'C', 'https://www.codechef.com/viewsolution/5681465'), (u'2015-01-03 21:23:57', u'https://www.codechef.com/JAN15/problems/GCDQ', u'GCDQ', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5722527'), (u'2015-01-03 21:36:43', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5722845'), (u'2015-01-03 21:50:45', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5723185'), (u'2015-01-06 23:28:39', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5788244'), (u'2015-01-06 23:44:15', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5788578'), (u'2015-01-06 23:55:07', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5788839'), (u'2015-01-07 00:02:10', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5788999'), (u'2015-03-07 03:45:05', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/6413565'), (u'2015-03-07 06:18:00', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/6414065'), (u'2015-03-09 22:29:34', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/6447577'), (u'2015-03-09 22:36:29', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/6447698'), (u'2015-03-09 22:38:36', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/6447737'), (u'2015-05-12 02:41:11', u'https://www.codechef.com/MAY15/problems/CHEFRP', u'CHEFRP', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/6900569'), (u'2015-05-12 03:05:02', u'https://www.codechef.com/MAY15/problems/CHEFRP', u'CHEFRP', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/6900712'), (u'2015-05-13 15:59:16', u'https://www.codechef.com/MAY15/problems/CHAPD', u'CHAPD', u'WA', u'0', u'C', 
'https://www.codechef.com/viewsolution/6917484'), (u'2015-05-26 03:53:20', u'https://www.codechef.com/PRACTICE/problems/CFRTEST', u'CFRTEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7024771'), (u'2015-05-26 04:46:33', u'https://www.codechef.com/PRACTICE/problems/REARRSTR', u'REARRSTR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7024793'), (u'2015-05-26 04:54:59', u'https://www.codechef.com/PRACTICE/problems/CHAPD', u'CHAPD', u'AC', u'100', u'C++ 4.3.2', 'https://www.codechef.com/viewsolution/7024795'), (u'2015-05-30 07:38:40', u'https://www.codechef.com/PRACTICE/problems/PINOCH1', u'PINOCH1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/7043758'), (u'2015-05-30 07:47:02', u'https://www.codechef.com/PRACTICE/problems/PINOCH1', u'PINOCH1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7044118'), (u'2015-05-30 07:49:48', u'https://www.codechef.com/PRACTICE/problems/PINOCH1', u'PINOCH1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7044235'), (u'2015-05-30 08:04:35', u'https://www.codechef.com/PRACTICE/problems/PINOCH2', u'PINOCH2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/7044809'), (u'2015-05-30 08:09:02', u'https://www.codechef.com/PRACTICE/problems/PINOCH2', u'PINOCH2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7044972'), (u'2015-05-30 08:27:56', u'https://www.codechef.com/PRACTICE/problems/RACEWARS', u'RACEWARS', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/7045779'), (u'2015-05-30 08:28:38', u'https://www.codechef.com/PRACTICE/problems/RACEWARS', u'RACEWARS', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7045826'), (u'2015-05-30 08:31:07', u'https://www.codechef.com/PRACTICE/problems/MXZERO', u'MXZERO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7045937'), (u'2015-05-30 09:22:29', u'https://www.codechef.com/PRACTICE/problems/RACEWARS', u'RACEWARS', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7046383'), (u'2015-05-30 09:34:19', u'https://www.codechef.com/PRACTICE/problems/HOBB', u'HOBB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/7046431'), (u'2015-05-30 12:48:40', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/7047261'), (u'2015-05-30 12:50:41', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/7047270'), (u'2015-06-08 22:03:40', u'https://www.codechef.com/JUNE15/problems/CBARG', u'CBARG', 'PS', u'30', u'C', 'https://www.codechef.com/viewsolution/7139999'), (u'2015-06-08 22:10:35', u'https://www.codechef.com/JUNE15/problems/CBARG', u'CBARG', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/7140098'), (u'2015-06-09 17:03:07', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7150141'), (u'2015-06-09 22:09:57', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/7153650'), (u'2015-06-09 22:11:02', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7153663'), (u'2015-06-10 17:52:59', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', 'PS', u'10', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7163596'), (u'2015-06-10 18:02:31', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', 
u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7163696'), (u'2015-06-10 23:15:58', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7168947'), (u'2015-06-10 23:27:43', u'https://www.codechef.com/PRACTICE/problems/R303', u'R303', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7169121'), (u'2015-06-11 00:01:43', u'https://www.codechef.com/PRACTICE/problems/R303', u'R303', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7169540'), (u'2015-07-04 02:09:01', u'https://www.codechef.com/JULY15/problems/CHCUBE', u'CHCUBE', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7339812'), (u'2015-07-04 02:49:18', u'https://www.codechef.com/JULY15/problems/LCKYST', u'LCKYST', 'PS', u'8', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7340359'), (u'2015-07-04 02:55:39', u'https://www.codechef.com/JULY15/problems/LCKYST', u'LCKYST', 'PS', u'30', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7340422'), (u'2015-07-04 02:57:16', u'https://www.codechef.com/JULY15/problems/LCKYST', u'LCKYST', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7340447'), (u'2015-07-04 02:59:52', u'https://www.codechef.com/JULY15/problems/LCKYST', u'LCKYST', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7340475'), (u'2015-07-06 15:49:58', u'https://www.codechef.com/JULY15/problems/EGBOBRD', u'EGBOBRD', 'PS', u'15', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7381337'), (u'2015-07-06 15:57:35', u'https://www.codechef.com/JULY15/problems/EGBOBRD', u'EGBOBRD', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7381445'), (u'2015-07-07 20:01:02', u'https://www.codechef.com/JULY15/problems/EGBOBRD', u'EGBOBRD', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7399011'), (u'2015-07-07 20:05:22', u'https://www.codechef.com/JULY15/problems/EGBOBRD', u'EGBOBRD', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7399073'), (u'2015-07-08 00:31:24', u'https://www.codechef.com/JULY15/problems/ADDMUL', u'ADDMUL', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7402380'), (u'2015-07-08 00:33:00', u'https://www.codechef.com/JULY15/problems/ADDMUL', u'ADDMUL', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7402406'), (u'2015-07-12 10:52:20', u'https://www.codechef.com/JULY15/problems/ADDMUL', u'ADDMUL', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7456100'), (u'2015-08-07 17:28:06', u'https://www.codechef.com/AUG15/problems/COOKMACH', u'COOKMACH', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7640195'), (u'2015-08-10 17:08:30', u'https://www.codechef.com/AUG15/problems/GRGUY', u'GRGUY', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7720771'), (u'2015-08-10 19:18:54', u'https://www.codechef.com/AUG15/problems/ADMAG', u'ADMAG', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/7723401'), (u'2015-08-12 06:04:32', u'https://www.codechef.com/AUG15/problems/WOUT', u'WOUT', 'PS', u'25', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7751317'), (u'2015-08-12 06:10:36', u'https://www.codechef.com/AUG15/problems/WOUT', u'WOUT', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7751339'), (u'2015-08-12 06:14:26', u'https://www.codechef.com/AUG15/problems/WOUT', u'WOUT', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7751353'), 
(u'2015-08-16 00:04:50', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7817713'), (u'2015-08-16 00:27:10', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818066'), (u'2015-08-16 00:37:49', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818234'), (u'2015-08-16 00:46:49', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818371'), (u'2015-08-16 00:52:48', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818462'), (u'2015-08-16 01:06:50', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818659'), (u'2015-08-16 01:11:04', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818713'), (u'2015-08-16 01:27:22', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818980'), (u'2015-08-23 21:36:59', u'https://www.codechef.com/COOK61/problems/CARDLINE', u'CARDLINE', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7898648'), (u'2015-08-23 21:41:10', u'https://www.codechef.com/COOK61/problems/TWOSTR', u'TWOSTR', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7898953'), (u'2015-08-23 21:58:03', u'https://www.codechef.com/COOK61/problems/XORNUBER', u'XORNUBER', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7901142'), (u'2015-08-23 22:06:19', u'https://www.codechef.com/COOK61/problems/XORNUBER', u'XORNUBER', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7902094'), (u'2015-09-10 02:09:12', u'https://www.codechef.com/SEPT15/problems/MSTEP', u'MSTEP', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8102573'), (u'2015-09-10 02:51:18', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', 'PS', u'30', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8102955'), (u'2015-09-10 20:48:37', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', 'PS', u'10', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8112817'), (u'2015-09-10 21:39:10', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', 'PS', u'40', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8113610'), (u'2015-09-12 08:08:58', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8132761'), (u'2015-09-12 08:19:28', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8132775'), (u'2015-09-12 22:15:45', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142069'), (u'2015-09-12 22:23:17', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142165'), (u'2015-09-12 22:31:16', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142257'), (u'2015-09-12 22:35:11', 
u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142304'), (u'2015-09-12 22:52:32', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142551'), (u'2015-09-12 22:58:28', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142618'), (u'2015-09-12 23:03:31', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142689'), (u'2015-09-12 23:06:41', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/8142738'), (u'2015-09-12 23:09:39', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/8142768'), (u'2015-09-20 22:05:39', u'https://www.codechef.com/COOK62/problems/FRGTNLNG', u'FRGTNLNG', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8212884'), (u'2015-09-20 22:34:31', u'https://www.codechef.com/COOK62/problems/STACKS', u'STACKS', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8215005'), (u'2015-09-20 23:10:47', u'https://www.codechef.com/COOK62/problems/STACKS', u'STACKS', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8217486'), (u'2015-09-20 23:16:22', u'https://www.codechef.com/COOK62/problems/STACKS', u'STACKS', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8217838'), (u'2015-09-21 13:34:29', u'https://www.codechef.com/PRACTICE/problems/FRGTNLNG', u'FRGTNLNG', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8222436'), (u'2015-09-25 21:08:04', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8245383'), (u'2015-09-25 21:15:54', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8245418'), (u'2015-09-25 21:30:38', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8245472'), (u'2015-09-25 21:37:47', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8245498'), (u'2015-09-27 19:14:01', u'https://www.codechef.com/PRACTICE/problems/SPALNUM', u'SPALNUM', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8266897'), (u'2015-09-27 19:19:39', u'https://www.codechef.com/PRACTICE/problems/SPALNUM', u'SPALNUM', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8267017'), (u'2015-09-27 19:23:52', u'https://www.codechef.com/PRACTICE/problems/SPALNUM', u'SPALNUM', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8267096'), (u'2015-09-29 21:53:04', u'https://www.codechef.com/PRACTICE/problems/LUCKY', u'LUCKY', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8280451'), (u'2015-10-20 09:59:02', u'https://www.codechef.com/PRACTICE/problems/ASP', u'ASP', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8594490'), (u'2015-10-20 10:00:30', u'https://www.codechef.com/PRACTICE/problems/ASP', u'ASP', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8594496'), (u'2015-12-14 23:46:01', 
u'https://www.codechef.com/PRACTICE/problems/CHEFST', u'CHEFST', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8959065'), (u'2015-12-14 23:47:46', u'https://www.codechef.com/PRACTICE/problems/CHEFST', u'CHEFST', 'PS', u'30', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8959080'), (u'2015-12-15 00:01:01', u'https://www.codechef.com/PRACTICE/problems/CHEFST', u'CHEFST', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8959153'), (u'2016-05-14 16:46:03', u'https://www.codechef.com/PRACTICE/problems/KOL1509', u'KOL1509', 'RE', u'0', u'C++14', 'https://www.codechef.com/viewsolution/10082758'), (u'2016-06-05 13:55:56', u'https://www.codechef.com/JUNE16/problems/DEVARRAY', u'DEVARRAY', 'CE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/10333457'), (u'2016-06-05 13:59:32', u'https://www.codechef.com/JUNE16/problems/DEVARRAY', u'DEVARRAY', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/10333552'), (u'2017-11-03 00:35:24', u'https://www.codechef.com/PRACTICE/problems/BLACKCOM', u'BLACKCOM', 'CE', u'0', u'C++ 6.3', 'https://www.codechef.com/viewsolution/16037895'), (u'2017-11-03 00:41:17', u'https://www.codechef.com/PRACTICE/problems/BLACKCOM', u'BLACKCOM', u'WA', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/16037935'), (u'2017-12-03 19:26:28', u'https://www.codechef.com/PRACTICE/problems/WEICOM', u'WEICOM', u'WA', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/16433447'), (u'2018-10-07 19:12:16', u'https://www.codechef.com/PRACTICE/problems/BLACKCOM', u'BLACKCOM', 'CE', u'0', u'C++14', 'https://www.codechef.com/viewsolution/20545692'), (u'2018-10-23 22:36:07', u'https://www.codechef.com/PRACTICE/problems/SURCHESS', u'SURCHESS', 'CE', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21187090'), (u'2018-11-07 12:50:39', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/21518903'), (u'2018-11-07 12:51:53', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21518924'), (u'2018-11-07 12:57:36', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21519029'), (u'2018-11-07 12:58:22', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21519043'), (u'2018-11-07 13:00:37', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21519089'), (u'2018-11-07 13:02:45', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', 'PS', u'50', u'C++14', 'https://www.codechef.com/viewsolution/21519127'), (u'2018-11-07 13:08:22', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'AC', u'100', u'C++14', 'https://www.codechef.com/viewsolution/21519248')],
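# Note (added, inferred from the entries themselves rather than any documented schema):
# each record in these per-judge lists appears to be a 7-tuple of
# (submission timestamp, problem URL, problem code, verdict, score, language, solution URL).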
"CodeForces": [('2014-06-20 14:16:29', u'http://www.codeforces.com/problemset/problem/443/A', u'Anton and Letters', 'CE', '0', u'GNU C', 'http://www.codeforces.com/contest/443/submission/6926377'), ('2014-06-20 14:17:29', u'http://www.codeforces.com/problemset/problem/443/A', u'Anton and Letters', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/443/submission/6926384'), ('2014-06-20 15:14:05', u'http://www.codeforces.com/problemset/problem/1/A', u'Theatre Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/1/submission/6926712'), ('2014-06-20 15:19:19', u'http://www.codeforces.com/problemset/problem/1/A', u'Theatre Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/1/submission/6926744'), ('2014-06-20 15:35:33', u'http://www.codeforces.com/problemset/problem/1/A', u'Theatre Square', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/1/submission/6926822'), ('2014-06-20 15:40:22', u'http://www.codeforces.com/problemset/problem/4/A', u'Watermelon', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/4/submission/6926854'), ('2014-06-20 15:42:27', u'http://www.codeforces.com/problemset/problem/4/A', u'Watermelon', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/4/submission/6926866'), ('2014-06-20 16:19:41', u'http://www.codeforces.com/problemset/problem/158/A', u'Next Round', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6927039'), ('2014-06-20 16:21:59', u'http://www.codeforces.com/problemset/problem/158/A', u'Next Round', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6927057'), ('2014-06-20 16:35:40', u'http://www.codeforces.com/problemset/problem/158/A', u'Next Round', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6927122'), ('2014-06-20 23:33:02', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6930033'), ('2014-06-20 23:46:50', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6930628'), ('2014-06-21 00:23:15', u'http://www.codeforces.com/problemset/problem/131/A', u'cAPS lOCK', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6930791'), ('2014-06-21 00:26:44', u'http://www.codeforces.com/problemset/problem/131/A', u'cAPS lOCK', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6930810'), ('2014-06-21 00:28:48', u'http://www.codeforces.com/problemset/problem/131/A', u'cAPS lOCK', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6930817'), ('2014-06-21 00:31:03', u'http://www.codeforces.com/problemset/problem/131/A', u'cAPS lOCK', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6930830'), ('2014-06-21 01:21:34', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/6931006'), ('2014-06-21 01:24:10', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/6931013'), ('2014-06-21 01:28:28', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/6931031'), ('2014-06-21 01:42:08', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/131/submission/6931087'), ('2014-06-21 01:55:26', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931137'), ('2014-06-21 01:58:07', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931156'), ('2014-06-21 01:59:17', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931160'), ('2014-06-21 02:02:30', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931170'), ('2014-06-21 02:04:53', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931181'), ('2014-06-21 02:14:48', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931213'), ('2014-06-21 20:42:21', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/6938158'), ('2014-06-28 01:04:59', u'http://www.codeforces.com/problemset/problem/268/B', u'Buttons', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/268/submission/6971649'), ('2014-06-28 02:06:43', u'http://www.codeforces.com/problemset/problem/37/A', u'Towers', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/37/submission/6971879'), ('2014-07-17 00:31:42', u'http://www.codeforces.com/problemset/problem/71/A', u'Way Too Long Words', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/71/submission/7118436'), ('2014-07-17 00:46:44', u'http://www.codeforces.com/problemset/problem/43/B', u'Letter', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/43/submission/7118520'), ('2014-07-24 15:36:56', u'http://www.codeforces.com/problemset/problem/447/A', u'DZY Loves Hash', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/447/submission/7215463'), ('2014-07-24 15:39:56', u'http://www.codeforces.com/problemset/problem/447/A', u'DZY Loves Hash', 'CE', '0', u'GNU C', 'http://www.codeforces.com/contest/447/submission/7215478'), ('2014-07-24 15:42:59', u'http://www.codeforces.com/problemset/problem/447/A', u'DZY Loves Hash', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/447/submission/7215497'), ('2014-08-08 17:12:35', u'http://www.codeforces.com/problemset/problem/454/A', u'Little Pony and Crystal Mine', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/454/submission/7375767'), ('2014-08-08 22:25:32', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'CE', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7391497'), ('2014-08-08 22:30:29', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'TLE', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7392085'), ('2014-08-10 01:55:39', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408524'), ('2014-08-10 01:57:55', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408534'), ('2014-08-10 02:03:27', 
u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408554'), ('2014-08-10 02:08:35', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408575'), ('2014-08-10 02:18:38', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408617'), ('2014-08-10 02:28:59', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408646'), ('2014-08-31 16:22:26', u'http://www.codeforces.com/problemset/problem/87/A', u'Trains', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/87/submission/7653363'), ('2014-09-28 22:07:52', u'http://www.codeforces.com/problemset/problem/472/A', u'Design Tutorial: Learn from Math', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8007179'), ('2014-09-28 22:11:15', u'http://www.codeforces.com/problemset/problem/472/A', u'Design Tutorial: Learn from Math', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8007515'), ('2014-09-28 23:07:59', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8012494'), ('2014-09-28 23:24:42', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8013925'), ('2014-09-28 23:32:59', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8014748'), ('2014-09-29 02:27:25', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8017466'), ('2014-09-29 02:30:15', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8017497'), ('2014-10-06 21:28:24', u'http://www.codeforces.com/problemset/problem/474/A', u'Keyboard', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8112225'), ('2014-10-06 21:34:57', u'http://www.codeforces.com/problemset/problem/474/A', u'Keyboard', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8113048'), ('2014-10-06 23:10:09', u'http://www.codeforces.com/problemset/problem/474/B', u'Worms', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/474/submission/8120096'), ('2014-10-07 02:58:44', u'http://www.codeforces.com/problemset/problem/474/B', u'Worms', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8123462'), ('2014-10-07 03:55:46', u'http://www.codeforces.com/problemset/problem/474/D', u'Flowers', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8123773'), ('2014-10-07 04:02:21', u'http://www.codeforces.com/problemset/problem/474/D', u'Flowers', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8123802'), ('2015-07-13 19:46:13', u'http://www.codeforces.com/problemset/problem/550/A', u'Two Substrings', 'CE', '0', u'GNU C', 'http://www.codeforces.com/contest/550/submission/12030270'), ('2015-07-13 19:46:47', u'http://www.codeforces.com/problemset/problem/550/A', u'Two Substrings', 
'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/550/submission/12030276'), ('2015-07-13 20:00:28', u'http://www.codeforces.com/problemset/problem/550/A', u'Two Substrings', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/550/submission/12030404'), ('2015-07-13 20:22:36', u'http://www.codeforces.com/problemset/problem/550/B', u'Preparing Olympiad', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/550/submission/12030587'), ('2015-07-13 20:55:12', u'http://www.codeforces.com/problemset/problem/538/A', u'Cutting Banner', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/538/submission/12030895'), ('2015-07-13 20:56:42', u'http://www.codeforces.com/problemset/problem/538/A', u'Cutting Banner', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/538/submission/12030903'), ('2015-07-13 21:17:47', u'http://www.codeforces.com/problemset/problem/538/B', u'Quasi Binary', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/538/submission/12031083'), ('2015-07-13 21:32:43', u'http://www.codeforces.com/problemset/problem/538/B', u'Quasi Binary', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/538/submission/12031229'), ('2015-07-13 23:04:36', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12031995'), ('2015-07-13 23:07:06', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032008'), ('2015-07-13 23:08:06', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032015'), ('2015-07-13 23:08:45', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032021'), ('2015-07-13 23:09:16', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032027'), ('2015-07-13 23:10:05', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032034'), ('2015-08-22 22:26:26', u'http://www.codeforces.com/problemset/problem/572/A', u'Arrays', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/572/submission/12650084'), ('2015-08-22 22:54:57', u'http://www.codeforces.com/problemset/problem/572/B', u'Order Book', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/572/submission/12655042'), ('2015-08-22 23:20:25', u'http://www.codeforces.com/problemset/problem/572/B', u'Order Book', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/572/submission/12658463'), ('2015-08-29 22:25:27', u'http://www.codeforces.com/problemset/problem/574/A', u'Bear and Elections', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/12750171'), ('2015-08-29 22:28:28', u'http://www.codeforces.com/problemset/problem/574/A', u'Bear and Elections', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/12750679'), ('2015-08-29 22:52:25', u'http://www.codeforces.com/problemset/problem/574/C', u'Bear and Poker', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/12754477'), ('2015-08-30 00:49:08', u'http://www.codeforces.com/problemset/problem/574/C', u'Bear and Poker', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/574/submission/12765492'), ('2015-08-30 00:52:15', u'http://www.codeforces.com/problemset/problem/574/C', u'Bear and Poker', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/12765623'), ('2015-09-02 20:37:01', u'http://www.codeforces.com/problemset/problem/560/A', u'Currency System in Geraldion', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/560/submission/12817055'), ('2015-09-02 20:52:50', u'http://www.codeforces.com/problemset/problem/560/B', u'Gerald is into Art', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/560/submission/12817234'), ('2015-09-02 21:19:30', u'http://www.codeforces.com/problemset/problem/560/B', u'Gerald is into Art', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/560/submission/12817559'), ('2015-09-02 21:23:37', u'http://www.codeforces.com/problemset/problem/560/B', u'Gerald is into Art', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/560/submission/12817612'), ('2015-09-10 22:08:56', u'http://www.codeforces.com/problemset/problem/577/A', u'Multiplication Table', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/577/submission/12928002'), ('2015-09-10 22:57:34', u'http://www.codeforces.com/problemset/problem/577/C', u"Vasya and Petya's Game", 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/577/submission/12937380'), ('2015-09-10 23:24:19', u'http://www.codeforces.com/problemset/problem/577/C', u"Vasya and Petya's Game", 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/577/submission/12941164'), ('2015-09-10 23:35:13', u'http://www.codeforces.com/problemset/problem/577/C', u"Vasya and Petya's Game", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/577/submission/12942378'), ('2015-09-18 09:26:35', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080029'), ('2015-09-18 09:35:11', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080083'), ('2015-09-18 09:40:54', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080104'), ('2015-09-18 09:50:57', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080162'), ('2015-09-18 10:57:39', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080670'), ('2015-09-19 10:04:18', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/13096185'), ('2015-09-19 10:06:16', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/13096197'), ('2015-09-19 10:09:39', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/13096220'), ('2015-09-19 10:13:38', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/13096250'), ('2015-09-19 10:17:36', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'AC', '100', u'GNU C++', 
'http://www.codeforces.com/contest/158/submission/13096280'), ('2015-09-19 16:27:37', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/13100273'), ('2015-09-19 17:17:56', u'http://www.codeforces.com/problemset/problem/550/C', u'Divisibility by Eight', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/550/submission/13100937'), ('2015-09-19 20:29:07', u'http://www.codeforces.com/problemset/problem/519/B', u'A and B and Compilation Errors', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/519/submission/13103565'), ('2015-09-20 08:58:02', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109387'), ('2015-09-20 09:05:26', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109421'), ('2015-09-20 09:10:19', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109436'), ('2015-09-20 09:15:40', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109456'), ('2015-09-20 09:19:16', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109467'), ('2015-09-22 22:07:10', u'http://www.codeforces.com/problemset/problem/580/A', u'Kefa and First Steps', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13145925'), ('2015-09-22 22:29:58', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13152519'), ('2015-09-22 23:18:24', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13162731'), ('2015-09-22 23:24:31', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13163770'), ('2015-09-22 23:25:35', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13163942'), ('2015-09-22 23:29:09', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13164502'), ('2015-09-23 00:49:34', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13171251'), ('2015-09-23 01:03:37', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13171838'), ('2015-09-23 01:38:14', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13172926'), ('2015-09-23 14:55:02', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13181387'), ('2015-09-23 18:14:51', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and 
Company', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13185934'), ('2015-09-23 18:16:58', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13185991'), ('2015-09-23 19:08:23', u'http://www.codeforces.com/problemset/problem/580/C', u'Kefa and Park', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13187242'), ('2015-09-23 19:24:05', u'http://www.codeforces.com/problemset/problem/580/C', u'Kefa and Park', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13187823'), ('2015-09-23 19:30:09', u'http://www.codeforces.com/problemset/problem/580/C', u'Kefa and Park', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13187946'), ('2015-09-27 19:40:44', u'http://www.codeforces.com/problemset/problem/4/C', u'Registration System', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/4/submission/13250390'), ('2015-09-27 19:41:55', u'http://www.codeforces.com/problemset/problem/4/C', u'Registration System', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/4/submission/13250410'), ('2015-09-27 21:19:48', u'http://www.codeforces.com/problemset/problem/159/C', u'String Manipulation 1.0', 'MLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/159/submission/13251760'), ('2015-09-28 14:34:58', u'http://www.codeforces.com/problemset/problem/581/A', u'Vasya the Hipster', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13260798'), ('2015-09-28 14:44:20', u'http://www.codeforces.com/problemset/problem/581/B', u'Luxurious Houses', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13263305'), ('2015-09-28 14:56:03', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13265626'), ('2015-09-28 15:17:41', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13268882'), ('2015-09-29 12:10:51', u'http://www.codeforces.com/problemset/problem/581/B', u'Luxurious Houses', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292365'), ('2015-09-29 12:22:40', u'http://www.codeforces.com/problemset/problem/581/B', u'Luxurious Houses', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292509'), ('2015-09-29 12:34:16', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292656'), ('2015-09-29 12:43:38', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292768'), ('2015-09-29 12:47:20', u'http://www.codeforces.com/problemset/problem/581/B', u'Luxurious Houses', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292809'), ('2015-09-29 12:48:18', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292817'), ('2015-09-29 13:10:59', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13293101'), ('2015-09-29 13:32:07', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', 
u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13293354'), ('2015-09-29 17:43:48', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13297010'), ('2015-09-29 20:59:18', u'http://www.codeforces.com/problemset/problem/263/A', u'Beautiful Matrix', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/263/submission/13300553'), ('2015-09-29 21:14:53', u'http://www.codeforces.com/problemset/problem/118/B', u'Present from Lena', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/118/submission/13300823'), ('2015-09-29 21:29:52', u'http://www.codeforces.com/problemset/problem/118/B', u'Present from Lena', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/118/submission/13301123'), ('2015-10-03 18:44:23', u'http://www.codeforces.com/problemset/problem/268/B', u'Buttons', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/268/submission/13359900'), ('2015-10-03 20:04:32', u'http://www.codeforces.com/problemset/problem/569/B', u'Inventory', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/569/submission/13360927'), ('2015-10-03 20:06:13', u'http://www.codeforces.com/problemset/problem/569/B', u'Inventory', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/569/submission/13360949'), ('2015-10-03 21:05:23', u'http://www.codeforces.com/problemset/problem/525/C', u'Ilya and Sticks', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13361790'), ('2015-10-03 21:06:58', u'http://www.codeforces.com/problemset/problem/525/C', u'Ilya and Sticks', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13361810'), ('2015-10-03 21:09:02', u'http://www.codeforces.com/problemset/problem/525/C', u'Ilya and Sticks', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13361836'), ('2015-10-03 22:25:41', u'http://www.codeforces.com/problemset/problem/583/A', u'Asphalting Roads', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/583/submission/13365272'), ('2015-10-03 23:30:49', u'http://www.codeforces.com/problemset/problem/583/B', u"Robot's Task", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/583/submission/13378169'), ('2015-10-06 22:05:41', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'TLE', '0', u'Python 2', 'http://www.codeforces.com/contest/584/submission/13436363'), ('2015-10-06 22:17:59', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/584/submission/13440624'), ('2015-10-06 22:24:51', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13442261'), ('2015-10-06 22:25:07', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'CE', '0', u'Python 2', 'http://www.codeforces.com/contest/584/submission/13442319'), ('2015-10-06 22:26:42', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'AC', '100', u'Python 2', 'http://www.codeforces.com/contest/584/submission/13442651'), ('2015-10-06 22:52:47', u'http://www.codeforces.com/problemset/problem/584/B', u'Kolya and Tanya ', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13447777'), ('2015-10-06 22:58:59', u'http://www.codeforces.com/problemset/problem/584/B', u'Kolya and Tanya ', 'AC', '100', u'Python 2', 
'http://www.codeforces.com/contest/584/submission/13448876'), ('2015-10-06 23:14:57', u'http://www.codeforces.com/problemset/problem/584/C', u'Marina and Vasya', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13451585'), ('2015-10-06 23:35:46', u'http://www.codeforces.com/problemset/problem/584/C', u'Marina and Vasya', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13454813'), ('2015-10-06 23:44:55', u'http://www.codeforces.com/problemset/problem/584/C', u'Marina and Vasya', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13456081'), ('2015-10-07 01:04:27', u'http://www.codeforces.com/problemset/problem/584/B', u'Kolya and Tanya ', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13460503'), ('2015-10-07 18:02:31', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13473005'), ('2015-10-08 21:26:54', u'http://www.codeforces.com/problemset/problem/92/B', u'Binary Number', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/92/submission/13496730'), ('2015-10-09 01:22:57', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/456/submission/13500243'), ('2015-10-09 01:35:03', u'http://www.codeforces.com/problemset/problem/52/A', u'123-sequence', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/52/submission/13500398'), ('2015-10-09 06:38:55', u'http://www.codeforces.com/problemset/problem/266/B', u'Queue at the School', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/266/submission/13502318'), ('2015-10-09 06:45:08', u'http://www.codeforces.com/problemset/problem/479/A', u'Expression', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/479/submission/13502351'), ('2015-10-09 06:46:35', u'http://www.codeforces.com/problemset/problem/479/A', u'Expression', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/479/submission/13502358'), ('2015-10-09 06:50:39', u'http://www.codeforces.com/problemset/problem/61/A', u'Ultra-Fast Mathematician', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/61/submission/13502387'), ('2015-10-09 07:03:29', u'http://www.codeforces.com/problemset/problem/462/B', u'Appleman and Card Game', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/462/submission/13502451'), ('2015-10-09 07:05:19', u'http://www.codeforces.com/problemset/problem/462/B', u'Appleman and Card Game', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/462/submission/13502463'), ('2015-10-09 07:06:54', u'http://www.codeforces.com/problemset/problem/462/B', u'Appleman and Card Game', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/462/submission/13502474'), ('2015-10-09 22:47:48', u'http://www.codeforces.com/problemset/problem/266/B', u'Queue at the School', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/266/submission/13514395'), ('2015-10-09 23:14:22', u'http://www.codeforces.com/problemset/problem/525/B', u'Pasha and String', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13514840'), ('2015-10-09 23:30:20', u'http://www.codeforces.com/problemset/problem/525/B', u'Pasha and String', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13515120'), ('2015-10-11 04:08:55', u'http://www.codeforces.com/problemset/problem/478/A', u'Initial Bet', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/478/submission/13538926'), ('2015-10-11 04:10:18', u'http://www.codeforces.com/problemset/problem/478/A', u'Initial Bet', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/478/submission/13538931'), ('2015-10-11 04:28:02', u'http://www.codeforces.com/problemset/problem/459/B', u'Pashmak and Flowers', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/459/submission/13538989'), ('2015-10-11 04:29:51', u'http://www.codeforces.com/problemset/problem/459/B', u'Pashmak and Flowers', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/459/submission/13538995'), ('2015-10-11 04:37:27', u'http://www.codeforces.com/problemset/problem/459/B', u'Pashmak and Flowers', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/459/submission/13539018'), ('2015-10-25 14:34:14', u'http://www.codeforces.com/problemset/problem/591/A', u"Wizards' Duel", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13836193'), ('2015-10-25 14:50:25', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13839725'), ('2015-10-25 15:34:56', u'http://www.codeforces.com/problemset/problem/591/C', u'Median Smoothing', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13845641'), ('2015-10-25 15:38:20', u'http://www.codeforces.com/problemset/problem/591/C', u'Median Smoothing', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13846000'), ('2015-10-25 22:51:09', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13857177'), ('2015-10-25 23:23:19', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13857740'), ('2015-10-26 10:46:53', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13866457'), ('2015-10-26 10:53:43', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13866518'), ('2015-10-26 19:50:00', u'http://www.codeforces.com/problemset/problem/160/B', u'Unlucky Ticket', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/13873974'), ('2015-10-27 02:45:23', u'http://www.codeforces.com/problemset/problem/99/A', u'Help Far Away Kingdom', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/99/submission/13881024'), ('2015-10-27 03:13:34', u'http://www.codeforces.com/problemset/problem/12/B', u'Correct Solution?', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/12/submission/13881211'), ('2015-10-28 06:05:19', u'http://www.codeforces.com/problemset/problem/405/C', u'Unusual Product', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/405/submission/13906955'), ('2015-10-28 08:04:56', u'http://www.codeforces.com/problemset/problem/270/B', u'Multithreading', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/270/submission/13907587'), ('2015-10-28 21:42:49', u'http://www.codeforces.com/problemset/problem/525/C', u'Ilya and Sticks', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13918621'), ('2015-10-28 23:48:03', u'http://www.codeforces.com/problemset/problem/285/C', u'Building Permutation', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/285/submission/13920882'), ('2015-10-28 23:49:59', u'http://www.codeforces.com/problemset/problem/285/C', u'Building Permutation', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/285/submission/13920913'), ('2015-10-30 10:34:56', u'http://www.codeforces.com/problemset/problem/245/A', u'System Administrator', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/245/submission/13946807'), ('2015-10-30 10:49:01', u'http://www.codeforces.com/problemset/problem/102/B', u'Sum of Digits', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/102/submission/13946899'), ('2015-10-30 10:53:35', u'http://www.codeforces.com/problemset/problem/102/B', u'Sum of Digits', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/102/submission/13946926'), ('2015-10-31 22:14:30', u'http://www.codeforces.com/problemset/problem/592/A', u'PawnChess', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/13975670'), ('2015-10-31 22:29:27', u'http://www.codeforces.com/problemset/problem/592/B', u'The Monster and the Squirrel', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/13978806'), ('2015-10-31 22:58:55', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/13983585'), ('2015-10-31 23:11:05', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/13985339'), ('2015-11-01 01:46:31', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/592/submission/13993129'), ('2015-11-01 02:00:03', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/592/submission/13993447'), ('2015-11-01 02:04:32', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/592/submission/13993623'), ('2015-11-01 10:48:24', u'http://www.codeforces.com/problemset/problem/592/A', u'PawnChess', 'CE', '0', u'Python 2', 'http://www.codeforces.com/contest/592/submission/14000480'), ('2015-11-01 10:48:46', u'http://www.codeforces.com/problemset/problem/592/A', u'PawnChess', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/14000483'), ('2015-11-03 02:17:02', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'AC', '100', u'Python 2', 'http://www.codeforces.com/contest/592/submission/14033816'), ('2015-11-03 02:30:31', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/14033957'), ('2015-11-04 14:58:56', u'http://www.codeforces.com/problemset/problem/339/B', u'Xenia and Ringroad', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/339/submission/14054303'), ('2015-11-04 15:00:05', u'http://www.codeforces.com/problemset/problem/339/B', u'Xenia and Ringroad', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/339/submission/14054317'), ('2015-11-04 15:29:08', u'http://www.codeforces.com/problemset/problem/11/A', u'Increasing Sequence', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/11/submission/14054735'), ('2015-11-04 16:30:38', u'http://www.codeforces.com/problemset/problem/567/A', u'Lineland Mail', 'AC', '100', u'GNU C++', 
'http://www.codeforces.com/contest/567/submission/14055720'), ('2015-11-05 10:34:36', u'http://www.codeforces.com/problemset/problem/593/A', u'2Char', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/593/submission/14082176'), ('2015-11-06 21:20:07', u'http://www.codeforces.com/problemset/problem/159/C', u'String Manipulation 1.0', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/159/submission/14109516'), ('2015-11-06 21:47:19', u'http://www.codeforces.com/problemset/problem/159/C', u'String Manipulation 1.0', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/159/submission/14109921'), ('2015-11-08 22:05:35', u'http://www.codeforces.com/problemset/problem/595/A', u'Vitaly and Night', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/595/submission/14145703'), ('2015-11-08 22:44:17', u'http://www.codeforces.com/problemset/problem/595/B', u'Pasha and Phone', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/595/submission/14150515'), ('2015-11-08 23:28:37', u'http://www.codeforces.com/problemset/problem/595/B', u'Pasha and Phone', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/595/submission/14155293'), ('2015-11-16 01:07:14', u'http://www.codeforces.com/problemset/problem/596/A', u'Wilbur and Swimming Pool', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/596/submission/14288508'), ('2015-11-16 01:09:02', u'http://www.codeforces.com/problemset/problem/596/B', u'Wilbur and Array', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/596/submission/14288537'), ('2015-11-16 01:16:40', u'http://www.codeforces.com/problemset/problem/596/A', u'Wilbur and Swimming Pool', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/596/submission/14288651'), ('2015-11-16 01:17:38', u'http://www.codeforces.com/problemset/problem/596/A', u'Wilbur and Swimming Pool', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/596/submission/14288673'), ('2015-12-01 21:15:25', u'http://www.codeforces.com/problemset/problem/604/A', u'Uncowed Forces', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/604/submission/14587410'), ('2015-12-01 21:21:57', u'http://www.codeforces.com/problemset/problem/604/A', u'Uncowed Forces', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/604/submission/14588907'), ('2015-12-01 21:25:25', u'http://www.codeforces.com/problemset/problem/604/A', u'Uncowed Forces', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/604/submission/14589670'), ('2015-12-01 21:50:29', u'http://www.codeforces.com/problemset/problem/604/B', u'More Cowbell', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/604/submission/14593977'), ('2015-12-09 21:53:15', u'http://www.codeforces.com/problemset/problem/606/C', u'Sorting Railway Cars', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14718869'), ('2015-12-09 22:14:26', u'http://www.codeforces.com/problemset/problem/606/C', u'Sorting Railway Cars', 'HCK', '-50', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14722405'), ('2015-12-09 22:44:59', u'http://www.codeforces.com/problemset/problem/606/A', u'Magic Spheres', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14726450'), ('2015-12-09 22:55:27', u'http://www.codeforces.com/problemset/problem/606/A', u'Magic Spheres', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14727619'), ('2015-12-09 22:58:11', u'http://www.codeforces.com/problemset/problem/606/A', u'Magic Spheres', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/606/submission/14727938'), ('2015-12-09 23:00:38', u'http://www.codeforces.com/problemset/problem/606/A', u'Magic Spheres', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14728208'), ('2015-12-15 21:36:55', u'http://www.codeforces.com/problemset/problem/580/A', u'Kefa and First Steps', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/14817821'), ('2015-12-17 18:01:21', u'http://www.codeforces.com/problemset/problem/598/B', u'Queries on a String', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/598/submission/14845709'), ('2015-12-17 18:09:23', u'http://www.codeforces.com/problemset/problem/598/B', u'Queries on a String', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/598/submission/14845795'), ('2015-12-17 18:55:21', u'http://www.codeforces.com/problemset/problem/597/A', u'Divisibility', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/597/submission/14846361'), ('2015-12-17 18:56:54', u'http://www.codeforces.com/problemset/problem/597/A', u'Divisibility', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/597/submission/14846374'), ('2015-12-17 19:02:03', u'http://www.codeforces.com/problemset/problem/597/A', u'Divisibility', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/597/submission/14846436'), ('2015-12-17 19:05:46', u'http://www.codeforces.com/problemset/problem/597/A', u'Divisibility', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/597/submission/14846492'), ('2015-12-22 22:54:31', u'http://www.codeforces.com/problemset/problem/609/B', u'\u041a\u043d\u0438\u0433\u0430 - \u043b\u0443\u0447\u0448\u0438\u0439 \u043f\u043e\u0434\u0430\u0440\u043e\u043a', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14928518'), ('2015-12-23 01:45:32', u'http://www.codeforces.com/problemset/problem/609/C', u'Load Balancing', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14930319'), ('2015-12-23 01:48:44', u'http://www.codeforces.com/problemset/problem/609/C', u'Load Balancing', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14930347'), ('2015-12-23 02:12:32', u'http://www.codeforces.com/problemset/problem/609/C', u'Load Balancing', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14930527'), ('2015-12-23 02:14:12', u'http://www.codeforces.com/problemset/problem/609/C', u'Load Balancing', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14930545'), ('2015-12-24 03:46:52', u'http://www.codeforces.com/problemset/problem/608/A', u'Saitama Destroys Hotel', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/608/submission/14961192'), ('2015-12-24 03:56:12', u'http://www.codeforces.com/problemset/problem/600/B', u'Queries about less or equal elements', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/600/submission/14961257'), ('2015-12-24 04:11:24', u'http://www.codeforces.com/problemset/problem/600/A', u'Extract Numbers', 'AC', '100', u'PyPy 2', 'http://www.codeforces.com/contest/600/submission/14961343'), ('2015-12-26 00:19:54', u'http://www.codeforces.com/problemset/problem/600/B', u'Queries about less or equal elements', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/600/submission/15021384'), ('2015-12-31 02:06:51', u'http://www.codeforces.com/problemset/problem/611/A', u'New Year and Days', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/611/submission/15129041'), ('2015-12-31 
02:07:53', u'http://www.codeforces.com/problemset/problem/611/A', u'New Year and Days', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/611/submission/15129051'), ('2015-12-31 02:39:02', u'http://www.codeforces.com/problemset/problem/611/B', u'New Year and Old Property', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/611/submission/15129360'), ('2016-01-01 00:08:10', u'http://www.codeforces.com/problemset/problem/611/B', u'New Year and Old Property', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/611/submission/15140290'), ('2016-01-02 01:17:28', u'http://www.codeforces.com/problemset/problem/610/A', u'Pasha and Stick', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/610/submission/15152467'), ('2016-01-02 02:05:01', u'http://www.codeforces.com/problemset/problem/610/B', u'Vika and Squares', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/610/submission/15152883'), ('2016-01-05 11:52:15', u'http://www.codeforces.com/problemset/problem/189/A', u'Cut Ribbon', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/189/submission/15187913'), ('2016-01-05 12:26:38', u'http://www.codeforces.com/problemset/problem/489/C', u'Given Length and Sum of Digits...', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/489/submission/15188193'), ('2016-01-06 20:03:28', u'http://www.codeforces.com/problemset/problem/570/C', u'Replacement', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/570/submission/15208011'), ('2016-01-06 20:09:17', u'http://www.codeforces.com/problemset/problem/570/C', u'Replacement', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/570/submission/15208096'), ('2016-01-09 14:53:09', u'http://www.codeforces.com/problemset/problem/615/A', u'Bulbs', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/615/submission/15266906'), ('2016-01-14 22:12:10', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15348242'), ('2016-01-14 22:19:51', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15350653'), ('2016-01-14 22:26:04', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15352533'), ('2016-01-14 22:45:52', u'http://www.codeforces.com/problemset/problem/614/B', u"Gena's Code", 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15357739'), ('2016-01-14 22:49:49', u'http://www.codeforces.com/problemset/problem/614/B', u"Gena's Code", 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15358770'), ('2016-01-14 23:13:26', u'http://www.codeforces.com/problemset/problem/614/B', u"Gena's Code", 'TLE', '0', u'PyPy 2', 'http://www.codeforces.com/contest/614/submission/15364083'), ('2016-01-14 23:17:00', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'HCK', '-50', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15364825'), ('2016-01-15 01:46:02', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15376622'), ('2016-01-15 01:50:32', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15376775'), ('2016-01-15 02:04:58', 
u'http://www.codeforces.com/problemset/problem/614/B', u"Gena's Code", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15377119'), ('2016-01-31 01:01:03', u'http://www.codeforces.com/problemset/problem/618/A', u'Slime Combining', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/618/submission/15684756'), ('2016-01-31 01:44:18', u'http://www.codeforces.com/problemset/problem/618/B', u'Guess the Permutation', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/618/submission/15685235'), ('2016-02-01 07:17:18', u'http://www.codeforces.com/problemset/problem/621/A', u'Wet Shark and Odd and Even', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/621/submission/15722644'), ('2016-02-01 07:40:26', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/621/submission/15722848'), ('2016-02-01 07:40:45', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15722852'), ('2016-02-01 07:59:16', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15723041'), ('2016-02-01 08:01:58', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15723074'), ('2016-02-01 08:05:42', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15723107'), ('2016-02-01 08:07:51', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15723123'), ('2016-02-22 00:05:38', u'http://www.codeforces.com/problemset/problem/629/A', u'Far Relative\u2019s Birthday Cake', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/629/submission/16265987'), ('2016-02-28 19:19:12', u'http://www.codeforces.com/problemset/problem/629/B', u'Far Relative\u2019s Problem', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/629/submission/16404240'), ('2016-02-28 20:35:59', u'http://www.codeforces.com/problemset/problem/630/C', u'Lucky Numbers', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/630/submission/16405407'), ('2016-02-28 20:37:18', u'http://www.codeforces.com/problemset/problem/630/C', u'Lucky Numbers', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/630/submission/16405419'), ('2016-02-28 20:41:06', u'http://www.codeforces.com/problemset/problem/630/C', u'Lucky Numbers', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/630/submission/16405456'), ('2016-07-31 00:44:37', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19524584'), ('2016-07-31 00:49:29', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19524679'), ('2016-07-31 00:58:14', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19524873'), ('2016-07-31 18:30:30', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19538526'), 
('2016-07-31 18:49:30', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19538834'), ('2016-07-31 19:01:53', u'http://www.codeforces.com/problemset/problem/699/A', u'Launch of Collider', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19539062'), ('2016-07-31 20:11:24', u'http://www.codeforces.com/problemset/problem/701/A', u'Cards', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/701/submission/19540208'), ('2016-07-31 20:35:26', u'http://www.codeforces.com/problemset/problem/701/B', u'Cells Not Under Attack', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/701/submission/19540595'), ('2016-07-31 20:39:11', u'http://www.codeforces.com/problemset/problem/701/B', u'Cells Not Under Attack', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/701/submission/19540660'), ('2016-08-02 03:12:36', u'http://www.codeforces.com/problemset/problem/702/A', u'Maximum Increase', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568636'), ('2016-08-02 03:15:28', u'http://www.codeforces.com/problemset/problem/702/A', u'Maximum Increase', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568664'), ('2016-08-02 03:16:08', u'http://www.codeforces.com/problemset/problem/702/A', u'Maximum Increase', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568668'), ('2016-08-02 03:23:31', u'http://www.codeforces.com/problemset/problem/702/B', u'Powers of Two', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568738'), ('2016-08-02 03:25:16', u'http://www.codeforces.com/problemset/problem/702/B', u'Powers of Two', 'TLE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568745'), ('2016-08-04 20:47:23', u'http://www.codeforces.com/problemset/problem/703/A', u'Mishka and Game', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19617826'), ('2016-08-04 20:49:28', u'http://www.codeforces.com/problemset/problem/703/A', u'Mishka and Game', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19619139'), ('2016-08-04 21:22:13', u'http://www.codeforces.com/problemset/problem/703/B', u'Mishka and trip', 'SK', '0', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19624817'), ('2016-08-04 22:36:40', u'http://www.codeforces.com/problemset/problem/703/B', u'Mishka and trip', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19633551'), ('2016-08-05 01:11:14', u'http://www.codeforces.com/problemset/problem/703/B', u'Mishka and trip', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19638245'), ('2016-08-08 15:49:19', u'http://www.codeforces.com/problemset/problem/705/A', u'Hulk', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/19725753'), ('2016-08-08 18:25:13', u'http://www.codeforces.com/problemset/problem/705/B', u'Spider Man', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/19728563'), ('2016-08-11 22:10:00', u'http://www.codeforces.com/problemset/problem/706/A', u'Beru-taxi', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19788500'), ('2016-08-11 22:19:02', u'http://www.codeforces.com/problemset/problem/706/B', u'Interesting drink', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19792085'), ('2016-08-11 
22:29:00', u'http://www.codeforces.com/problemset/problem/706/B', u'Interesting drink', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19794671'), ('2016-08-11 22:41:28', u'http://www.codeforces.com/problemset/problem/706/B', u'Interesting drink', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19797228'), ('2016-08-12 01:49:03', u'http://www.codeforces.com/problemset/problem/706/A', u'Beru-taxi', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19812426'), ('2016-08-12 02:19:20', u'http://www.codeforces.com/problemset/problem/706/A', u'Beru-taxi', 'CE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19813299'), ('2016-08-12 02:22:25', u'http://www.codeforces.com/problemset/problem/706/A', u'Beru-taxi', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19813362'), ('2016-08-14 19:27:06', u'http://www.codeforces.com/problemset/problem/702/B', u'Powers of Two', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/702/submission/19869883'), ('2016-08-14 20:27:13', u'http://www.codeforces.com/problemset/problem/706/C', u'Hard problem', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19870767'), ('2016-08-15 04:49:12', u'http://www.codeforces.com/problemset/problem/706/D', u"Vasiliy's Multiset", 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19877506'), ('2016-08-15 04:55:02', u'http://www.codeforces.com/problemset/problem/706/D', u"Vasiliy's Multiset", 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19877543'), ('2016-08-15 06:38:23', u'http://www.codeforces.com/problemset/problem/706/D', u"Vasiliy's Multiset", 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19878193'), ('2016-08-17 22:37:54', u'http://www.codeforces.com/problemset/problem/706/D', u"Vasiliy's Multiset", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19932138'), ('2016-08-20 15:22:16', u'http://www.codeforces.com/problemset/problem/29/C', u'Mail Stamps', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/29/submission/19979318'), ('2016-08-20 15:22:44', u'http://www.codeforces.com/problemset/problem/29/C', u'Mail Stamps', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/29/submission/19979332'), ('2016-08-20 16:20:32', u'http://www.codeforces.com/problemset/problem/637/B', u'Chat Order', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/637/submission/19980245'), ('2016-08-20 16:22:06', u'http://www.codeforces.com/problemset/problem/637/B', u'Chat Order', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/637/submission/19980267'), ('2016-08-20 16:25:04', u'http://www.codeforces.com/problemset/problem/637/B', u'Chat Order', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/637/submission/19980309'), ('2016-08-20 17:25:07', u'http://www.codeforces.com/problemset/problem/622/C', u'Not Equal on a Segment', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/622/submission/19981265'), ('2016-08-20 17:30:50', u'http://www.codeforces.com/problemset/problem/622/C', u'Not Equal on a Segment', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/622/submission/19981354'), ('2016-08-20 18:39:54', u'http://www.codeforces.com/problemset/problem/707/A', u"Brain's Photos", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/707/submission/19983584'), ('2016-08-20 19:05:41', 
u'http://www.codeforces.com/problemset/problem/707/B', u'Bakery', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/707/submission/19990875'), ('2016-08-21 02:49:44', u'http://www.codeforces.com/problemset/problem/707/C', u'Pythagorean Triples', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/707/submission/20013751'), ('2016-08-24 06:34:33', u'http://www.codeforces.com/problemset/problem/710/B', u'Optimal Point on a Line', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096202'), ('2016-08-24 06:44:27', u'http://www.codeforces.com/problemset/problem/710/B', u'Optimal Point on a Line', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096285'), ('2016-08-24 06:49:56', u'http://www.codeforces.com/problemset/problem/710/A', u'King Moves', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096337'), ('2016-08-24 06:58:51', u'http://www.codeforces.com/problemset/problem/710/C', u'Magic Odd Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096421'), ('2016-08-24 07:05:26', u'http://www.codeforces.com/problemset/problem/710/C', u'Magic Odd Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096477'), ('2016-08-24 07:07:46', u'http://www.codeforces.com/problemset/problem/710/C', u'Magic Odd Square', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096494'), ('2016-08-25 05:52:47', u'http://www.codeforces.com/problemset/problem/709/A', u'Juicer', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/709/submission/20140096'), ('2016-08-25 06:01:00', u'http://www.codeforces.com/problemset/problem/709/C', u'Letters Cyclic Shift', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/709/submission/20140181'), ('2016-08-25 06:04:24', u'http://www.codeforces.com/problemset/problem/709/C', u'Letters Cyclic Shift', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/709/submission/20140220'), ('2016-08-25 06:05:03', u'http://www.codeforces.com/problemset/problem/709/C', u'Letters Cyclic Shift', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/709/submission/20140228'), ('2016-08-25 15:38:47', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'TLE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20150798'), ('2016-08-25 17:26:47', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20152979'), ('2016-08-25 17:28:05', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153009'), ('2016-08-25 17:29:43', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153046'), ('2016-08-25 17:33:09', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153146'), ('2016-08-25 17:35:27', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153204'), ('2016-08-25 17:40:33', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153304'), ('2016-08-25 17:41:24', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++', 
'http://www.codeforces.com/contest/705/submission/20153316'), ('2016-08-25 17:47:30', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153471'), ('2016-08-25 17:50:56', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153564'), ('2016-08-25 17:52:06', u'http://www.codeforces.com/problemset/problem/704/A', u'Thor', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/704/submission/20153599'), ('2016-08-25 17:53:50', u'http://www.codeforces.com/problemset/problem/704/A', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/704/submission/20153653'), ('2016-08-25 17:59:43', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153767'), ('2016-08-25 18:03:16', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153836'), ('2016-08-25 18:05:03', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153878'), ('2016-08-25 18:09:01', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153955'), ('2016-08-25 18:10:53', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154001'), ('2016-08-25 18:13:15', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154058'), ('2016-08-25 18:15:21', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154102'), ('2016-08-25 18:16:40', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154129'), ('2016-08-25 18:23:26', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154295'), ('2016-08-25 18:24:26', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154322'), ('2016-08-29 10:50:32', u'http://www.codeforces.com/problemset/problem/710/C', u'Magic Odd Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20222968'), ('2016-08-29 17:43:28', u'http://www.codeforces.com/problemset/problem/711/A', u'Bus to Udayland', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/711/submission/20230874'), ('2016-08-29 17:48:47', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/711/submission/20232719'), ('2016-08-29 17:55:00', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20234607'), ('2016-08-29 18:08:37', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20238630'), ('2016-08-29 18:11:38', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic 
Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20239424'), ('2016-08-29 18:21:50', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20241874'), ('2016-08-29 18:36:36', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20245231'), ('2016-08-29 18:50:27', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/711/submission/20247880'), ('2016-08-29 18:50:49', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20247939'), ('2016-08-29 21:34:54', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20256999'), ('2016-08-29 22:47:30', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20260046'), ('2016-08-29 22:49:03', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20260094'), ('2016-09-03 21:04:35', u'http://www.codeforces.com/problemset/problem/510/B', u'Fox And Two Dots', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/510/submission/20365149'), ('2016-09-03 22:14:53', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/129/submission/20366343'), ('2016-09-03 22:19:35', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20366460'), ('2016-09-03 23:04:55', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20367405'), ('2016-09-03 23:09:28', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20367491'), ('2016-09-03 23:44:56', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20368170'), ('2016-09-03 23:54:28', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20368355'), ('2016-09-03 23:58:44', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20368443'), ('2016-09-04 00:00:06', u'http://www.codeforces.com/problemset/problem/300/B', u'Coach', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/300/submission/20368473'), ('2016-09-04 00:00:23', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/129/submission/20368478'), ('2016-09-04 00:57:23', u'http://www.codeforces.com/problemset/problem/300/B', u'Coach', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/300/submission/20369438'), ('2016-09-04 01:05:04', 
u'http://www.codeforces.com/problemset/problem/300/B', u'Coach', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/300/submission/20369545'), ('2016-09-04 01:14:44', u'http://www.codeforces.com/problemset/problem/300/B', u'Coach', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/300/submission/20369700'), ('2016-09-05 05:28:45', u'http://www.codeforces.com/problemset/problem/602/C', u'The Two Routes', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/602/submission/20391156'), ('2016-09-12 19:52:06', u'http://www.codeforces.com/problemset/problem/712/A', u'Memory and Crow', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/712/submission/20550755'), ('2016-09-12 20:01:01', u'http://www.codeforces.com/problemset/problem/712/B', u'Memory and Trident', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/712/submission/20550916'), ('2016-09-12 20:50:03', u'http://www.codeforces.com/problemset/problem/712/C', u'Memory and De-Evolution', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/712/submission/20551627'), ('2016-09-12 21:16:55', u'http://www.codeforces.com/problemset/problem/712/C', u'Memory and De-Evolution', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/712/submission/20552025'), ('2016-09-17 22:47:03', u'http://www.codeforces.com/problemset/problem/716/A', u'Crazy Computer', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/716/submission/20714780'), ('2016-09-17 23:47:10', u'http://www.codeforces.com/problemset/problem/716/B', u'Complete the Word', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/716/submission/20716899'), ('2016-09-17 23:48:25', u'http://www.codeforces.com/problemset/problem/716/B', u'Complete the Word', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/716/submission/20716935'), ('2016-09-21 18:03:35', u'http://www.codeforces.com/problemset/problem/716/B', u'Complete the Word', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/716/submission/20794436'), ('2016-09-23 18:38:43', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20837817'), ('2016-09-23 18:40:43', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20838364'), ('2016-09-23 18:42:38', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20839135'), ('2016-09-23 18:44:24', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20840020'), ('2016-09-23 18:45:54', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20840715'), ('2016-09-23 18:56:54', u'http://www.codeforces.com/problemset/problem/719/B', u'Anatoly and Cockroaches', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20845155'), ('2016-09-23 19:24:28', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20852944'), ('2016-09-23 19:30:10', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 
'http://www.codeforces.com/contest/719/submission/20854218'), ('2016-09-23 19:46:36', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20857522'), ('2016-09-23 19:49:31', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20858071'), ('2016-09-23 20:02:28', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20860429'), ('2016-10-02 00:45:43', u'http://www.codeforces.com/problemset/problem/721/A', u'One-dimensional Japanese Crossword', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/721/submission/21096198'), ('2016-10-02 00:56:47', u'http://www.codeforces.com/problemset/problem/721/B', u'Passwords', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/721/submission/21096352'), ('2016-10-02 01:22:26', u'http://www.codeforces.com/problemset/problem/721/B', u'Passwords', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/721/submission/21096748'), ('2016-10-02 16:21:19', u'http://www.codeforces.com/problemset/problem/722/A', u'Broken Clock', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/722/submission/21112277'), ('2016-10-02 16:23:01', u'http://www.codeforces.com/problemset/problem/722/A', u'Broken Clock', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/722/submission/21112319'), ('2016-10-02 16:54:23', u'http://www.codeforces.com/problemset/problem/722/B', u'Verse Pattern', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/722/submission/21113003'), ('2016-10-02 16:56:42', u'http://www.codeforces.com/problemset/problem/722/B', u'Verse Pattern', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/722/submission/21113067'), ('2016-10-05 02:06:14', u'http://www.codeforces.com/problemset/problem/723/A', u'The New Year: Meeting Friends', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/723/submission/21196305'), ('2016-10-05 02:24:17', u'http://www.codeforces.com/problemset/problem/723/B', u'Text Document Analysis', 'AC', '100', u'Python 2', 'http://www.codeforces.com/contest/723/submission/21196529'), ('2017-03-06 05:02:38', u'http://www.codeforces.com/problemset/problem/723/B', u'Text Document Analysis', 'AC', '100', u'Python 2', 'http://www.codeforces.com/contest/723/submission/25275840'), ('2017-03-06 05:03:10', u'http://www.codeforces.com/problemset/problem/723/B', u'Text Document Analysis', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/723/submission/25275845'), ('2017-03-06 05:07:21', u'http://www.codeforces.com/problemset/problem/429/B', u'Working out', 'TLE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/429/submission/25275894'), ('2017-03-06 05:08:26', u'http://www.codeforces.com/problemset/problem/429/B', u'Working out', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/429/submission/25275906'), ('2017-03-06 05:15:08', u'http://www.codeforces.com/problemset/problem/429/B', u'Working out', 'CE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/429/submission/25275955'), ('2018-03-01 04:19:28', u'http://www.codeforces.com/problemset/problem/577/A', u'Multiplication Table', 'RE', '0', u'Python 2', 'http://www.codeforces.com/contest/577/submission/35797975'), ('2018-03-01 04:19:47', u'http://www.codeforces.com/problemset/problem/577/A', 
u'Multiplication Table', 'AC', '100', u'Python 3', 'http://www.codeforces.com/contest/577/submission/35797984')],
"Spoj": [('2013-08-09 16:13:01', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'CE', '0', u'ADA95', ''), ('2013-08-09 16:13:19', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'RE', '0', u'C', ''), ('2013-08-09 16:13:50', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'RE', '0', u'C', ''), ('2013-08-09 16:15:24', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'RE', '0', u'C', ''), ('2013-08-12 10:48:56', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'CE', '0', u'ADA95', ''), ('2013-08-12 10:50:14', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'CE', '0', u'ADA95', ''), ('2013-08-13 19:11:24', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'WA', '0', u'C', ''), ('2013-08-13 19:11:50', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'WA', '0', u'C', ''), ('2015-03-24 05:08:29', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'AC', '100', u'C', ''), ('2015-03-28 00:47:43', 'https://www.spoj.com/problems/NSTEPS/', u'Number Steps', 'AC', '100', u'C++', ''), ('2015-06-30 03:38:17', 'https://www.spoj.com/problems/FCTRL/', u'Factorial', 'AC', '100', u'CPP', ''), ('2015-06-30 03:41:12', 'https://www.spoj.com/problems/FCTRL/', u'Factorial', 'AC', '100', u'CPP', ''), ('2015-06-30 03:42:49', 'https://www.spoj.com/problems/FCTRL/', u'Factorial', 'AC', '100', u'CPP', ''), ('2015-06-30 04:00:12', 'https://www.spoj.com/problems/FCTRL2/', u'Small factorials', 'AC', '100', u'C', ''), ('2015-06-30 04:16:14', 'https://www.spoj.com/problems/SAMER08F/', u'Feynman', 'AC', '100', u'CPP', ''), ('2015-06-30 04:58:12', 'https://www.spoj.com/problems/LASTDIG/', u'The last digit', 'AC', '100', u'CPP', ''), ('2015-07-25 17:08:08', 'https://www.spoj.com/problems/FARIDA/', u'Princess Farida', 'WA', '0', u'CPP', ''), ('2015-07-25 17:11:03', 'https://www.spoj.com/problems/FARIDA/', u'Princess Farida', 'WA', '0', u'CPP', ''), ('2015-07-25 17:15:01', 'https://www.spoj.com/problems/FARIDA/', u'Princess Farida', 'AC', '100', u'CPP', ''), ('2015-09-26 21:01:26', 'https://www.spoj.com/problems/MUL/', u'Fast Multiplication', 'TLE', '0', u'C++', ''), ('2015-09-26 21:04:40', 'https://www.spoj.com/problems/MUL/', u'Fast Multiplication', 'AC', '100', u'PYTHON', ''), ('2015-12-05 08:37:26', 'https://www.spoj.com/problems/PRIME1/', u'Prime Generator', 'WA', '0', u'C', ''), ('2017-05-15 17:07:43', 'https://www.spoj.com/problems/PRIME1/', u'Prime Generator', 'WA', '0', u'C', ''), ('2018-10-02 23:41:30', 'https://www.spoj.com/problems/ONP/', u'Transform the Expression', 'WA', '0', u'CPP', ''), ('2019-05-26 22:58:02', 'https://www.spoj.com/problems/BACTERIA/', u'SPOJ Custom Test', 'OTH', '0', u'PYTHON3', '')],
"HackerEarth": [('2014-06-17 14:50:52', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/333758'), ('2014-06-17 14:55:06', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/333766'), ('2014-06-17 14:56:59', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C', 'https://www.hackerearth.com/submission/333770'), ('2014-06-17 15:38:24', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C', 'https://www.hackerearth.com/submission/333824'), ('2014-06-17 15:53:23', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C', 'https://www.hackerearth.com/submission/333833'), ('2014-06-17 16:08:55', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/palindromic-numbers-7/', u'Palindromic Numbers', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/333846'), ('2014-10-02 04:57:34', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/789146'), ('2014-10-02 05:00:56', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/789152'), ('2014-10-02 05:20:08', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/789161'), ('2014-10-02 05:40:22', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/789173'), ('2014-10-02 05:40:22', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/789173'), ('2014-10-02 05:40:23', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/789174'), ('2014-10-02 05:43:40', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/789180'), ('2014-10-02 05:43:40', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/789181'), ('2014-10-02 05:51:40', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/complete-string-4/', u'Complete String', 'TLE', '0', u'C++', 'https://www.hackerearth.com/submission/789184'), ('2014-10-02 06:01:47', 
'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/complete-string-4/', u'Complete String', 'TLE', '0', u'C++', 'https://www.hackerearth.com/submission/789187'), ('2014-10-02 06:07:25', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/complete-string-4/', u'Complete String', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/789191'), ('2015-05-30 21:46:15', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/recursive-sums/', u'Recursive Sums', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1866870'), ('2015-05-30 21:47:45', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/recursive-sums/', u'Recursive Sums', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1866905'), ('2015-05-30 21:52:07', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/recursive-sums/', u'Recursive Sums', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1867017'), ('2015-05-30 21:58:10', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/recursive-sums/', u'Recursive Sums', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/1867183'), ('2015-06-01 21:51:41', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/very-cool-numbers/', u'Very Cool Numbers', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1876428'), ('2015-06-01 22:07:31', 'https://www.hackerearth.com/problem/algorithm/children-love-candies/', u'Children Love Candies', 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1877240'), ('2015-06-01 22:09:05', 'https://www.hackerearth.com/problem/algorithm/children-love-candies/', u'Children Love Candies', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/1877330'), ('2015-06-01 22:18:48', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/very-cool-numbers/', u'Very Cool Numbers', 'PS', '0', u'Python', 'https://www.hackerearth.com/submission/1877835'), ('2015-06-01 22:23:44', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/very-cool-numbers/', u'Very Cool Numbers', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/1878092'), ('2015-06-01 22:33:08', 'https://www.hackerearth.com/problem/algorithm/andrew-and-max/', u'Andrew and Max', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/1878567'), ('2015-06-01 22:55:56', 'https://www.hackerearth.com/problem/algorithm/zeroshark/', u'ZeroShark', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1879759'), ('2015-06-01 23:11:57', 'https://www.hackerearth.com/problem/algorithm/zeroshark/', u'ZeroShark', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1880558'), ('2015-06-01 23:17:34', 'https://www.hackerearth.com/problem/algorithm/zeroshark/', u'ZeroShark', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1880825'), ('2015-06-04 21:02:21', 'https://www.hackerearth.com/practice/algorithms/string-algorithm/basics-of-string-manipulation/practice-problems/algorithm/terrible-chandu/', u'Terrible 
Chandu', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/1894925'), ('2015-06-04 21:02:21', 'https://www.hackerearth.com/practice/algorithms/string-algorithm/basics-of-string-manipulation/practice-problems/algorithm/terrible-chandu/', u'Terrible Chandu', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/1894925'), ('2015-06-04 21:06:29', 'https://www.hackerearth.com/practice/algorithms/greedy/basics-of-greedy-algorithms/practice-problems/algorithm/chandu-and-consecutive-letters/', u'Chandu and Consecutive Letters', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/1895133'), ('2015-06-04 21:10:59', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/prateek-and-his-friends/', u'Prateek and his Friends', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/1895359'), ('2015-06-09 21:03:35', 'https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/chandu-and-his-girlfriend/', u'Chandu and his Girlfriend', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/1919932'), ('2015-06-09 21:07:37', 'https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/chandu-and-his-girlfriend-returns/', u'Chandu and his Girlfriend Returns', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1920040'), ('2015-06-09 21:12:29', 'https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/chandu-and-his-girlfriend-returns/', u'Chandu and his Girlfriend Returns', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1920191'), ('2015-06-09 21:18:14', 'https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/chandu-and-his-girlfriend-returns/', u'Chandu and his Girlfriend Returns', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/1920367'), ('2015-06-11 21:05:52', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/discover-the-monk/', u'Discover the Monk', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1930370'), ('2015-06-11 21:09:45', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/discover-the-monk/', u'Discover the Monk', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1930499'), ('2015-06-11 21:15:07', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/discover-the-monk/', u'Discover the Monk', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1930694'), ('2015-06-11 21:28:20', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'WA', '0', u'C', 'https://www.hackerearth.com/submission/1931189'), ('2015-06-11 21:28:38', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931196'), ('2015-06-11 21:29:06', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931215'), ('2015-06-11 21:30:47', 
'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931281'), ('2015-06-11 21:32:24', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931332'), ('2015-06-11 21:34:35', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931416'), ('2015-07-01 21:36:39', 'https://www.hackerearth.com/practice/algorithms/sorting/insertion-sort/practice-problems/algorithm/the-rise-of-the-weird-things-1/', u'The rise of the weird... things [1]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2037234'), ('2015-07-01 21:39:00', 'https://www.hackerearth.com/practice/algorithms/sorting/insertion-sort/practice-problems/algorithm/the-rise-of-the-weird-things-1/', u'The rise of the weird... things [1]', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2037359'), ('2015-07-01 22:06:20', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/the-savior-3/', u'The savior? [3]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2038727'), ('2015-07-01 22:14:10', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/the-savior-3/', u'The savior? [3]', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2039043'), ('2015-07-01 23:06:28', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/supernatural-squad-2/', u'Supernatural Squad [2]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2040873'), ('2015-07-01 23:06:28', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/supernatural-squad-2/', u'Supernatural Squad [2]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2040873'), ('2015-07-01 23:08:23', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/supernatural-squad-2/', u'Supernatural Squad [2]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2040928'), ('2015-07-01 23:10:56', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/supernatural-squad-2/', u'Supernatural Squad [2]', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2041005'), ('2015-07-03 18:28:59', 'https://www.hackerearth.com/problem/algorithm/valentine-shopping-4/', u'Valentine Shopping', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2053959'), ('2015-07-03 18:48:11', 'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'PS', '0', u'Python', 'https://www.hackerearth.com/submission/2054041'), ('2015-07-03 18:48:11', 'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'PS', '0', u'Python', 'https://www.hackerearth.com/submission/2054042'), ('2015-07-03 18:51:55', 
'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'PS', '0', u'Python', 'https://www.hackerearth.com/submission/2054062'), ('2015-07-03 18:57:12', 'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/2054105'), ('2015-07-03 18:57:12', 'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/2054106'), ('2015-07-03 21:37:13', 'https://www.hackerearth.com/problem/algorithm/beta-testing/', u'Beta Testing', 'WA', '0', u'Python', 'https://www.hackerearth.com/submission/2055210'), ('2015-07-03 22:22:51', 'https://www.hackerearth.com/problem/algorithm/beta-testing/', u'Beta Testing', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/2055901'), ('2015-07-04 12:55:07', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-in-the-real-estate/', u'Monk in the real estate', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2059508'), ('2015-07-06 22:30:59', 'https://www.hackerearth.com/problem/algorithm/beta-testing/', u'Beta Testing', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/2071774'), ('2015-07-06 22:48:05', 'https://www.hackerearth.com/problem/algorithm/beta-testing/', u'Beta Testing', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2071820'), ('2015-07-06 23:04:59', 'https://www.hackerearth.com/problem/algorithm/to-be-changed-choosing-a-project/', u'Side Projects', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2071872'), ('2015-07-06 23:30:34', 'https://www.hackerearth.com/problem/algorithm/to-be-changed-compile-time-fun/', u"It's Compiling!", 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2071940'), ('2015-07-08 23:20:31', 'https://www.hackerearth.com/problem/algorithm/monk-and-the-collision/', u'Monk and the Collision', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082091'), ('2015-07-08 23:21:06', 'https://www.hackerearth.com/problem/algorithm/monk-and-the-collision/', u'Monk and the Collision', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2082114'), ('2015-07-08 23:36:27', 'https://www.hackerearth.com/problem/algorithm/monk-in-the-land-of-pokemons/', u'Monk in the land of Pokemons!', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082452'), ('2015-07-08 23:38:45', 'https://www.hackerearth.com/problem/algorithm/monk-in-the-land-of-pokemons/', u'Monk in the land of Pokemons!', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082465'), ('2015-07-08 23:50:39', 'https://www.hackerearth.com/problem/algorithm/monk-in-the-land-of-pokemons/', u'Monk in the land of Pokemons!', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082564'), ('2015-07-08 23:50:39', 'https://www.hackerearth.com/problem/algorithm/monk-in-the-land-of-pokemons/', u'Monk in the land of Pokemons!', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082564'), ('2015-07-18 07:29:31', 'https://www.hackerearth.com/problem/algorithm/will-you-be-my-friend-pledge-easy/', u'Will you be my friend?', 'CE', '0', u'Java', 'https://www.hackerearth.com/submission/2144171'), ('2015-07-18 07:54:12', 
'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/intelligent-girl-1/', u'Intelligent Girl ', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2144180'), ('2015-07-23 12:47:19', 'https://www.hackerearth.com/practice/data-structures/trees/heapspriority-queues/practice-problems/algorithm/monk-and-multiplication/', u'Monk and Multiplication', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181397'), ('2015-07-23 12:48:32', 'https://www.hackerearth.com/practice/data-structures/trees/heapspriority-queues/practice-problems/algorithm/monk-and-multiplication/', u'Monk and Multiplication', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2181405'), ('2015-07-23 13:45:20', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181589'), ('2015-07-23 13:52:48', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181611'), ('2015-07-23 14:01:15', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181643'), ('2015-07-23 14:08:45', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181659'), ('2015-07-23 14:12:17', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2181670'), ('2015-07-23 14:16:03', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2181686'), ('2015-07-23 14:17:49', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181696'), ('2015-08-15 19:54:58', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-in-the-real-estate/', u'Monk in the real estate', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2326114'), ('2015-08-15 20:05:30', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-at-the-graph-factory/', u'Monk at the Graph Factory', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/2326217'), ('2015-08-15 20:07:06', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-at-the-graph-factory/', u'Monk at the Graph Factory', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/2326232'), ('2015-08-15 20:17:21', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-at-the-graph-factory/', u'Monk at the Graph Factory', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2326300'), ('2015-08-15 20:57:56', 
'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/2326601'), ('2015-08-15 21:10:36', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326699'), ('2015-08-15 21:13:03', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326714'), ('2015-08-15 21:15:52', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326727'), ('2015-08-15 21:20:43', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326762'), ('2015-08-15 21:20:43', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326762'), ('2015-08-15 21:27:49', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'RE', '0', u'C++', 'https://www.hackerearth.com/submission/2326799'), ('2015-08-15 21:28:47', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326811'), ('2015-08-15 21:42:24', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2326907'), ('2015-08-28 01:03:17', 'https://www.hackerearth.com/practice/data-structures/disjoint-data-strutures/basics-of-disjoint-data-structures/practice-problems/algorithm/city-and-flood-1/', u'City and Flood', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2400169'), ('2015-09-03 19:34:56', 'https://www.hackerearth.com/problem/algorithm/guess-the-triangle/', u'Guess the triangle', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2449157'), ('2015-12-18 12:28:32', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3031761'), ('2015-12-18 12:33:00', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3031774'), ('2015-12-18 12:46:11', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3031821'), ('2015-12-18 12:54:19', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3031840'), ('2015-12-18 22:25:48', 'https://www.hackerearth.com/problem/algorithm/special-subarray-1/', u'Special Subarray', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3035335'), ('2015-12-18 
22:31:43', 'https://www.hackerearth.com/problem/algorithm/special-subarray-1/', u'Special Subarray', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3035367'), ('2015-12-20 10:59:00', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/3050348'), ('2016-01-06 23:37:02', 'https://www.hackerearth.com/problem/algorithm/digital-numbers/', u'Digital Numbers', 'WA', '0', u'C', 'https://www.hackerearth.com/submission/3120602'), ('2016-09-14 22:25:52', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/xsquare-and-two-arrays/', u'Xsquare And Two Arrays', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/5167117'), ('2016-09-14 22:26:45', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/xsquare-and-two-arrays/', u'Xsquare And Two Arrays', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/5167122'), ('2016-09-14 22:46:04', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/xsquare-and-two-arrays/', u'Xsquare And Two Arrays', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/5167266'), ('2016-09-14 22:50:24', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/xsquare-and-two-arrays/', u'Xsquare And Two Arrays', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/5167320'), ('2016-09-29 21:25:56', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/choosing-the-judges-7/', u'Choosing the Judges', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/5421843'), ('2016-09-29 22:05:06', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/rhezo-and-prime-problems/', u'Rhezo and Prime Problems', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/5422329'), ('2016-09-29 22:16:01', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/rhezo-and-prime-problems/', u'Rhezo and Prime Problems', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/5422459')],
"HackerRank": [('2014-06-09 22:53:13', u'https://www.hackerrank.com/challenges/solve-me-first', u'Solve Me First', 'AC', '100', '-', ''), ('2014-06-09 23:03:21', u'https://www.hackerrank.com/challenges/find-point', u'Find the Point', 'AC', '100', '-', ''), ('2014-06-09 23:40:25', u'https://www.hackerrank.com/challenges/lonely-integer', u'Lonely Integer', 'AC', '100', '-', ''), ('2014-06-10 00:08:01', u'https://www.hackerrank.com/challenges/the-love-letter-mystery', u'The Love-Letter Mystery', 'AC', '100', '-', ''), ('2014-07-17 02:38:05', u'https://www.hackerrank.com/challenges/utopian-tree', u'Utopian Tree', 'AC', '100', '-', ''), ('2014-07-17 03:11:48', u'https://www.hackerrank.com/contests/w7/challenges/die-hard-3', u'Die Hard 3', 'AC', '100', '-', ''), ('2014-07-17 03:24:54', u'https://www.hackerrank.com/challenges/runningtime', u'Running Time of Algorithms', 'AC', '100', '-', ''), ('2014-07-17 03:49:56', u'https://www.hackerrank.com/contests/w7/challenges/string-function-calculation', u'String Function Calculation', 'AC', '100', '-', ''), ('2014-07-22 01:29:21', u'https://www.hackerrank.com/challenges/gem-stones', u'Gemstones', 'AC', '100', '-', ''), ('2014-08-08 17:24:20', u'https://www.hackerrank.com/contests/w8/challenges/counter-game', u'Counter game', 'AC', '100', '-', ''), ('2014-09-24 01:29:10', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler052', u'Project Euler #52: Permuted multiples', 'AC', '100', '-', ''), ('2014-09-27 20:48:27', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler001', u'Project Euler #1: Multiples of 3 and 5', 'AC', '100', '-', ''), ('2014-09-27 22:39:27', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler002', u'Project Euler #2: Even Fibonacci numbers', 'AC', '100', '-', ''), ('2014-09-28 00:53:48', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler016', u'Project Euler #16: Power digit sum', 'AC', '100', '-', ''), ('2014-09-28 03:59:31', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler034', u'Project Euler #34: Digit factorials', 'AC', '100', '-', ''), ('2014-10-01 19:47:25', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler042', u'Project Euler #42: Coded triangle numbers', 'AC', '100', '-', ''), ('2014-10-01 20:06:36', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler030', u'Project Euler #30: Digit Nth powers', 'AC', '100', '-', ''), ('2014-10-02 22:39:43', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler048', u'Project Euler #48: Self powers', 'AC', '100', '-', ''), ('2014-10-02 22:55:27', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler020', u'Project Euler #20: Factorial digit sum', 'AC', '100', '-', ''), ('2014-10-04 00:35:02', u'https://www.hackerrank.com/challenges/bigger-is-greater', u'Bigger is Greater', 'AC', '100', '-', ''), ('2014-10-04 05:36:38', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler005', u'Project Euler #5: Smallest multiple', 'AC', '100', '-', ''), ('2014-10-04 05:45:06', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler007', u'Project Euler #7: 10001st prime', 'AC', '100', '-', ''), ('2014-12-08 06:00:42', u'https://www.hackerrank.com/challenges/find-hackerrank', u'Find HackerRank', 'AC', '100', '-', ''), ('2014-12-08 06:08:01', u'https://www.hackerrank.com/challenges/valid-pan-format', u'Valid PAN format', 'AC', '100', '-', ''), ('2014-12-08 06:17:05', u'https://www.hackerrank.com/challenges/hackerrank-tweets', u'HackerRank 
Tweets', 'AC', '100', '-', ''), ('2014-12-08 06:31:09', u'https://www.hackerrank.com/challenges/split-number', u'Split the Phone Numbers', 'AC', '100', '-', ''), ('2015-05-29 07:50:36', u'https://www.hackerrank.com/challenges/select-all-sql', u'Select All', 'AC', '100', '-', ''), ('2015-05-29 07:52:08', u'https://www.hackerrank.com/challenges/select-by-id', u'Select By ID', 'AC', '100', '-', ''), ('2015-05-29 07:53:21', u'https://www.hackerrank.com/challenges/japanese-cities-attributes', u"Japanese Cities' Attributes", 'AC', '100', '-', ''), ('2015-05-29 07:54:43', u'https://www.hackerrank.com/challenges/japanese-cities-name', u"Japanese Cities' Names", 'AC', '100', '-', ''), ('2015-05-29 07:57:45', u'https://www.hackerrank.com/challenges/average-population', u'Average Population', 'AC', '100', '-', ''), ('2015-05-29 07:59:00', u'https://www.hackerrank.com/challenges/japan-population', u'Japan Population', 'AC', '100', '-', ''), ('2015-05-30 09:47:34', u'https://www.hackerrank.com/challenges/py-hello-world', u'Say "Hello, World!" With Python', 'AC', '100', '-', ''), ('2015-05-30 09:48:41', u'https://www.hackerrank.com/challenges/python-raw-input', u'Reading Raw Input', 'AC', '100', '-', ''), ('2015-05-30 09:50:03', u'https://www.hackerrank.com/challenges/python-arithmetic-operators', u'Arithmetic Operators', 'AC', '100', '-', ''), ('2015-05-30 09:53:02', u'https://www.hackerrank.com/challenges/python-division', u'Python: Division', 'AC', '100', '-', ''), ('2015-05-30 09:55:01', u'https://www.hackerrank.com/challenges/python-mod-divmod', u'Mod Divmod', 'AC', '100', '-', ''), ('2015-05-30 22:23:33', u'https://www.hackerrank.com/contests/code-cpp-may-2015/challenges/redundant-or-not', u'Redundant or Not?', 'AC', '100', '-', ''), ('2015-05-30 22:31:57', u'https://www.hackerrank.com/contests/code-cpp-may-2015/challenges/string-transformations', u'String Transformations', 'AC', '100', '-', ''), ('2015-05-31 08:52:13', u'https://www.hackerrank.com/contests/code-cpp-may-2015/challenges/linked-list-to-binary', u'Linked List to Binary', 'AC', '100', '-', ''), ('2015-05-31 09:20:17', u'https://www.hackerrank.com/contests/code-cpp-may-2015/challenges/polygon-inheritance', u'Polygon Inheritance', 'AC', '100', '-', ''), ('2015-06-01 06:19:47', u'https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list', u'Print the Elements of a Linked List', 'AC', '100', '-', ''), ('2015-06-01 06:22:43', u'https://www.hackerrank.com/challenges/insert-a-node-at-the-tail-of-a-linked-list', u'Insert a Node at the Tail of a Linked List', 'AC', '100', '-', ''), ('2015-06-01 06:24:34', u'https://www.hackerrank.com/challenges/insert-a-node-at-the-head-of-a-linked-list', u'Insert a node at the head of a linked list', 'AC', '100', '-', ''), ('2015-06-01 06:45:45', u'https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list', u'Insert a node at a specific position in a linked list', 'AC', '100', '-', ''), ('2015-06-01 06:49:29', u'https://www.hackerrank.com/challenges/delete-a-node-from-a-linked-list', u'Delete a Node', 'AC', '100', '-', ''), ('2015-06-01 06:51:09', u'https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list-in-reverse', u'Print in Reverse', 'AC', '100', '-', ''), ('2015-06-01 06:56:24', u'https://www.hackerrank.com/challenges/reverse-a-linked-list', u'Reverse a linked list', 'AC', '100', '-', ''), ('2015-06-01 06:59:39', u'https://www.hackerrank.com/challenges/compare-two-linked-lists', u'Compare two linked lists', 'AC', '100', '-', ''), 
('2015-06-01 07:07:07', u'https://www.hackerrank.com/challenges/merge-two-sorted-linked-lists', u'Merge two sorted linked lists', 'AC', '100', '-', ''), ('2015-06-01 07:12:02', u'https://www.hackerrank.com/challenges/get-the-value-of-the-node-at-a-specific-position-from-the-tail', u'Get Node Value', 'AC', '100', '-', ''), ('2015-06-01 07:18:57', u'https://www.hackerrank.com/challenges/delete-duplicate-value-nodes-from-a-sorted-linked-list', u'Delete duplicate-value nodes from a sorted linked list', 'AC', '100', '-', ''), ('2015-06-01 07:25:20', u'https://www.hackerrank.com/challenges/detect-whether-a-linked-list-contains-a-cycle', u'Cycle Detection', 'AC', '100', '-', ''), ('2015-06-01 07:39:03', u'https://www.hackerrank.com/challenges/find-the-merge-point-of-two-joined-linked-lists', u'Find Merge Point of Two Lists', 'AC', '100', '-', ''), ('2015-06-01 07:55:58', u'https://www.hackerrank.com/challenges/insert-a-node-into-a-sorted-doubly-linked-list', u'Inserting a Node Into a Sorted Doubly Linked List', 'AC', '100', '-', ''), ('2015-06-01 08:05:55', u'https://www.hackerrank.com/challenges/reverse-a-doubly-linked-list', u'Reverse a doubly linked list', 'AC', '100', '-', ''), ('2015-06-01 08:07:24', u'https://www.hackerrank.com/challenges/tree-preorder-traversal', u'Tree: Preorder Traversal', 'AC', '100', '-', ''), ('2015-06-01 08:09:21', u'https://www.hackerrank.com/challenges/tree-postorder-traversal', u'Tree: Postorder Traversal', 'AC', '100', '-', ''), ('2015-06-01 08:10:09', u'https://www.hackerrank.com/challenges/tree-inorder-traversal', u'Tree: Inorder Traversal', 'AC', '100', '-', ''), ('2015-06-03 03:08:32', u'https://www.hackerrank.com/challenges/connecting-towns', u'Connecting Towns', 'AC', '100', '-', ''), ('2015-06-03 03:13:31', u'https://www.hackerrank.com/challenges/handshake', u'Handshake', 'AC', '100', '-', ''), ('2015-06-03 03:17:17', u'https://www.hackerrank.com/challenges/correctness-invariant', u'Correctness and the Loop Invariant', 'AC', '100', '-', ''), ('2015-06-03 03:22:14', u'https://www.hackerrank.com/challenges/tutorial-intro', u'Intro to Tutorial Challenges', 'AC', '100', '-', ''), ('2015-06-10 11:27:13', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/text-processing-in-linux-the-grep-command-4', u"'Grep' - A", 'AC', '100', '-', ''), ('2015-06-10 11:32:34', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/paste-1', u'Paste - 1', 'AC', '100', '-', ''), ('2015-06-10 11:52:57', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/awk-1', u"'Awk' - 1", 'AC', '100', '-', ''), ('2015-06-10 11:56:28', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/awk-2', u"'Awk' - 2", 'AC', '100', '-', ''), ('2015-06-10 12:10:10', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/text-processing-in-linux-the-grep-command-5', u"'Grep' - B", 'AC', '100', '-', ''), ('2015-06-27 21:35:13', u'https://www.hackerrank.com/contests/segfault/challenges/three-loops', u'Three Loops', 'AC', '100', '-', ''), ('2015-06-27 22:25:24', u'https://www.hackerrank.com/contests/segfault/challenges/count-the-divisors', u'Count the Divisors', 'AC', '100', '-', ''), ('2015-08-01 21:58:15', u'https://www.hackerrank.com/contests/countercode/challenges/imba', u'Imba', 'AC', '100', '-', ''), ('2015-08-01 22:46:04', u'https://www.hackerrank.com/contests/countercode/challenges/campers', u'Campers', 'AC', '100', '-', ''), ('2015-10-30 02:51:27', u'https://www.hackerrank.com/contests/codestorm/challenges/emmas-notebook', 
u"Emma's Notebook", 'AC', '100', '-', ''), ('2016-08-06 21:37:21', u'https://www.hackerrank.com/contests/morgan-stanley-2016/challenges/jesse-and-profit', u'Jesse and Profit', 'AC', '100', '-', ''), ('2016-08-24 06:14:46', u'https://www.hackerrank.com/challenges/30-hello-world', u'Day 0: Hello, World.', 'AC', '100', '-', ''), ('2017-11-03 00:51:08', u'https://www.hackerrank.com/challenges/30-data-types', u'Day 1: Data Types', 'AC', '100', '-', '')],
"UVa": [('2016-12-11 20:21:23', 'https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&problem=38', 'Ecological Bin Packing', 'WA', '0', 'C++', ''), ('2016-12-14 05:23:40', 'https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&problem=38', 'Ecological Bin Packing', 'CE', '0', 'C++', '')],
"Timus": [('2018-07-01 01:41:04', 'http://acm.timus.ru/problem.aspx?space=1&num=1285&locale=en', u'1285. Thread in a Hyperspace', 'CE', '0', u'G++ 7.1', '')],
"AtCoder": [('2020-05-16 19:04:34', u'https://atcoder.jp/contests/abc135/tasks/abc135_d', 'D. Digits Parade', u'WA', 0.0, u'Python2 (2.7.6)', u'https://atcoder.jp/contests/abc135/submissions/13262993'), ('2020-05-18 12:04:47', u'https://atcoder.jp/contests/abc135/tasks/abc135_d', 'D. Digits Parade', u'WA', 0.0, u'Python2 (2.7.6)', u'https://atcoder.jp/contests/abc135/submissions/13368979'), ('2020-05-18 12:58:01', u'https://atcoder.jp/contests/agc010/tasks/agc010_a', 'A. Addition', u'RE', 0.0, u'Python2 (2.7.6)', u'https://atcoder.jp/contests/agc010/submissions/13370205')]
}
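        # Problem-name mappings consumed by the UVa and AtCoder parsers below;
        # built from the locally stored problem tables.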
uva_problem_dict = utilities.get_problem_mappings(uvadb, uvadb.problem,
["problem_id",
"title"])
atcoder_problem_dict = utilities.get_problem_mappings(db,
db.atcoder_problems,
["problem_identifier",
"name"])
last_retrieved = time.strptime(str(current.INITIAL_DATE), "%Y-%m-%d %H:%M:%S")
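        # Retrieve submissions for each handle, skipping sites that are down,
        # and compare the sorted result with the fixtures in expected_result.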
for site in handles:
Profile = getattr(sites, site.lower()).Profile
if Profile.is_website_down():
continue
site_method = Profile(handles[site]).get_submissions
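            # UVa and AtCoder resolve problem names through the mappings built
            # above; the remaining sites are called with just the timestamp and flag.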
if site == "UVa":
submissions = site_method(last_retrieved, uva_problem_dict, False)
elif site == "AtCoder":
submissions = site_method(last_retrieved, atcoder_problem_dict, False)
else:
submissions = site_method(last_retrieved, False)
submissions = sorted(submissions)
if submissions != expected_result[site]:
raise RuntimeError("Submissions list does not match for " + site)
# ------------------------------------------------------------------------------
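# Run a single test method on `retrieval_object` and report the outcome.
# Returns "Success" on the first clean run; otherwise returns the method name
# plus the exception message. The xrange(1) loop keeps the retry scaffolding in
# place (raise the range to add retries, with a 2-second pause between them).
# Usage (see the driver block below):
#   test_retrieval(RetrievalTest(), "test_tag_retrieval")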
def test_retrieval(retrieval_object, method_name):
error_message = ""
for i in xrange(1):
try:
getattr(retrieval_object, method_name)()
return "Success"
except Exception as e:
            error_message = method_name + " " + str(e)
time.sleep(2)
return error_message
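# Driver: instantiate the test suite, run each retrieval check once, and push
# any accumulated failure messages to Pushover so scraper breakages are noticed.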
rt = RetrievalTest()
pushover_message = ""
for method_name in [
"test_tag_retrieval",
"test_editorial_retrieval",
"test_invalid_handle",
"test_download_submission",
"test_rating_graph",
"test_submissions",
"test_problem_setters_retrieval"
]:
res = test_retrieval(rt, method_name)
if res != "Success":
pushover_message += res + "\n"
if pushover_message != "":
print "pushover_message", pushover_message
response = requests.post("https://api.pushover.net/1/messages.json",
data={"token": current.pushover_api_token,
"user": current.pushover_user_token,
"message": pushover_message.strip(),
"title": "Extras retrieval failure",
"priority": 1},
verify=False).json()
"""
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import requests
import sites
import urllib3
urllib3.disable_warnings()
current.environment = "test"
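# Note: this script relies on globals that are not imported here (`current`,
# `db`, `uvadb`, `utilities`), so it is presumably meant to run inside the web
# application's shell environment (e.g. a web2py `-S appname -M -R script`
# invocation) rather than as a standalone Python module.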
# ==============================================================================
class RetrievalTest:
# --------------------------------------------------------------------------
def __init__(self):
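        # Cache the Profile class for every supported site
        # (e.g. sites.codechef.Profile), keyed by the site name from current.SITES.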
self.profile_site = {}
for site in current.SITES:
self.profile_site[site] = getattr(sites, site.lower()).Profile
# --------------------------------------------------------------------------
def test_tag_retrieval(self):
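        # For each site that exposes problem tags, assert that
        # get_problem_details(update_things=["tags"]) returns the pinned tag set
        # for a known problem, and an empty (or [u"-"]) result for a problem
        # that has no tags, e.g.:
        #   self.profile_site["CodeChef"].get_problem_details(
        #       problem_link="https://www.codechef.com/PRACTICE/problems/FNCS",
        #       update_things=["tags"])["tags"]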
sites_with_tags_functionality = ["CodeChef", "CodeForces", "Spoj", "HackerEarth", "HackerRank", "Timus"]
assertion_hash = {
"with_tags": {
"CodeChef": {
"plink": "https://www.codechef.com/PRACTICE/problems/FNCS",
"tags": [u'data-structure', u'devuy11', u'fenwick', u'medium-hard', u'nov14', u'segment-tree', u'sqrt-decomp']
},
"CodeForces": {
"plink": "http://www.codeforces.com/problemset/problem/323/A",
"tags": [u'combinatorics', u'constructive algorithms']
},
"Spoj": {
"plink": "https://www.spoj.com/problems/YODANESS/",
"tags": [u'graph-theory', u'number-theory', u'shortest-path', u'sorting', u'tree', u'bitmasks']
},
"HackerEarth": {
"plink": "https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/candy-distribution/",
"tags": [u'Dynamic Programming', u'Mathematics', u'Number Theory']
},
"HackerRank": {
"plink": "https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list",
"tags": [u'Linked Lists']
},
"Timus": {
"plink": "http://acm.timus.ru/problem.aspx?space=1&num=1954&locale=en",
"tags": [u'hardest problem', u'palindromes', u'string algorithms']
}
},
"without_tags": {
"CodeChef": "https://www.codechef.com/ZCOPRAC/problems/ZCO14004",
"CodeForces": "http://www.codeforces.com/problemset/gymProblem/100570/C",
"Spoj": "https://www.spoj.com/problems/TOUR/",
"HackerEarth": "https://www.hackerearth.com/problem/algorithm/find-pairs-1/",
"Timus": "http://acm.timus.ru/problem.aspx?space=1&num=1559&locale=en"
}
}
for site in sites_with_tags_functionality:
P = self.profile_site[site]
if P.is_website_down():
                # Don't test websites that are acknowledged to be down
continue
tags_func = P.get_problem_details
tags_val = tags_func(problem_link=assertion_hash["with_tags"][site]["plink"],
update_things=["tags"])["tags"]
if set(tags_val) != set(assertion_hash["with_tags"][site]["tags"]):
raise RuntimeError(site + " with tags failure")
if site in assertion_hash["without_tags"]:
tags_val = tags_func(problem_link=assertion_hash["without_tags"][site],
update_things=["tags"])["tags"]
if tags_val not in ([u"-"], []):
raise RuntimeError(site + " without tags failure")
# --------------------------------------------------------------------------
def test_editorial_retrieval(self):
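        # For each site that exposes editorials, assert that
        # get_problem_details(update_things=["editorial_link"]) returns the
        # pinned editorial URL for a known problem, and None for a problem
        # without an editorial.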
sites_with_editorial_functionality = ["CodeChef", "CodeForces", "HackerEarth", "HackerRank"]
assertion_hash = {
"with_editorial": {
"CodeChef": {
"plink": "https://www.codechef.com/LTIME27/problems/INVERT",
"editorial_link": "https://discuss.codechef.com/problems/INVERT"
},
"CodeForces": {
"plink": "http://www.codeforces.com/problemset/problem/102/B",
"editorial_link": "http://www.codeforces.com/blog/entry/2393"
},
"HackerEarth": {
"plink": "https://www.hackerearth.com/problem/approximate/lots-of-circles/",
"editorial_link": "https://www.hackerearth.com/problem/approximate/lots-of-circles/editorial/"
},
"HackerRank": {
"plink": "https://www.hackerrank.com/challenges/candles-2",
"editorial_link": "https://www.hackerrank.com/challenges/candles-2/editorial/"
},
"AtCoder": {
"plink": "https://atcoder.jp/contests/agc035/tasks/agc035_c",
"editorial_link": "https://img.atcoder.jp/agc035/editorial.pdf"
}
},
"without_editorial": {
"CodeChef": "https://www.codechef.com/PRACTICE/problems/PG",
"CodeForces": "http://www.codeforces.com/problemset/problem/234/D",
"HackerEarth": "https://www.hackerearth.com/problem/algorithm/level-selections/"
}
}
for site in sites_with_editorial_functionality:
P = self.profile_site[site]
if P.is_website_down():
                # Don't test websites that are acknowledged to be down
continue
editorial_func = P.get_problem_details
editorial_link = editorial_func(problem_link=assertion_hash["with_editorial"][site]["plink"],
update_things=["editorial_link"])["editorial_link"]
if editorial_link != assertion_hash["with_editorial"][site]["editorial_link"]:
raise RuntimeError(site + " with editorial failure")
if site in assertion_hash["without_editorial"]:
editorial_link = editorial_func(problem_link=assertion_hash["without_editorial"][site],
update_things=["editorial_link"])["editorial_link"]
if editorial_link is not None:
raise RuntimeError(site + " without editorial failure")
# --------------------------------------------------------------------------
def test_problem_setters_retrieval(self):
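        # For each site that exposes problem setters, assert that
        # get_problem_details(update_things=["problem_setters"]) returns the
        # pinned setter list for a known problem, and None for a problem whose
        # setters are not listed.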
sites_with_problem_setters = ["CodeChef", "CodeForces", "HackerEarth", "HackerRank", "Spoj", "Timus"]
assertion_hash = {
"with_problem_setters": {
"CodeChef": {
"plink": "https://www.codechef.com/LTIME27/problems/INVERT",
"problem_setters": ["ma5termind"]
},
"CodeForces": {
"plink": "http://www.codeforces.com/problemset/problem/1200/B",
"problem_setters": ["djm03178", "nong"]
},
"HackerEarth": {
"plink": "https://www.hackerearth.com/problem/algorithm/level-selections/",
"problem_setters": ["akileshreddy40950"]
},
"HackerRank": {
"plink": "https://www.hackerrank.com/challenges/candles-2",
"problem_setters": ["gdisastery"]
},
"Timus": {
"plink": "https://acm.timus.ru/problem.aspx?space=1&num=1954&locale=en",
"problem_setters": ["Mikhail Rubinchik (prepared by Kirill Borozdin)"]
},
"Spoj": {
"plink": "https://www.spoj.com/problems/CONNECT2/",
"problem_setters": ["nikola_borisof"]
}
},
"without_problem_setters": {
"CodeForces": "http://www.codeforces.com/problemset/problem/1212/C",
"HackerEarth": "https://www.hackerearth.com/challenges/college/engineers-day-nit-silchar-challenge/algorithm/valentines-day/"
}
}
for site in sites_with_problem_setters:
P = self.profile_site[site]
if P.is_website_down():
                # Don't test websites that are acknowledged to be down
continue
pd_func = P.get_problem_details
current_setters = pd_func(problem_link=assertion_hash["with_problem_setters"][site]["plink"],
update_things=["problem_setters"])["problem_setters"]
if current_setters != assertion_hash["with_problem_setters"][site]["problem_setters"]:
raise RuntimeError(site + " with problem_setters failure")
if site in assertion_hash["without_problem_setters"]:
current_setters = pd_func(problem_link=assertion_hash["without_problem_setters"][site],
update_things=["problem_setters"])["problem_setters"]
if current_setters is not None:
raise RuntimeError(site + " without problem_setters failure")
return
# --------------------------------------------------------------------------
def test_invalid_handle(self):
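        # A handle that should not exist anywhere must be reported as invalid by
        # every site that is currently up; `result` is a list of
        # (site, is_invalid) pairs, e.g. [("CodeChef", True), ("Spoj", True), ...].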
handle = "thisreallycantbeahandle308"
result = map(lambda site: (site, self.profile_site[site].is_invalid_handle(handle)),
                     filter(lambda site: not self.profile_site[site].is_website_down(),
current.SITES.keys()))
failure_sites = []
for site, res in result:
if not res:
failure_sites.append(site)
if len(failure_sites) > 0:
raise RuntimeError(", ".join(failure_sites) + " " + "invalid handle failure")
# --------------------------------------------------------------------------
def test_download_submission(self):
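        # For sites that support fetching submission source code, assert that
        # download_submission(view_link) returns the exact source text pinned in
        # assertion_hash below.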
import requests
from bs4 import BeautifulSoup
sites_with_download_functionality = ["CodeChef", "CodeForces"]
assertion_hash = {
"CodeChef": {
"view_link": "https://www.codechef.com/viewsolution/27348746",
"submission": '#include<bits/stdc++.h>\r\nusing namespace std;\r\nint main(){\r\n\tint t;\r\n\tcin>>t;\r\n\twhile(t--){\r\n\t\tint n,m,u,v;\r\n\t\tcin>>n>>m;\r\n\t\tif(m%2==0){\r\n\t\t\tint temp;\r\n\t\t\tfor(auto i=0;i<m;i++){\r\n\t\t\t\tcin>>temp>>temp;\r\n\t\t\t}\t\r\n\t\t\tcout<<1<<endl;\r\n\t\t\tfor(auto i=0;i<n;i++)\r\n\t\t\t{\r\n\t\t\t\tcout<<1<<" ";\r\n\t\t\t}\r\n\t\t\tcout<<endl;\r\n\t\t\tcontinue;\r\n\t\t}\r\n\r\n\t\t// m is odd\r\n\t\tvector<vector<int>> g(n);\r\n\t\tvector<int> d(n);\r\n\t\tfor(auto i=0;i<m;i++){\r\n\t\t\tcin>>u>>v;\r\n\t\t\td[u-1]++;\r\n\t\t\td[v-1]++;\r\n\t\t\tg[u-1].push_back(v-1);\r\n\t\t\tg[v-1].push_back(u-1);\r\n\t\t}\r\n\r\n\t\t// m is odd and we find an odd vertice\r\n\t\tint idx=-1;\r\n\t\tfor(auto i=0;i<n;i++){\r\n\t\t\tif(d[i]%2==1) {idx=i;break;}\r\n\t\t}\r\n\t\tif(idx!=-1){\r\n\t\t\tcout<<2<<endl;\r\n\t\t\tfor(auto i=0;i<n;i++)\r\n\t\t\t{\r\n\t\t\t\tcout<<((i==idx)?1:2)<<" ";\r\n\t\t\t}\r\n\t\t\tcout<<endl;\r\n\t\t\tcontinue;\r\n\r\n\t\t}\r\n\r\n\t\t// m is odd and all degrees are even\r\n\t\t// idx is 3 idx1 is 2 rest is 1\r\n\t\tidx=-1;\r\n\t\tint idx1=-1;\r\n\t\t// find a vertex removing which we get odd vertices\r\n\t\tfor(auto i=0;i<n;i++){\r\n\t\t\tif(d[i]>0){idx=i;break;}\r\n\t\t}\r\n\t\t// idx will be 3\r\n\t\t// change all degrees\r\n\t\tfor(auto i:g[idx]){\r\n\t\t\td[i]--;\r\n\t\t\tidx1=i;\r\n\t\t}\r\n\t\tcout<<3<<endl;\r\n\t\td[idx]=0;\r\n\t\tg[idx]=vector<int>();\r\n\t\tfor(auto i=0;i<n;i++)\r\n\t\t{\r\n\t\t\tif(i==idx){ \r\n\t\t\t\tcout<<1<<" ";\r\n\t\t\t}\r\n\t\t\telse if(i==idx1){\r\n\t\t\t\tcout<<2<<" ";\r\n\t\t\t}\r\n\t\t\telse{\r\n\t\t\t\tcout<<3<<" ";\r\n\t\t\t}\r\n\t\t}\r\n\t\tcout<<endl;\r\n\t}\r\n}\r\n'
},
"CodeForces": {
"view_link": "http://www.codeforces.com/contest/454/submission/7375767",
"submission": '#include<stdio.h>\nint main()\n{\n\tint n,i,j,k;\n\tscanf("%d",&n);\n\tint h=n/2+1;\n\tfor(i=0;i<h;i++)\n\t{\n\t\tfor(k=0;k<n/2-i;k++)\n\t\t\tprintf("*");\n\t\tfor(j=0;j<2*i+1;j++)\n\t\t\tprintf("D");\n\t\tfor(j=n/2+i+1;j<n;j++)\n\t\t\tprintf("*");\n\t\tprintf("\\n");\n\t}\n\tfor(i=0;i<n/2;i++)\n\t{\n\t\tfor(k=0;k<=i;k++)\n\t\t printf("*");\n\t\tfor(j=n-2*i;j>=3;j--)\n\t\t\tprintf("D");\n\t\tfor(j=0;j<=i;j++)\n\t\t\tprintf("*");\n\t\tprintf("\\n");\n\t}\n\treturn 0;\n}\n'
},
"AtCoder": {
"view_link": "https://atcoder.jp/contests/agc039/submissions/7869333",
"submission": "/**\r\n * author: tourist\r\n * created: 05.10.2019 16:12:28 \r\n**/\r\n#include <bits/stdc++.h>\r\n\r\nusing namespace std;\r\n\r\nint main() {\r\n ios::sync_with_stdio(false);\r\n cin.tie(0);\r\n int n;\r\n cin >> n;\r\n n *= 2;\r\n vector<string> g(n);\r\n for (int i = 0; i < n; i++) {\r\n cin >> g[i];\r\n }\r\n vector<vector<vector<long long>>> dp(2 * n, vector<vector<long long>>(2 * n, vector<long long>(2 * n)));\r\n for (int i = n - 1; i >= 1; i--) {\r\n for (int j = i; j < n; j++) {\r\n for (int k = j; k < n; k++) {\r\n if (i == j && j == k) {\r\n dp[i][j][k] = 1;\r\n continue;\r\n }\r\n if (i == j || j == k) {\r\n dp[i][j][k] = 0;\r\n continue;\r\n }\r\n dp[i][j][k] = 0;\r\n for (int x = i; x < j; x++) {\r\n for (int y = j + 1; y <= k; y++) {\r\n for (int u = i; u <= x; u++) {\r\n for (int v = y; v <= k; v++) {\r\n if (g[u][v] == '1') {\r\n dp[i][j][k] += dp[i][u][x] * dp[y][v][k] * dp[x + 1][j][y - 1];\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n long long ans = 0;\r\n for (int j = 1; j < n; j++) {\r\n if (g[0][j] == '1') {\r\n ans += dp[1][j][n - 1];\r\n }\r\n }\r\n cout << ans << '\\n';\r\n return 0;\r\n}\r\n"
}
}
for site in sites_with_download_functionality:
P = self.profile_site[site]
if P.is_website_down():
                # Don't test websites that are acknowledged to be down
continue
submission_content = P.download_submission(assertion_hash[site]["view_link"])
if submission_content != assertion_hash[site]["submission"]:
raise RuntimeError(site + " download submission failed")
# --------------------------------------------------------------------------
def test_rating_graph(self):
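        # Assert that rating_graph_data(handle) reproduces the pinned
        # contest-history structure (a list of {"title": ..., "data": {...}}
        # graphs) for each site with rating-graph support.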
sites_with_rating_graph_functionality = ["CodeChef", "CodeForces", "HackerRank", "HackerEarth"]
handles = {
"CodeChef": "tryingtocode",
"CodeForces": "raj454raj",
"HackerRank": "tryingtocode",
"HackerEarth": "karanaggarwal",
"AtCoder": "imanudeep111"
}
expected_list = {
"CodeChef": [{'data': {'2015-06-15 15:00:00': {'url': 'https://www.codechef.com/JUNE15', 'rating': '1605', 'name': 'June Challenge 2015', 'rank': '1913'}, '2016-06-15 15:00:00': {'url': 'https://www.codechef.com/JUNE16', 'rating': '1641', 'name': 'June Challenge 2016', 'rank': '5083'}, '2014-07-14 15:00:00': {'url': 'https://www.codechef.com/JULY14', 'rating': '1518', 'name': 'July Challenge 2014', 'rank': '2769'}, '2015-08-17 15:00:00': {'url': 'https://www.codechef.com/AUG15', 'rating': '1704', 'name': 'August Challenge 2015', 'rank': '1244'}, '2014-01-13 15:00:00': {'url': 'https://www.codechef.com/JAN14', 'rating': '1462', 'name': 'January Challenge 2014', 'rank': '3548'}, '2014-12-15 17:00:00': {'url': 'https://www.codechef.com/DEC14', 'rating': '1609', 'name': 'December Challenge 2014', 'rank': '2218'}, '2015-01-12 15:00:00': {'url': 'https://www.codechef.com/JAN15', 'rating': '1617', 'name': 'January Challenge 2015', 'rank': '3105'}, '2015-09-14 15:00:00': {'url': 'https://www.codechef.com/SEPT15', 'rating': '1829', 'name': 'September Challenge 2015', 'rank': '1417'}, '2014-11-17 15:00:00': {'url': 'https://www.codechef.com/NOV14', 'rating': '1717', 'name': 'November Challenge 2014', 'rank': '1751'}, '2015-03-16 15:00:00': {'url': 'https://www.codechef.com/MARCH15', 'rating': '1553', 'name': 'March Challenge 2015', 'rank': '2489'}, '2014-06-16 15:00:00': {'url': 'https://www.codechef.com/JUNE14', 'rating': '1455', 'name': 'June Challenge 2014', 'rank': '4382'}, '2014-02-17 15:00:00': {'url': 'https://www.codechef.com/FEB14', 'rating': '1509', 'name': 'February Challenge 2014', 'rank': '2007'}, '2015-05-18 15:00:00': {'url': 'https://www.codechef.com/MAY15', 'rating': '1519', 'name': 'May Challenge 2015', 'rank': '2946'}, '2015-07-13 15:00:00': {'url': 'https://www.codechef.com/JULY15', 'rating': '1635', 'name': 'July Challenge 2015', 'rank': '1554'}, '2014-08-11 15:00:00': {'url': 'https://www.codechef.com/AUG14', 'rating': '1633', 'name': 'August Challenge 2014', 'rank': '1293'}, '2014-10-13 15:00:00': {'url': 'https://www.codechef.com/OCT14', 'rating': '1730', 'name': 'October Challenge 2014', 'rank': '900'}}, 'title': 'CodeChef Long'}, {'data': {'2015-09-21 00:00:00': {'url': 'https://www.codechef.com/COOK62', 'rating': '1807', 'name': 'September Mega Cook-Off 2015', 'rank': '751'}, '2015-08-24 00:50:00': {'url': 'https://www.codechef.com/COOK61', 'rating': '1881', 'name': 'August Cook-Off 2015', 'rank': '221'}}, 'title': 'CodeChef Cook-off'}, {'data': {}, 'title': 'CodeChef Lunchtime'}],
"CodeForces": [{'data': {'2015-09-28 14:30:00': {'rating': '1295', 'name': u'Codeforces Round #322 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/581', 'rank': 1836, 'ratingChange': -84}, '2014-09-28 21:05:00': {'rating': '1279', 'name': u'Codeforces Round #270', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/472', 'rank': 3520, 'ratingChange': -124}, '2015-09-10 22:00:00': {'rating': '1422', 'name': u'Codeforces Round #319 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/577', 'rank': 940, 'ratingChange': 134}, '2016-01-14 22:05:00': {'rating': '1228', 'name': u'Codeforces Round #339 (Div. 2)', 'solvedCount': 0, 'url': 'http://www.codeforces.com/contest/614', 'rank': 1929, 'ratingChange': -81}, '2016-08-20 18:35:00': {'rating': '1298', 'name': u'Codeforces Round #368 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/707', 'rank': 1919, 'ratingChange': 82}, '2015-10-31 22:00:00': {'rating': '1284', 'name': u'Codeforces Round #328 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/592', 'rank': 2075, 'ratingChange': 11}, '2015-10-25 14:30:00': {'rating': '1273', 'name': u'Codeforces Round #327 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/591', 'rank': 2259, 'ratingChange': -25}, '2015-09-22 22:00:00': {'rating': '1379', 'name': u'Codeforces Round #321 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/580', 'rank': 2018, 'ratingChange': -43}, '2014-08-08 21:00:00': {'rating': '1403', 'name': u'Codeforces Round #260 (Div. 2)', 'solvedCount': 0, 'url': 'http://www.codeforces.com/contest/456', 'rank': 2152, 'ratingChange': -97}, '2015-12-01 21:05:00': {'rating': '1351', 'name': u'Codeforces Round #334 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/604', 'rank': 1079, 'ratingChange': 67}, '2016-08-29 17:35:00': {'rating': '1309', 'name': u'Codeforces Round #369 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/711', 'rank': 2332, 'ratingChange': 11}, '2015-12-09 21:35:00': {'rating': '1309', 'name': u'Codeforces Round #335 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/606', 'rank': 2249, 'ratingChange': -42}, '2016-08-11 22:05:00': {'rating': '1216', 'name': u'Codeforces Round #367 (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/706', 'rank': 2989, 'ratingChange': -12}, '2015-08-29 22:00:00': {'rating': '1288', 'name': u'Codeforces Round #318 [RussianCodeCup Thanks-Round] (Div. 2)', 'solvedCount': 1, 'url': 'http://www.codeforces.com/contest/574', 'rank': 2009, 'ratingChange': -70}, '2015-10-03 22:15:00': {'rating': '1285', 'name': u'Codeforces Round #323 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/583', 'rank': 2912, 'ratingChange': -10}, '2015-10-06 22:00:00': {'rating': '1298', 'name': u'Codeforces Round #324 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/584', 'rank': 2062, 'ratingChange': 13}, '2014-10-06 21:00:00': {'rating': '1227', 'name': u'Codeforces Round #271 (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/474', 'rank': 1654, 'ratingChange': -52}, '2015-08-22 22:00:00': {'rating': '1358', 'name': u'Codeforces Round #317 [AimFund Thanks-Round] (Div. 2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/572', 'rank': 1114, 'ratingChange': 131}, '2016-09-23 18:35:00': {'rating': '1377', 'name': u'Codeforces Round #373 (Div. 
2)', 'solvedCount': 2, 'url': 'http://www.codeforces.com/contest/719', 'rank': 1593, 'ratingChange': 68}}, 'title': 'Codeforces'}],
"HackerRank": [{'data': {'2014-07-21 21:30:00': {'url': u'https://www.hackerrank.com/w7', 'rating': '1554.46', 'name': u'Weekly Challenges - Week 7', 'rank': 499}, '2015-10-30 21:30:00': {'url': u'https://www.hackerrank.com/codestorm', 'rating': '1276.05', 'name': u'CodeStorm 2015', 'rank': 3743}, '2015-08-02 21:30:00': {'url': u'https://www.hackerrank.com/countercode', 'rating': '1287.0', 'name': u'CounterCode 2015', 'rank': 3605}, '2014-08-11 21:30:00': {'url': u'https://www.hackerrank.com/w8', 'rating': '1276.88', 'name': u'Weekly Challenges - Week 8', 'rank': 1204}}, 'title': u'HackerRank - Algorithms'}],
"HackerEarth": [{'data': {'2016-05-21 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/may-circuits/', 'rating': 1493, 'name': 'May Circuits', 'rank': 714}, '2017-10-21 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/october-circuits-17/', 'rating': 1491, 'name': "October Circuits '17", 'rank': 1225}, '2017-09-22 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/september-circuits-17/', 'rating': 1569, 'name': "September Circuits '17", 'rank': 291}, '2020-05-16 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/may-circuits-20/', 'rating': 1415, 'name': "May Circuits '20", 'rank': 647}, '2018-03-17 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/march-circuits-18/', 'rating': 1461, 'name': "March Circuits '18", 'rank': 523}, '2019-01-18 09:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/january-circuits-19/', 'rating': 1337, 'name': "January Circuits '19", 'rank': 3420}, '2017-07-28 10:30:00': {'url': 'https://www.hackerearth.com/challenges/competitive/july-circuits-17/', 'rating': 1462, 'name': "July Circuits '17", 'rank': 1326}}, 'title': 'HackerEarth'}],
"AtCoder": [{'data': {'2020-01-10 19:10:00': {'url': 'https://atcoder.jp/contests/abc150', 'rating': '-', 'ratingChange': '-', 'name': u'AtCoder Beginner Contest 150', 'rank': u'2640'}, '2020-03-14 19:10:00': {'url': 'https://atcoder.jp/contests/panasonic2020', 'rating': '33', 'ratingChange': '+31', 'name': u'Panasonic Programming Contest 2020', 'rank': u'3897'}, '2020-05-02 19:20:00': {'url': 'https://atcoder.jp/contests/abc165', 'rating': '192', 'ratingChange': '+51', 'name': u'AtCoder Beginner Contest 165', 'rank': u'6343'}, '2020-03-01 19:10:00': {'url': 'https://atcoder.jp/contests/abc157', 'rating': '2', 'ratingChange': '-', 'name': u'AtCoder Beginner Contest 157', 'rank': u'6327'}, '2020-04-26 19:10:00': {'url': 'https://atcoder.jp/contests/abc164', 'rating': '141', 'ratingChange': '+108', 'name': u'AtCoder Beginner Contest 164', 'rank': u'3184'}, '2020-04-19 19:10:00': {'url': 'https://atcoder.jp/contests/abc163', 'rating': '-', 'ratingChange': '-', 'name': u'AtCoder Beginner Contest 163', 'rank': u'4042'}}, 'title': 'AtCoder'}]
}
result = {}
for site in sites_with_rating_graph_functionality:
P = self.profile_site[site]
if P.is_website_down():
                # Don't test websites that are acknowledged to be down
continue
get_rating_func = P.rating_graph_data
res = get_rating_func(handles[site])
if expected_list[site] != res:
raise RuntimeError("Rating graph dict does not match for " + site)
# --------------------------------------------------------------------------
def test_submissions(self):
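        # Fetch all submissions for a fixed handle on each site and compare the
        # sorted result against the pinned fixtures in expected_result below.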
handles = {
"CodeChef": "tryingtocode",
"CodeForces": "raj454raj",
"HackerRank": "tryingtocode",
"HackerEarth": "raj454raj",
"Spoj": "raj454raj",
"UVa": "raj454raj",
"Timus": "222187",
"AtCoder": "raj454raj"
}
expected_result = {
"CodeChef": [(u'2013-12-02 18:52:13', u'https://www.codechef.com/PRACTICE/problems/TEST', u'TEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017060'), (u'2013-12-02 19:02:07', u'https://www.codechef.com/PRACTICE/problems/TEST', u'TEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017069'), (u'2013-12-02 19:13:59', u'https://www.codechef.com/PRACTICE/problems/HS08TEST', u'HS08TEST', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3017092'), (u'2013-12-02 19:16:51', u'https://www.codechef.com/PRACTICE/problems/HS08TEST', u'HS08TEST', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3017097'), (u'2013-12-02 19:20:42', u'https://www.codechef.com/PRACTICE/problems/HS08TEST', u'HS08TEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017102'), (u'2013-12-02 19:31:26', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017121'), (u'2013-12-03 01:15:08', u'https://www.codechef.com/PRACTICE/problems/FCTRL', u'FCTRL', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3017614'), (u'2013-12-03 01:15:44', u'https://www.codechef.com/PRACTICE/problems/FCTRL', u'FCTRL', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3017615'), (u'2013-12-03 01:18:21', u'https://www.codechef.com/PRACTICE/problems/FCTRL', u'FCTRL', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3017619'), (u'2013-12-03 01:23:05', u'https://www.codechef.com/PRACTICE/problems/FCTRL', u'FCTRL', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3017629'), (u'2013-12-03 01:33:10', u'https://www.codechef.com/PRACTICE/problems/FCTRL2', u'FCTRL2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3017639'), (u'2013-12-06 13:51:02', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3023114'), (u'2013-12-06 13:59:27', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3023128'), (u'2013-12-06 14:26:23', u'https://www.codechef.com/PRACTICE/problems/NUMPATH', u'NUMPATH', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3023162'), (u'2013-12-06 14:34:44', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3023172'), (u'2013-12-06 14:40:45', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3023183'), (u'2013-12-06 14:58:49', u'https://www.codechef.com/PRACTICE/problems/PRPALIN', u'PRPALIN', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3023209'), (u'2013-12-06 15:22:57', u'https://www.codechef.com/PRACTICE/problems/HOLES', u'HOLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3023522'), (u'2013-12-12 15:04:32', u'https://www.codechef.com/PRACTICE/problems/NAME2', u'NAME2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3076899'), (u'2013-12-12 15:22:56', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3077003'), (u'2013-12-12 15:24:57', u'https://www.codechef.com/PRACTICE/problems/MAXCOUNT', u'MAXCOUNT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3077013'), (u'2013-12-12 17:41:44', u'https://www.codechef.com/PRACTICE/problems/DECSTR', u'DECSTR', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3077862'), (u'2013-12-12 18:04:39', 
u'https://www.codechef.com/PRACTICE/problems/DECSTR', u'DECSTR', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3078001'), (u'2013-12-12 18:53:41', u'https://www.codechef.com/PRACTICE/problems/DECSTR', u'DECSTR', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3078284'), (u'2013-12-12 19:26:47', u'https://www.codechef.com/PRACTICE/problems/DECSTR', u'DECSTR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3078484'), (u'2013-12-12 19:39:23', u'https://www.codechef.com/PRACTICE/problems/NAME2', u'NAME2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3078558'), (u'2013-12-13 15:04:16', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3083547'), (u'2013-12-13 15:09:42', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3083574'), (u'2013-12-13 15:13:40', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3083602'), (u'2013-12-13 19:30:02', u'https://www.codechef.com/PRACTICE/problems/NAME2', u'NAME2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3085115'), (u'2013-12-14 13:37:45', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3089188'), (u'2013-12-14 13:40:39', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3089199'), (u'2013-12-14 13:45:29', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3089226'), (u'2013-12-14 19:29:31', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3091091'), (u'2013-12-18 00:17:52', u'https://www.codechef.com/PRACTICE/problems/ONP', u'ONP', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108217'), (u'2013-12-18 00:29:10', u'https://www.codechef.com/PRACTICE/problems/ONP', u'ONP', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108251'), (u'2013-12-18 00:58:37', u'https://www.codechef.com/PRACTICE/problems/ONP', u'ONP', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3108323'), (u'2013-12-18 01:04:19', u'https://www.codechef.com/PRACTICE/problems/ONP', u'ONP', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3108336'), (u'2013-12-18 01:46:49', u'https://www.codechef.com/PRACTICE/problems/SUMTRIAN', u'SUMTRIAN', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3108432'), (u'2013-12-18 02:02:45', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108454'), (u'2013-12-18 02:09:53', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108466'), (u'2013-12-18 02:19:38', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3108479'), (u'2013-12-18 02:36:47', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3108489'), (u'2013-12-18 02:38:40', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3108491'), (u'2013-12-18 02:40:21', u'https://www.codechef.com/PRACTICE/problems/COINS', u'COINS', u'AC', u'0', u'C', 
'https://www.codechef.com/viewsolution/3108493'), (u'2013-12-19 23:56:23', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113518'), (u'2013-12-19 23:58:35', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113525'), (u'2013-12-20 00:00:56', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113535'), (u'2013-12-20 02:45:48', u'https://www.codechef.com/PRACTICE/problems/FCTRL2', u'FCTRL2', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113821'), (u'2013-12-20 02:48:52', u'https://www.codechef.com/PRACTICE/problems/FCTRL2', u'FCTRL2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3113825'), (u'2013-12-20 03:10:47', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113849'), (u'2013-12-20 03:27:48', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113865'), (u'2013-12-20 03:43:53', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3113877'), (u'2013-12-20 15:47:52', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3114663'), (u'2013-12-20 15:49:13', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3114664'), (u'2013-12-20 15:52:15', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3114671'), (u'2013-12-20 15:58:50', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3114683'), (u'2014-01-01 22:25:19', u'https://www.codechef.com/PRACTICE/problems/MSTICK', u'MSTICK', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3148896'), (u'2014-01-02 22:42:07', u'https://www.codechef.com/PRACTICE/problems/RESIST', u'RESIST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3150795'), (u'2014-01-02 22:54:14', u'https://www.codechef.com/PRACTICE/problems/RESIST', u'RESIST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3150836'), (u'2014-01-02 22:56:42', u'https://www.codechef.com/PRACTICE/problems/RESIST', u'RESIST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3150842'), (u'2014-01-02 22:58:50', u'https://www.codechef.com/PRACTICE/problems/RESIST', u'RESIST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3150846'), (u'2014-01-02 23:18:24', u'https://www.codechef.com/PRACTICE/problems/MSTICK', u'MSTICK', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3150913'), (u'2014-01-05 16:58:47', u'https://www.codechef.com/PRACTICE/problems/TWTCLOSE', u'TWTCLOSE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3188137'), (u'2014-01-06 21:24:27', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3200011'), (u'2014-01-06 21:29:23', u'https://www.codechef.com/PRACTICE/problems/SAD', u'SAD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3200056'), (u'2014-01-06 21:58:37', u'https://www.codechef.com/PRACTICE/problems/FLIPCOIN', u'FLIPCOIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3200313'), (u'2014-01-06 22:50:32', 
u'https://www.codechef.com/PRACTICE/problems/FLIPCOIN', u'FLIPCOIN', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3200883'), (u'2014-01-07 15:19:35', u'https://www.codechef.com/PRACTICE/problems/LEVY', u'LEVY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3205638'), (u'2014-01-07 15:23:13', u'https://www.codechef.com/PRACTICE/problems/LEVY', u'LEVY', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/3205664'), (u'2014-01-07 15:38:53', u'https://www.codechef.com/PRACTICE/problems/LEVY', u'LEVY', 'CE', u'0', u'C++ 4.3.2', 'https://www.codechef.com/viewsolution/3205784'), (u'2014-01-08 17:18:58', u'https://www.codechef.com/JAN14/problems/ERROR', u'ERROR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3215076'), (u'2014-01-08 17:32:16', u'https://www.codechef.com/JAN14/problems/ERROR', u'ERROR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3215197'), (u'2014-01-08 17:34:26', u'https://www.codechef.com/JAN14/problems/PLZLYKME', u'PLZLYKME', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3215217'), (u'2014-01-08 17:50:31', u'https://www.codechef.com/JAN14/problems/PLZLYKME', u'PLZLYKME', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3215325'), (u'2014-01-08 23:01:50', u'https://www.codechef.com/JAN14/problems/FGFS', u'FGFS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3217930'), (u'2014-01-09 18:42:17', u'https://www.codechef.com/PRACTICE/problems/TSORT', u'TSORT', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3223261'), (u'2014-01-09 18:49:03', u'https://www.codechef.com/PRACTICE/problems/TSORT', u'TSORT', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3223313'), (u'2014-01-09 18:57:00', u'https://www.codechef.com/PRACTICE/problems/TSORT', u'TSORT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3223384'), (u'2014-01-09 19:26:01', u'https://www.codechef.com/PRACTICE/problems/PERMUT2', u'PERMUT2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3223635'), (u'2014-01-09 19:28:32', u'https://www.codechef.com/PRACTICE/problems/PERMUT2', u'PERMUT2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3223652'), (u'2014-01-09 19:47:04', u'https://www.codechef.com/PRACTICE/problems/TLG', u'TLG', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3223799'), (u'2014-01-09 20:32:49', u'https://www.codechef.com/PRACTICE/problems/TLG', u'TLG', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3224190'), (u'2014-01-09 20:35:41', u'https://www.codechef.com/PRACTICE/problems/TLG', u'TLG', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3224222'), (u'2014-01-09 23:53:53', u'https://www.codechef.com/PRACTICE/problems/TLG', u'TLG', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3225832'), (u'2014-01-10 00:14:05', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3226019'), (u'2014-01-10 23:16:53', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3231942'), (u'2014-01-10 23:25:05', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3232000'), (u'2014-01-10 23:32:09', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3232061'), (u'2014-01-10 23:37:08', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 
'https://www.codechef.com/viewsolution/3232115'), (u'2014-01-10 23:46:15', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3232189'), (u'2014-01-12 16:08:22', u'https://www.codechef.com/PRACTICE/problems/D1', u'D1', u'TLE', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/3242893'), (u'2014-01-12 16:41:33', u'https://www.codechef.com/PRACTICE/problems/ASTRGAME', u'ASTRGAME', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3243146'), (u'2014-01-12 16:43:25', u'https://www.codechef.com/PRACTICE/problems/ASTRGAME', u'ASTRGAME', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3243158'), (u'2014-01-12 19:38:52', u'https://www.codechef.com/PRACTICE/problems/KPRIME', u'KPRIME', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3244328'), (u'2014-01-12 20:04:49', u'https://www.codechef.com/PRACTICE/problems/KPRIME', u'KPRIME', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3244480'), (u'2014-01-13 10:34:13', u'https://www.codechef.com/PRACTICE/problems/BUY1GET1', u'BUY1GET1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3248580'), (u'2014-01-13 10:41:26', u'https://www.codechef.com/PRACTICE/problems/BUY1GET1', u'BUY1GET1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3248611'), (u'2014-01-13 10:52:51', u'https://www.codechef.com/PRACTICE/problems/BUY1GET1', u'BUY1GET1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3248674'), (u'2014-01-13 11:53:09', u'https://www.codechef.com/PRACTICE/problems/HORSES', u'HORSES', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3249017'), (u'2014-01-13 12:01:58', u'https://www.codechef.com/PRACTICE/problems/HORSES', u'HORSES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3249080'), (u'2014-01-13 12:13:20', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3249157'), (u'2014-01-13 12:30:50', u'https://www.codechef.com/PRACTICE/problems/BUY1GET1', u'BUY1GET1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3249302'), (u'2014-01-13 13:14:27', u'https://www.codechef.com/PRACTICE/problems/TWSTR', u'TWSTR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3249663'), (u'2014-01-13 20:23:37', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3251908'), (u'2014-01-13 21:07:57', u'https://www.codechef.com/PRACTICE/problems/DIGROT', u'DIGROT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252038'), (u'2014-01-13 21:46:16', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252146'), (u'2014-01-13 22:06:21', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252214'), (u'2014-01-13 22:13:24', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252242'), (u'2014-01-13 22:15:40', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3252253'), (u'2014-01-13 22:21:15', u'https://www.codechef.com/PRACTICE/problems/HELLO', u'HELLO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3252279'), (u'2014-01-14 00:21:02', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 
'https://www.codechef.com/viewsolution/3252851'), (u'2014-01-14 01:05:42', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3253032'), (u'2014-01-14 01:08:04', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3253049'), (u'2014-01-14 01:11:18', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3253069'), (u'2014-01-14 14:06:41', u'https://www.codechef.com/PRACTICE/problems/PPXOR', u'PPXOR', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3254264'), (u'2014-01-14 19:12:48', u'https://www.codechef.com/PRACTICE/problems/CHEFTEAM', u'CHEFTEAM', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3255054'), (u'2014-01-14 19:36:22', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3255134'), (u'2014-01-14 21:11:50', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3255392'), (u'2014-01-14 21:41:46', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3255474'), (u'2014-01-16 18:39:17', u'https://www.codechef.com/PRACTICE/problems/TACHSTCK', u'TACHSTCK', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3260781'), (u'2014-01-16 19:08:18', u'https://www.codechef.com/PRACTICE/problems/TACHSTCK', u'TACHSTCK', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3260885'), (u'2014-01-16 19:36:52', u'https://www.codechef.com/PRACTICE/problems/PRIMES2', u'PRIMES2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3261016'), (u'2014-01-18 18:40:00', u'https://www.codechef.com/PRACTICE/problems/RRMATRIX', u'RRMATRIX', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3266986'), (u'2014-01-18 19:16:39', u'https://www.codechef.com/PRACTICE/problems/GRANAMA', u'GRANAMA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3267092'), (u'2014-01-18 19:25:40', u'https://www.codechef.com/PRACTICE/problems/GRANAMA', u'GRANAMA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3267123'), (u'2014-01-18 20:29:27', u'https://www.codechef.com/PRACTICE/problems/GRANAMA', u'GRANAMA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3267298'), (u'2014-01-18 20:35:24', u'https://www.codechef.com/PRACTICE/problems/GRANAMA', u'GRANAMA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3267306'), (u'2014-01-23 10:03:37', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3283319'), (u'2014-01-23 10:04:57', u'https://www.codechef.com/PRACTICE/problems/JOHNY', u'JOHNY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3283321'), (u'2014-01-23 10:06:21', u'https://www.codechef.com/PRACTICE/problems/RIGHTRI', u'RIGHTRI', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3283322'), (u'2014-01-23 10:07:29', u'https://www.codechef.com/PRACTICE/problems/RIGHTRI', u'RIGHTRI', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3283325'), (u'2014-01-23 10:19:28', u'https://www.codechef.com/PRACTICE/problems/RIGHTRI', u'RIGHTRI', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3283340'), (u'2014-01-23 10:22:56', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', u'WA', u'0', u'C', 
'https://www.codechef.com/viewsolution/3283347'), (u'2014-01-23 10:27:39', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3283353'), (u'2014-01-23 10:30:21', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3283357'), (u'2014-01-23 10:42:45', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3283378'), (u'2014-01-23 10:50:27', u'https://www.codechef.com/PRACTICE/problems/LAPIN', u'LAPIN', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3283389'), (u'2014-01-23 10:58:07', u'https://www.codechef.com/PRACTICE/problems/NUKES', u'NUKES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3283393'), (u'2014-02-07 13:56:26', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3339806'), (u'2014-02-07 14:04:43', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3339834'), (u'2014-02-07 14:07:56', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3339845'), (u'2014-02-07 14:12:05', u'https://www.codechef.com/PRACTICE/problems/NUMGAME', u'NUMGAME', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3339853'), (u'2014-02-07 14:43:35', u'https://www.codechef.com/PRACTICE/problems/CIELRCPT', u'CIELRCPT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3339922'), (u'2014-02-08 18:56:14', u'https://www.codechef.com/FEB14/problems/LCPESY', u'LCPESY', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3359518'), (u'2014-02-08 19:12:55', u'https://www.codechef.com/FEB14/problems/LCPESY', u'LCPESY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3359744'), (u'2014-02-08 19:39:00', u'https://www.codechef.com/FEB14/problems/SUBMIN', u'SUBMIN', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3360100'), (u'2014-02-11 15:14:10', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387212'), (u'2014-02-11 15:20:54', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3387257'), (u'2014-02-11 15:30:00', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387312'), (u'2014-02-11 16:35:28', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387693'), (u'2014-02-11 16:51:49', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387801'), (u'2014-02-11 16:55:47', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3387826'), (u'2014-02-13 15:27:31', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3401986'), (u'2014-02-13 16:24:34', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3402304'), (u'2014-02-13 16:52:47', u'https://www.codechef.com/FEB14/problems/TWODOGS', u'TWODOGS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3402476'), (u'2014-02-22 
21:12:12', u'https://www.codechef.com/CDMT2014/problems/MIRRORS', u'MIRRORS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3455971'), (u'2014-02-22 21:14:12', u'https://www.codechef.com/CDMT2014/problems/MIRRORS', u'MIRRORS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3456012'), (u'2014-02-22 21:21:11', u'https://www.codechef.com/CDMT2014/problems/MIRRORS', u'MIRRORS', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3456160'), (u'2014-02-23 00:04:09', u'https://www.codechef.com/CDMT2014/problems/TILE', u'TILE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3460835'), (u'2014-02-23 00:07:15', u'https://www.codechef.com/CDMT2014/problems/TILE0', u'TILE0', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3460874'), (u'2014-02-23 00:23:39', u'https://www.codechef.com/CDNCTR14/problems/QUEST', u'QUEST', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/3461126'), (u'2014-02-23 00:35:48', u'https://www.codechef.com/CDNCTR14/problems/QUEST', u'QUEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3461310'), (u'2014-02-23 01:13:51', u'https://www.codechef.com/CDNCTR14/problems/ARRAY', u'ARRAY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3461817'), (u'2014-02-23 01:53:29', u'https://www.codechef.com/CDNCTR14/problems/GOT', u'GOT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3462204'), (u'2014-02-23 02:37:48', u'https://www.codechef.com/CDNCTR14/problems/JADEJA', u'JADEJA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3462594'), (u'2014-02-23 02:42:04', u'https://www.codechef.com/CDNCTR14/problems/JADEJA', u'JADEJA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/3462619'), (u'2014-02-26 23:33:32', u'https://www.codechef.com/PRACTICE/problems/WCOUNT', u'WCOUNT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3477325'), (u'2014-03-04 16:51:10', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3497768'), (u'2014-03-04 17:08:05', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3497791'), (u'2014-03-04 17:11:05', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/3497796'), (u'2014-05-25 02:14:27', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/3938402'), (u'2014-05-25 02:16:35', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3938403'), (u'2014-05-25 02:19:23', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3938407'), (u'2014-05-25 02:28:54', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/3938415'), (u'2014-06-08 15:50:16', u'https://www.codechef.com/JUNE14/problems/CHEFZOT', u'CHEFZOT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4019362'), (u'2014-06-08 15:52:51', u'https://www.codechef.com/JUNE14/problems/CHEFZOT', u'CHEFZOT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4019398'), (u'2014-06-08 15:57:49', u'https://www.codechef.com/JUNE14/problems/CHEFZOT', u'CHEFZOT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4019468'), (u'2014-06-08 16:11:10', 
u'https://www.codechef.com/JUNE14/problems/GUESS', u'GUESS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4019668'), (u'2014-06-08 16:13:49', u'https://www.codechef.com/JUNE14/problems/GUESS', u'GUESS', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4019713'), (u'2014-06-08 17:28:24', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4020749'), (u'2014-06-09 20:48:17', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4036865'), (u'2014-06-09 20:51:39', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4036902'), (u'2014-06-09 20:56:28', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4036949'), (u'2014-06-11 07:33:23', u'https://www.codechef.com/JUNE14/problems/FORGETPW', u'FORGETPW', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053523'), (u'2014-06-11 07:54:41', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053566'), (u'2014-06-11 07:57:12', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053571'), (u'2014-06-11 07:59:02', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053576'), (u'2014-06-11 08:04:58', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053599'), (u'2014-06-11 08:08:47', u'https://www.codechef.com/PRACTICE/problems/ALEXNUMB', u'ALEXNUMB', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4053611'), (u'2014-06-11 08:20:27', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4053646'), (u'2014-06-11 08:21:52', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/4053653'), (u'2014-06-11 08:22:42', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4053659'), (u'2014-06-11 08:35:28', u'https://www.codechef.com/PRACTICE/problems/MAXDIFF', u'MAXDIFF', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053715'), (u'2014-06-11 08:41:38', u'https://www.codechef.com/PRACTICE/problems/MAXDIFF', u'MAXDIFF', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4053747'), (u'2014-06-11 09:20:41', u'https://www.codechef.com/PRACTICE/problems/STONES', u'STONES', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053906'), (u'2014-06-11 09:23:05', u'https://www.codechef.com/PRACTICE/problems/STONES', u'STONES', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4053914'), (u'2014-06-11 09:28:01', u'https://www.codechef.com/PRACTICE/problems/STONES', u'STONES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4053935'), (u'2014-06-11 09:46:27', u'https://www.codechef.com/PRACTICE/problems/SPCANDY', u'SPCANDY', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4054028'), (u'2014-06-11 09:49:08', u'https://www.codechef.com/PRACTICE/problems/SPCANDY', u'SPCANDY', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4054050'), (u'2014-06-11 09:50:14', 
u'https://www.codechef.com/PRACTICE/problems/SPCANDY', u'SPCANDY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4054056'), (u'2014-06-11 10:13:17', u'https://www.codechef.com/PRACTICE/problems/DIVIDING', u'DIVIDING', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4054186'), (u'2014-06-11 10:17:20', u'https://www.codechef.com/PRACTICE/problems/DIVIDING', u'DIVIDING', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4054200'), (u'2014-06-11 10:21:20', u'https://www.codechef.com/PRACTICE/problems/DIVIDING', u'DIVIDING', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4054222'), (u'2014-06-11 10:46:57', u'https://www.codechef.com/PRACTICE/problems/APPROX', u'APPROX', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4054403'), (u'2014-06-11 11:11:10', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', 'CE', u'0', u'ADA', 'https://www.codechef.com/viewsolution/4054561'), (u'2014-06-11 11:11:59', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4054571'), (u'2014-06-11 16:59:23', u'https://www.codechef.com/PRACTICE/problems/AMSGAME1', u'AMSGAME1', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4057988'), (u'2014-06-11 17:05:35', u'https://www.codechef.com/PRACTICE/problems/AMSGAME1', u'AMSGAME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4058067'), (u'2014-06-29 01:44:47', u'https://www.codechef.com/PRACTICE/problems/TREEROOT', u'TREEROOT', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4152751'), (u'2014-06-29 02:02:26', u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', u'TLE', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/4152798'), (u'2014-07-04 20:23:15', u'https://www.codechef.com/JULY14/problems/CSUB', u'CSUB', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4188769'), (u'2014-07-04 20:35:55', u'https://www.codechef.com/JULY14/problems/CSUB', u'CSUB', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4189092'), (u'2014-07-04 20:42:22', u'https://www.codechef.com/JULY14/problems/CSUB', u'CSUB', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4189260'), (u'2014-07-04 20:56:59', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4189643'), (u'2014-07-04 20:58:35', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4189684'), (u'2014-07-04 21:29:16', u'https://www.codechef.com/JULY14/problems/CSUB', u'CSUB', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4190477'), (u'2014-07-05 03:32:13', u'https://www.codechef.com/PRACTICE/problems/SPOTWO', u'SPOTWO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4198760'), (u'2014-07-05 04:31:23', u'https://www.codechef.com/PRACTICE/problems/REMISS', u'REMISS', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4199244'), (u'2014-07-05 04:48:17', u'https://www.codechef.com/PRACTICE/problems/POTATOES', u'POTATOES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4199368'), (u'2014-07-05 04:58:55', u'https://www.codechef.com/PRACTICE/problems/SDSQUARE', u'SDSQUARE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4199453'), (u'2014-07-05 05:05:28', u'https://www.codechef.com/PRACTICE/problems/SDSQUARE', u'SDSQUARE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4199504'), (u'2014-07-05 05:14:54', 
u'https://www.codechef.com/PRACTICE/problems/SDSQUARE', u'SDSQUARE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4199569'), (u'2014-07-05 05:19:30', u'https://www.codechef.com/PRACTICE/problems/SDSQUARE', u'SDSQUARE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4199592'), (u'2014-07-05 05:44:04', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4199717'), (u'2014-07-12 02:26:44', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303371'), (u'2014-07-12 03:17:04', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303603'), (u'2014-07-12 03:17:04', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303608'), (u'2014-07-12 03:17:04', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303611'), (u'2014-07-12 03:17:45', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4303624'), (u'2014-07-12 03:22:54', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4303651'), (u'2014-07-12 03:25:18', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4303661'), (u'2014-07-12 03:28:45', u'https://www.codechef.com/JULY14/problems/RETPO', u'RETPO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4303679'), (u'2014-07-12 15:12:46', u'https://www.codechef.com/JULY14/problems/FROGV', u'FROGV', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4307292'), (u'2014-07-13 01:07:50', u'https://www.codechef.com/JULY14/problems/FROGV', u'FROGV', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4312732'), (u'2014-07-17 02:00:29', u'https://www.codechef.com/PRACTICE/problems/BINTREE', u'BINTREE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4337506'), (u'2014-07-17 02:02:30', u'https://www.codechef.com/PRACTICE/problems/BINTREE', u'BINTREE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4337509'), (u'2014-07-17 21:02:13', u'https://www.codechef.com/PRACTICE/problems/LUCKYSTR', u'LUCKYSTR', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/4339419'), (u'2014-07-17 21:03:35', u'https://www.codechef.com/PRACTICE/problems/LUCKYSTR', u'LUCKYSTR', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339420'), (u'2014-07-17 21:49:38', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339533'), (u'2014-07-17 21:54:01', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339548'), (u'2014-07-17 21:55:43', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339554'), (u'2014-07-17 21:58:37', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/4339563'), (u'2014-07-17 21:59:31', u'https://www.codechef.com/PRACTICE/problems/NOLOGIC', u'NOLOGIC', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4339567'), (u'2014-07-18 00:42:33', 
u'https://www.codechef.com/PRACTICE/problems/VOTERS', u'VOTERS', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340137'), (u'2014-07-18 01:15:31', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340237'), (u'2014-07-18 01:17:19', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340243'), (u'2014-07-18 01:21:53', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340259'), (u'2014-07-18 01:24:29', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340266'), (u'2014-07-18 01:38:21', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340317'), (u'2014-07-18 01:41:49', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340328'), (u'2014-07-18 02:11:22', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4340405'), (u'2014-07-18 02:13:00', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4340412'), (u'2014-07-18 02:15:57', u'https://www.codechef.com/PRACTICE/problems/COMPILER', u'COMPILER', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340421'), (u'2014-07-18 03:08:59', u'https://www.codechef.com/PRACTICE/problems/WSTRING', u'WSTRING', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340523'), (u'2014-07-18 03:18:59', u'https://www.codechef.com/PRACTICE/problems/WSTRING', u'WSTRING', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340535'), (u'2014-07-18 04:45:18', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/4340638'), (u'2014-07-18 04:46:15', u'https://www.codechef.com/PRACTICE/problems/RRCODE', u'RRCODE', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4340641'), (u'2014-07-18 04:50:29', u'https://www.codechef.com/PRACTICE/problems/BINTREE', u'BINTREE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340644'), (u'2014-07-18 04:55:56', u'https://www.codechef.com/PRACTICE/problems/RETPO', u'RETPO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340648'), (u'2014-07-18 04:58:27', u'https://www.codechef.com/PRACTICE/problems/BINTREE', u'BINTREE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340649'), (u'2014-07-18 05:04:58', u'https://www.codechef.com/PRACTICE/problems/RRMATRIX', u'RRMATRIX', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4340655'), (u'2014-07-18 05:05:52', u'https://www.codechef.com/PRACTICE/problems/RRMATRIX', u'RRMATRIX', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4340657'), (u'2014-07-21 18:05:27', u'https://www.codechef.com/PRACTICE/problems/RRCOPY', u'RRCOPY', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4362844'), (u'2014-07-21 18:24:11', u'https://www.codechef.com/PRACTICE/problems/RRCOPY', u'RRCOPY', u'WA', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4362928'), (u'2014-07-21 18:25:05', u'https://www.codechef.com/PRACTICE/problems/RRCOPY', u'RRCOPY', u'AC', u'0', u'C++ 4.8.1', 
'https://www.codechef.com/viewsolution/4362933'), (u'2014-07-21 18:45:33', u'https://www.codechef.com/PRACTICE/problems/RRSUM', u'RRSUM', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4363040'), (u'2014-07-21 18:49:18', u'https://www.codechef.com/PRACTICE/problems/RRSUM', u'RRSUM', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4363058'), (u'2014-07-21 18:50:51', u'https://www.codechef.com/PRACTICE/problems/RRSUM', u'RRSUM', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4363066'), (u'2014-07-23 00:10:48', u'https://www.codechef.com/PRACTICE/problems/RECTQUER', u'RECTQUER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4367826'), (u'2014-07-23 01:00:49', u'https://www.codechef.com/PRACTICE/problems/RECTQUER', u'RECTQUER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4368006'), (u'2014-07-23 01:03:50', u'https://www.codechef.com/PRACTICE/problems/RECTQUER', u'RECTQUER', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4368015'), (u'2014-07-23 01:32:36', u'https://www.codechef.com/PRACTICE/problems/RECTQUER', u'RECTQUER', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4368102'), (u'2014-07-26 00:16:20', u'https://www.codechef.com/PRACTICE/problems/DOUBLE', u'DOUBLE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4377912'), (u'2014-07-26 00:18:23', u'https://www.codechef.com/PRACTICE/problems/DOUBLE', u'DOUBLE', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4377917'), (u'2014-07-26 00:44:31', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4377999'), (u'2014-07-27 02:46:17', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'TLE', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/4382136'), (u'2014-07-27 02:52:14', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4382143'), (u'2014-07-27 02:55:35', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382152'), (u'2014-07-27 02:56:53', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'TLE', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382155'), (u'2014-07-27 02:58:43', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382159'), (u'2014-07-27 02:59:30', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382160'), (u'2014-07-27 03:01:22', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4382164'), (u'2014-07-27 03:13:49', u'https://www.codechef.com/PRACTICE/problems/INTEST', u'INTEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4382175'), (u'2014-07-31 22:31:14', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4410407'), (u'2014-07-31 22:32:41', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4410421'), (u'2014-07-31 22:36:40', u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4410455'), (u'2014-07-31 22:37:34', 
u'https://www.codechef.com/PRACTICE/problems/MARBLES', u'MARBLES', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4410461'), (u'2014-08-01 16:03:33', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4418584'), (u'2014-08-01 16:10:06', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4418854'), (u'2014-08-01 16:16:14', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4419068'), (u'2014-08-01 16:28:32', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4419429'), (u'2014-08-01 21:14:20', u'https://www.codechef.com/AUG14/problems/PRGIFT', u'PRGIFT', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4427549'), (u'2014-08-01 22:22:40', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4428946'), (u'2014-08-01 22:24:47', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4428994'), (u'2014-08-01 22:25:57', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4429019'), (u'2014-08-01 22:26:55', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4429047'), (u'2014-08-02 21:41:49', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4448115'), (u'2014-08-02 21:43:44', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4448136'), (u'2014-08-02 21:51:09', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4448237'), (u'2014-08-02 21:58:27', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4448341'), (u'2014-08-02 23:04:07', u'https://www.codechef.com/AUG14/problems/CRAWA', u'CRAWA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4449507'), (u'2014-08-06 14:47:12', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4494226'), (u'2014-08-07 22:22:52', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4508709'), (u'2014-08-07 22:57:57', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4509134'), (u'2014-08-07 23:22:17', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4509429'), (u'2014-08-07 23:31:23', u'https://www.codechef.com/AUG14/problems/CLETAB', u'CLETAB', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4509535'), (u'2014-08-10 02:57:09', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4530125'), (u'2014-08-10 03:03:19', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4530154'), (u'2014-08-10 03:14:11', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'WA', u'0', u'C', 
'https://www.codechef.com/viewsolution/4530189'), (u'2014-08-10 03:17:14', u'https://www.codechef.com/PRACTICE/problems/PRIME1', u'PRIME1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4530195'), (u'2014-08-10 14:56:08', u'https://www.codechef.com/AUG14/problems/REVERSE', u'REVERSE', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4533200'), (u'2014-08-10 15:14:30', u'https://www.codechef.com/AUG14/problems/REVERSE', u'REVERSE', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4533367'), (u'2014-08-10 17:29:15', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', 'RE', u'0', u'C', 'https://www.codechef.com/viewsolution/4535341'), (u'2014-08-10 17:30:22', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4535393'), (u'2014-08-10 17:33:44', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4535586'), (u'2014-08-10 17:34:51', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4535650'), (u'2014-08-10 17:37:42', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4535810'), (u'2014-08-10 17:39:14', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4535898'), (u'2014-08-10 17:40:19', u'https://www.codechef.com/PRCNSR14/problems/GAME2048', u'GAME2048', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4535965'), (u'2014-08-10 17:47:23', u'https://www.codechef.com/PRCNSR14/problems/HLPSUG', u'HLPSUG', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4536336'), (u'2014-08-10 18:03:45', u'https://www.codechef.com/PRCNSR14/problems/HPYBDAY', u'HPYBDAY', u'TLE', u'0', u'C', 'https://www.codechef.com/viewsolution/4537126'), (u'2014-08-10 18:25:49', u'https://www.codechef.com/PRCNSR14/problems/HPYBDAY', u'HPYBDAY', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4538160'), (u'2014-08-10 18:27:37', u'https://www.codechef.com/PRCNSR14/problems/HPYBDAY', u'HPYBDAY', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4538244'), (u'2014-08-10 19:11:26', u'https://www.codechef.com/PRCNSR14/problems/PLTGRP', u'PLTGRP', u'TLE', u'0', u'C++11', 'https://www.codechef.com/viewsolution/4539947'), (u'2014-10-03 19:51:34', u'https://www.codechef.com/OCT14/problems/CHEFGR', u'CHEFGR', u'AC', u'0', u'C++ 4.8.1', 'https://www.codechef.com/viewsolution/4962359'), (u'2014-10-03 19:55:30', u'https://www.codechef.com/OCT14/problems/CHEFGR', u'CHEFGR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4962494'), (u'2014-10-04 01:01:28', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4970823'), (u'2014-10-04 02:02:38', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/4972114'), (u'2014-10-04 02:05:31', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4972172'), (u'2014-10-04 02:08:04', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/4972219'), (u'2014-10-04 02:10:59', u'https://www.codechef.com/OCT14/problems/PRLADDU', u'PRLADDU', u'AC', u'0', u'C', 
'https://www.codechef.com/viewsolution/4972279'), (u'2014-10-05 19:11:22', u'https://www.codechef.com/OCT14/problems/FATCHEF', u'FATCHEF', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5008560'), (u'2014-10-05 19:46:59', u'https://www.codechef.com/OCT14/problems/PRPOTION', u'PRPOTION', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5009210'), (u'2014-10-05 20:09:50', u'https://www.codechef.com/OCT14/problems/PRPOTION', u'PRPOTION', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5009564'), (u'2014-10-08 01:48:44', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5046189'), (u'2014-10-08 19:42:52', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5056254'), (u'2014-10-08 20:45:51', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5057583'), (u'2014-10-08 20:47:41', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5057620'), (u'2014-10-08 20:49:47', u'https://www.codechef.com/OCT14/problems/CHEFSQUA', u'CHEFSQUA', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5057673'), (u'2014-11-07 22:42:18', u'https://www.codechef.com/NOV14/problems/DISCHAR', u'DISCHAR', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5286888'), (u'2014-11-08 15:04:37', u'https://www.codechef.com/NOV14/problems/PRPALN', u'PRPALN', 'PS', u'35', u'C', 'https://www.codechef.com/viewsolution/5300598'), (u'2014-11-08 16:15:45', u'https://www.codechef.com/NOV14/problems/PRPALN', u'PRPALN', 'PS', u'35', u'C', 'https://www.codechef.com/viewsolution/5302106'), (u'2014-11-08 16:24:02', u'https://www.codechef.com/NOV14/problems/PRPALN', u'PRPALN', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5302275'), (u'2014-11-08 16:28:35', u'https://www.codechef.com/NOV14/problems/PRPALN', u'PRPALN', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5302355'), (u'2014-11-08 17:36:31', u'https://www.codechef.com/NOV14/problems/CHEFSEG', u'CHEFSEG', 'PS', u'40', u'C', 'https://www.codechef.com/viewsolution/5303576'), (u'2014-11-08 17:49:57', u'https://www.codechef.com/NOV14/problems/CHEFSEG', u'CHEFSEG', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5303832'), (u'2014-11-08 23:45:46', u'https://www.codechef.com/NOV14/problems/RBTREE', u'RBTREE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5310161'), (u'2014-11-09 00:16:54', u'https://www.codechef.com/NOV14/problems/RBTREE', u'RBTREE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5310716'), (u'2014-11-09 00:22:33', u'https://www.codechef.com/NOV14/problems/RBTREE', u'RBTREE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5310827'), (u'2014-11-09 20:55:47', u'https://www.codechef.com/NOV14/problems/CHEFWORD', u'CHEFWORD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5322719'), (u'2014-11-09 21:00:47', u'https://www.codechef.com/NOV14/problems/CHEFWORD', u'CHEFWORD', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5322778'), (u'2014-11-17 01:56:38', u'https://www.codechef.com/CDSM2014/problems/CHFMAX', u'CHFMAX', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5414098'), (u'2014-11-17 02:10:10', u'https://www.codechef.com/CDSM2014/problems/CHEFTR', u'CHEFTR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/5414268'), 
(u'2014-12-06 02:22:06', u'https://www.codechef.com/DEC14/problems/CAPPLE', u'CAPPLE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5499111'), (u'2014-12-06 02:27:09', u'https://www.codechef.com/DEC14/problems/CAPPLE', u'CAPPLE', 'PS', u'52', u'C', 'https://www.codechef.com/viewsolution/5499146'), (u'2014-12-06 02:28:40', u'https://www.codechef.com/DEC14/problems/CAPPLE', u'CAPPLE', 'PS', u'52', u'C', 'https://www.codechef.com/viewsolution/5499158'), (u'2014-12-06 02:30:42', u'https://www.codechef.com/DEC14/problems/CAPPLE', u'CAPPLE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5499166'), (u'2015-01-02 15:18:34', u'https://www.codechef.com/JAN15/problems/GCDQ', u'GCDQ', 'PS', u'40', u'C', 'https://www.codechef.com/viewsolution/5679296'), (u'2015-01-02 15:20:33', u'https://www.codechef.com/JAN15/problems/GCDQ', u'GCDQ', 'PS', u'40', u'C', 'https://www.codechef.com/viewsolution/5679371'), (u'2015-01-02 15:37:03', u'https://www.codechef.com/JAN15/problems/CHEFSTON', u'CHEFSTON', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5679960'), (u'2015-01-02 16:16:32', u'https://www.codechef.com/JAN15/problems/GCDQ', u'GCDQ', 'PS', u'40', u'C', 'https://www.codechef.com/viewsolution/5681465'), (u'2015-01-03 21:23:57', u'https://www.codechef.com/JAN15/problems/GCDQ', u'GCDQ', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5722527'), (u'2015-01-03 21:36:43', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5722845'), (u'2015-01-03 21:50:45', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5723185'), (u'2015-01-06 23:28:39', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5788244'), (u'2015-01-06 23:44:15', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5788578'), (u'2015-01-06 23:55:07', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/5788839'), (u'2015-01-07 00:02:10', u'https://www.codechef.com/JAN15/problems/SEAVOTE', u'SEAVOTE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/5788999'), (u'2015-03-07 03:45:05', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/6413565'), (u'2015-03-07 06:18:00', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/6414065'), (u'2015-03-09 22:29:34', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/6447577'), (u'2015-03-09 22:36:29', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/6447698'), (u'2015-03-09 22:38:36', u'https://www.codechef.com/MARCH15/problems/CNOTE', u'CNOTE', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/6447737'), (u'2015-05-12 02:41:11', u'https://www.codechef.com/MAY15/problems/CHEFRP', u'CHEFRP', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/6900569'), (u'2015-05-12 03:05:02', u'https://www.codechef.com/MAY15/problems/CHEFRP', u'CHEFRP', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/6900712'), (u'2015-05-13 15:59:16', u'https://www.codechef.com/MAY15/problems/CHAPD', u'CHAPD', u'WA', u'0', u'C', 
'https://www.codechef.com/viewsolution/6917484'), (u'2015-05-26 03:53:20', u'https://www.codechef.com/PRACTICE/problems/CFRTEST', u'CFRTEST', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7024771'), (u'2015-05-26 04:46:33', u'https://www.codechef.com/PRACTICE/problems/REARRSTR', u'REARRSTR', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7024793'), (u'2015-05-26 04:54:59', u'https://www.codechef.com/PRACTICE/problems/CHAPD', u'CHAPD', u'AC', u'100', u'C++ 4.3.2', 'https://www.codechef.com/viewsolution/7024795'), (u'2015-05-30 07:38:40', u'https://www.codechef.com/PRACTICE/problems/PINOCH1', u'PINOCH1', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/7043758'), (u'2015-05-30 07:47:02', u'https://www.codechef.com/PRACTICE/problems/PINOCH1', u'PINOCH1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7044118'), (u'2015-05-30 07:49:48', u'https://www.codechef.com/PRACTICE/problems/PINOCH1', u'PINOCH1', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7044235'), (u'2015-05-30 08:04:35', u'https://www.codechef.com/PRACTICE/problems/PINOCH2', u'PINOCH2', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/7044809'), (u'2015-05-30 08:09:02', u'https://www.codechef.com/PRACTICE/problems/PINOCH2', u'PINOCH2', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7044972'), (u'2015-05-30 08:27:56', u'https://www.codechef.com/PRACTICE/problems/RACEWARS', u'RACEWARS', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/7045779'), (u'2015-05-30 08:28:38', u'https://www.codechef.com/PRACTICE/problems/RACEWARS', u'RACEWARS', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7045826'), (u'2015-05-30 08:31:07', u'https://www.codechef.com/PRACTICE/problems/MXZERO', u'MXZERO', u'AC', u'0', u'C', 'https://www.codechef.com/viewsolution/7045937'), (u'2015-05-30 09:22:29', u'https://www.codechef.com/PRACTICE/problems/RACEWARS', u'RACEWARS', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7046383'), (u'2015-05-30 09:34:19', u'https://www.codechef.com/PRACTICE/problems/HOBB', u'HOBB', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/7046431'), (u'2015-05-30 12:48:40', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C', 'https://www.codechef.com/viewsolution/7047261'), (u'2015-05-30 12:50:41', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/7047270'), (u'2015-06-08 22:03:40', u'https://www.codechef.com/JUNE15/problems/CBARG', u'CBARG', 'PS', u'30', u'C', 'https://www.codechef.com/viewsolution/7139999'), (u'2015-06-08 22:10:35', u'https://www.codechef.com/JUNE15/problems/CBARG', u'CBARG', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/7140098'), (u'2015-06-09 17:03:07', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7150141'), (u'2015-06-09 22:09:57', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/7153650'), (u'2015-06-09 22:11:02', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7153663'), (u'2015-06-10 17:52:59', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', 'PS', u'10', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7163596'), (u'2015-06-10 18:02:31', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', 
u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7163696'), (u'2015-06-10 23:15:58', u'https://www.codechef.com/JUNE15/problems/CHPLGNS', u'CHPLGNS', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7168947'), (u'2015-06-10 23:27:43', u'https://www.codechef.com/PRACTICE/problems/R303', u'R303', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7169121'), (u'2015-06-11 00:01:43', u'https://www.codechef.com/PRACTICE/problems/R303', u'R303', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7169540'), (u'2015-07-04 02:09:01', u'https://www.codechef.com/JULY15/problems/CHCUBE', u'CHCUBE', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7339812'), (u'2015-07-04 02:49:18', u'https://www.codechef.com/JULY15/problems/LCKYST', u'LCKYST', 'PS', u'8', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7340359'), (u'2015-07-04 02:55:39', u'https://www.codechef.com/JULY15/problems/LCKYST', u'LCKYST', 'PS', u'30', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7340422'), (u'2015-07-04 02:57:16', u'https://www.codechef.com/JULY15/problems/LCKYST', u'LCKYST', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7340447'), (u'2015-07-04 02:59:52', u'https://www.codechef.com/JULY15/problems/LCKYST', u'LCKYST', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7340475'), (u'2015-07-06 15:49:58', u'https://www.codechef.com/JULY15/problems/EGBOBRD', u'EGBOBRD', 'PS', u'15', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7381337'), (u'2015-07-06 15:57:35', u'https://www.codechef.com/JULY15/problems/EGBOBRD', u'EGBOBRD', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7381445'), (u'2015-07-07 20:01:02', u'https://www.codechef.com/JULY15/problems/EGBOBRD', u'EGBOBRD', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7399011'), (u'2015-07-07 20:05:22', u'https://www.codechef.com/JULY15/problems/EGBOBRD', u'EGBOBRD', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7399073'), (u'2015-07-08 00:31:24', u'https://www.codechef.com/JULY15/problems/ADDMUL', u'ADDMUL', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7402380'), (u'2015-07-08 00:33:00', u'https://www.codechef.com/JULY15/problems/ADDMUL', u'ADDMUL', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7402406'), (u'2015-07-12 10:52:20', u'https://www.codechef.com/JULY15/problems/ADDMUL', u'ADDMUL', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7456100'), (u'2015-08-07 17:28:06', u'https://www.codechef.com/AUG15/problems/COOKMACH', u'COOKMACH', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7640195'), (u'2015-08-10 17:08:30', u'https://www.codechef.com/AUG15/problems/GRGUY', u'GRGUY', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7720771'), (u'2015-08-10 19:18:54', u'https://www.codechef.com/AUG15/problems/ADMAG', u'ADMAG', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/7723401'), (u'2015-08-12 06:04:32', u'https://www.codechef.com/AUG15/problems/WOUT', u'WOUT', 'PS', u'25', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7751317'), (u'2015-08-12 06:10:36', u'https://www.codechef.com/AUG15/problems/WOUT', u'WOUT', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7751339'), (u'2015-08-12 06:14:26', u'https://www.codechef.com/AUG15/problems/WOUT', u'WOUT', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7751353'), 
(u'2015-08-16 00:04:50', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7817713'), (u'2015-08-16 00:27:10', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818066'), (u'2015-08-16 00:37:49', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818234'), (u'2015-08-16 00:46:49', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818371'), (u'2015-08-16 00:52:48', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818462'), (u'2015-08-16 01:06:50', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818659'), (u'2015-08-16 01:11:04', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818713'), (u'2015-08-16 01:27:22', u'https://www.codechef.com/PRACTICE/problems/RRATING', u'RRATING', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7818980'), (u'2015-08-23 21:36:59', u'https://www.codechef.com/COOK61/problems/CARDLINE', u'CARDLINE', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7898648'), (u'2015-08-23 21:41:10', u'https://www.codechef.com/COOK61/problems/TWOSTR', u'TWOSTR', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7898953'), (u'2015-08-23 21:58:03', u'https://www.codechef.com/COOK61/problems/XORNUBER', u'XORNUBER', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7901142'), (u'2015-08-23 22:06:19', u'https://www.codechef.com/COOK61/problems/XORNUBER', u'XORNUBER', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/7902094'), (u'2015-09-10 02:09:12', u'https://www.codechef.com/SEPT15/problems/MSTEP', u'MSTEP', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8102573'), (u'2015-09-10 02:51:18', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', 'PS', u'30', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8102955'), (u'2015-09-10 20:48:37', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', 'PS', u'10', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8112817'), (u'2015-09-10 21:39:10', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', 'PS', u'40', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8113610'), (u'2015-09-12 08:08:58', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8132761'), (u'2015-09-12 08:19:28', u'https://www.codechef.com/SEPT15/problems/DONUTS', u'DONUTS', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8132775'), (u'2015-09-12 22:15:45', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142069'), (u'2015-09-12 22:23:17', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142165'), (u'2015-09-12 22:31:16', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142257'), (u'2015-09-12 22:35:11', 
u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142304'), (u'2015-09-12 22:52:32', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142551'), (u'2015-09-12 22:58:28', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142618'), (u'2015-09-12 23:03:31', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8142689'), (u'2015-09-12 23:06:41', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/8142738'), (u'2015-09-12 23:09:39', u'https://www.codechef.com/SEPT15/problems/BANROB', u'BANROB', u'AC', u'100', u'C', 'https://www.codechef.com/viewsolution/8142768'), (u'2015-09-20 22:05:39', u'https://www.codechef.com/COOK62/problems/FRGTNLNG', u'FRGTNLNG', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8212884'), (u'2015-09-20 22:34:31', u'https://www.codechef.com/COOK62/problems/STACKS', u'STACKS', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8215005'), (u'2015-09-20 23:10:47', u'https://www.codechef.com/COOK62/problems/STACKS', u'STACKS', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8217486'), (u'2015-09-20 23:16:22', u'https://www.codechef.com/COOK62/problems/STACKS', u'STACKS', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8217838'), (u'2015-09-21 13:34:29', u'https://www.codechef.com/PRACTICE/problems/FRGTNLNG', u'FRGTNLNG', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8222436'), (u'2015-09-25 21:08:04', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8245383'), (u'2015-09-25 21:15:54', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8245418'), (u'2015-09-25 21:30:38', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8245472'), (u'2015-09-25 21:37:47', u'https://www.codechef.com/PRACTICE/problems/TPRODUCT', u'TPRODUCT', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8245498'), (u'2015-09-27 19:14:01', u'https://www.codechef.com/PRACTICE/problems/SPALNUM', u'SPALNUM', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8266897'), (u'2015-09-27 19:19:39', u'https://www.codechef.com/PRACTICE/problems/SPALNUM', u'SPALNUM', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8267017'), (u'2015-09-27 19:23:52', u'https://www.codechef.com/PRACTICE/problems/SPALNUM', u'SPALNUM', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8267096'), (u'2015-09-29 21:53:04', u'https://www.codechef.com/PRACTICE/problems/LUCKY', u'LUCKY', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8280451'), (u'2015-10-20 09:59:02', u'https://www.codechef.com/PRACTICE/problems/ASP', u'ASP', u'WA', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8594490'), (u'2015-10-20 10:00:30', u'https://www.codechef.com/PRACTICE/problems/ASP', u'ASP', u'AC', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8594496'), (u'2015-12-14 23:46:01', 
u'https://www.codechef.com/PRACTICE/problems/CHEFST', u'CHEFST', u'TLE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8959065'), (u'2015-12-14 23:47:46', u'https://www.codechef.com/PRACTICE/problems/CHEFST', u'CHEFST', 'PS', u'30', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8959080'), (u'2015-12-15 00:01:01', u'https://www.codechef.com/PRACTICE/problems/CHEFST', u'CHEFST', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/8959153'), (u'2016-05-14 16:46:03', u'https://www.codechef.com/PRACTICE/problems/KOL1509', u'KOL1509', 'RE', u'0', u'C++14', 'https://www.codechef.com/viewsolution/10082758'), (u'2016-06-05 13:55:56', u'https://www.codechef.com/JUNE16/problems/DEVARRAY', u'DEVARRAY', 'CE', u'0', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/10333457'), (u'2016-06-05 13:59:32', u'https://www.codechef.com/JUNE16/problems/DEVARRAY', u'DEVARRAY', u'AC', u'100', u'C++ 4.9.2', 'https://www.codechef.com/viewsolution/10333552'), (u'2017-11-03 00:35:24', u'https://www.codechef.com/PRACTICE/problems/BLACKCOM', u'BLACKCOM', 'CE', u'0', u'C++ 6.3', 'https://www.codechef.com/viewsolution/16037895'), (u'2017-11-03 00:41:17', u'https://www.codechef.com/PRACTICE/problems/BLACKCOM', u'BLACKCOM', u'WA', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/16037935'), (u'2017-12-03 19:26:28', u'https://www.codechef.com/PRACTICE/problems/WEICOM', u'WEICOM', u'WA', u'0', u'PYTH', 'https://www.codechef.com/viewsolution/16433447'), (u'2018-10-07 19:12:16', u'https://www.codechef.com/PRACTICE/problems/BLACKCOM', u'BLACKCOM', 'CE', u'0', u'C++14', 'https://www.codechef.com/viewsolution/20545692'), (u'2018-10-23 22:36:07', u'https://www.codechef.com/PRACTICE/problems/SURCHESS', u'SURCHESS', 'CE', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21187090'), (u'2018-11-07 12:50:39', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', 'CE', u'0', u'C', 'https://www.codechef.com/viewsolution/21518903'), (u'2018-11-07 12:51:53', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21518924'), (u'2018-11-07 12:57:36', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21519029'), (u'2018-11-07 12:58:22', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21519043'), (u'2018-11-07 13:00:37', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'WA', u'0', u'C++14', 'https://www.codechef.com/viewsolution/21519089'), (u'2018-11-07 13:02:45', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', 'PS', u'50', u'C++14', 'https://www.codechef.com/viewsolution/21519127'), (u'2018-11-07 13:08:22', u'https://www.codechef.com/PRACTICE/problems/TICKETS5', u'TICKETS5', u'AC', u'100', u'C++14', 'https://www.codechef.com/viewsolution/21519248')],
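# Field layout of each submission tuple below (inferred from the data itself; not documented elsewhere in this file):
# (submission timestamp, problem URL, problem name/code, verdict e.g. AC/WA/TLE/RE/CE/PS, score, language, submission URL)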
"CodeForces": [('2014-06-20 14:16:29', u'http://www.codeforces.com/problemset/problem/443/A', u'Anton and Letters', 'CE', '0', u'GNU C', 'http://www.codeforces.com/contest/443/submission/6926377'), ('2014-06-20 14:17:29', u'http://www.codeforces.com/problemset/problem/443/A', u'Anton and Letters', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/443/submission/6926384'), ('2014-06-20 15:14:05', u'http://www.codeforces.com/problemset/problem/1/A', u'Theatre Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/1/submission/6926712'), ('2014-06-20 15:19:19', u'http://www.codeforces.com/problemset/problem/1/A', u'Theatre Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/1/submission/6926744'), ('2014-06-20 15:35:33', u'http://www.codeforces.com/problemset/problem/1/A', u'Theatre Square', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/1/submission/6926822'), ('2014-06-20 15:40:22', u'http://www.codeforces.com/problemset/problem/4/A', u'Watermelon', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/4/submission/6926854'), ('2014-06-20 15:42:27', u'http://www.codeforces.com/problemset/problem/4/A', u'Watermelon', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/4/submission/6926866'), ('2014-06-20 16:19:41', u'http://www.codeforces.com/problemset/problem/158/A', u'Next Round', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6927039'), ('2014-06-20 16:21:59', u'http://www.codeforces.com/problemset/problem/158/A', u'Next Round', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6927057'), ('2014-06-20 16:35:40', u'http://www.codeforces.com/problemset/problem/158/A', u'Next Round', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6927122'), ('2014-06-20 23:33:02', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6930033'), ('2014-06-20 23:46:50', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/6930628'), ('2014-06-21 00:23:15', u'http://www.codeforces.com/problemset/problem/131/A', u'cAPS lOCK', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6930791'), ('2014-06-21 00:26:44', u'http://www.codeforces.com/problemset/problem/131/A', u'cAPS lOCK', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6930810'), ('2014-06-21 00:28:48', u'http://www.codeforces.com/problemset/problem/131/A', u'cAPS lOCK', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6930817'), ('2014-06-21 00:31:03', u'http://www.codeforces.com/problemset/problem/131/A', u'cAPS lOCK', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6930830'), ('2014-06-21 01:21:34', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/6931006'), ('2014-06-21 01:24:10', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/6931013'), ('2014-06-21 01:28:28', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/6931031'), ('2014-06-21 01:42:08', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/131/submission/6931087'), ('2014-06-21 01:55:26', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931137'), ('2014-06-21 01:58:07', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931156'), ('2014-06-21 01:59:17', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931160'), ('2014-06-21 02:02:30', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931170'), ('2014-06-21 02:04:53', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931181'), ('2014-06-21 02:14:48', u'http://www.codeforces.com/problemset/problem/131/C', u'The World is a Theatre', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/131/submission/6931213'), ('2014-06-21 20:42:21', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/6938158'), ('2014-06-28 01:04:59', u'http://www.codeforces.com/problemset/problem/268/B', u'Buttons', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/268/submission/6971649'), ('2014-06-28 02:06:43', u'http://www.codeforces.com/problemset/problem/37/A', u'Towers', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/37/submission/6971879'), ('2014-07-17 00:31:42', u'http://www.codeforces.com/problemset/problem/71/A', u'Way Too Long Words', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/71/submission/7118436'), ('2014-07-17 00:46:44', u'http://www.codeforces.com/problemset/problem/43/B', u'Letter', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/43/submission/7118520'), ('2014-07-24 15:36:56', u'http://www.codeforces.com/problemset/problem/447/A', u'DZY Loves Hash', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/447/submission/7215463'), ('2014-07-24 15:39:56', u'http://www.codeforces.com/problemset/problem/447/A', u'DZY Loves Hash', 'CE', '0', u'GNU C', 'http://www.codeforces.com/contest/447/submission/7215478'), ('2014-07-24 15:42:59', u'http://www.codeforces.com/problemset/problem/447/A', u'DZY Loves Hash', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/447/submission/7215497'), ('2014-08-08 17:12:35', u'http://www.codeforces.com/problemset/problem/454/A', u'Little Pony and Crystal Mine', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/454/submission/7375767'), ('2014-08-08 22:25:32', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'CE', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7391497'), ('2014-08-08 22:30:29', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'TLE', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7392085'), ('2014-08-10 01:55:39', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408524'), ('2014-08-10 01:57:55', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408534'), ('2014-08-10 02:03:27', 
u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408554'), ('2014-08-10 02:08:35', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408575'), ('2014-08-10 02:18:38', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408617'), ('2014-08-10 02:28:59', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/456/submission/7408646'), ('2014-08-31 16:22:26', u'http://www.codeforces.com/problemset/problem/87/A', u'Trains', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/87/submission/7653363'), ('2014-09-28 22:07:52', u'http://www.codeforces.com/problemset/problem/472/A', u'Design Tutorial: Learn from Math', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8007179'), ('2014-09-28 22:11:15', u'http://www.codeforces.com/problemset/problem/472/A', u'Design Tutorial: Learn from Math', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8007515'), ('2014-09-28 23:07:59', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8012494'), ('2014-09-28 23:24:42', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8013925'), ('2014-09-28 23:32:59', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8014748'), ('2014-09-29 02:27:25', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8017466'), ('2014-09-29 02:30:15', u'http://www.codeforces.com/problemset/problem/472/B', u'Design Tutorial: Learn from Life', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/472/submission/8017497'), ('2014-10-06 21:28:24', u'http://www.codeforces.com/problemset/problem/474/A', u'Keyboard', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8112225'), ('2014-10-06 21:34:57', u'http://www.codeforces.com/problemset/problem/474/A', u'Keyboard', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8113048'), ('2014-10-06 23:10:09', u'http://www.codeforces.com/problemset/problem/474/B', u'Worms', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/474/submission/8120096'), ('2014-10-07 02:58:44', u'http://www.codeforces.com/problemset/problem/474/B', u'Worms', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8123462'), ('2014-10-07 03:55:46', u'http://www.codeforces.com/problemset/problem/474/D', u'Flowers', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8123773'), ('2014-10-07 04:02:21', u'http://www.codeforces.com/problemset/problem/474/D', u'Flowers', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/474/submission/8123802'), ('2015-07-13 19:46:13', u'http://www.codeforces.com/problemset/problem/550/A', u'Two Substrings', 'CE', '0', u'GNU C', 'http://www.codeforces.com/contest/550/submission/12030270'), ('2015-07-13 19:46:47', u'http://www.codeforces.com/problemset/problem/550/A', u'Two Substrings', 
'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/550/submission/12030276'), ('2015-07-13 20:00:28', u'http://www.codeforces.com/problemset/problem/550/A', u'Two Substrings', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/550/submission/12030404'), ('2015-07-13 20:22:36', u'http://www.codeforces.com/problemset/problem/550/B', u'Preparing Olympiad', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/550/submission/12030587'), ('2015-07-13 20:55:12', u'http://www.codeforces.com/problemset/problem/538/A', u'Cutting Banner', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/538/submission/12030895'), ('2015-07-13 20:56:42', u'http://www.codeforces.com/problemset/problem/538/A', u'Cutting Banner', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/538/submission/12030903'), ('2015-07-13 21:17:47', u'http://www.codeforces.com/problemset/problem/538/B', u'Quasi Binary', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/538/submission/12031083'), ('2015-07-13 21:32:43', u'http://www.codeforces.com/problemset/problem/538/B', u'Quasi Binary', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/538/submission/12031229'), ('2015-07-13 23:04:36', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12031995'), ('2015-07-13 23:07:06', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032008'), ('2015-07-13 23:08:06', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032015'), ('2015-07-13 23:08:45', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032021'), ('2015-07-13 23:09:16', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'WA', '0', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032027'), ('2015-07-13 23:10:05', u'http://www.codeforces.com/problemset/problem/409/H', u'A + B Strikes Back', 'AC', '100', u'GNU C', 'http://www.codeforces.com/contest/409/submission/12032034'), ('2015-08-22 22:26:26', u'http://www.codeforces.com/problemset/problem/572/A', u'Arrays', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/572/submission/12650084'), ('2015-08-22 22:54:57', u'http://www.codeforces.com/problemset/problem/572/B', u'Order Book', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/572/submission/12655042'), ('2015-08-22 23:20:25', u'http://www.codeforces.com/problemset/problem/572/B', u'Order Book', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/572/submission/12658463'), ('2015-08-29 22:25:27', u'http://www.codeforces.com/problemset/problem/574/A', u'Bear and Elections', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/12750171'), ('2015-08-29 22:28:28', u'http://www.codeforces.com/problemset/problem/574/A', u'Bear and Elections', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/12750679'), ('2015-08-29 22:52:25', u'http://www.codeforces.com/problemset/problem/574/C', u'Bear and Poker', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/12754477'), ('2015-08-30 00:49:08', u'http://www.codeforces.com/problemset/problem/574/C', u'Bear and Poker', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/574/submission/12765492'), ('2015-08-30 00:52:15', u'http://www.codeforces.com/problemset/problem/574/C', u'Bear and Poker', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/12765623'), ('2015-09-02 20:37:01', u'http://www.codeforces.com/problemset/problem/560/A', u'Currency System in Geraldion', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/560/submission/12817055'), ('2015-09-02 20:52:50', u'http://www.codeforces.com/problemset/problem/560/B', u'Gerald is into Art', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/560/submission/12817234'), ('2015-09-02 21:19:30', u'http://www.codeforces.com/problemset/problem/560/B', u'Gerald is into Art', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/560/submission/12817559'), ('2015-09-02 21:23:37', u'http://www.codeforces.com/problemset/problem/560/B', u'Gerald is into Art', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/560/submission/12817612'), ('2015-09-10 22:08:56', u'http://www.codeforces.com/problemset/problem/577/A', u'Multiplication Table', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/577/submission/12928002'), ('2015-09-10 22:57:34', u'http://www.codeforces.com/problemset/problem/577/C', u"Vasya and Petya's Game", 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/577/submission/12937380'), ('2015-09-10 23:24:19', u'http://www.codeforces.com/problemset/problem/577/C', u"Vasya and Petya's Game", 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/577/submission/12941164'), ('2015-09-10 23:35:13', u'http://www.codeforces.com/problemset/problem/577/C', u"Vasya and Petya's Game", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/577/submission/12942378'), ('2015-09-18 09:26:35', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080029'), ('2015-09-18 09:35:11', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080083'), ('2015-09-18 09:40:54', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080104'), ('2015-09-18 09:50:57', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080162'), ('2015-09-18 10:57:39', u'http://www.codeforces.com/problemset/problem/574/B', u'Bear and Three Musketeers', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/574/submission/13080670'), ('2015-09-19 10:04:18', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/13096185'), ('2015-09-19 10:06:16', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/13096197'), ('2015-09-19 10:09:39', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/13096220'), ('2015-09-19 10:13:38', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/158/submission/13096250'), ('2015-09-19 10:17:36', u'http://www.codeforces.com/problemset/problem/158/B', u'Taxi', 'AC', '100', u'GNU C++', 
'http://www.codeforces.com/contest/158/submission/13096280'), ('2015-09-19 16:27:37', u'http://www.codeforces.com/problemset/problem/160/A', u'Twins', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/13100273'), ('2015-09-19 17:17:56', u'http://www.codeforces.com/problemset/problem/550/C', u'Divisibility by Eight', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/550/submission/13100937'), ('2015-09-19 20:29:07', u'http://www.codeforces.com/problemset/problem/519/B', u'A and B and Compilation Errors', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/519/submission/13103565'), ('2015-09-20 08:58:02', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109387'), ('2015-09-20 09:05:26', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109421'), ('2015-09-20 09:10:19', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109436'), ('2015-09-20 09:15:40', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109456'), ('2015-09-20 09:19:16', u'http://www.codeforces.com/problemset/problem/204/B', u'Little Elephant and Cards', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/204/submission/13109467'), ('2015-09-22 22:07:10', u'http://www.codeforces.com/problemset/problem/580/A', u'Kefa and First Steps', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13145925'), ('2015-09-22 22:29:58', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13152519'), ('2015-09-22 23:18:24', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13162731'), ('2015-09-22 23:24:31', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13163770'), ('2015-09-22 23:25:35', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13163942'), ('2015-09-22 23:29:09', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13164502'), ('2015-09-23 00:49:34', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13171251'), ('2015-09-23 01:03:37', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13171838'), ('2015-09-23 01:38:14', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13172926'), ('2015-09-23 14:55:02', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13181387'), ('2015-09-23 18:14:51', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and 
Company', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13185934'), ('2015-09-23 18:16:58', u'http://www.codeforces.com/problemset/problem/580/B', u'Kefa and Company', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13185991'), ('2015-09-23 19:08:23', u'http://www.codeforces.com/problemset/problem/580/C', u'Kefa and Park', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13187242'), ('2015-09-23 19:24:05', u'http://www.codeforces.com/problemset/problem/580/C', u'Kefa and Park', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13187823'), ('2015-09-23 19:30:09', u'http://www.codeforces.com/problemset/problem/580/C', u'Kefa and Park', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/13187946'), ('2015-09-27 19:40:44', u'http://www.codeforces.com/problemset/problem/4/C', u'Registration System', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/4/submission/13250390'), ('2015-09-27 19:41:55', u'http://www.codeforces.com/problemset/problem/4/C', u'Registration System', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/4/submission/13250410'), ('2015-09-27 21:19:48', u'http://www.codeforces.com/problemset/problem/159/C', u'String Manipulation 1.0', 'MLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/159/submission/13251760'), ('2015-09-28 14:34:58', u'http://www.codeforces.com/problemset/problem/581/A', u'Vasya the Hipster', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13260798'), ('2015-09-28 14:44:20', u'http://www.codeforces.com/problemset/problem/581/B', u'Luxurious Houses', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13263305'), ('2015-09-28 14:56:03', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13265626'), ('2015-09-28 15:17:41', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13268882'), ('2015-09-29 12:10:51', u'http://www.codeforces.com/problemset/problem/581/B', u'Luxurious Houses', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292365'), ('2015-09-29 12:22:40', u'http://www.codeforces.com/problemset/problem/581/B', u'Luxurious Houses', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292509'), ('2015-09-29 12:34:16', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292656'), ('2015-09-29 12:43:38', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292768'), ('2015-09-29 12:47:20', u'http://www.codeforces.com/problemset/problem/581/B', u'Luxurious Houses', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292809'), ('2015-09-29 12:48:18', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13292817'), ('2015-09-29 13:10:59', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13293101'), ('2015-09-29 13:32:07', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'WA', '0', 
u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13293354'), ('2015-09-29 17:43:48', u'http://www.codeforces.com/problemset/problem/581/C', u'Developing Skills', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/581/submission/13297010'), ('2015-09-29 20:59:18', u'http://www.codeforces.com/problemset/problem/263/A', u'Beautiful Matrix', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/263/submission/13300553'), ('2015-09-29 21:14:53', u'http://www.codeforces.com/problemset/problem/118/B', u'Present from Lena', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/118/submission/13300823'), ('2015-09-29 21:29:52', u'http://www.codeforces.com/problemset/problem/118/B', u'Present from Lena', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/118/submission/13301123'), ('2015-10-03 18:44:23', u'http://www.codeforces.com/problemset/problem/268/B', u'Buttons', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/268/submission/13359900'), ('2015-10-03 20:04:32', u'http://www.codeforces.com/problemset/problem/569/B', u'Inventory', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/569/submission/13360927'), ('2015-10-03 20:06:13', u'http://www.codeforces.com/problemset/problem/569/B', u'Inventory', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/569/submission/13360949'), ('2015-10-03 21:05:23', u'http://www.codeforces.com/problemset/problem/525/C', u'Ilya and Sticks', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13361790'), ('2015-10-03 21:06:58', u'http://www.codeforces.com/problemset/problem/525/C', u'Ilya and Sticks', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13361810'), ('2015-10-03 21:09:02', u'http://www.codeforces.com/problemset/problem/525/C', u'Ilya and Sticks', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13361836'), ('2015-10-03 22:25:41', u'http://www.codeforces.com/problemset/problem/583/A', u'Asphalting Roads', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/583/submission/13365272'), ('2015-10-03 23:30:49', u'http://www.codeforces.com/problemset/problem/583/B', u"Robot's Task", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/583/submission/13378169'), ('2015-10-06 22:05:41', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'TLE', '0', u'Python 2', 'http://www.codeforces.com/contest/584/submission/13436363'), ('2015-10-06 22:17:59', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/584/submission/13440624'), ('2015-10-06 22:24:51', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13442261'), ('2015-10-06 22:25:07', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'CE', '0', u'Python 2', 'http://www.codeforces.com/contest/584/submission/13442319'), ('2015-10-06 22:26:42', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'AC', '100', u'Python 2', 'http://www.codeforces.com/contest/584/submission/13442651'), ('2015-10-06 22:52:47', u'http://www.codeforces.com/problemset/problem/584/B', u'Kolya and Tanya ', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13447777'), ('2015-10-06 22:58:59', u'http://www.codeforces.com/problemset/problem/584/B', u'Kolya and Tanya ', 'AC', '100', u'Python 2', 
'http://www.codeforces.com/contest/584/submission/13448876'), ('2015-10-06 23:14:57', u'http://www.codeforces.com/problemset/problem/584/C', u'Marina and Vasya', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13451585'), ('2015-10-06 23:35:46', u'http://www.codeforces.com/problemset/problem/584/C', u'Marina and Vasya', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13454813'), ('2015-10-06 23:44:55', u'http://www.codeforces.com/problemset/problem/584/C', u'Marina and Vasya', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13456081'), ('2015-10-07 01:04:27', u'http://www.codeforces.com/problemset/problem/584/B', u'Kolya and Tanya ', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13460503'), ('2015-10-07 18:02:31', u'http://www.codeforces.com/problemset/problem/584/A', u'Olesya and Rodion', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/584/submission/13473005'), ('2015-10-08 21:26:54', u'http://www.codeforces.com/problemset/problem/92/B', u'Binary Number', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/92/submission/13496730'), ('2015-10-09 01:22:57', u'http://www.codeforces.com/problemset/problem/456/A', u'Laptops', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/456/submission/13500243'), ('2015-10-09 01:35:03', u'http://www.codeforces.com/problemset/problem/52/A', u'123-sequence', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/52/submission/13500398'), ('2015-10-09 06:38:55', u'http://www.codeforces.com/problemset/problem/266/B', u'Queue at the School', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/266/submission/13502318'), ('2015-10-09 06:45:08', u'http://www.codeforces.com/problemset/problem/479/A', u'Expression', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/479/submission/13502351'), ('2015-10-09 06:46:35', u'http://www.codeforces.com/problemset/problem/479/A', u'Expression', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/479/submission/13502358'), ('2015-10-09 06:50:39', u'http://www.codeforces.com/problemset/problem/61/A', u'Ultra-Fast Mathematician', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/61/submission/13502387'), ('2015-10-09 07:03:29', u'http://www.codeforces.com/problemset/problem/462/B', u'Appleman and Card Game', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/462/submission/13502451'), ('2015-10-09 07:05:19', u'http://www.codeforces.com/problemset/problem/462/B', u'Appleman and Card Game', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/462/submission/13502463'), ('2015-10-09 07:06:54', u'http://www.codeforces.com/problemset/problem/462/B', u'Appleman and Card Game', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/462/submission/13502474'), ('2015-10-09 22:47:48', u'http://www.codeforces.com/problemset/problem/266/B', u'Queue at the School', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/266/submission/13514395'), ('2015-10-09 23:14:22', u'http://www.codeforces.com/problemset/problem/525/B', u'Pasha and String', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13514840'), ('2015-10-09 23:30:20', u'http://www.codeforces.com/problemset/problem/525/B', u'Pasha and String', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13515120'), ('2015-10-11 04:08:55', u'http://www.codeforces.com/problemset/problem/478/A', u'Initial Bet', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/478/submission/13538926'), ('2015-10-11 04:10:18', u'http://www.codeforces.com/problemset/problem/478/A', u'Initial Bet', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/478/submission/13538931'), ('2015-10-11 04:28:02', u'http://www.codeforces.com/problemset/problem/459/B', u'Pashmak and Flowers', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/459/submission/13538989'), ('2015-10-11 04:29:51', u'http://www.codeforces.com/problemset/problem/459/B', u'Pashmak and Flowers', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/459/submission/13538995'), ('2015-10-11 04:37:27', u'http://www.codeforces.com/problemset/problem/459/B', u'Pashmak and Flowers', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/459/submission/13539018'), ('2015-10-25 14:34:14', u'http://www.codeforces.com/problemset/problem/591/A', u"Wizards' Duel", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13836193'), ('2015-10-25 14:50:25', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13839725'), ('2015-10-25 15:34:56', u'http://www.codeforces.com/problemset/problem/591/C', u'Median Smoothing', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13845641'), ('2015-10-25 15:38:20', u'http://www.codeforces.com/problemset/problem/591/C', u'Median Smoothing', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13846000'), ('2015-10-25 22:51:09', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13857177'), ('2015-10-25 23:23:19', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13857740'), ('2015-10-26 10:46:53', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13866457'), ('2015-10-26 10:53:43', u'http://www.codeforces.com/problemset/problem/591/B', u'Rebranding', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/591/submission/13866518'), ('2015-10-26 19:50:00', u'http://www.codeforces.com/problemset/problem/160/B', u'Unlucky Ticket', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/160/submission/13873974'), ('2015-10-27 02:45:23', u'http://www.codeforces.com/problemset/problem/99/A', u'Help Far Away Kingdom', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/99/submission/13881024'), ('2015-10-27 03:13:34', u'http://www.codeforces.com/problemset/problem/12/B', u'Correct Solution?', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/12/submission/13881211'), ('2015-10-28 06:05:19', u'http://www.codeforces.com/problemset/problem/405/C', u'Unusual Product', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/405/submission/13906955'), ('2015-10-28 08:04:56', u'http://www.codeforces.com/problemset/problem/270/B', u'Multithreading', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/270/submission/13907587'), ('2015-10-28 21:42:49', u'http://www.codeforces.com/problemset/problem/525/C', u'Ilya and Sticks', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/525/submission/13918621'), ('2015-10-28 23:48:03', u'http://www.codeforces.com/problemset/problem/285/C', u'Building Permutation', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/285/submission/13920882'), ('2015-10-28 23:49:59', u'http://www.codeforces.com/problemset/problem/285/C', u'Building Permutation', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/285/submission/13920913'), ('2015-10-30 10:34:56', u'http://www.codeforces.com/problemset/problem/245/A', u'System Administrator', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/245/submission/13946807'), ('2015-10-30 10:49:01', u'http://www.codeforces.com/problemset/problem/102/B', u'Sum of Digits', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/102/submission/13946899'), ('2015-10-30 10:53:35', u'http://www.codeforces.com/problemset/problem/102/B', u'Sum of Digits', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/102/submission/13946926'), ('2015-10-31 22:14:30', u'http://www.codeforces.com/problemset/problem/592/A', u'PawnChess', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/13975670'), ('2015-10-31 22:29:27', u'http://www.codeforces.com/problemset/problem/592/B', u'The Monster and the Squirrel', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/13978806'), ('2015-10-31 22:58:55', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/13983585'), ('2015-10-31 23:11:05', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/13985339'), ('2015-11-01 01:46:31', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/592/submission/13993129'), ('2015-11-01 02:00:03', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/592/submission/13993447'), ('2015-11-01 02:04:32', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/592/submission/13993623'), ('2015-11-01 10:48:24', u'http://www.codeforces.com/problemset/problem/592/A', u'PawnChess', 'CE', '0', u'Python 2', 'http://www.codeforces.com/contest/592/submission/14000480'), ('2015-11-01 10:48:46', u'http://www.codeforces.com/problemset/problem/592/A', u'PawnChess', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/14000483'), ('2015-11-03 02:17:02', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'AC', '100', u'Python 2', 'http://www.codeforces.com/contest/592/submission/14033816'), ('2015-11-03 02:30:31', u'http://www.codeforces.com/problemset/problem/592/C', u'The Big Race', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/592/submission/14033957'), ('2015-11-04 14:58:56', u'http://www.codeforces.com/problemset/problem/339/B', u'Xenia and Ringroad', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/339/submission/14054303'), ('2015-11-04 15:00:05', u'http://www.codeforces.com/problemset/problem/339/B', u'Xenia and Ringroad', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/339/submission/14054317'), ('2015-11-04 15:29:08', u'http://www.codeforces.com/problemset/problem/11/A', u'Increasing Sequence', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/11/submission/14054735'), ('2015-11-04 16:30:38', u'http://www.codeforces.com/problemset/problem/567/A', u'Lineland Mail', 'AC', '100', u'GNU C++', 
'http://www.codeforces.com/contest/567/submission/14055720'), ('2015-11-05 10:34:36', u'http://www.codeforces.com/problemset/problem/593/A', u'2Char', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/593/submission/14082176'), ('2015-11-06 21:20:07', u'http://www.codeforces.com/problemset/problem/159/C', u'String Manipulation 1.0', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/159/submission/14109516'), ('2015-11-06 21:47:19', u'http://www.codeforces.com/problemset/problem/159/C', u'String Manipulation 1.0', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/159/submission/14109921'), ('2015-11-08 22:05:35', u'http://www.codeforces.com/problemset/problem/595/A', u'Vitaly and Night', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/595/submission/14145703'), ('2015-11-08 22:44:17', u'http://www.codeforces.com/problemset/problem/595/B', u'Pasha and Phone', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/595/submission/14150515'), ('2015-11-08 23:28:37', u'http://www.codeforces.com/problemset/problem/595/B', u'Pasha and Phone', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/595/submission/14155293'), ('2015-11-16 01:07:14', u'http://www.codeforces.com/problemset/problem/596/A', u'Wilbur and Swimming Pool', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/596/submission/14288508'), ('2015-11-16 01:09:02', u'http://www.codeforces.com/problemset/problem/596/B', u'Wilbur and Array', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/596/submission/14288537'), ('2015-11-16 01:16:40', u'http://www.codeforces.com/problemset/problem/596/A', u'Wilbur and Swimming Pool', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/596/submission/14288651'), ('2015-11-16 01:17:38', u'http://www.codeforces.com/problemset/problem/596/A', u'Wilbur and Swimming Pool', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/596/submission/14288673'), ('2015-12-01 21:15:25', u'http://www.codeforces.com/problemset/problem/604/A', u'Uncowed Forces', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/604/submission/14587410'), ('2015-12-01 21:21:57', u'http://www.codeforces.com/problemset/problem/604/A', u'Uncowed Forces', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/604/submission/14588907'), ('2015-12-01 21:25:25', u'http://www.codeforces.com/problemset/problem/604/A', u'Uncowed Forces', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/604/submission/14589670'), ('2015-12-01 21:50:29', u'http://www.codeforces.com/problemset/problem/604/B', u'More Cowbell', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/604/submission/14593977'), ('2015-12-09 21:53:15', u'http://www.codeforces.com/problemset/problem/606/C', u'Sorting Railway Cars', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14718869'), ('2015-12-09 22:14:26', u'http://www.codeforces.com/problemset/problem/606/C', u'Sorting Railway Cars', 'HCK', '-50', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14722405'), ('2015-12-09 22:44:59', u'http://www.codeforces.com/problemset/problem/606/A', u'Magic Spheres', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14726450'), ('2015-12-09 22:55:27', u'http://www.codeforces.com/problemset/problem/606/A', u'Magic Spheres', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14727619'), ('2015-12-09 22:58:11', u'http://www.codeforces.com/problemset/problem/606/A', u'Magic Spheres', 'WA', '0', u'GNU C++', 
'http://www.codeforces.com/contest/606/submission/14727938'), ('2015-12-09 23:00:38', u'http://www.codeforces.com/problemset/problem/606/A', u'Magic Spheres', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/606/submission/14728208'), ('2015-12-15 21:36:55', u'http://www.codeforces.com/problemset/problem/580/A', u'Kefa and First Steps', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/580/submission/14817821'), ('2015-12-17 18:01:21', u'http://www.codeforces.com/problemset/problem/598/B', u'Queries on a String', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/598/submission/14845709'), ('2015-12-17 18:09:23', u'http://www.codeforces.com/problemset/problem/598/B', u'Queries on a String', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/598/submission/14845795'), ('2015-12-17 18:55:21', u'http://www.codeforces.com/problemset/problem/597/A', u'Divisibility', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/597/submission/14846361'), ('2015-12-17 18:56:54', u'http://www.codeforces.com/problemset/problem/597/A', u'Divisibility', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/597/submission/14846374'), ('2015-12-17 19:02:03', u'http://www.codeforces.com/problemset/problem/597/A', u'Divisibility', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/597/submission/14846436'), ('2015-12-17 19:05:46', u'http://www.codeforces.com/problemset/problem/597/A', u'Divisibility', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/597/submission/14846492'), ('2015-12-22 22:54:31', u'http://www.codeforces.com/problemset/problem/609/B', u'\u041a\u043d\u0438\u0433\u0430 - \u043b\u0443\u0447\u0448\u0438\u0439 \u043f\u043e\u0434\u0430\u0440\u043e\u043a', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14928518'), ('2015-12-23 01:45:32', u'http://www.codeforces.com/problemset/problem/609/C', u'Load Balancing', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14930319'), ('2015-12-23 01:48:44', u'http://www.codeforces.com/problemset/problem/609/C', u'Load Balancing', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14930347'), ('2015-12-23 02:12:32', u'http://www.codeforces.com/problemset/problem/609/C', u'Load Balancing', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14930527'), ('2015-12-23 02:14:12', u'http://www.codeforces.com/problemset/problem/609/C', u'Load Balancing', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/609/submission/14930545'), ('2015-12-24 03:46:52', u'http://www.codeforces.com/problemset/problem/608/A', u'Saitama Destroys Hotel', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/608/submission/14961192'), ('2015-12-24 03:56:12', u'http://www.codeforces.com/problemset/problem/600/B', u'Queries about less or equal elements', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/600/submission/14961257'), ('2015-12-24 04:11:24', u'http://www.codeforces.com/problemset/problem/600/A', u'Extract Numbers', 'AC', '100', u'PyPy 2', 'http://www.codeforces.com/contest/600/submission/14961343'), ('2015-12-26 00:19:54', u'http://www.codeforces.com/problemset/problem/600/B', u'Queries about less or equal elements', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/600/submission/15021384'), ('2015-12-31 02:06:51', u'http://www.codeforces.com/problemset/problem/611/A', u'New Year and Days', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/611/submission/15129041'), ('2015-12-31 
02:07:53', u'http://www.codeforces.com/problemset/problem/611/A', u'New Year and Days', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/611/submission/15129051'), ('2015-12-31 02:39:02', u'http://www.codeforces.com/problemset/problem/611/B', u'New Year and Old Property', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/611/submission/15129360'), ('2016-01-01 00:08:10', u'http://www.codeforces.com/problemset/problem/611/B', u'New Year and Old Property', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/611/submission/15140290'), ('2016-01-02 01:17:28', u'http://www.codeforces.com/problemset/problem/610/A', u'Pasha and Stick', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/610/submission/15152467'), ('2016-01-02 02:05:01', u'http://www.codeforces.com/problemset/problem/610/B', u'Vika and Squares', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/610/submission/15152883'), ('2016-01-05 11:52:15', u'http://www.codeforces.com/problemset/problem/189/A', u'Cut Ribbon', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/189/submission/15187913'), ('2016-01-05 12:26:38', u'http://www.codeforces.com/problemset/problem/489/C', u'Given Length and Sum of Digits...', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/489/submission/15188193'), ('2016-01-06 20:03:28', u'http://www.codeforces.com/problemset/problem/570/C', u'Replacement', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/570/submission/15208011'), ('2016-01-06 20:09:17', u'http://www.codeforces.com/problemset/problem/570/C', u'Replacement', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/570/submission/15208096'), ('2016-01-09 14:53:09', u'http://www.codeforces.com/problemset/problem/615/A', u'Bulbs', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/615/submission/15266906'), ('2016-01-14 22:12:10', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15348242'), ('2016-01-14 22:19:51', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15350653'), ('2016-01-14 22:26:04', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15352533'), ('2016-01-14 22:45:52', u'http://www.codeforces.com/problemset/problem/614/B', u"Gena's Code", 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15357739'), ('2016-01-14 22:49:49', u'http://www.codeforces.com/problemset/problem/614/B', u"Gena's Code", 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15358770'), ('2016-01-14 23:13:26', u'http://www.codeforces.com/problemset/problem/614/B', u"Gena's Code", 'TLE', '0', u'PyPy 2', 'http://www.codeforces.com/contest/614/submission/15364083'), ('2016-01-14 23:17:00', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'HCK', '-50', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15364825'), ('2016-01-15 01:46:02', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15376622'), ('2016-01-15 01:50:32', u'http://www.codeforces.com/problemset/problem/614/A', u'Link/Cut Tree', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15376775'), ('2016-01-15 02:04:58', 
u'http://www.codeforces.com/problemset/problem/614/B', u"Gena's Code", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/614/submission/15377119'), ('2016-01-31 01:01:03', u'http://www.codeforces.com/problemset/problem/618/A', u'Slime Combining', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/618/submission/15684756'), ('2016-01-31 01:44:18', u'http://www.codeforces.com/problemset/problem/618/B', u'Guess the Permutation', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/618/submission/15685235'), ('2016-02-01 07:17:18', u'http://www.codeforces.com/problemset/problem/621/A', u'Wet Shark and Odd and Even', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/621/submission/15722644'), ('2016-02-01 07:40:26', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/621/submission/15722848'), ('2016-02-01 07:40:45', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15722852'), ('2016-02-01 07:59:16', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15723041'), ('2016-02-01 08:01:58', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15723074'), ('2016-02-01 08:05:42', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15723107'), ('2016-02-01 08:07:51', u'http://www.codeforces.com/problemset/problem/621/B', u'Wet Shark and Bishops', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/621/submission/15723123'), ('2016-02-22 00:05:38', u'http://www.codeforces.com/problemset/problem/629/A', u'Far Relative\u2019s Birthday Cake', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/629/submission/16265987'), ('2016-02-28 19:19:12', u'http://www.codeforces.com/problemset/problem/629/B', u'Far Relative\u2019s Problem', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/629/submission/16404240'), ('2016-02-28 20:35:59', u'http://www.codeforces.com/problemset/problem/630/C', u'Lucky Numbers', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/630/submission/16405407'), ('2016-02-28 20:37:18', u'http://www.codeforces.com/problemset/problem/630/C', u'Lucky Numbers', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/630/submission/16405419'), ('2016-02-28 20:41:06', u'http://www.codeforces.com/problemset/problem/630/C', u'Lucky Numbers', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/630/submission/16405456'), ('2016-07-31 00:44:37', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19524584'), ('2016-07-31 00:49:29', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19524679'), ('2016-07-31 00:58:14', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19524873'), ('2016-07-31 18:30:30', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19538526'), 
('2016-07-31 18:49:30', u'http://www.codeforces.com/problemset/problem/699/B', u'One Bomb', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19538834'), ('2016-07-31 19:01:53', u'http://www.codeforces.com/problemset/problem/699/A', u'Launch of Collider', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/699/submission/19539062'), ('2016-07-31 20:11:24', u'http://www.codeforces.com/problemset/problem/701/A', u'Cards', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/701/submission/19540208'), ('2016-07-31 20:35:26', u'http://www.codeforces.com/problemset/problem/701/B', u'Cells Not Under Attack', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/701/submission/19540595'), ('2016-07-31 20:39:11', u'http://www.codeforces.com/problemset/problem/701/B', u'Cells Not Under Attack', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/701/submission/19540660'), ('2016-08-02 03:12:36', u'http://www.codeforces.com/problemset/problem/702/A', u'Maximum Increase', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568636'), ('2016-08-02 03:15:28', u'http://www.codeforces.com/problemset/problem/702/A', u'Maximum Increase', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568664'), ('2016-08-02 03:16:08', u'http://www.codeforces.com/problemset/problem/702/A', u'Maximum Increase', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568668'), ('2016-08-02 03:23:31', u'http://www.codeforces.com/problemset/problem/702/B', u'Powers of Two', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568738'), ('2016-08-02 03:25:16', u'http://www.codeforces.com/problemset/problem/702/B', u'Powers of Two', 'TLE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/702/submission/19568745'), ('2016-08-04 20:47:23', u'http://www.codeforces.com/problemset/problem/703/A', u'Mishka and Game', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19617826'), ('2016-08-04 20:49:28', u'http://www.codeforces.com/problemset/problem/703/A', u'Mishka and Game', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19619139'), ('2016-08-04 21:22:13', u'http://www.codeforces.com/problemset/problem/703/B', u'Mishka and trip', 'SK', '0', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19624817'), ('2016-08-04 22:36:40', u'http://www.codeforces.com/problemset/problem/703/B', u'Mishka and trip', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19633551'), ('2016-08-05 01:11:14', u'http://www.codeforces.com/problemset/problem/703/B', u'Mishka and trip', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/703/submission/19638245'), ('2016-08-08 15:49:19', u'http://www.codeforces.com/problemset/problem/705/A', u'Hulk', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/19725753'), ('2016-08-08 18:25:13', u'http://www.codeforces.com/problemset/problem/705/B', u'Spider Man', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/19728563'), ('2016-08-11 22:10:00', u'http://www.codeforces.com/problemset/problem/706/A', u'Beru-taxi', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19788500'), ('2016-08-11 22:19:02', u'http://www.codeforces.com/problemset/problem/706/B', u'Interesting drink', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19792085'), ('2016-08-11 
22:29:00', u'http://www.codeforces.com/problemset/problem/706/B', u'Interesting drink', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19794671'), ('2016-08-11 22:41:28', u'http://www.codeforces.com/problemset/problem/706/B', u'Interesting drink', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19797228'), ('2016-08-12 01:49:03', u'http://www.codeforces.com/problemset/problem/706/A', u'Beru-taxi', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19812426'), ('2016-08-12 02:19:20', u'http://www.codeforces.com/problemset/problem/706/A', u'Beru-taxi', 'CE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/706/submission/19813299'), ('2016-08-12 02:22:25', u'http://www.codeforces.com/problemset/problem/706/A', u'Beru-taxi', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19813362'), ('2016-08-14 19:27:06', u'http://www.codeforces.com/problemset/problem/702/B', u'Powers of Two', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/702/submission/19869883'), ('2016-08-14 20:27:13', u'http://www.codeforces.com/problemset/problem/706/C', u'Hard problem', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19870767'), ('2016-08-15 04:49:12', u'http://www.codeforces.com/problemset/problem/706/D', u"Vasiliy's Multiset", 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19877506'), ('2016-08-15 04:55:02', u'http://www.codeforces.com/problemset/problem/706/D', u"Vasiliy's Multiset", 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19877543'), ('2016-08-15 06:38:23', u'http://www.codeforces.com/problemset/problem/706/D', u"Vasiliy's Multiset", 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19878193'), ('2016-08-17 22:37:54', u'http://www.codeforces.com/problemset/problem/706/D', u"Vasiliy's Multiset", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/706/submission/19932138'), ('2016-08-20 15:22:16', u'http://www.codeforces.com/problemset/problem/29/C', u'Mail Stamps', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/29/submission/19979318'), ('2016-08-20 15:22:44', u'http://www.codeforces.com/problemset/problem/29/C', u'Mail Stamps', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/29/submission/19979332'), ('2016-08-20 16:20:32', u'http://www.codeforces.com/problemset/problem/637/B', u'Chat Order', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/637/submission/19980245'), ('2016-08-20 16:22:06', u'http://www.codeforces.com/problemset/problem/637/B', u'Chat Order', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/637/submission/19980267'), ('2016-08-20 16:25:04', u'http://www.codeforces.com/problemset/problem/637/B', u'Chat Order', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/637/submission/19980309'), ('2016-08-20 17:25:07', u'http://www.codeforces.com/problemset/problem/622/C', u'Not Equal on a Segment', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/622/submission/19981265'), ('2016-08-20 17:30:50', u'http://www.codeforces.com/problemset/problem/622/C', u'Not Equal on a Segment', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/622/submission/19981354'), ('2016-08-20 18:39:54', u'http://www.codeforces.com/problemset/problem/707/A', u"Brain's Photos", 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/707/submission/19983584'), ('2016-08-20 19:05:41', 
u'http://www.codeforces.com/problemset/problem/707/B', u'Bakery', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/707/submission/19990875'), ('2016-08-21 02:49:44', u'http://www.codeforces.com/problemset/problem/707/C', u'Pythagorean Triples', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/707/submission/20013751'), ('2016-08-24 06:34:33', u'http://www.codeforces.com/problemset/problem/710/B', u'Optimal Point on a Line', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096202'), ('2016-08-24 06:44:27', u'http://www.codeforces.com/problemset/problem/710/B', u'Optimal Point on a Line', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096285'), ('2016-08-24 06:49:56', u'http://www.codeforces.com/problemset/problem/710/A', u'King Moves', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096337'), ('2016-08-24 06:58:51', u'http://www.codeforces.com/problemset/problem/710/C', u'Magic Odd Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096421'), ('2016-08-24 07:05:26', u'http://www.codeforces.com/problemset/problem/710/C', u'Magic Odd Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096477'), ('2016-08-24 07:07:46', u'http://www.codeforces.com/problemset/problem/710/C', u'Magic Odd Square', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20096494'), ('2016-08-25 05:52:47', u'http://www.codeforces.com/problemset/problem/709/A', u'Juicer', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/709/submission/20140096'), ('2016-08-25 06:01:00', u'http://www.codeforces.com/problemset/problem/709/C', u'Letters Cyclic Shift', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/709/submission/20140181'), ('2016-08-25 06:04:24', u'http://www.codeforces.com/problemset/problem/709/C', u'Letters Cyclic Shift', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/709/submission/20140220'), ('2016-08-25 06:05:03', u'http://www.codeforces.com/problemset/problem/709/C', u'Letters Cyclic Shift', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/709/submission/20140228'), ('2016-08-25 15:38:47', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'TLE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20150798'), ('2016-08-25 17:26:47', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20152979'), ('2016-08-25 17:28:05', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153009'), ('2016-08-25 17:29:43', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153046'), ('2016-08-25 17:33:09', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153146'), ('2016-08-25 17:35:27', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153204'), ('2016-08-25 17:40:33', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/705/submission/20153304'), ('2016-08-25 17:41:24', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'RE', '0', u'GNU C++', 
'http://www.codeforces.com/contest/705/submission/20153316'), ('2016-08-25 17:47:30', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153471'), ('2016-08-25 17:50:56', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153564'), ('2016-08-25 17:52:06', u'http://www.codeforces.com/problemset/problem/704/A', u'Thor', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/704/submission/20153599'), ('2016-08-25 17:53:50', u'http://www.codeforces.com/problemset/problem/704/A', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/704/submission/20153653'), ('2016-08-25 17:59:43', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153767'), ('2016-08-25 18:03:16', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153836'), ('2016-08-25 18:05:03', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153878'), ('2016-08-25 18:09:01', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20153955'), ('2016-08-25 18:10:53', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154001'), ('2016-08-25 18:13:15', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154058'), ('2016-08-25 18:15:21', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154102'), ('2016-08-25 18:16:40', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154129'), ('2016-08-25 18:23:26', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154295'), ('2016-08-25 18:24:26', u'http://www.codeforces.com/problemset/problem/705/C', u'Thor', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/705/submission/20154322'), ('2016-08-29 10:50:32', u'http://www.codeforces.com/problemset/problem/710/C', u'Magic Odd Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/710/submission/20222968'), ('2016-08-29 17:43:28', u'http://www.codeforces.com/problemset/problem/711/A', u'Bus to Udayland', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/711/submission/20230874'), ('2016-08-29 17:48:47', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/711/submission/20232719'), ('2016-08-29 17:55:00', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20234607'), ('2016-08-29 18:08:37', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20238630'), ('2016-08-29 18:11:38', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic 
Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20239424'), ('2016-08-29 18:21:50', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20241874'), ('2016-08-29 18:36:36', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20245231'), ('2016-08-29 18:50:27', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'CE', '0', u'GNU C++', 'http://www.codeforces.com/contest/711/submission/20247880'), ('2016-08-29 18:50:49', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20247939'), ('2016-08-29 21:34:54', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20256999'), ('2016-08-29 22:47:30', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20260046'), ('2016-08-29 22:49:03', u'http://www.codeforces.com/problemset/problem/711/B', u'Chris and Magic Square', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/711/submission/20260094'), ('2016-09-03 21:04:35', u'http://www.codeforces.com/problemset/problem/510/B', u'Fox And Two Dots', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/510/submission/20365149'), ('2016-09-03 22:14:53', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'RE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/129/submission/20366343'), ('2016-09-03 22:19:35', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'RE', '0', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20366460'), ('2016-09-03 23:04:55', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20367405'), ('2016-09-03 23:09:28', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'TLE', '0', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20367491'), ('2016-09-03 23:44:56', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20368170'), ('2016-09-03 23:54:28', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'WA', '0', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20368355'), ('2016-09-03 23:58:44', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'AC', '100', u'GNU C++', 'http://www.codeforces.com/contest/129/submission/20368443'), ('2016-09-04 00:00:06', u'http://www.codeforces.com/problemset/problem/300/B', u'Coach', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/300/submission/20368473'), ('2016-09-04 00:00:23', u'http://www.codeforces.com/problemset/problem/129/B', u'Students and Shoelaces', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/129/submission/20368478'), ('2016-09-04 00:57:23', u'http://www.codeforces.com/problemset/problem/300/B', u'Coach', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/300/submission/20369438'), ('2016-09-04 01:05:04', 
u'http://www.codeforces.com/problemset/problem/300/B', u'Coach', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/300/submission/20369545'), ('2016-09-04 01:14:44', u'http://www.codeforces.com/problemset/problem/300/B', u'Coach', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/300/submission/20369700'), ('2016-09-05 05:28:45', u'http://www.codeforces.com/problemset/problem/602/C', u'The Two Routes', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/602/submission/20391156'), ('2016-09-12 19:52:06', u'http://www.codeforces.com/problemset/problem/712/A', u'Memory and Crow', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/712/submission/20550755'), ('2016-09-12 20:01:01', u'http://www.codeforces.com/problemset/problem/712/B', u'Memory and Trident', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/712/submission/20550916'), ('2016-09-12 20:50:03', u'http://www.codeforces.com/problemset/problem/712/C', u'Memory and De-Evolution', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/712/submission/20551627'), ('2016-09-12 21:16:55', u'http://www.codeforces.com/problemset/problem/712/C', u'Memory and De-Evolution', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/712/submission/20552025'), ('2016-09-17 22:47:03', u'http://www.codeforces.com/problemset/problem/716/A', u'Crazy Computer', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/716/submission/20714780'), ('2016-09-17 23:47:10', u'http://www.codeforces.com/problemset/problem/716/B', u'Complete the Word', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/716/submission/20716899'), ('2016-09-17 23:48:25', u'http://www.codeforces.com/problemset/problem/716/B', u'Complete the Word', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/716/submission/20716935'), ('2016-09-21 18:03:35', u'http://www.codeforces.com/problemset/problem/716/B', u'Complete the Word', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/716/submission/20794436'), ('2016-09-23 18:38:43', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20837817'), ('2016-09-23 18:40:43', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20838364'), ('2016-09-23 18:42:38', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20839135'), ('2016-09-23 18:44:24', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20840020'), ('2016-09-23 18:45:54', u'http://www.codeforces.com/problemset/problem/719/A', u'Vitya in the Countryside', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20840715'), ('2016-09-23 18:56:54', u'http://www.codeforces.com/problemset/problem/719/B', u'Anatoly and Cockroaches', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20845155'), ('2016-09-23 19:24:28', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20852944'), ('2016-09-23 19:30:10', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 
'http://www.codeforces.com/contest/719/submission/20854218'), ('2016-09-23 19:46:36', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20857522'), ('2016-09-23 19:49:31', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20858071'), ('2016-09-23 20:02:28', u'http://www.codeforces.com/problemset/problem/719/C', u'Efim and Strange Grade', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/719/submission/20860429'), ('2016-10-02 00:45:43', u'http://www.codeforces.com/problemset/problem/721/A', u'One-dimensional Japanese Crossword', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/721/submission/21096198'), ('2016-10-02 00:56:47', u'http://www.codeforces.com/problemset/problem/721/B', u'Passwords', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/721/submission/21096352'), ('2016-10-02 01:22:26', u'http://www.codeforces.com/problemset/problem/721/B', u'Passwords', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/721/submission/21096748'), ('2016-10-02 16:21:19', u'http://www.codeforces.com/problemset/problem/722/A', u'Broken Clock', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/722/submission/21112277'), ('2016-10-02 16:23:01', u'http://www.codeforces.com/problemset/problem/722/A', u'Broken Clock', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/722/submission/21112319'), ('2016-10-02 16:54:23', u'http://www.codeforces.com/problemset/problem/722/B', u'Verse Pattern', 'WA', '0', u'GNU C++11', 'http://www.codeforces.com/contest/722/submission/21113003'), ('2016-10-02 16:56:42', u'http://www.codeforces.com/problemset/problem/722/B', u'Verse Pattern', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/722/submission/21113067'), ('2016-10-05 02:06:14', u'http://www.codeforces.com/problemset/problem/723/A', u'The New Year: Meeting Friends', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/723/submission/21196305'), ('2016-10-05 02:24:17', u'http://www.codeforces.com/problemset/problem/723/B', u'Text Document Analysis', 'AC', '100', u'Python 2', 'http://www.codeforces.com/contest/723/submission/21196529'), ('2017-03-06 05:02:38', u'http://www.codeforces.com/problemset/problem/723/B', u'Text Document Analysis', 'AC', '100', u'Python 2', 'http://www.codeforces.com/contest/723/submission/25275840'), ('2017-03-06 05:03:10', u'http://www.codeforces.com/problemset/problem/723/B', u'Text Document Analysis', 'WA', '0', u'Python 2', 'http://www.codeforces.com/contest/723/submission/25275845'), ('2017-03-06 05:07:21', u'http://www.codeforces.com/problemset/problem/429/B', u'Working out', 'TLE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/429/submission/25275894'), ('2017-03-06 05:08:26', u'http://www.codeforces.com/problemset/problem/429/B', u'Working out', 'AC', '100', u'GNU C++11', 'http://www.codeforces.com/contest/429/submission/25275906'), ('2017-03-06 05:15:08', u'http://www.codeforces.com/problemset/problem/429/B', u'Working out', 'CE', '0', u'GNU C++11', 'http://www.codeforces.com/contest/429/submission/25275955'), ('2018-03-01 04:19:28', u'http://www.codeforces.com/problemset/problem/577/A', u'Multiplication Table', 'RE', '0', u'Python 2', 'http://www.codeforces.com/contest/577/submission/35797975'), ('2018-03-01 04:19:47', u'http://www.codeforces.com/problemset/problem/577/A', 
u'Multiplication Table', 'AC', '100', u'Python 3', 'http://www.codeforces.com/contest/577/submission/35797984')],
"Spoj": [('2013-08-09 16:13:01', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'CE', '0', u'ADA95', ''), ('2013-08-09 16:13:19', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'RE', '0', u'C', ''), ('2013-08-09 16:13:50', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'RE', '0', u'C', ''), ('2013-08-09 16:15:24', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'RE', '0', u'C', ''), ('2013-08-12 10:48:56', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'CE', '0', u'ADA95', ''), ('2013-08-12 10:50:14', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'CE', '0', u'ADA95', ''), ('2013-08-13 19:11:24', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'WA', '0', u'C', ''), ('2013-08-13 19:11:50', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'WA', '0', u'C', ''), ('2015-03-24 05:08:29', 'https://www.spoj.com/problems/TEST/', u'Life, the Universe, and Everything', 'AC', '100', u'C', ''), ('2015-03-28 00:47:43', 'https://www.spoj.com/problems/NSTEPS/', u'Number Steps', 'AC', '100', u'C++', ''), ('2015-06-30 03:38:17', 'https://www.spoj.com/problems/FCTRL/', u'Factorial', 'AC', '100', u'CPP', ''), ('2015-06-30 03:41:12', 'https://www.spoj.com/problems/FCTRL/', u'Factorial', 'AC', '100', u'CPP', ''), ('2015-06-30 03:42:49', 'https://www.spoj.com/problems/FCTRL/', u'Factorial', 'AC', '100', u'CPP', ''), ('2015-06-30 04:00:12', 'https://www.spoj.com/problems/FCTRL2/', u'Small factorials', 'AC', '100', u'C', ''), ('2015-06-30 04:16:14', 'https://www.spoj.com/problems/SAMER08F/', u'Feynman', 'AC', '100', u'CPP', ''), ('2015-06-30 04:58:12', 'https://www.spoj.com/problems/LASTDIG/', u'The last digit', 'AC', '100', u'CPP', ''), ('2015-07-25 17:08:08', 'https://www.spoj.com/problems/FARIDA/', u'Princess Farida', 'WA', '0', u'CPP', ''), ('2015-07-25 17:11:03', 'https://www.spoj.com/problems/FARIDA/', u'Princess Farida', 'WA', '0', u'CPP', ''), ('2015-07-25 17:15:01', 'https://www.spoj.com/problems/FARIDA/', u'Princess Farida', 'AC', '100', u'CPP', ''), ('2015-09-26 21:01:26', 'https://www.spoj.com/problems/MUL/', u'Fast Multiplication', 'TLE', '0', u'C++', ''), ('2015-09-26 21:04:40', 'https://www.spoj.com/problems/MUL/', u'Fast Multiplication', 'AC', '100', u'PYTHON', ''), ('2015-12-05 08:37:26', 'https://www.spoj.com/problems/PRIME1/', u'Prime Generator', 'WA', '0', u'C', ''), ('2017-05-15 17:07:43', 'https://www.spoj.com/problems/PRIME1/', u'Prime Generator', 'WA', '0', u'C', ''), ('2018-10-02 23:41:30', 'https://www.spoj.com/problems/ONP/', u'Transform the Expression', 'WA', '0', u'CPP', ''), ('2019-05-26 22:58:02', 'https://www.spoj.com/problems/BACTERIA/', u'SPOJ Custom Test', 'OTH', '0', u'PYTHON3', '')],
"HackerEarth": [('2014-06-17 14:50:52', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/333758'), ('2014-06-17 14:55:06', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/333766'), ('2014-06-17 14:56:59', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C', 'https://www.hackerearth.com/submission/333770'), ('2014-06-17 15:38:24', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C', 'https://www.hackerearth.com/submission/333824'), ('2014-06-17 15:53:23', 'https://www.hackerearth.com/practice/data-structures/hash-tables/basics-of-hash-tables/practice-problems/algorithm/mind-palaces-3/', u'Mind Palaces', 'PS', '0', u'C', 'https://www.hackerearth.com/submission/333833'), ('2014-06-17 16:08:55', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/palindromic-numbers-7/', u'Palindromic Numbers', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/333846'), ('2014-10-02 04:57:34', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/789146'), ('2014-10-02 05:00:56', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/789152'), ('2014-10-02 05:20:08', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/789161'), ('2014-10-02 05:40:22', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/789173'), ('2014-10-02 05:40:22', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/789173'), ('2014-10-02 05:40:23', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/789174'), ('2014-10-02 05:43:40', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/789180'), ('2014-10-02 05:43:40', 'https://www.hackerearth.com/problem/algorithm/day-1-if-else-conditionslooping/', u"Bajirao's Rescue Operation", 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/789181'), ('2014-10-02 05:51:40', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/complete-string-4/', u'Complete String', 'TLE', '0', u'C++', 'https://www.hackerearth.com/submission/789184'), ('2014-10-02 06:01:47', 
'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/complete-string-4/', u'Complete String', 'TLE', '0', u'C++', 'https://www.hackerearth.com/submission/789187'), ('2014-10-02 06:07:25', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/complete-string-4/', u'Complete String', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/789191'), ('2015-05-30 21:46:15', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/recursive-sums/', u'Recursive Sums', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1866870'), ('2015-05-30 21:47:45', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/recursive-sums/', u'Recursive Sums', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1866905'), ('2015-05-30 21:52:07', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/recursive-sums/', u'Recursive Sums', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1867017'), ('2015-05-30 21:58:10', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/recursive-sums/', u'Recursive Sums', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/1867183'), ('2015-06-01 21:51:41', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/very-cool-numbers/', u'Very Cool Numbers', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1876428'), ('2015-06-01 22:07:31', 'https://www.hackerearth.com/problem/algorithm/children-love-candies/', u'Children Love Candies', 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1877240'), ('2015-06-01 22:09:05', 'https://www.hackerearth.com/problem/algorithm/children-love-candies/', u'Children Love Candies', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/1877330'), ('2015-06-01 22:18:48', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/very-cool-numbers/', u'Very Cool Numbers', 'PS', '0', u'Python', 'https://www.hackerearth.com/submission/1877835'), ('2015-06-01 22:23:44', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/very-cool-numbers/', u'Very Cool Numbers', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/1878092'), ('2015-06-01 22:33:08', 'https://www.hackerearth.com/problem/algorithm/andrew-and-max/', u'Andrew and Max', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/1878567'), ('2015-06-01 22:55:56', 'https://www.hackerearth.com/problem/algorithm/zeroshark/', u'ZeroShark', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1879759'), ('2015-06-01 23:11:57', 'https://www.hackerearth.com/problem/algorithm/zeroshark/', u'ZeroShark', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1880558'), ('2015-06-01 23:17:34', 'https://www.hackerearth.com/problem/algorithm/zeroshark/', u'ZeroShark', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1880825'), ('2015-06-04 21:02:21', 'https://www.hackerearth.com/practice/algorithms/string-algorithm/basics-of-string-manipulation/practice-problems/algorithm/terrible-chandu/', u'Terrible 
Chandu', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/1894925'), ('2015-06-04 21:02:21', 'https://www.hackerearth.com/practice/algorithms/string-algorithm/basics-of-string-manipulation/practice-problems/algorithm/terrible-chandu/', u'Terrible Chandu', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/1894925'), ('2015-06-04 21:06:29', 'https://www.hackerearth.com/practice/algorithms/greedy/basics-of-greedy-algorithms/practice-problems/algorithm/chandu-and-consecutive-letters/', u'Chandu and Consecutive Letters', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/1895133'), ('2015-06-04 21:10:59', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/prateek-and-his-friends/', u'Prateek and his Friends', 'AC', '100', u'C', 'https://www.hackerearth.com/submission/1895359'), ('2015-06-09 21:03:35', 'https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/chandu-and-his-girlfriend/', u'Chandu and his Girlfriend', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/1919932'), ('2015-06-09 21:07:37', 'https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/chandu-and-his-girlfriend-returns/', u'Chandu and his Girlfriend Returns', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1920040'), ('2015-06-09 21:12:29', 'https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/chandu-and-his-girlfriend-returns/', u'Chandu and his Girlfriend Returns', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1920191'), ('2015-06-09 21:18:14', 'https://www.hackerearth.com/practice/algorithms/sorting/merge-sort/practice-problems/algorithm/chandu-and-his-girlfriend-returns/', u'Chandu and his Girlfriend Returns', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/1920367'), ('2015-06-11 21:05:52', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/discover-the-monk/', u'Discover the Monk', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1930370'), ('2015-06-11 21:09:45', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/discover-the-monk/', u'Discover the Monk', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1930499'), ('2015-06-11 21:15:07', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/discover-the-monk/', u'Discover the Monk', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/1930694'), ('2015-06-11 21:28:20', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'WA', '0', u'C', 'https://www.hackerearth.com/submission/1931189'), ('2015-06-11 21:28:38', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931196'), ('2015-06-11 21:29:06', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931215'), ('2015-06-11 21:30:47', 
'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931281'), ('2015-06-11 21:32:24', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931332'), ('2015-06-11 21:34:35', 'https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/monks-encounter-with-polynomial/', u"Monk's Encounter with Polynomial", 'PS', '0', u'C', 'https://www.hackerearth.com/submission/1931416'), ('2015-07-01 21:36:39', 'https://www.hackerearth.com/practice/algorithms/sorting/insertion-sort/practice-problems/algorithm/the-rise-of-the-weird-things-1/', u'The rise of the weird... things [1]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2037234'), ('2015-07-01 21:39:00', 'https://www.hackerearth.com/practice/algorithms/sorting/insertion-sort/practice-problems/algorithm/the-rise-of-the-weird-things-1/', u'The rise of the weird... things [1]', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2037359'), ('2015-07-01 22:06:20', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/the-savior-3/', u'The savior? [3]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2038727'), ('2015-07-01 22:14:10', 'https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/the-savior-3/', u'The savior? [3]', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2039043'), ('2015-07-01 23:06:28', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/supernatural-squad-2/', u'Supernatural Squad [2]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2040873'), ('2015-07-01 23:06:28', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/supernatural-squad-2/', u'Supernatural Squad [2]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2040873'), ('2015-07-01 23:08:23', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/supernatural-squad-2/', u'Supernatural Squad [2]', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2040928'), ('2015-07-01 23:10:56', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/2-dimensional/practice-problems/algorithm/supernatural-squad-2/', u'Supernatural Squad [2]', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2041005'), ('2015-07-03 18:28:59', 'https://www.hackerearth.com/problem/algorithm/valentine-shopping-4/', u'Valentine Shopping', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2053959'), ('2015-07-03 18:48:11', 'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'PS', '0', u'Python', 'https://www.hackerearth.com/submission/2054041'), ('2015-07-03 18:48:11', 'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'PS', '0', u'Python', 'https://www.hackerearth.com/submission/2054042'), ('2015-07-03 18:51:55', 
'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'PS', '0', u'Python', 'https://www.hackerearth.com/submission/2054062'), ('2015-07-03 18:57:12', 'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/2054105'), ('2015-07-03 18:57:12', 'https://www.hackerearth.com/challenges/hiring/bookmyshowhiringchallenge/algorithm/marut-and-girls/', u'Marut and Girls', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/2054106'), ('2015-07-03 21:37:13', 'https://www.hackerearth.com/problem/algorithm/beta-testing/', u'Beta Testing', 'WA', '0', u'Python', 'https://www.hackerearth.com/submission/2055210'), ('2015-07-03 22:22:51', 'https://www.hackerearth.com/problem/algorithm/beta-testing/', u'Beta Testing', 'AC', '100', u'Python', 'https://www.hackerearth.com/submission/2055901'), ('2015-07-04 12:55:07', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-in-the-real-estate/', u'Monk in the real estate', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2059508'), ('2015-07-06 22:30:59', 'https://www.hackerearth.com/problem/algorithm/beta-testing/', u'Beta Testing', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/2071774'), ('2015-07-06 22:48:05', 'https://www.hackerearth.com/problem/algorithm/beta-testing/', u'Beta Testing', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2071820'), ('2015-07-06 23:04:59', 'https://www.hackerearth.com/problem/algorithm/to-be-changed-choosing-a-project/', u'Side Projects', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2071872'), ('2015-07-06 23:30:34', 'https://www.hackerearth.com/problem/algorithm/to-be-changed-compile-time-fun/', u"It's Compiling!", 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2071940'), ('2015-07-08 23:20:31', 'https://www.hackerearth.com/problem/algorithm/monk-and-the-collision/', u'Monk and the Collision', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082091'), ('2015-07-08 23:21:06', 'https://www.hackerearth.com/problem/algorithm/monk-and-the-collision/', u'Monk and the Collision', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2082114'), ('2015-07-08 23:36:27', 'https://www.hackerearth.com/problem/algorithm/monk-in-the-land-of-pokemons/', u'Monk in the land of Pokemons!', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082452'), ('2015-07-08 23:38:45', 'https://www.hackerearth.com/problem/algorithm/monk-in-the-land-of-pokemons/', u'Monk in the land of Pokemons!', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082465'), ('2015-07-08 23:50:39', 'https://www.hackerearth.com/problem/algorithm/monk-in-the-land-of-pokemons/', u'Monk in the land of Pokemons!', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082564'), ('2015-07-08 23:50:39', 'https://www.hackerearth.com/problem/algorithm/monk-in-the-land-of-pokemons/', u'Monk in the land of Pokemons!', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2082564'), ('2015-07-18 07:29:31', 'https://www.hackerearth.com/problem/algorithm/will-you-be-my-friend-pledge-easy/', u'Will you be my friend?', 'CE', '0', u'Java', 'https://www.hackerearth.com/submission/2144171'), ('2015-07-18 07:54:12', 
'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/intelligent-girl-1/', u'Intelligent Girl ', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2144180'), ('2015-07-23 12:47:19', 'https://www.hackerearth.com/practice/data-structures/trees/heapspriority-queues/practice-problems/algorithm/monk-and-multiplication/', u'Monk and Multiplication', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181397'), ('2015-07-23 12:48:32', 'https://www.hackerearth.com/practice/data-structures/trees/heapspriority-queues/practice-problems/algorithm/monk-and-multiplication/', u'Monk and Multiplication', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2181405'), ('2015-07-23 13:45:20', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181589'), ('2015-07-23 13:52:48', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181611'), ('2015-07-23 14:01:15', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181643'), ('2015-07-23 14:08:45', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181659'), ('2015-07-23 14:12:17', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2181670'), ('2015-07-23 14:16:03', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2181686'), ('2015-07-23 14:17:49', 'https://www.hackerearth.com/challenges/competitive/code-monk-heaps-and-priority-queues/algorithm/monk-and-some-queries/', u'Monk And Some Queries', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2181696'), ('2015-08-15 19:54:58', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-in-the-real-estate/', u'Monk in the real estate', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2326114'), ('2015-08-15 20:05:30', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-at-the-graph-factory/', u'Monk at the Graph Factory', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/2326217'), ('2015-08-15 20:07:06', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-at-the-graph-factory/', u'Monk at the Graph Factory', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/2326232'), ('2015-08-15 20:17:21', 'https://www.hackerearth.com/practice/algorithms/graphs/graph-representation/practice-problems/algorithm/monk-at-the-graph-factory/', u'Monk at the Graph Factory', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2326300'), ('2015-08-15 20:57:56', 
'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/2326601'), ('2015-08-15 21:10:36', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326699'), ('2015-08-15 21:13:03', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326714'), ('2015-08-15 21:15:52', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326727'), ('2015-08-15 21:20:43', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326762'), ('2015-08-15 21:20:43', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326762'), ('2015-08-15 21:27:49', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'RE', '0', u'C++', 'https://www.hackerearth.com/submission/2326799'), ('2015-08-15 21:28:47', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2326811'), ('2015-08-15 21:42:24', 'https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/', u'Kingdom Of Monkeys', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2326907'), ('2015-08-28 01:03:17', 'https://www.hackerearth.com/practice/data-structures/disjoint-data-strutures/basics-of-disjoint-data-structures/practice-problems/algorithm/city-and-flood-1/', u'City and Flood', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/2400169'), ('2015-09-03 19:34:56', 'https://www.hackerearth.com/problem/algorithm/guess-the-triangle/', u'Guess the triangle', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/2449157'), ('2015-12-18 12:28:32', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3031761'), ('2015-12-18 12:33:00', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3031774'), ('2015-12-18 12:46:11', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3031821'), ('2015-12-18 12:54:19', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3031840'), ('2015-12-18 22:25:48', 'https://www.hackerearth.com/problem/algorithm/special-subarray-1/', u'Special Subarray', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3035335'), ('2015-12-18 
22:31:43', 'https://www.hackerearth.com/problem/algorithm/special-subarray-1/', u'Special Subarray', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/3035367'), ('2015-12-20 10:59:00', 'https://www.hackerearth.com/problem/algorithm/prime-probablity-1/', u'Prime Probablity', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/3050348'), ('2016-01-06 23:37:02', 'https://www.hackerearth.com/problem/algorithm/digital-numbers/', u'Digital Numbers', 'WA', '0', u'C', 'https://www.hackerearth.com/submission/3120602'), ('2016-09-14 22:25:52', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/xsquare-and-two-arrays/', u'Xsquare And Two Arrays', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/5167117'), ('2016-09-14 22:26:45', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/xsquare-and-two-arrays/', u'Xsquare And Two Arrays', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/5167122'), ('2016-09-14 22:46:04', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/xsquare-and-two-arrays/', u'Xsquare And Two Arrays', 'WA', '0', u'C++', 'https://www.hackerearth.com/submission/5167266'), ('2016-09-14 22:50:24', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/xsquare-and-two-arrays/', u'Xsquare And Two Arrays', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/5167320'), ('2016-09-29 21:25:56', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/choosing-the-judges-7/', u'Choosing the Judges', 'AC', '100', u'C++', 'https://www.hackerearth.com/submission/5421843'), ('2016-09-29 22:05:06', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/rhezo-and-prime-problems/', u'Rhezo and Prime Problems', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/5422329'), ('2016-09-29 22:16:01', 'https://www.hackerearth.com/practice/algorithms/dynamic-programming/introduction-to-dynamic-programming-1/practice-problems/algorithm/rhezo-and-prime-problems/', u'Rhezo and Prime Problems', 'PS', '0', u'C++', 'https://www.hackerearth.com/submission/5422459')],
"HackerRank": [('2014-06-09 22:53:13', u'https://www.hackerrank.com/challenges/solve-me-first', u'Solve Me First', 'AC', '100', '-', ''), ('2014-06-09 23:03:21', u'https://www.hackerrank.com/challenges/find-point', u'Find the Point', 'AC', '100', '-', ''), ('2014-06-09 23:40:25', u'https://www.hackerrank.com/challenges/lonely-integer', u'Lonely Integer', 'AC', '100', '-', ''), ('2014-06-10 00:08:01', u'https://www.hackerrank.com/challenges/the-love-letter-mystery', u'The Love-Letter Mystery', 'AC', '100', '-', ''), ('2014-07-17 02:38:05', u'https://www.hackerrank.com/challenges/utopian-tree', u'Utopian Tree', 'AC', '100', '-', ''), ('2014-07-17 03:11:48', u'https://www.hackerrank.com/contests/w7/challenges/die-hard-3', u'Die Hard 3', 'AC', '100', '-', ''), ('2014-07-17 03:24:54', u'https://www.hackerrank.com/challenges/runningtime', u'Running Time of Algorithms', 'AC', '100', '-', ''), ('2014-07-17 03:49:56', u'https://www.hackerrank.com/contests/w7/challenges/string-function-calculation', u'String Function Calculation', 'AC', '100', '-', ''), ('2014-07-22 01:29:21', u'https://www.hackerrank.com/challenges/gem-stones', u'Gemstones', 'AC', '100', '-', ''), ('2014-08-08 17:24:20', u'https://www.hackerrank.com/contests/w8/challenges/counter-game', u'Counter game', 'AC', '100', '-', ''), ('2014-09-24 01:29:10', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler052', u'Project Euler #52: Permuted multiples', 'AC', '100', '-', ''), ('2014-09-27 20:48:27', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler001', u'Project Euler #1: Multiples of 3 and 5', 'AC', '100', '-', ''), ('2014-09-27 22:39:27', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler002', u'Project Euler #2: Even Fibonacci numbers', 'AC', '100', '-', ''), ('2014-09-28 00:53:48', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler016', u'Project Euler #16: Power digit sum', 'AC', '100', '-', ''), ('2014-09-28 03:59:31', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler034', u'Project Euler #34: Digit factorials', 'AC', '100', '-', ''), ('2014-10-01 19:47:25', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler042', u'Project Euler #42: Coded triangle numbers', 'AC', '100', '-', ''), ('2014-10-01 20:06:36', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler030', u'Project Euler #30: Digit Nth powers', 'AC', '100', '-', ''), ('2014-10-02 22:39:43', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler048', u'Project Euler #48: Self powers', 'AC', '100', '-', ''), ('2014-10-02 22:55:27', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler020', u'Project Euler #20: Factorial digit sum', 'AC', '100', '-', ''), ('2014-10-04 00:35:02', u'https://www.hackerrank.com/challenges/bigger-is-greater', u'Bigger is Greater', 'AC', '100', '-', ''), ('2014-10-04 05:36:38', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler005', u'Project Euler #5: Smallest multiple', 'AC', '100', '-', ''), ('2014-10-04 05:45:06', u'https://www.hackerrank.com/contests/projecteuler/challenges/euler007', u'Project Euler #7: 10001st prime', 'AC', '100', '-', ''), ('2014-12-08 06:00:42', u'https://www.hackerrank.com/challenges/find-hackerrank', u'Find HackerRank', 'AC', '100', '-', ''), ('2014-12-08 06:08:01', u'https://www.hackerrank.com/challenges/valid-pan-format', u'Valid PAN format', 'AC', '100', '-', ''), ('2014-12-08 06:17:05', u'https://www.hackerrank.com/challenges/hackerrank-tweets', u'HackerRank 
Tweets', 'AC', '100', '-', ''), ('2014-12-08 06:31:09', u'https://www.hackerrank.com/challenges/split-number', u'Split the Phone Numbers', 'AC', '100', '-', ''), ('2015-05-29 07:50:36', u'https://www.hackerrank.com/challenges/select-all-sql', u'Select All', 'AC', '100', '-', ''), ('2015-05-29 07:52:08', u'https://www.hackerrank.com/challenges/select-by-id', u'Select By ID', 'AC', '100', '-', ''), ('2015-05-29 07:53:21', u'https://www.hackerrank.com/challenges/japanese-cities-attributes', u"Japanese Cities' Attributes", 'AC', '100', '-', ''), ('2015-05-29 07:54:43', u'https://www.hackerrank.com/challenges/japanese-cities-name', u"Japanese Cities' Names", 'AC', '100', '-', ''), ('2015-05-29 07:57:45', u'https://www.hackerrank.com/challenges/average-population', u'Average Population', 'AC', '100', '-', ''), ('2015-05-29 07:59:00', u'https://www.hackerrank.com/challenges/japan-population', u'Japan Population', 'AC', '100', '-', ''), ('2015-05-30 09:47:34', u'https://www.hackerrank.com/challenges/py-hello-world', u'Say "Hello, World!" With Python', 'AC', '100', '-', ''), ('2015-05-30 09:48:41', u'https://www.hackerrank.com/challenges/python-raw-input', u'Reading Raw Input', 'AC', '100', '-', ''), ('2015-05-30 09:50:03', u'https://www.hackerrank.com/challenges/python-arithmetic-operators', u'Arithmetic Operators', 'AC', '100', '-', ''), ('2015-05-30 09:53:02', u'https://www.hackerrank.com/challenges/python-division', u'Python: Division', 'AC', '100', '-', ''), ('2015-05-30 09:55:01', u'https://www.hackerrank.com/challenges/python-mod-divmod', u'Mod Divmod', 'AC', '100', '-', ''), ('2015-05-30 22:23:33', u'https://www.hackerrank.com/contests/code-cpp-may-2015/challenges/redundant-or-not', u'Redundant or Not?', 'AC', '100', '-', ''), ('2015-05-30 22:31:57', u'https://www.hackerrank.com/contests/code-cpp-may-2015/challenges/string-transformations', u'String Transformations', 'AC', '100', '-', ''), ('2015-05-31 08:52:13', u'https://www.hackerrank.com/contests/code-cpp-may-2015/challenges/linked-list-to-binary', u'Linked List to Binary', 'AC', '100', '-', ''), ('2015-05-31 09:20:17', u'https://www.hackerrank.com/contests/code-cpp-may-2015/challenges/polygon-inheritance', u'Polygon Inheritance', 'AC', '100', '-', ''), ('2015-06-01 06:19:47', u'https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list', u'Print the Elements of a Linked List', 'AC', '100', '-', ''), ('2015-06-01 06:22:43', u'https://www.hackerrank.com/challenges/insert-a-node-at-the-tail-of-a-linked-list', u'Insert a Node at the Tail of a Linked List', 'AC', '100', '-', ''), ('2015-06-01 06:24:34', u'https://www.hackerrank.com/challenges/insert-a-node-at-the-head-of-a-linked-list', u'Insert a node at the head of a linked list', 'AC', '100', '-', ''), ('2015-06-01 06:45:45', u'https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list', u'Insert a node at a specific position in a linked list', 'AC', '100', '-', ''), ('2015-06-01 06:49:29', u'https://www.hackerrank.com/challenges/delete-a-node-from-a-linked-list', u'Delete a Node', 'AC', '100', '-', ''), ('2015-06-01 06:51:09', u'https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list-in-reverse', u'Print in Reverse', 'AC', '100', '-', ''), ('2015-06-01 06:56:24', u'https://www.hackerrank.com/challenges/reverse-a-linked-list', u'Reverse a linked list', 'AC', '100', '-', ''), ('2015-06-01 06:59:39', u'https://www.hackerrank.com/challenges/compare-two-linked-lists', u'Compare two linked lists', 'AC', '100', '-', ''), 
('2015-06-01 07:07:07', u'https://www.hackerrank.com/challenges/merge-two-sorted-linked-lists', u'Merge two sorted linked lists', 'AC', '100', '-', ''), ('2015-06-01 07:12:02', u'https://www.hackerrank.com/challenges/get-the-value-of-the-node-at-a-specific-position-from-the-tail', u'Get Node Value', 'AC', '100', '-', ''), ('2015-06-01 07:18:57', u'https://www.hackerrank.com/challenges/delete-duplicate-value-nodes-from-a-sorted-linked-list', u'Delete duplicate-value nodes from a sorted linked list', 'AC', '100', '-', ''), ('2015-06-01 07:25:20', u'https://www.hackerrank.com/challenges/detect-whether-a-linked-list-contains-a-cycle', u'Cycle Detection', 'AC', '100', '-', ''), ('2015-06-01 07:39:03', u'https://www.hackerrank.com/challenges/find-the-merge-point-of-two-joined-linked-lists', u'Find Merge Point of Two Lists', 'AC', '100', '-', ''), ('2015-06-01 07:55:58', u'https://www.hackerrank.com/challenges/insert-a-node-into-a-sorted-doubly-linked-list', u'Inserting a Node Into a Sorted Doubly Linked List', 'AC', '100', '-', ''), ('2015-06-01 08:05:55', u'https://www.hackerrank.com/challenges/reverse-a-doubly-linked-list', u'Reverse a doubly linked list', 'AC', '100', '-', ''), ('2015-06-01 08:07:24', u'https://www.hackerrank.com/challenges/tree-preorder-traversal', u'Tree: Preorder Traversal', 'AC', '100', '-', ''), ('2015-06-01 08:09:21', u'https://www.hackerrank.com/challenges/tree-postorder-traversal', u'Tree: Postorder Traversal', 'AC', '100', '-', ''), ('2015-06-01 08:10:09', u'https://www.hackerrank.com/challenges/tree-inorder-traversal', u'Tree: Inorder Traversal', 'AC', '100', '-', ''), ('2015-06-03 03:08:32', u'https://www.hackerrank.com/challenges/connecting-towns', u'Connecting Towns', 'AC', '100', '-', ''), ('2015-06-03 03:13:31', u'https://www.hackerrank.com/challenges/handshake', u'Handshake', 'AC', '100', '-', ''), ('2015-06-03 03:17:17', u'https://www.hackerrank.com/challenges/correctness-invariant', u'Correctness and the Loop Invariant', 'AC', '100', '-', ''), ('2015-06-03 03:22:14', u'https://www.hackerrank.com/challenges/tutorial-intro', u'Intro to Tutorial Challenges', 'AC', '100', '-', ''), ('2015-06-10 11:27:13', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/text-processing-in-linux-the-grep-command-4', u"'Grep' - A", 'AC', '100', '-', ''), ('2015-06-10 11:32:34', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/paste-1', u'Paste - 1', 'AC', '100', '-', ''), ('2015-06-10 11:52:57', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/awk-1', u"'Awk' - 1", 'AC', '100', '-', ''), ('2015-06-10 11:56:28', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/awk-2', u"'Awk' - 2", 'AC', '100', '-', ''), ('2015-06-10 12:10:10', u'https://www.hackerrank.com/contests/the-linux-bash-fest/challenges/text-processing-in-linux-the-grep-command-5', u"'Grep' - B", 'AC', '100', '-', ''), ('2015-06-27 21:35:13', u'https://www.hackerrank.com/contests/segfault/challenges/three-loops', u'Three Loops', 'AC', '100', '-', ''), ('2015-06-27 22:25:24', u'https://www.hackerrank.com/contests/segfault/challenges/count-the-divisors', u'Count the Divisors', 'AC', '100', '-', ''), ('2015-08-01 21:58:15', u'https://www.hackerrank.com/contests/countercode/challenges/imba', u'Imba', 'AC', '100', '-', ''), ('2015-08-01 22:46:04', u'https://www.hackerrank.com/contests/countercode/challenges/campers', u'Campers', 'AC', '100', '-', ''), ('2015-10-30 02:51:27', u'https://www.hackerrank.com/contests/codestorm/challenges/emmas-notebook', 
u"Emma's Notebook", 'AC', '100', '-', ''), ('2016-08-06 21:37:21', u'https://www.hackerrank.com/contests/morgan-stanley-2016/challenges/jesse-and-profit', u'Jesse and Profit', 'AC', '100', '-', ''), ('2016-08-24 06:14:46', u'https://www.hackerrank.com/challenges/30-hello-world', u'Day 0: Hello, World.', 'AC', '100', '-', ''), ('2017-11-03 00:51:08', u'https://www.hackerrank.com/challenges/30-data-types', u'Day 1: Data Types', 'AC', '100', '-', '')],
"UVa": [('2016-12-11 20:21:23', 'https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&problem=38', 'Ecological Bin Packing', 'WA', '0', 'C++', ''), ('2016-12-14 05:23:40', 'https://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&problem=38', 'Ecological Bin Packing', 'CE', '0', 'C++', '')],
"Timus": [('2018-07-01 01:41:04', 'http://acm.timus.ru/problem.aspx?space=1&num=1285&locale=en', u'1285. Thread in a Hyperspace', 'CE', '0', u'G++ 7.1', '')],
"AtCoder": [('2020-05-16 19:04:34', u'https://atcoder.jp/contests/abc135/tasks/abc135_d', 'D. Digits Parade', u'WA', 0.0, u'Python2 (2.7.6)', u'https://atcoder.jp/contests/abc135/submissions/13262993'), ('2020-05-18 12:04:47', u'https://atcoder.jp/contests/abc135/tasks/abc135_d', 'D. Digits Parade', u'WA', 0.0, u'Python2 (2.7.6)', u'https://atcoder.jp/contests/abc135/submissions/13368979'), ('2020-05-18 12:58:01', u'https://atcoder.jp/contests/agc010/tasks/agc010_a', 'A. Addition', u'RE', 0.0, u'Python2 (2.7.6)', u'https://atcoder.jp/contests/agc010/submissions/13370205')]
}
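# expected_result (built above) maps each judge's name to the submissions we
# expect Profile.get_submissions() to return, as tuples of
# (timestamp, problem URL, problem name, status ('AC', 'WA', ...), score, language, submission URL).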
uva_problem_dict = utilities.get_problem_mappings(uvadb, uvadb.problem,
["problem_id",
"title"])
atcoder_problem_dict = utilities.get_problem_mappings(db,
db.atcoder_problems,
["problem_identifier",
"name"])
last_retrieved = time.strptime(str(current.INITIAL_DATE), "%Y-%m-%d %H:%M:%S")
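# For every configured handle, pull all submissions made after INITIAL_DATE and
# compare the sorted result against the expectations above. UVa and AtCoder
# additionally take the problem-id -> title mappings built above.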
for site in handles:
Profile = getattr(sites, site.lower()).Profile
if Profile.is_website_down():
continue
site_method = Profile(handles[site]).get_submissions
if site == "UVa":
submissions = site_method(last_retrieved, uva_problem_dict, False)
elif site == "AtCoder":
submissions = site_method(last_retrieved, atcoder_problem_dict, False)
else:
submissions = site_method(last_retrieved, False)
submissions = sorted(submissions)
if submissions != expected_result[site]:
raise RuntimeError("Submissions list does not match for " + site)
# ------------------------------------------------------------------------------
def test_retrieval(retrieval_object, method_name):
error_message = ""
for i in xrange(1):
try:
getattr(retrieval_object, method_name)()
return "Success"
except Exception as e:
error_message = method_name + " " + e.message
time.sleep(2)
return error_message
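# Run each retrieval check through the wrapper above; it makes a single attempt
# per check (xrange(1)), sleeping 2 seconds after a failure. Any failures are
# collected below and reported in one Pushover alert.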
rt = RetrievalTest()
pushover_message = ""
for method_name in [
"test_tag_retrieval",
"test_editorial_retrieval",
"test_invalid_handle",
"test_download_submission",
"test_rating_graph",
"test_submissions",
"test_problem_setters_retrieval"
]:
res = test_retrieval(rt, method_name)
if res != "Success":
pushover_message += res + "\n"
if pushover_message != "":
print "pushover_message", pushover_message
response = requests.post("https://api.pushover.net/1/messages.json",
data={"token": current.pushover_api_token,
"user": current.pushover_user_token,
"message": pushover_message.strip(),
"title": "Extras retrieval failure",
"priority": 1},
verify=False).json()
from reportlab.platypus import Paragraph, Spacer, Table, TableStyle, PageBreak
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib import colors
from reportlab.lib.units import mm
from copy import deepcopy
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_JUSTIFY
from directions.models import Napravleniya
from appconf.manager import SettingManager
import os.path
from laboratory.settings import FONTS_FOLDER
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from .flowable import FrameDataUniversal
from directions.models import Issledovaniya
from ..prepare_data import fields_result_only_title_fields
import simplejson as json
import datetime
from dateutil.relativedelta import relativedelta
from hospitals.models import Hospitals
pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifReg"
style.fontSize = 9
style.alignment = TA_JUSTIFY
style.leading = 3 * mm
styleCentre = deepcopy(style)
styleCentre.alignment = TA_CENTER
styleBold = deepcopy(style)
styleBold.fontName = "PTAstraSerifBold"
styleCentreBold = deepcopy(styleBold)
styleCentreBold.alignment = TA_CENTER
hospital_name = SettingManager.get("org_title")
hospital_address = SettingManager.get("org_address")
hospital_kod_ogrn = SettingManager.get("org_ogrn")
styleT = deepcopy(style)
styleT.alignment = TA_LEFT
styleT.fontSize = 9
styleT.leading = 3 * mm
styleOrg = deepcopy(styleT)
styleOrg.fontSize = 8
styleColontitul = deepcopy(styleT)
styleColontitul.fontSize = 7
styleColontitul.leading = 2 * mm
styleColontitulBold = deepcopy(styleColontitul)
styleColontitulBold.fontName = "PTAstraSerifBold"
styleTBold = deepcopy(styleT)
styleTBold.fontName = "PTAstraSerifBold"
styleOrgBold = deepcopy(styleOrg)
styleOrgBold.fontName = "PTAstraSerifBold"
styleOrgBold.leading = 2 * mm
op_bold_tag = '<font face="PTAstraSerifBold">'
cl_bold_tag = '</font>'
space_symbol = ' '
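# Form 106/у -- the medical death certificate. form_01() gathers the titled
# result fields of the study into a plain dict, fills in defaults for anything
# left empty, and assembles the document as ReportLab flowables: counterfoil,
# tear-off line, front side of the certificate, page break, and the reverse
# sides (recipient details and the causes of death).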
def form_01(direction: Napravleniya, iss: Issledovaniya, fwb, doc, leftnone, user=None):
# Medical death certificate (form 106/у)
data_individual = direction.client.get_data_individual()
data = {}
title_fields = [
"Серия",
"Префикс номера",
"Номер",
"Дата выдачи",
"Вид медицинского свидетельства о смерти",
"Серия предшествующего",
"Номер предшествующего",
"Дата выдачи предшествующего",
"Дата рождения",
"Дата смерти",
"Время смерти",
"Место постоянного жительства (регистрации)",
"Вид места жительства",
"Место смерти",
"Вид места смерти",
"Типы мест наступления смерти",
"Новорожденый",
"Доношенность новорожденного",
"Место рождения",
"Масса тела ребёнка при рождении",
"По счету был ребенок",
"Дата рождения матери",
"Возраст матери",
"ФИО матери",
"Семейное положение",
"Образование",
"Социальная группа",
"Полис ОМС",
"СНИЛС",
"Тип ДУЛ",
"ДУЛ",
"Род причины смерти",
"Смерть от внешних причин",
"Дата смерти от внешних причин",
"Время смерти от внешних причин",
"Дата события",
"Время события",
"Место и обстоятельства",
"Тип медицинского работника",
"Основания для определения причины смерти",
"а) Болезнь или состояние, непосредственно приведшее к смерти",
"б) патологическое состояние, которое привело к возникновению вышеуказанной причины:",
"в) первоначальная причина смерти:",
"г) внешняя причина при травмах и отравлениях:",
"II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней",
"ДТП",
"Связь смерти с ДТП",
"Беременность",
"Связь смерти с беременностью",
"ФИО (получатель)",
"Документ (получатель)",
"Серия (получатель)",
"Номер (получатель)",
"Кем и когда выдан (получатель)",
"СНИЛС (получатель)",
"Заполнил",
"Проверил",
"Главный врач",
"Должность",
"Время известно",
]
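# Pull only the fields named above out of the study result and flatten them
# into data[title] = value for the layout code below.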
result = fields_result_only_title_fields(iss, title_fields, False)
for i in result:
data[i["title"]] = i["value"]
data['fio'] = data_individual["fio"]
data['sex'] = data_individual["sex"]
ends = datetime.datetime.strptime(data["Дата рождения"], '%d.%m.%Y')
start = datetime.datetime.strptime(data["Дата смерти"], '%d.%m.%Y')
diff = relativedelta(start, ends)
if diff.years == 0:
data['число месяцев жизни'] = diff.months
data['число дней жизни'] = diff.days
else:
data['число месяцев жизни'] = ""
data['число дней жизни'] = ""
if not data.get("Место рождения", None):
data[
"Место рождения"] = '{"details": {"region": "", "region_type": "", "area": "", "area_type": "", "city": "", "city_type": "", "settlement": "", "settlement_type": "", ' \
'"street": "", "street_type": "", "house": "", "house_type": "", "flat": "", "flat_type": "", "postal_code": "", "custom": false}}'
if not data.get("Место смерти", None):
data[
"Место смерти"] = '{"details": {"region": "", "region_type": "", "area": "", "area_type": "", "city": "", "city_type": "", "settlement": "", "settlement_type": "", ' \
'"street": "", "street_type": "", "house": "", "house_type": "", "flat": "", "flat_type": "", "postal_code": "", "custom": false}}'
if not data.get("Доношенность новорожденного", None):
data["Доношенность новорожденного"] = '{"code": "", "title": ""}'
if not data.get("Связь смерти с ДТП", None):
data["Связь смерти с ДТП"] = '{"code": "", "title": ""}'
if not data.get("Связь смерти с беременностью", None):
data["Связь смерти с беременностью"] = '{"code": "", "title": ""}'
if not data.get("Тип медицинского работника", None):
data["Тип медицинского работника"] = '{"code": "", "title": ""}'
if not data.get("Основания для определения причины смерти", None):
data["Основания для определения причины смерти"] = '{"code": "", "title": ""}'
if not data.get("Род причины смерти", None):
data["Род причины смерти"] = '{"code": "", "title": ""}'
if not data.get("Масса тела ребёнка при рождении", None):
data["Масса тела ребёнка при рождении"] = ""
if not data.get("По счету был ребенок", None):
data["По счету был ребенок"] = ""
if not data.get("Дата рождения матери", None):
data["Дата рождения матери"] = ""
if not data.get("Возраст матери", None):
data["Возраст матери"] = ""
if not data.get("ФИО (получатель)", None):
data["ФИО (получатель)"] = ""
if not data.get("Документ (получатель)", None):
data["Документ (получатель)"] = ""
if not data.get("Серия (получатель)", None):
data["Серия (получатель)"] = ""
if not data.get("Номер (получатель)", None):
data["Номер (получатель)"] = ""
if not data.get("Кем и когда выдан (получатель)", None):
data["Кем и когда выдан (получатель)"] = ""
if not data.get("СНИЛС (получатель)", None):
data["СНИЛС (получатель)"] = ""
if not data.get("Заполнил", None):
data["Заполнил"] = iss.doc_confirmation.get_full_fio() if iss.doc_confirmation else ""
if not data.get("Должность", None):
data["Должность"] = iss.doc_position if iss.doc_confirmation else ""
if not data.get("Проверил", None):
data["Проверил"] = ""
if not data.get("Главный врач", None):
data["Главный врач"] = ""
if not data.get("ФИО матери"):
data["ФИО матери"] = '{"columns":{"titles":["Фамилия","Имя","Отчество"], "rows":[["иванова","Марья","Олеговна"]]}'
mother_data = json.loads(data["ФИО матери"])
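# "ФИО матери" is stored as a table-like JSON value
# ({"columns": {"titles": [...], "rows": [[family, name, patronymic]]}}),
# so the mother's full name is assembled from its single row.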
data["mother_fio"] = f"{mother_data["rows"][0][0]} {mother_data["rows"][0][1]} {mother_data["rows"][0][2]}"
data["Фамилия матери"] = ""
data["Имя матери"] = ""
data["Отчество матери"] = ""
if data["Новорожденый"] in ["от 168 час. до 1 года", "от 168 час. до 1 месяца"]:
data["Фамилия матери"] = mother_data['rows'][0][0]
data["Имя матери"] = mother_data['rows'][0][1]
data["Отчество матери"] = mother_data['rows'][0][2]
hospital_obj: Hospitals = user.doctorprofile.get_hospital()
data['org'] = {"full_title": hospital_obj.title, "org_address": hospital_obj.address, "org_license": hospital_obj.license_data,
"org_okpo": hospital_obj.okpo}
data["а"] = json.loads(data["а) Болезнь или состояние, непосредственно приведшее к смерти"])
data["б"] = json.loads(data["б) патологическое состояние, которое привело к возникновению вышеуказанной причины:"])
data["в"] = json.loads(data["в) первоначальная причина смерти:"])
data["г"] = json.loads(data["г) внешняя причина при травмах и отравлениях:"])
data["ii"] = json.loads(data["II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней"])
template = add_template(iss, direction, data, 5 * mm)
fwb.extend(template)
template = add_line_split(iss, direction, 4 * mm)
fwb.extend(template)
template = death_data(iss, direction, data, 0 * mm)
fwb.extend(template)
fwb.append(PageBreak())
template = second_page_add_template(iss, direction, data, 0 * mm)
fwb.extend(template)
template = add_line_split(iss, direction, -1 * mm)
fwb.extend(template)
template = death_data2(iss, direction, data, -5 * mm)
fwb.extend(template)
return fwb
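# Each helper below builds one block of the form and wraps it in a
# FrameDataUniversal flowable at the requested vertical offset: add_template()
# -- the counterfoil, add_line_split() -- the tear-off line, death_data() --
# the front side of the certificate, second_page_add_template() -- the
# recipient section, and death_data2() -- the circumstances of death and the
# certifying physician.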
def add_template(iss: Issledovaniya, direction, fields, offset=0):
# Medical death certificate (form 106/у)
text = []
text = title_data("КОРЕШОК МЕДИЦИНСКОГО СВИДЕТЕЛЬСТВА О СМЕРТИ", "К УЧЕТНОЙ ФОРМЕ № 106/У", text, fields.get("Серия", ""), fields.get("Номер", ""), fields.get("Дата выдачи", ""),
fields.get("Вид медицинского свидетельства о смерти", ""), fields)
text.append(Spacer(1, 1.7 * mm))
text = fio_tbl(text, "1. Фамилия, имя, отчество (при наличии) умершего(ей):", fields.get('fio',''))
# Sex
text.append(Spacer(1, 0.3 * mm))
text = sex_tbl(text, fields.get('sex',''))
# Date of birth
text = born_tbl(text, fields.get('Дата рождения', ''))
text.append(Spacer(1, 0.3 * mm))
# Date of death
text = death_tbl(text, "4. Дата смерти:", fields.get('Дата смерти', '-'), fields.get('Время смерти', '-'))
text = address_tbl(text, "5. Регистрация по месту жительства (пребывания) умершего(ей):", fields.get("Место постоянного жительства (регистрации)", ""))
# Where the death occurred
text = where_death_start_tbl(text, fields.get("Типы мест наступления смерти"), "6")
text.append(Spacer(1, 0.2 * mm))
text.append(Paragraph('Для детей, умерших в возрасте до 1 года:', styleBold))
text.append(Spacer(1, 0.5 * mm))
opinion = gen_opinion(['7. Дата рождения', 'число', fields['Дата рождения'].split('.')[0],
', месяц', fields['Дата рождения'].split('.')[1],
', год', fields['Дата рождения'].split('.')[2],
', число месяцев', fields["число месяцев жизни"],
', число дней', fields["число дней жизни"], 'жизни'])
col_width = (29 * mm, 17 * mm, 8 * mm, 15 * mm, 8 * mm, 10 * mm, 12 * mm, 24 * mm, 8 * mm, 20 * mm, 8 * mm, 15 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), 0 * mm),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
('LINEBELOW', (4, 0), (4, 0), 0.75, colors.black),
('LINEBELOW', (6, 0), (6, 0), 0.75, colors.black),
('LINEBELOW', (8, 0), (8, 0), 0.75, colors.black),
('LINEBELOW', (10, 0), (10, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
text = address_tbl(text, "8. Место рождения", fields["Место рождения"])
text = fio_tbl(text, "9. Фамилия, имя, отчество (при наличии) матери:", fields["mother_fio"])
obj = []
obj.append(FrameDataUniversal(0 * mm, offset, 190 * mm, 95 * mm, text=text))
return obj
def add_line_split(iss: Issledovaniya, direction, offset=0):
# Tear-off line
text = []
text = line_split(text)
obj = [(FrameDataUniversal(0 * mm, offset, 190 * mm, 5 * mm, text=text))]
return obj
def death_data(iss: Issledovaniya, direction, fields, offset=0):
# Front side of the death certificate
text = []
text = title_med_organization(text, fields['org'])
text = title_data("МЕДИЦИНСКОЕ СВИДЕТЕЛЬСТВО О СМЕРТИ", "", text, fields["Серия"], fields.get("Номер", ""), fields["Дата выдачи"], fields["Вид медицинского свидетельства о смерти"],
fields)
text.append(Spacer(1, 1.7 * mm))
text = fio_tbl(text, "1. Фамилия, имя, отчество (при наличии) умершего(ей):", fields["fio"])
# Sex
text.append(Spacer(1, 0.3 * mm))
text = sex_tbl(text, fields['sex'])
# Date of birth
text = born_tbl(text, fields['Дата рождения'])
# print(fields["Тип ДУЛ"])
dul = json.loads(fields["ДУЛ"])
text = patient_passport(text, {"type": fields["Тип ДУЛ"], "serial": dul['rows'][0][0], "number": dul['rows'][0][1]})
text = who_issue_passport(text, {"who_issue": dul['rows'][0][2], "date_issue": dul['rows'][0][3]})
text = patient_snils(text, fields["СНИЛС"] or "")
text = patient_polis(text, fields["Полис ОМС"] or "")
text = death_tbl(text, "7. Дата смерти:", fields.get('Дата смерти', '-'), fields.get('Время смерти', '-'))
text = address_tbl(text, "8. Регистрация по месту жительства (пребывания) умершего(ей):", fields["Место постоянного жительства (регистрации)"])
text = type_city(text, "9. Местность:", fields["Вид места жительства"])
text = address_tbl(text, "10. Место смерти:", fields["Место смерти"])
text = type_city(text, "11. Местность: ", fields["Вид места смерти"])
text = where_death_start_tbl(text, fields["Типы мест наступления смерти"], "12")
text = child_death_befor_month(text, fields["Доношенность новорожденного"])
text = child_death_befor_year(text, {"weight": fields["Масса тела ребёнка при рождении"],
"child_count": fields["По счету был ребенок"],
"mother_born": fields["Дата рождения матери"],
"mother_age": fields["Возраст матери"],
"mother_family": fields["Фамилия матери"], "mother_name": fields["Имя матери"],
"mother_patronimyc": fields["Отчество матери"]})
text = family_status(text, fields["Семейное положение"])
text = education(text, fields["Образование"])
text = work_position(text, fields["Социальная группа"])
text = bottom_colontitul(text, "* В случае смерти детей, возраст которых указан в пунктах 13 - 14, пункты 15 - 17 заполняются в отношении их матерей.")
obj = []
obj.append(FrameDataUniversal(0 * mm, offset, 190 * mm, 178 * mm, text=text))
return obj
def second_page_add_template(iss: Issledovaniya, direction, fields, offset=0):
text = []
text = back_size(text)
text = why_death(text, fields, '10', '11', '12', '13')
text = fio_tbl(text, "14. Фамилия, имя, отчество (при наличии) получателя", fields["ФИО (получатель)"])
text.append(Paragraph("Документ, удостоверяющий личность получателя (серия, номер, кем выдан)", styleT))
text = destination_person_passport(text, f"{fields['Документ (получатель)']} {fields['Серия (получатель)']} {fields['Номер (получатель)']} {fields['Кем и когда выдан (получатель)']}")
text = destination_person_snils(text, f"{fields['СНИЛС (получатель)']}")
text.append(Spacer(1, 2 * mm))
text.append(Paragraph(f"«___» ___________ 20 ___ г.{space_symbol * 30} Подпись получателя _________________________", styleT))
obj = []
obj.append(FrameDataUniversal(0 * mm, offset, 190 * mm, 95 * mm, text=text))
return obj
def death_data2(iss: Issledovaniya, direction, fields, offset=0):
text = []
text = death_happaned(text, fields["Род причины смерти"])
date, month, year, hour, min = "____", "____", "_________", "____", "____"
date_event_data = fields.get("Дата события", None)
time_event_data = fields.get("Время события", None)
if date_event_data:
date_event_data = date_event_data.split(".")
date = f"<u>{space_symbol * 3}{date_event_data[0]}{space_symbol * 3}</u>"
month = f"<u>{space_symbol * 3}{date_event_data[1]}{space_symbol * 3}</u>"
year = f"<u>{space_symbol * 3}{date_event_data[2]}{space_symbol * 3}</u>"
if time_event_data:
time_event_data = time_event_data.split(":")
hour = f"<u>{space_symbol * 3}{time_event_data[0]}{space_symbol * 3}</u>"
min = f"<u>{space_symbol * 3}{time_event_data[1]}{space_symbol * 3}</u>"
text.append(Paragraph(
f"19. В случае смерти от несчастного случая, убийства, самоубийства, от военных и террористических действий, при неустановленном роде смерти - указать дату травмы (отравления): "
f"число {date} месяц {month} год {year} час. {hour} мин. {min} , а также место и обстоятельства, при которых произошла травма (отравление)",
styleT))
unfortunate_and_other_info = "________________________________________________________________________________________________________________________"
place_and_reasons = fields.get("Место и обстоятельства", None)
if place_and_reasons:
unfortunate_and_other_info = f"<u>{space_symbol * 2}{place_and_reasons} {space_symbol * 2}</u>"
text.append(Paragraph(f"{unfortunate_and_other_info}", styleT))
text = who_set_death(text, fields["Тип медицинского работника"])
text = doctor_fio(text, fields, iss)
text.append(Spacer(1, 1 * mm))
text = why_death(text, fields, "22", "23", "24", "25")
text.append(Spacer(1, 2 * mm))
text.append(
Paragraph("<u>Руководитель (иное уполномоченное лицо **) медицинской организации</u>, индивидуальный предприниматель, осуществляющий медицинскую деятельность (подчеркнуть)", styleT))
text.append(Spacer(1, 2 * mm))
text = hospital_manager_stamp(text, fields["Главный врач"])
text.append(Spacer(1, 2 * mm))
text.append(Paragraph("26 Свидетельство проверено ответственным за правильность заполнения медицинских свидетельств.", styleT))
text = check_person_data(text, fields["Проверил"])
text = bottom_colontitul(text, '** В случае, установленном частью 10 статьи 9 Федерального закона от 5 июня 2012 г. № 50-ФЗ "О регулировании деятельности российских граждан и '
'российских юридических лиц в Антарктике" (Собрание законодательства Российской Федерации, 2012, № 24, ст. 3067). ')
obj = []
obj.append(FrameDataUniversal(0 * mm, offset, 190 * mm, 168 * mm, text=text))
return obj
# Common helper functions
def title_data(title_name, title_form, text, serial, number, date_issue, type_document, data_fields):
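# Certificate heading: document title, "СЕРИЯ ... № ..." with an optional number prefix, the issue date
# and the "(окончательного, предварительного, ...)" line; the variant selected by the JSON-encoded
# type_document code is bolded and underlined, and a previously issued certificate is referenced if present.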
text.append(Paragraph(f"{title_name}", styleCentreBold))
text.append(Spacer(1, 0.1 * mm))
text.append(Paragraph(f"{title_form}", styleCentreBold))
text.append(Spacer(1, 0.2 * mm))
prefix = data_fields.get("Префикс номера", "")
text.append(Paragraph(f"СЕРИЯ {serial} № {prefix}{number}", styleCentreBold))
text.append(Spacer(1, 0.1 * mm))
text.append(Paragraph(f"Дата выдачи {date_issue}", styleCentreBold))
final, preparatory, instead_preparatory, instead_final = "окончательного", "предварительного", "взамен предварительного", "взамен окончательного"
if title_name == "МЕДИЦИНСКОЕ СВИДЕТЕЛЬСТВО О СМЕРТИ":
final, preparatory = "окончательное", "предварительное"
type_death_document = json.loads(type_document)
if type_death_document["code"] == '4':
instead_final = f"<u>{op_bold_tag}{instead_final}{cl_bold_tag}</u>"
elif type_death_document["code"] == '3':
instead_preparatory = f"<u>{op_bold_tag}{instead_preparatory}{cl_bold_tag}</u>"
elif type_death_document["code"] == '1':
final = f"{op_bold_tag}<u>{final}</u>{cl_bold_tag}"
elif type_death_document["code"] == '2':
preparatory = f"<u>{op_bold_tag}{preparatory}{cl_bold_tag}</u>"
text.append(Paragraph(f"({final}, {preparatory}, {instead_preparatory}, {instead_final}) (подчеркнуть)", styleCentre))
if data_fields.get("Серия предшествующего", None):
text.append(Paragraph("ранее выданное свидетельство", styleCentre))
text.append(Paragraph(f"серия {data_fields['Серия предшествующего']} No {data_fields['Номер предшествующего']} от {data_fields['Дата выдачи предшествующего']} г.", styleCentre))
return text
def gen_opinion(data):
opinion = [[Paragraph(f"{k}", styleT) for k in data]]
return opinion
def gen_opinion_diag(data):
opinion = [[Paragraph(f"{k}", styleOrgBold) for k in data]]
return opinion
def gen_table(opinion, col_width, tbl_style, row_height=None):
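# Thin wrapper over reportlab Table: left-aligned, fixed column widths, optional fixed row height,
# with the style commands from tbl_style applied.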
tbl = Table(opinion, colWidths=col_width, rowHeights=row_height, hAlign='LEFT', )
tbl.setStyle(TableStyle(tbl_style))
return tbl
def fio_tbl(text, type, fio):
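# Single row "label + full name" with the name underlined.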
opinion = gen_opinion([type, fio])
col_width = (80 * mm, 110 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def sex_tbl(text, sex):
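# Item 2 (sex): bolds and underlines either "мужской" or "женский" depending on the "м"/"ж" value
# and draws the numbered check boxes next to them.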
if sex == "м":
sex_m = f'{op_bold_tag}<u>мужской</u>{cl_bold_tag}'
else:
sex_m = ' мужской'
if sex == "ж":
sex_w = f'{op_bold_tag}<u>женский</u>{cl_bold_tag}'
else:
sex_w = ', женский'
opinion = gen_opinion(['2.Пол:', sex_m, '1', sex_w, '2'])
col_width = (11 * mm, 17 * mm, 6 * mm, 19 * mm, 6 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (-1, -1), (-1, -1), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def born_tbl(text, born_data):
# Date of birth
born = born_data.split('.')
born_day = born[0]
born_month = born[1]
born_year = born[2]
opinion = gen_opinion(['3.Дата рождения:', 'число', born_day, 'месяц', born_month, 'год', born_year])
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LEFTPADDING', (0, 1), (0, 1), 0 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
('LINEBELOW', (4, 0), (4, 0), 0.75, colors.black),
('LINEBELOW', (6, 0), (6, 0), 0.75, colors.black),
('LINEBELOW', (2, 1), (2, 1), 0.75, colors.black),
('LINEBELOW', (4, 1), (4, 1), 0.75, colors.black),
('LINEBELOW', (6, 1), (6, 1), 0.75, colors.black),
('LINEBELOW', (8, 1), (8, 1), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (28 * mm, 14 * mm, 8 * mm, 14 * mm, 8 * mm, 10 * mm, 12 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def death_tbl(text, number, death_data, death_time):
# Date of death
death_data = death_data.split('.')
death_day = death_data[0]
death_month = death_data[1]
death_year = death_data[2]
death_hour, death_min = "", ""
if death_time:
death_time = death_time.split(":")
death_hour = death_time[0] if len(death_time) >= 1 else " "
death_min = death_time[1] if len(death_time) >= 2 else " "
opinion = gen_opinion([number, 'число', death_day, 'месяц', death_month, 'год', death_year, 'час.', death_hour, 'мин.', death_min])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LEFTPADDING', (0, 1), (0, 1), 0 * mm),
('TOPPADDING', (0, 0), (-1, -1), -1 * mm),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
('LINEBELOW', (4, 0), (4, 0), 0.75, colors.black),
('LINEBELOW', (6, 0), (6, 0), 0.75, colors.black),
('LINEBELOW', (8, 0), (8, 0), 0.75, colors.black),
('LINEBELOW', (10, 0), (10, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (28 * mm, 14 * mm, 8 * mm, 14 * mm, 8 * mm, 10 * mm, 12 * mm, 10 * mm, 8 * mm, 12 * mm, 8 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def address_tbl(text, type_address, address):
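# Address block. `address` is a JSON string shaped like the defaults filled in by form_01:
# {"details": {"region": ..., "region_type": ..., "area": ..., "city": ..., "settlement": ..., "street": ..., "house": ..., "flat": ...}}.
# Rendered as four underlined rows: region, district/city, settlement/street, house/building/flat.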
data_address = json.loads(address)
address_details = data_address["details"]
opinion = gen_opinion([f'{type_address} субъект Российской Федерации:', f"{address_details['region_type']} {address_details['region']}"])
col_widths = (135 * mm, 55 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_widths, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
# district / city
opinion = gen_opinion(['район', f"{address_details['area_type']} {address_details['area']}", 'город', f"{address_details['city_type']} {address_details['city']}"])
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('LINEBELOW', (3, 0), (3, 0), 0.75, colors.black),
]
col_width = (17 * mm, 77 * mm, 16 * mm, 80 * mm,)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
# settlement / street
opinion = gen_opinion(
['населенный пункт', f"{address_details['settlement_type']} {address_details['settlement']}", 'улица', f"{address_details['street_type']} {address_details['street']}"])
col_width = (37 * mm, 67 * mm, 16 * mm, 70 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('LINEBELOW', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
# house, building, block, flat, room
opinion = gen_opinion(['дом', address_details['house'], 'стр.', '', 'корп.', '', 'кв.', address_details.get("flat", ""), 'комн.', ''])
col_width = (14 * mm, 15 * mm, 12 * mm, 12 * mm, 14 * mm, 15 * mm, 12 * mm, 15 * mm, 14 * mm, 15 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('LINEBELOW', (3, 0), (3, 0), 0.75, colors.black),
('LINEBELOW', (5, 0), (5, 0), 0.75, colors.black),
('LINEBELOW', (7, 0), (7, 0), 0.75, colors.black),
('LINEBELOW', (9, 0), (9, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def where_death_start_tbl(text, params, item_param):
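# "Смерть наступила" item: `params` is a JSON string with a "code" selecting the place of death;
# the matching option is bolded/underlined across two rows of numbered boxes. `item_param` is the
# printed item number ("6" on the counterfoil, "12" on the certificate).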
whera_data = json.loads(params)
place, car, hospital, home = ' на месте происшествия', ', в машине скорой помощи', ', в стационаре', ', дома'
if whera_data["code"] == '1':
place = f"<u>{op_bold_tag}{place}{cl_bold_tag}</u>"
elif whera_data["code"] == '2':
car = f"<u>{op_bold_tag}{car}{cl_bold_tag}</u>"
elif whera_data["code"] == '3':
hospital = f"<u>{op_bold_tag}{hospital}{cl_bold_tag}</u>"
elif whera_data["code"] == '4':
home = f"<u>{op_bold_tag}{home}{cl_bold_tag}</u>"
opinion = gen_opinion([f'{item_param}.Смерть наступила:', place, '1', car, '2', hospital, '3', home, '4'])
col_width = (32 * mm, 37 * mm, 6 * mm, 42 * mm, 6 * mm, 24 * mm, 6 * mm, 12 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
('GRID', (8, 0), (8, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
# Place where death occurred (continued)
education_place, other_place = 'в образовательной организации', 'в другом месте'
if whera_data["code"] == '7':
education_place = f"<u>{op_bold_tag}{education_place}{cl_bold_tag}</u>"
elif whera_data["code"] == '5':
other_place = f"<u>{op_bold_tag}{other_place}{cl_bold_tag}</u>"
opinion = gen_opinion([education_place, '5', other_place, '6'])
col_width = (55 * mm, 6 * mm, 24 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def line_split(text):
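# Gray dashed separator with the caption "линия отреза" between the counterfoil and the certificate.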
step_round_dash = (1.5 * mm, 1 * mm)
styleColor = deepcopy(style)
styleColor.textColor = colors.gray
opinion = [[Paragraph('', style), Paragraph('линия отреза', styleColor), Paragraph('', style), ], ]
tbl = Table(opinion, hAlign='LEFT', rowHeights=5 * mm, colWidths=(80 * mm, 25 * mm, 80 * mm))
tbl.setStyle(
TableStyle(
[
('LINEBELOW', (0, 0), (0, 0), 0.2 * mm, colors.gray, 'round', step_round_dash),
('LINEBELOW', (2, 0), (2, 0), 0.2 * mm, colors.gray, 'round', step_round_dash),
('BOTTOMPADDING', (1, 0), (1, 0), -0.5 * mm),
]
)
)
text.append(tbl)
return text
def patient_passport(text, data_document):
if "-" in data_document["type"]:
document_type = data_document["type"].split("-")
document_type_print = document_type[1]
else:
document_type_print = data_document["type"]
opinion = gen_opinion(['4.Документ, удостоверяющий личность умершего:', document_type_print, 'серия', data_document["serial"], 'номер', data_document['number']])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('LINEBELOW', (3, 0), (3, 0), 0.75, colors.black),
('LINEBELOW', (5, 0), (5, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (71 * mm, 68 * mm, 12 * mm, 11 * mm, 14 * mm, 14 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def who_issue_passport(text, data_document):
opinion = gen_opinion(['кем и когда выдан', f"{data_document['who_issue']} {data_document['date_issue']}"])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (33 * mm, 157 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def patient_snils(text, snils_number):
opinion = gen_opinion(['5.СНИЛС', snils_number])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (23 * mm, 167 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def patient_polis(text, polis_number):
opinion = gen_opinion(['6.Полис ОМС:', polis_number])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (23 * mm, 167 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def type_city(text, type_value, type, ):
type_gorod, type_selo = ' городская', ', сельская'
type = json.loads(type)
if type["code"] == "1":
type_gorod = f'{op_bold_tag}<u>городская</u>{cl_bold_tag}'
if type["code"] == "2":
type_selo = f'{op_bold_tag}<u>сельская</u>{cl_bold_tag}'
opinion = gen_opinion([type_value, type_gorod, '1', type_selo, '2'])
col_width = (23 * mm, 19 * mm, 6 * mm, 18 * mm, 6 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (-1, -1), (-1, -1), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def child_death_befor_month(text, params):
params = json.loads(params)
week37_41, week_smaller, week_more_42 = ' доношенный (37-41 недель)', ' , недоношенный (менее 37 недель)', ', переношенный (42 недель и более)'
if params["code"] == "1":
week37_41 = f"{op_bold_tag}<u>{week37_41}</u>{cl_bold_tag}"
if params["code"] == "2":
week_smaller = f"{op_bold_tag}<u>{week_smaller}</u>{cl_bold_tag}"
if params["code"] == "3":
week_more_42 = f"{op_bold_tag}<u>{week_more_42}</u>{cl_bold_tag}"
opinion = gen_opinion(['13. * Для детей, умерших в возрасте от 168 час. до 1 месяца:', week37_41, '1'])
col_width = (85 * mm, 42 * mm, 6 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
opinion = gen_opinion([week_smaller, '2', week_more_42, '3'])
col_width = (57 * mm, 6 * mm, 55 * mm, 6 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def child_death_befor_year(text, params):
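# Item 14: children who died aged from 168 hours to 1 year - birth weight, birth order,
# mother's date of birth, age and full name, each with its own numbered box.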
opinion = gen_opinion(['14.*Для детей, умерших в возрасте от 168 час. до 1 года:', ' масса тела ребёнка при рождении', params["weight"], ' грамм', '1'])
col_width = (82 * mm, 50 * mm, 12 * mm, 12 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
opinion = gen_opinion(['каким по счету был ребенок у матери (считая умерших и не считая мертворождённых)', params["child_count"], '', '2'])
col_width = (125 * mm, 6 * mm, 5 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
opinion = gen_opinion(['дата рождения матери', params["mother_born"], '', '3', 'возраст матери (полных лет)', params["mother_age"], '', '4'])
col_width = (40 * mm, 19 * mm, 5 * mm, 6 * mm, 45 * mm, 15 * mm, 5 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('LINEBELOW', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.5 * mm))
text.append(tbl)
opinion = gen_opinion(['фамилия матери', params["mother_family"], '', '5', ', имя', params["mother_name"], '', '6', ' , отчество (при наличии)', params["mother_patronimyc"], '', '7'])
col_width = (30 * mm, 25 * mm, 5 * mm, 6 * mm, 14 * mm, 20 * mm, 5 * mm, 6 * mm, 40 * mm, 25 * mm, 5 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('LINEBELOW', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
('LINEBELOW', (9, 0), (9, 0), 0.75, colors.black),
('GRID', (11, 0), (11, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.5 * mm))
text.append(tbl)
return text
def family_status(text, params):
params = json.loads(params)
brak, not_brak, not_known = "состоял(а) в зарегистрированном браке", "не состоял(а) в зарегистрированном браке", "неизвестно"
if params["code"] == '3':
not_known = f"{op_bold_tag}<u>{not_known}</u>{cl_bold_tag}"
elif params["code"] == '4':
brak = f"{op_bold_tag}<u>{brak}</u>{cl_bold_tag}"
elif params["code"] == '5':
not_brak = f"{op_bold_tag}<u>{not_brak}</u>{cl_bold_tag}"
opinion = gen_opinion(['15.*Семейное положение:', brak, '1', not_brak, '2', not_known, '3'])
col_width = (38 * mm, 60 * mm, 6 * mm, 60 * mm, 6 * mm, 18 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def education(text, params):
high_prof, not_high_prof, middle_prof, middle_common = "профессиональное: высшее", ", неполное высшее", ", среднее профессиональное", "общее: среднее"
params = json.loads(params)
if params["code"] == '1':
high_prof = f"{op_bold_tag}<u>{high_prof}</u>{cl_bold_tag}"
elif params["code"] == '2':
not_high_prof = f"{op_bold_tag}<u>{not_high_prof}</u>{cl_bold_tag}"
elif params["code"] == '3':
middle_prof = f"{op_bold_tag}<u>{middle_prof}</u>{cl_bold_tag}"
elif params["code"] == '5':
middle_common = f"{op_bold_tag}<u>{middle_common}</u>{cl_bold_tag}"
opinion = gen_opinion(['16.* Образование:', high_prof, '1', not_high_prof, '2', middle_prof, '3', middle_common, '4'])
col_width = (29 * mm, 42 * mm, 6 * mm, 30 * mm, 6 * mm, 41 * mm, 6 * mm, 25 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -1 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
('GRID', (8, 0), (8, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
common, start, before_school, not_has_start, not_known = "основное", ", начальное", ", дошкольное", ", не имеет начального образования", ", неизвестно"
if params["code"] == '6':
common = f"{op_bold_tag}<u>{common}</u>{cl_bold_tag}"
elif params["code"] == '7':
start = f"{op_bold_tag}<u>{start}</u>{cl_bold_tag}"
elif params["code"] == '10':
before_school = f"{op_bold_tag}<u>{before_school}</u>{cl_bold_tag}"
elif params["code"] == '11':
not_has_start = f"{op_bold_tag}<u>{not_has_start}</u>{cl_bold_tag}"
elif params["code"] == '9':
not_known = f"{op_bold_tag}<u>{not_known}</u>{cl_bold_tag}"
opinion = gen_opinion([common, '5', start, '6', before_school, '7', not_has_start, '8', not_known, '9'])
col_width = (20 * mm, 6 * mm, 20 * mm, 6 * mm, 21 * mm, 6 * mm, 50 * mm, 6 * mm, 19 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('GRID', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
('GRID', (9, 0), (9, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def work_position(text, params):
params = json.loads(params)
worked, military, pensioner, student = "работал(а)", ", проходил(а) военную или приравненную к ней службу", ", пенсионер(ка)", "студент(ка)"
if params["code"] == '5':
worked = f"{op_bold_tag}<u>{worked}</u>{cl_bold_tag}"
elif params["code"] == '17':
military = f"{op_bold_tag}<u>{military}</u>{cl_bold_tag}"
elif params["code"] == '7':
pensioner = f"{op_bold_tag}<u>{pensioner}</u>{cl_bold_tag}"
elif params["code"] == '4':
student = f"{op_bold_tag}<u>{student}</u>{cl_bold_tag}"
opinion = gen_opinion(['17. * Занятость:', worked, '1', military, '2', pensioner, '3', student, '4'])
col_width = (24 * mm, 18 * mm, 6 * mm, 80 * mm, 6 * mm, 24 * mm, 6 * mm, 20 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
('GRID', (8, 0), (8, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
not_work, others, not_known = "не работал(а)", ", прочие", ", неизвестно"
if params["code"] == '8':
not_work = f"{op_bold_tag}<u>{not_work}</u>{cl_bold_tag}"
elif params["code"] == '10':
others = f"{op_bold_tag}<u>{others}</u>{cl_bold_tag}"
elif params["code"] == '22':
not_known = f"{op_bold_tag}<u>{not_known}</u>{cl_bold_tag}"
opinion = gen_opinion([not_work, '5', others, '6', not_known, '7'])
col_width = (28 * mm, 6 * mm, 17 * mm, 6 * mm, 21 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('GRID', (5, 0), (5, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def title_med_organization(text, params):
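# Form header: the issuing organisation (name, address, ОКПО code, licence) in a framed box on the left
# and the ОКУД / учётная форма № 106/У box on the right.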
opinion = [
[
Paragraph(f"{params['full_title']}<br/>"
f"адрес места нахождения {params['org_address']}<br/>"
f"Код по ОКПО {params['org_okpo']}<br/>"
f"Номер и дата выдачи лицензии на осуществление медицинской деятельности: <br/>{params['org_license']}<br/>", styleOrg),
Paragraph('', styleOrg),
Paragraph('Код формы по ОКУД _______<br/>Медицинская документация<br/>Учётная форма № 106/У<br/>Утверждена приказом Минздрава России <br/>от «15» апреля 2021 г. № 352н',
styleOrg),
],
]
col_width = (125 * mm, 5 * mm, 60 * mm,)
tbl_style = [
('GRID', (0, 0), (0, 0), 0.75, colors.black),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (-1, -1), 1 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def bottom_colontitul(text, params):
opinion = [[Paragraph(f'{params}', styleColontitul), ], ]
col_width = (190 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 10 * mm),
('LEFTPADDING', (0, 0), (-1, -1), 1 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def back_size(text):
opinion = [[Paragraph('Оборотная сторона', styleColontitulBold), ], ]
col_width = (190 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (-1, -1), (-1, -1), 166 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def why_death(text, params, item_why, item_dtp, item_pregnant, item_doc):
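# Causes-of-death section: part I lines а)-г) with ICD-10 code boxes, part II (other significant
# conditions), the road-accident and pregnancy items and the signature line of the person who filled
# in the certificate. Item numbers are passed in because the same layout is reused with different
# numbering on the reverse side (items 10-13 and 22-25).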
opinion = [
[
Paragraph(f"{item_why}. Причины смерти:", styleT),
Paragraph('Приблизительный период времени между началом патологического процесса и смертью', styleOrg),
Paragraph('Коды по МКБ', styleOrg),
],
]
col_width = (114 * mm, 36 * mm, 40 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (-1, -1), (-1, -1), 1 * mm),
('LEFTPADDING', (2, 0), (2, 0), 8 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
tbl = diagnos_tbl({"para": "I", "item": "а)", "result": params["а"]["rows"][0]})
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
tbl = about_diagnos("(болезнь или состояние, непосредственно приведшее к смерти)")
text.append(Spacer(1, 0.1 * mm))
text.append(tbl)
tbl = diagnos_tbl({"para": "", "item": "б)", "result": params["б"]["rows"][0]})
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = about_diagnos("(патологическое состояние, которое привело к возникновению причины, указанной в пункте «а»)")
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = diagnos_tbl({"para": "", "item": "в)", "result": params["в"]["rows"][0]})
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = about_diagnos("(первоначальная причина смерти указывается последней)")
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = diagnos_tbl({"para": "", "item": "г)", "result": params["г"]["rows"][0]})
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = about_diagnos("(внешняя причина при травмах и отравлениях)")
text.append(Spacer(1, 0 * mm))
text.append(tbl)
opinion = [
[
Paragraph('II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней, включая употребление '
'алкоголя, наркотических средств, психотропных и других токсических веществ, содержание их в крови, а также операции (название, дата)', styleColontitul),
],
]
col_width = (190 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (-1, -1), (-1, -1), 1 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.1 * mm))
text.append(tbl)
text.append(Spacer(1, 0.6 * mm))
data_ii = params["ii"]["rows"]
for k in range(len(data_ii)):
tbl = diagnos_tbl({"para": "", "item": "", "result": data_ii[k], "top_padd": -1.2 * mm})
text.append(Spacer(1, 0 * mm))
text.append(tbl)
days30, days7 = "смерть наступила - в течение 30 суток", ", из них в течение 7 суток"
dtp_death = json.loads(params["Связь смерти с ДТП"])
if dtp_death["code"] == "1":
days30 = f"{op_bold_tag}<u>{days30}</u>{cl_bold_tag}"
elif dtp_death["code"] == "2":
days7 = f"{op_bold_tag}<u>{days7}</u>{cl_bold_tag}"
opinion = gen_opinion([f'{item_dtp}.В случае смерти в результате ДТП:', days30, '1', days7, '2'])
col_width = (55 * mm, 55 * mm, 6 * mm, 40 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
pregnant, process_birth = "(независимо от срока и локализации)", ", в процессе родов"
pregnant_data = json.loads(params["Связь смерти с беременностью"])
if pregnant_data["code"] == "1":
pregnant = f"{op_bold_tag}<u>{pregnant}</u>{cl_bold_tag}"
elif pregnant_data["code"] == "2":
process_birth = f"{op_bold_tag}<u>{process_birth}</u>{cl_bold_tag}"
opinion = gen_opinion([f'{item_pregnant}.В случае смерти беременной', pregnant, '1', process_birth, '2'])
col_width = (50 * mm, 52 * mm, 6 * mm, 30 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LEFTPADDING', (1, 0), (1, 0), -2 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
final_process_birth_42days, final_process_birth_365days = "в течение 42 дней после окончания беременности, родов", ", кроме того в течение 43-365 дней после окончания беременности"
if pregnant_data["code"] == "3":
final_process_birth_42days = f"{op_bold_tag}<u>{final_process_birth_42days}</u>{cl_bold_tag}"
elif pregnant_data["code"] == "4":
final_process_birth_365days = f"{op_bold_tag}<u>{final_process_birth_365days}</u>{cl_bold_tag}"
opinion = gen_opinion([final_process_birth_42days, '3', final_process_birth_365days, '4'])
col_width = (84 * mm, 6 * mm, 98 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 4 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
opinion = gen_opinion([f'{item_doc}.Фамилия, имя, отчество (при наличии) врача (фельдшера, акушерки), заполнившего Медицинское свидетельство о смерти'])
col_width = (190 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
opinion = gen_opinion([f"{params['Заполнил']}", 'Подпись', ''])
col_width = (140 * mm, 20 * mm, 30 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (0, 0), (0, 0), 0.75, colors.black),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def diagnos_tbl(data):
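# One cause-of-death row: paragraph mark, item letter, diagnosis title, the approximate interval
# between onset and death, and up to five characters of the ICD-10 code in separate boxes
# (the dot position is a fixed cell).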
description_diag = data["result"][2]
description_diag_json = None
if len(description_diag) > 1:
description_diag_json = json.loads(description_diag)
description = ''
period = ""
top_padd = 0 * mm
mkb10 = ["", "", "", "", ""]
if len(description_diag) > 1:
description = description_diag_json["title"]
mkb10 = list(description_diag_json["code"])
if len(description) > 72:
top_padd = -2 * mm
period = f"{data['result'][0]} {data['result'][1]}"
if data.get("top_padd", None):
top_padd = data.get("top_padd")
# pad the ICD-10 code characters out to five entries so every box always receives a value
elements = []
for element in range(5):
try:
elements.insert(element, mkb10[element])
except IndexError:
elements.insert(element, "")
opinion = gen_opinion_diag([data["para"], data["item"], description, period, '', elements[0], elements[1], elements[2], '.', elements[4]])
col_width = (6 * mm, 7 * mm, 102 * mm, 36 * mm, 5 * mm, 8 * mm, 7 * mm, 7 * mm, 6 * mm, 7 * mm,)
tbl_style = [
('GRID', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
('GRID', (9, 0), (9, 0), 0.75, colors.black),
('LINEBELOW', (0, 0), (3, 0), 0.75, colors.black),
('LINEBEFORE', (3, 0), (3, 0), 0.75, colors.black),
('LINEAFTER', (3, 0), (3, 0), 0.75, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('TOPPADDING', (2, 0), (2, 0), top_padd),
('LEFTPADDING', (2, 0), (2, 0), -2 * mm),
('LEFTPADDING', (3, 0), (3, 0), 10 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style, 4 * mm)
return tbl
def about_diagnos(data):
styleMicro = deepcopy(styleT)
styleMicro.fontSize = 5.5
styleMicro.alignment = TA_CENTER
opinion = [
[
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph(f'{data}', styleMicro),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
],
]
col_width = (6 * mm, 7 * mm, 102 * mm, 36 * mm, 5 * mm, 7 * mm, 7 * mm, 7 * mm, 6 * mm, 7 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), -0.5 * mm),
('LINEBEFORE', (3, 0), (3, 0), 0.75, colors.black),
('LINEAFTER', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
return tbl
def destination_person_passport(text, data):
opinion = gen_opinion([data])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (0, 0), (-1, -1), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (190 * mm,)
tbl = gen_table(opinion, col_width, tbl_style, 4 * mm)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def destination_person_snils(text, data):
opinion = gen_opinion(['СНИЛС получателя (при наличии)', data])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (50 * mm, 140 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def death_happaned(text, params):
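# Item 18: kind of death (disease, accident related/unrelated to work, homicide, suicide,
# military or terrorist action, undetermined); the option matching the JSON "code" is bolded and underlined.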
ill, unfortunate_not_work, unfortunate_work = "от заболевания", "несчастного случая: не связанного с производством", "связанного с производством"
type_happend = json.loads(params)
if type_happend["code"] == "1":
ill = f"{op_bold_tag}<u>{ill}</u>{cl_bold_tag}"
elif type_happend["code"] == "2":
unfortunate_not_work = f"{op_bold_tag}<u>{unfortunate_not_work}</u>{cl_bold_tag}"
elif type_happend["code"] == "3":
unfortunate_work = f"{op_bold_tag}<u>{unfortunate_work}</u>{cl_bold_tag}"
opinion = gen_opinion(['18. Смерть произошла:', ill, '1', unfortunate_not_work, '2', unfortunate_work, '3'])
col_width = (34 * mm, 24 * mm, 6 * mm, 74 * mm, 6 * mm, 43 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
kill, self_kill, military, terrorist, not_know = "убийства", "самоубийства", ", в ходе действий: военных", "террористических", ", род смерти не установлен"
if type_happend["code"] == "4":
kill = f"{op_bold_tag}<u>{kill}</u>{cl_bold_tag}"
elif type_happend["code"] == "5":
self_kill = f"{op_bold_tag}<u>{self_kill}</u>{cl_bold_tag}"
elif type_happend["code"] == "6":
military = f"{op_bold_tag}<u>{military}</u>{cl_bold_tag}"
elif type_happend["code"] == "7":
terrorist = f"{op_bold_tag}<u>{terrorist}</u>{cl_bold_tag}"
elif type_happend["code"] == "8":
not_know = f"{op_bold_tag}<u>{not_know}</u>{cl_bold_tag}"
opinion = gen_opinion([kill, '4', self_kill, '5', military, '6', terrorist, '7', not_know, '8'])
col_width = (22 * mm, 6 * mm, 23 * mm, 6 * mm, 40 * mm, 6 * mm, 30 * mm, 6 * mm, 40 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('GRID', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
('GRID', (9, 0), (9, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def who_set_death(text, params):
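# Item 20: who established the causes of death. Several source codes map onto one printed option
# (e.g. codes 2 and 7 both mark "лечащим врачом", codes 3, 8 and 9 mark "фельдшером (акушеркой)").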
only_doc_death, doc_work, paramedic = "врачом, только установившем смерть", "лечащим врачом", "фельдшером (акушеркой)"
param_who_set = json.loads(params)
if param_who_set["code"] == "1":
only_doc_death = f"{op_bold_tag}<u>{only_doc_death}</u>{cl_bold_tag}"
elif param_who_set["code"] == "2" or param_who_set["code"] == "7":
doc_work = f"{op_bold_tag}<u>{doc_work}</u>{cl_bold_tag}"
elif param_who_set["code"] == "3" or param_who_set["code"] == "8" or param_who_set["code"] == "9":
paramedic = f"{op_bold_tag}<u>{paramedic}</u>{cl_bold_tag}"
opinion = gen_opinion(['20. Причины смерти установлены:', only_doc_death, '1', doc_work, '2', paramedic, '3'])
col_width = (49 * mm, 58 * mm, 6 * mm, 27 * mm, 6 * mm, 40 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.9 * mm))
text.append(tbl)
doc_anatomy, expert = "врачом-патологоанатомом", "судебно-медицинским экспертом"
if param_who_set["code"] == "4":
doc_anatomy = f"{op_bold_tag}<u>{doc_anatomy}</u>{cl_bold_tag}"
elif param_who_set["code"] == "5" or param_who_set["code"] == "7":
expert = f"{op_bold_tag}<u>{expert}</u>{cl_bold_tag}"
opinion = gen_opinion([doc_anatomy, '4', expert, '5'])
col_width = (50 * mm, 6 * mm, 50 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def doctor_fio(text, params, iss: Issledovaniya):
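# Item 21: name and position of the doctor (paramedic, midwife) certifying the causes of death
# and the grounds on which they were established (examination of the body, medical records,
# prior observation of the patient, autopsy).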
doc_fio = params["Заполнил"]
opinion = gen_opinion(['21. Я, врач (фельдшер, акушерка)', doc_fio])
col_width = (50 * mm, 140 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
doc_position = params["Должность"]
opinion = gen_opinion(['должность', doc_position])
col_width = (25 * mm, 165 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
see_body, write_medical_dicument = 'осмотра трупа', ', записей в медицинской документации',
base_diagnos = json.loads(params["Основания для определения причины смерти"])
if base_diagnos["code"] == "1":
see_body = f"{op_bold_tag}<u>{see_body}</u>{cl_bold_tag}"
elif base_diagnos["code"] == "2":
write_medical_dicument = f"{op_bold_tag}<u>{write_medical_dicument}</u>{cl_bold_tag}"
opinion = gen_opinion(['удостоверяю, что на основании:', see_body, '1', write_medical_dicument, '2'])
col_width = (53 * mm, 26 * mm, 6 * mm, 61 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
see_patient, open_body = 'предшествующего наблюдения за больным(ой)', ', вскрытия',
if base_diagnos["code"] == "3" or base_diagnos["code"] == "5":
see_patient = f"{op_bold_tag}<u>{see_patient}</u>{cl_bold_tag}"
elif base_diagnos["code"] == "4":
open_body = f"{op_bold_tag}<u>{open_body}</u>{cl_bold_tag}"
opinion = gen_opinion([see_patient, '3', open_body, '4', ' мною установлены причины смерти'])
col_width = (75 * mm, 6 * mm, 21 * mm, 6 * mm, 70 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def hospital_manager_stamp(text, fio_manager):
opinion = gen_opinion(['', '', '', '', fio_manager])
col_width = (45 * mm, 5 * mm, 45 * mm, 5 * mm, 90 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (0, 0), (0, 0), 0.75, colors.black),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
('LINEBELOW', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 3 * mm))
text.append(tbl)
opinion = gen_opinion(['печать', 'подпись', '(фамилия, имя, отчество (при наличии))'])
col_width = (45 * mm, 45 * mm, 100 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 15 * mm),
('LEFTPADDING', (1, 0), (1, 0), 15 * mm),
('LEFTPADDING', (2, 0), (2, 0), 15 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def check_person_data(text, fio_check):
date_value = "«___» ___________ 20 ___ г."
opinion = gen_opinion([date_value, fio_check])
col_width = (60 * mm, 130 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 3 * mm))
text.append(tbl)
return text
from reportlab.platypus import Paragraph, Spacer, Table, TableStyle, PageBreak
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib import colors
from reportlab.lib.units import mm
from copy import deepcopy
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_JUSTIFY
from directions.models import Napravleniya
from appconf.manager import SettingManager
import os.path
from laboratory.settings import FONTS_FOLDER
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from .flowable import FrameDataUniversal
from directions.models import Issledovaniya
from ..prepare_data import fields_result_only_title_fields
import simplejson as json
import datetime
from dateutil.relativedelta import relativedelta
from hospitals.models import Hospitals
pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifReg"
style.fontSize = 9
style.alignment = TA_JUSTIFY
style.leading = 3 * mm
styleCentre = deepcopy(style)
styleCentre.alignment = TA_CENTER
styleBold = deepcopy(style)
styleBold.fontName = "PTAstraSerifBold"
styleCentreBold = deepcopy(styleBold)
styleCentreBold.alignment = TA_CENTER
hospital_name = SettingManager.get("org_title")
hospital_address = SettingManager.get("org_address")
hospital_kod_ogrn = SettingManager.get("org_ogrn")
styleT = deepcopy(style)
styleT.alignment = TA_LEFT
styleT.fontSize = 9
styleT.leading = 3 * mm
styleOrg = deepcopy(styleT)
styleOrg.fontSize = 8
styleColontitul = deepcopy(styleT)
styleColontitul.fontSize = 7
styleColontitul.leading = 2 * mm
styleColontitulBold = deepcopy(styleColontitul)
styleColontitulBold.fontName = "PTAstraSerifBold"
styleTBold = deepcopy(styleT)
styleTBold.fontName = "PTAstraSerifBold"
styleOrgBold = deepcopy(styleOrg)
styleOrgBold.fontName = "PTAstraSerifBold"
styleOrgBold.leading = 2 * mm
op_bold_tag = '<font face="PTAstraSerifBold">'
cl_bold_tag = '</font>'
space_symbol = ' '
def form_01(direction: Napravleniya, iss: Issledovaniya, fwb, doc, leftnone, user=None):
# Medical death certificate, registration form 106/у
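# Entry point: pulls the saved field values for the research, fills in JSON defaults for missing
# fields, then lays out the counterfoil, the cut line, the front side of the certificate,
# a page break and the reverse side (split by its own cut line).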
data_individual = direction.client.get_data_individual()
data = {}
title_fields = [
"Серия",
"Префикс номера",
"Номер",
"Дата выдачи",
"Вид медицинского свидетельства о смерти",
"Серия предшествующего",
"Номер предшествующего",
"Дата выдачи предшествующего",
"Дата рождения",
"Дата смерти",
"Время смерти",
"Место постоянного жительства (регистрации)",
"Вид места жительства",
"Место смерти",
"Вид места смерти",
"Типы мест наступления смерти",
"Новорожденый",
"Доношенность новорожденного",
"Место рождения",
"Масса тела ребёнка при рождении",
"По счету был ребенок",
"Дата рождения матери",
"Возраст матери",
"ФИО матери",
"Семейное положение",
"Образование",
"Социальная группа",
"Полис ОМС",
"СНИЛС",
"Тип ДУЛ",
"ДУЛ",
"Род причины смерти",
"Смерть от внешних причин",
"Дата смерти от внешних причин",
"Время смерти от внешних причин",
"Дата события",
"Время события",
"Место и обстоятельства",
"Тип медицинского работника",
"Основания для определения причины смерти",
"а) Болезнь или состояние, непосредственно приведшее к смерти",
"б) патологическое состояние, которое привело к возникновению вышеуказанной причины:",
"в) первоначальная причина смерти:",
"г) внешняя причина при травмах и отравлениях:",
"II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней",
"ДТП",
"Связь смерти с ДТП",
"Беременность",
"Связь смерти с беременностью",
"ФИО (получатель)",
"Документ (получатель)",
"Серия (получатель)",
"Номер (получатель)",
"Кем и когда выдан (получатель)",
"СНИЛС (получатель)",
"Заполнил",
"Проверил",
"Главный врач",
"Должность",
"Время известно",
]
result = fields_result_only_title_fields(iss, title_fields, False)
for i in result:
data[i["title"]] = i["value"]
data['fio'] = data_individual["fio"]
data['sex'] = data_individual["sex"]
birth_date = datetime.datetime.strptime(data["Дата рождения"], '%d.%m.%Y')
death_date = datetime.datetime.strptime(data["Дата смерти"], '%d.%m.%Y')
diff = relativedelta(death_date, birth_date)
if diff.years == 0:
data['число месяцев жизни'] = diff.months
data['число дней жизни'] = diff.days
else:
data['число месяцев жизни'] = ""
data['число дней жизни'] = ""
if not data.get("Место рождения", None):
data[
"Место рождения"] = '{"details": {"region": "", "region_type": "", "area": "", "area_type": "", "city": "", "city_type": "", "settlement": "", "settlement_type": "", ' \
'"street": "", "street_type": "", "house": "", "house_type": "", "flat": "", "flat_type": "", "postal_code": "", "custom": false}}'
if not data.get("Место смерти", None):
data[
"Место смерти"] = '{"details": {"region": "", "region_type": "", "area": "", "area_type": "", "city": "", "city_type": "", "settlement": "", "settlement_type": "", ' \
'"street": "", "street_type": "", "house": "", "house_type": "", "flat": "", "flat_type": "", "postal_code": "", "custom": false}}'
if not data.get("Доношенность новорожденного", None):
data["Доношенность новорожденного"] = '{"code": "", "title": ""}'
if not data.get("Связь смерти с ДТП", None):
data["Связь смерти с ДТП"] = '{"code": "", "title": ""}'
if not data.get("Связь смерти с беременностью", None):
data["Связь смерти с беременностью"] = '{"code": "", "title": ""}'
if not data.get("Тип медицинского работника", None):
data["Тип медицинского работника"] = '{"code": "", "title": ""}'
if not data.get("Основания для определения причины смерти", None):
data["Основания для определения причины смерти"] = '{"code": "", "title": ""}'
if not data.get("Род причины смерти", None):
data["Род причины смерти"] = '{"code": "", "title": ""}'
if not data.get("Масса тела ребёнка при рождении", None):
data["Масса тела ребёнка при рождении"] = ""
if not data.get("По счету был ребенок", None):
data["По счету был ребенок"] = ""
if not data.get("Дата рождения матери", None):
data["Дата рождения матери"] = ""
if not data.get("Возраст матери", None):
data["Возраст матери"] = ""
if not data.get("ФИО (получатель)", None):
data["ФИО (получатель)"] = ""
if not data.get("Документ (получатель)", None):
data["Документ (получатель)"] = ""
if not data.get("Серия (получатель)", None):
data["Серия (получатель)"] = ""
if not data.get("Номер (получатель)", None):
data["Номер (получатель)"] = ""
if not data.get("Кем и когда выдан (получатель)", None):
data["Кем и когда выдан (получатель)"] = ""
if not data.get("СНИЛС (получатель)", None):
data["СНИЛС (получатель)"] = ""
if not data.get("Заполнил", None):
data["Заполнил"] = iss.doc_confirmation.get_full_fio() if iss.doc_confirmation else ""
if not data.get("Должность", None):
data["Должность"] = iss.doc_position if iss.doc_confirmation else ""
if not data.get("Проверил", None):
data["Проверил"] = ""
if not data.get("Главный врач", None):
data["Главный врач"] = ""
if not data.get("ФИО матери"):
data["ФИО матери"] = '{"columns":{"titles":["Фамилия","Имя","Отчество"], "rows":[["иванова","Марья","Олеговна"]]}}'
mother_data = json.loads(data["ФИО матери"])
data["mother_fio"] = f"{mother_data['rows'][0][0]} {mother_data['rows'][0][1]} {mother_data['rows'][0][2]}"
data["Фамилия матери"] = ""
data["Имя матери"] = ""
data["Отчество матери"] = ""
if data["Новорожденый"] in ["от 168 час. до 1 года", "от 168 час. до 1 месяца"]:
data["Фамилия матери"] = mother_data['rows'][0][0]
data["Имя матери"] = mother_data['rows'][0][1]
data["Отчество матери"] = mother_data['rows'][0][2]
hospital_obj: Hospitals = user.doctorprofile.get_hospital()
data['org'] = {"full_title": hospital_obj.title, "org_address": hospital_obj.address, "org_license": hospital_obj.license_data,
"org_okpo": hospital_obj.okpo}
data["а"] = json.loads(data["а) Болезнь или состояние, непосредственно приведшее к смерти"])
data["б"] = json.loads(data["б) патологическое состояние, которое привело к возникновению вышеуказанной причины:"])
data["в"] = json.loads(data["в) первоначальная причина смерти:"])
data["г"] = json.loads(data["г) внешняя причина при травмах и отравлениях:"])
data["ii"] = json.loads(data["II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней"])
template = add_template(iss, direction, data, 5 * mm)
fwb.extend(template)
template = add_line_split(iss, direction, 4 * mm)
fwb.extend(template)
template = death_data(iss, direction, data, 0 * mm)
fwb.extend(template)
fwb.append(PageBreak())
template = second_page_add_template(iss, direction, data, 0 * mm)
fwb.extend(template)
template = add_line_split(iss, direction, -1 * mm)
fwb.extend(template)
template = death_data2(iss, direction, data, -5 * mm)
fwb.extend(template)
return fwb
def add_template(iss: Issledovaniya, direction, fields, offset=0):
# Medical death certificate, form 106/у (counterfoil)
text = []
text = title_data("КОРЕШОК МЕДИЦИНСКОГО СВИДЕТЕЛЬСТВА О СМЕРТИ", "К УЧЕТНОЙ ФОРМЕ № 106/У", text, fields.get("Серия", ""), fields.get("Номер", ""), fields.get("Дата выдачи", ""),
fields.get("Вид медицинского свидетельства о смерти", ""), fields)
text.append(Spacer(1, 1.7 * mm))
text = fio_tbl(text, "1. Фамилия, имя, отчество (при наличии) умершего(ей):", fields.get('fio',''))
# Sex
text.append(Spacer(1, 0.3 * mm))
text = sex_tbl(text, fields.get('sex',''))
# Date of birth
text = born_tbl(text, fields.get('Дата рождения', ''))
text.append(Spacer(1, 0.3 * mm))
# Date of death
text = death_tbl(text, "4. Дата смерти:", fields.get('Дата смерти', '-'), fields.get('Время смерти', '-'))
text = address_tbl(text, "5. Регистрация по месту жительства (пребывания) умершего(ей):", fields.get("Место постоянного жительства (регистрации)", ""))
# Place where death occurred
text = where_death_start_tbl(text, fields.get("Типы мест наступления смерти"), "6")
text.append(Spacer(1, 0.2 * mm))
text.append(Paragraph('Для детей, умерших в возрасте до 1 года:', styleBold))
text.append(Spacer(1, 0.5 * mm))
opinion = gen_opinion(['7. Дата рождения', 'число', fields['Дата рождения'].split('.')[0],
', месяц', fields['Дата рождения'].split('.')[1],
', год', fields['Дата рождения'].split('.')[2],
', число месяцев', fields["число месяцев жизни"],
', число дней', fields["число дней жизни"], 'жизни'])
col_width = (29 * mm, 17 * mm, 8 * mm, 15 * mm, 8 * mm, 10 * mm, 12 * mm, 24 * mm, 8 * mm, 20 * mm, 8 * mm, 15 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), 0 * mm),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
('LINEBELOW', (4, 0), (4, 0), 0.75, colors.black),
('LINEBELOW', (6, 0), (6, 0), 0.75, colors.black),
('LINEBELOW', (8, 0), (8, 0), 0.75, colors.black),
('LINEBELOW', (10, 0), (10, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
text = address_tbl(text, "8. Место рождения", fields["Место рождения"])
text = fio_tbl(text, "9. Фамилия, имя, отчество (при наличии) матери:", fields["mother_fio"])
obj = []
obj.append(FrameDataUniversal(0 * mm, offset, 190 * mm, 95 * mm, text=text))
return obj
def add_line_split(iss: Issledovaniya, direction, offset=0):
    # Cut line
text = []
text = line_split(text)
obj = [(FrameDataUniversal(0 * mm, offset, 190 * mm, 5 * mm, text=text))]
return obj
def death_data(iss: Issledovaniya, direction, fields, offset=0):
    # Front side of the certificate: organization header and items 1-17
text = []
text = title_med_organization(text, fields['org'])
text = title_data("МЕДИЦИНСКОЕ СВИДЕТЕЛЬСТВО О СМЕРТИ", "", text, fields["Серия"], fields.get("Номер", ""), fields["Дата выдачи"], fields["Вид медицинского свидетельства о смерти"],
fields)
text.append(Spacer(1, 1.7 * mm))
text = fio_tbl(text, "1. Фамилия, имя, отчество (при наличии) умершего(ей):", fields["fio"])
    # Sex
text.append(Spacer(1, 0.3 * mm))
text = sex_tbl(text, fields['sex'])
    # Date of birth
text = born_tbl(text, fields['Дата рождения'])
# print(fields["Тип ДУЛ"])
dul = json.loads(fields["ДУЛ"])
text = patient_passport(text, {"type": fields["Тип ДУЛ"], "serial": dul['rows'][0][0], "number": dul['rows'][0][1]})
text = who_issue_passport(text, {"who_issue": dul['rows'][0][2], "date_issue": dul['rows'][0][3]})
text = patient_snils(text, fields["СНИЛС"] or "")
text = patient_polis(text, fields["Полис ОМС"] or "")
text = death_tbl(text, "7. Дата смерти:", fields.get('Дата смерти', '-'), fields.get('Время смерти', '-'))
text = address_tbl(text, "8. Регистрация по месту жительства (пребывания) умершего(ей):", fields["Место постоянного жительства (регистрации)"])
text = type_city(text, "9. Местность:", fields["Вид места жительства"])
text = address_tbl(text, "10. Место смерти:", fields["Место смерти"])
text = type_city(text, "11. Местность: ", fields["Вид места смерти"])
text = where_death_start_tbl(text, fields["Типы мест наступления смерти"], "12")
text = child_death_befor_month(text, fields["Доношенность новорожденного"])
text = child_death_befor_year(text, {"weight": fields["Масса тела ребёнка при рождении"],
"child_count": fields["По счету был ребенок"],
"mother_born": fields["Дата рождения матери"],
"mother_age": fields["Возраст матери"],
"mother_family": fields["Фамилия матери"], "mother_name": fields["Имя матери"],
"mother_patronimyc": fields["Отчество матери"]})
text = family_status(text, fields["Семейное положение"])
text = education(text, fields["Образование"])
text = work_position(text, fields["Социальная группа"])
text = bottom_colontitul(text, "* В случае смерти детей, возраст которых указан в пунктах 13 - 14, пункты 15 - 17 заполняются в отношении их матерей.")
obj = []
obj.append(FrameDataUniversal(0 * mm, offset, 190 * mm, 178 * mm, text=text))
return obj
def second_page_add_template(iss: Issledovaniya, direction, fields, offset=0):
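    # Reverse side of the counterfoil: causes of death (items 10-13) and recipient details (item 14)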
text = []
text = back_size(text)
text = why_death(text, fields, '10', '11', '12', '13')
text = fio_tbl(text, "14. Фамилия, имя, отчество (при наличии) получателя", fields["ФИО (получатель)"])
text.append(Paragraph("Документ, удостоверяющий личность получателя (серия, номер, кем выдан)", styleT))
text = destination_person_passport(text, f'{fields["Документ (получатель)"]} {fields["Серия (получатель)"]} {fields["Номер (получатель)"]} {fields["Кем и когда выдан (получатель)"]}')
text = destination_person_snils(text, f'{fields["СНИЛС (получатель)"]}')
text.append(Spacer(1, 2 * mm))
text.append(Paragraph(f"«___» ___________ 20 ___ г.{space_symbol * 30} Подпись получателя _________________________", styleT))
obj = []
obj.append(FrameDataUniversal(0 * mm, offset, 190 * mm, 95 * mm, text=text))
return obj
def death_data2(iss: Issledovaniya, direction, fields, offset=0):
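    # Reverse side of the certificate: circumstances of death (items 18-21), causes of death (items 22-25) and sign-off (item 26)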
text = []
text = death_happaned(text, fields["Род причины смерти"])
date, month, year, hour, min = "____", "____", "_________", "____", "____"
date_event_data = fields.get("Дата события", None)
time_event_data = fields.get("Время события", None)
if date_event_data:
date_event_data = date_event_data.split(".")
date = f"<u>{space_symbol * 3}{date_event_data[0]}{space_symbol * 3}</u>"
month = f"<u>{space_symbol * 3}{date_event_data[1]}{space_symbol * 3}</u>"
year = f"<u>{space_symbol * 3}{date_event_data[2]}{space_symbol * 3}</u>"
if time_event_data:
time_event_data = time_event_data.split(":")
hour = f"<u>{space_symbol * 3}{time_event_data[0]}{space_symbol * 3}</u>"
min = f"<u>{space_symbol * 3}{time_event_data[1]}{space_symbol * 3}</u>"
text.append(Paragraph(
f"19. В случае смерти от несчастного случая, убийства, самоубийства, от военных и террористических действий, при неустановленном роде смерти - указать дату травмы (отравления): "
f"число {date} месяц {month} год {year} час. {hour} мин. {min} , а также место и обстоятельства, при которых произошла травма (отравление)",
styleT))
unfortunate_and_other_info = "________________________________________________________________________________________________________________________"
place_and_reasons = fields.get("Место и обстоятельства", None)
if place_and_reasons:
unfortunate_and_other_info = f"<u>{space_symbol * 2}{place_and_reasons} {space_symbol * 2}</u>"
text.append(Paragraph(f"{unfortunate_and_other_info}", styleT))
text = who_set_death(text, fields["Тип медицинского работника"])
text = doctor_fio(text, fields, iss)
text.append(Spacer(1, 1 * mm))
text = why_death(text, fields, "22", "23", "24", "25")
text.append(Spacer(1, 2 * mm))
text.append(
Paragraph("<u>Руководитель (иное уполномоченное лицо **) медицинской организации</u>, индивидуальный предприниматель, осуществляющий медицинскую деятельность (подчеркнуть)", styleT))
text.append(Spacer(1, 2 * mm))
text = hospital_manager_stamp(text, fields["Главный врач"])
text.append(Spacer(1, 2 * mm))
text.append(Paragraph("26 Свидетельство проверено ответственным за правильность заполнения медицинских свидетельств.", styleT))
text = check_person_data(text, fields["Проверил"])
text = bottom_colontitul(text, '** В случае, установленном частью 10 статьи 9 Федерального закона от 5 июня 2012 г. № 50-ФЗ "О регулировании деятельности российских граждан и '
'российских юридических лиц в Антарктике" (Собрание законодательства Российской Федерации, 2012, № 24, ст. 3067). ')
obj = []
obj.append(FrameDataUniversal(0 * mm, offset, 190 * mm, 168 * mm, text=text))
return obj
# Shared helper functions
def title_data(title_name, title_form, text, serial, number, date_issue, type_document, data_fields):
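    # Header block: certificate title, series/number, issue date and the underlined certificate type (final/preliminary/replacement)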
text.append(Paragraph(f"{title_name}", styleCentreBold))
text.append(Spacer(1, 0.1 * mm))
text.append(Paragraph(f"{title_form}", styleCentreBold))
text.append(Spacer(1, 0.2 * mm))
prefix = data_fields.get("Префикс номера", "")
text.append(Paragraph(f"СЕРИЯ {serial} № {prefix}{number}", styleCentreBold))
text.append(Spacer(1, 0.1 * mm))
text.append(Paragraph(f"Дата выдачи {date_issue}", styleCentreBold))
final, preparatory, instead_preparatory, instead_final = "окончательного", "предварительного", "взамен предварительного", "взамен окончательного"
if title_name == "МЕДИЦИНСКОЕ СВИДЕТЕЛЬСТВО О СМЕРТИ":
final, preparatory = "окончательное", "предварительное"
type_death_document = json.loads(type_document)
if type_death_document["code"] == '4':
instead_final = f"<u>{op_bold_tag}{instead_final}{cl_bold_tag}</u>"
elif type_death_document["code"] == '3':
instead_preparatory = f"<u>{op_bold_tag}{instead_preparatory}{cl_bold_tag}</u>"
elif type_death_document["code"] == '1':
final = f"{op_bold_tag}<u>{final}</u>{cl_bold_tag}"
elif type_death_document["code"] == '2':
preparatory = f"<u>{op_bold_tag}{preparatory}{cl_bold_tag}</u>"
text.append(Paragraph(f"({final}, {preparatory}, {instead_preparatory}, {instead_final}) (подчеркнуть)", styleCentre))
if data_fields.get("Серия предшествующего", None):
text.append(Paragraph("ранее выданное свидетельство", styleCentre))
text.append(Paragraph(f"серия {data_fields['Серия предшествующего']} No {data_fields['Номер предшествующего']} от {data_fields['Дата выдачи предшествующего']} г.", styleCentre))
return text
def gen_opinion(data):
opinion = [[Paragraph(f"{k}", styleT) for k in data]]
return opinion
def gen_opinion_diag(data):
opinion = [[Paragraph(f"{k}", styleOrgBold) for k in data]]
return opinion
def gen_table(opinion, col_width, tbl_style, row_height=None):
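    # Build a left-aligned ReportLab Table from the prepared rows, column widths and style commands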
tbl = Table(opinion, colWidths=col_width, rowHeights=row_height, hAlign='LEFT', )
tbl.setStyle(TableStyle(tbl_style))
return tbl
def fio_tbl(text, type, fio):
opinion = gen_opinion([type, fio])
col_width = (80 * mm, 110 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def sex_tbl(text, sex):
if sex == "м":
sex_m = f'{op_bold_tag}<u>мужской</u>{cl_bold_tag}'
else:
sex_m = ' мужской'
if sex == "ж":
sex_w = f'{op_bold_tag}<u>женский</u>{cl_bold_tag}'
else:
sex_w = ', женский'
opinion = gen_opinion(['2.Пол:', sex_m, '1', sex_w, '2'])
col_width = (11 * mm, 17 * mm, 6 * mm, 19 * mm, 6 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (-1, -1), (-1, -1), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def born_tbl(text, born_data):
    # Date of birth
born = born_data.split('.')
born_day = born[0]
born_month = born[1]
born_year = born[2]
opinion = gen_opinion(['3.Дата рождения:', 'число', born_day, 'месяц', born_month, 'год', born_year])
    tbl_style = [
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
        ('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
        ('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
        ('LINEBELOW', (4, 0), (4, 0), 0.75, colors.black),
        ('LINEBELOW', (6, 0), (6, 0), 0.75, colors.black),
    ]
col_width = (28 * mm, 14 * mm, 8 * mm, 14 * mm, 8 * mm, 10 * mm, 12 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def death_tbl(text, number, death_data, death_time):
    # Date of death
death_data = death_data.split('.')
death_day = death_data[0]
death_month = death_data[1]
death_year = death_data[2]
death_hour, death_min = "", ""
if death_time:
death_time = death_time.split(":")
death_hour = death_time[0] if len(death_time) >= 1 else " "
death_min = death_time[1] if len(death_time) >= 2 else " "
opinion = gen_opinion([number, 'число', death_day, 'месяц', death_month, 'год', death_year, 'час.', death_hour, 'мин.', death_min])
    tbl_style = [
        ('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
        ('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
        ('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
        ('LINEBELOW', (4, 0), (4, 0), 0.75, colors.black),
        ('LINEBELOW', (6, 0), (6, 0), 0.75, colors.black),
        ('LINEBELOW', (8, 0), (8, 0), 0.75, colors.black),
        ('LINEBELOW', (10, 0), (10, 0), 0.75, colors.black),
    ]
col_width = (28 * mm, 14 * mm, 8 * mm, 14 * mm, 8 * mm, 10 * mm, 12 * mm, 10 * mm, 8 * mm, 12 * mm, 8 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def address_tbl(text, type_address, address):
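    # Address block: region, district/city, settlement/street and house/apartment rows taken from the JSON address value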
data_address = json.loads(address)
address_details = data_address["details"]
opinion = gen_opinion([f'{type_address} субъект Российской Федерации:', f"{address_details['region_type']} {address_details['region']}"])
col_widths = (135 * mm, 55 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_widths, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
    # District and city
opinion = gen_opinion(['район', f"{address_details['area_type']} {address_details['area']}", 'город', f"{address_details['city_type']} {address_details['city']}"])
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('LINEBELOW', (3, 0), (3, 0), 0.75, colors.black),
]
col_width = (17 * mm, 77 * mm, 16 * mm, 80 * mm,)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
    # Settlement and street
opinion = gen_opinion(
['населенный пункт', f"{address_details['settlement_type']} {address_details['settlement']}", 'улица', f"{address_details['street_type']} {address_details['street']}"])
col_width = (37 * mm, 67 * mm, 16 * mm, 70 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('LINEBELOW', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
    # House, building, block, apartment, room
opinion = gen_opinion(['дом', address_details['house'], 'стр.', '', 'корп.', '', 'кв.', address_details.get("flat", ""), 'комн.', ''])
col_width = (14 * mm, 15 * mm, 12 * mm, 12 * mm, 14 * mm, 15 * mm, 12 * mm, 15 * mm, 14 * mm, 15 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('LINEBELOW', (3, 0), (3, 0), 0.75, colors.black),
('LINEBELOW', (5, 0), (5, 0), 0.75, colors.black),
('LINEBELOW', (7, 0), (7, 0), 0.75, colors.black),
('LINEBELOW', (9, 0), (9, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def where_death_start_tbl(text, params, item_param):
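    # "Смерть наступила" item: underline the selected place of death and draw the numbered option boxes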
whera_data = json.loads(params)
place, car, hospital, home = ' на месте происшествия', ', в машине скорой помощи', ', в стационаре', ', дома'
if whera_data["code"] == '1':
place = f"<u>{op_bold_tag}{place}{cl_bold_tag}</u>"
elif whera_data["code"] == '2':
car = f"<u>{op_bold_tag}{car}{cl_bold_tag}</u>"
elif whera_data["code"] == '3':
hospital = f"<u>{op_bold_tag}{hospital}{cl_bold_tag}</u>"
elif whera_data["code"] == '4':
home = f"<u>{op_bold_tag}{home}{cl_bold_tag}</u>"
opinion = gen_opinion([f'{item_param}.Смерть наступила:', place, '1', car, '2', hospital, '3', home, '4'])
col_width = (32 * mm, 37 * mm, 6 * mm, 42 * mm, 6 * mm, 24 * mm, 6 * mm, 12 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
('GRID', (8, 0), (8, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
    # Where the death occurred (options 5-6)
education_place, other_place = 'в образовательной организации', 'в другом месте'
if whera_data["code"] == '7':
education_place = f"<u>{op_bold_tag}{education_place}{cl_bold_tag}</u>"
elif whera_data["code"] == '5':
other_place = f"<u>{op_bold_tag}{other_place}{cl_bold_tag}</u>"
opinion = gen_opinion([education_place, '5', other_place, '6'])
col_width = (55 * mm, 6 * mm, 24 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def line_split(text):
step_round_dash = (1.5 * mm, 1 * mm)
styleColor = deepcopy(style)
styleColor.textColor = colors.gray
opinion = [[Paragraph('', style), Paragraph('линия отреза', styleColor), Paragraph('', style), ], ]
tbl = Table(opinion, hAlign='LEFT', rowHeights=5 * mm, colWidths=(80 * mm, 25 * mm, 80 * mm))
tbl.setStyle(
TableStyle(
[
('LINEBELOW', (0, 0), (0, 0), 0.2 * mm, colors.gray, 'round', step_round_dash),
('LINEBELOW', (2, 0), (2, 0), 0.2 * mm, colors.gray, 'round', step_round_dash),
('BOTTOMPADDING', (1, 0), (1, 0), -0.5 * mm),
]
)
)
text.append(tbl)
return text
def patient_passport(text, data_document):
if "-" in data_document["type"]:
document_type = data_document["type"].split("-")
document_type_print = document_type[1]
else:
document_type_print = data_document["type"]
opinion = gen_opinion(['4.Документ, удостоверяющий личность умершего:', document_type_print, 'серия', data_document["serial"], 'номер', data_document['number']])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('LINEBELOW', (3, 0), (3, 0), 0.75, colors.black),
('LINEBELOW', (5, 0), (5, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (71 * mm, 68 * mm, 12 * mm, 11 * mm, 14 * mm, 14 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def who_issue_passport(text, data_document):
opinion = gen_opinion(['кем и когда выдан', f"{data_document['who_issue']} {data_document['date_issue']}"])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (33 * mm, 157 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def patient_snils(text, snils_number):
opinion = gen_opinion(['5.СНИЛС', snils_number])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (23 * mm, 167 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def patient_polis(text, polis_number):
opinion = gen_opinion(['6.Полис ОМС:', polis_number])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (23 * mm, 167 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def type_city(text, type_value, type_data):
    type_gorod, type_selo = ' городская', ', сельская'
    type_data = json.loads(type_data)
    if type_data["code"] == "1":
        type_gorod = f'{op_bold_tag}<u>городская</u>{cl_bold_tag}'
    if type_data["code"] == "2":
        type_selo = f'{op_bold_tag}<u>сельская</u>{cl_bold_tag}'
opinion = gen_opinion([type_value, type_gorod, '1', type_selo, '2'])
col_width = (23 * mm, 19 * mm, 6 * mm, 18 * mm, 6 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (-1, -1), (-1, -1), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def child_death_befor_month(text, params):
params = json.loads(params)
week37_41, week_smaller, week_more_42 = ' доношенный (37-41 недель)', ' , недоношенный (менее 37 недель)', ', переношенный (42 недель и более)'
if params["code"] == "1":
week37_41 = f"{op_bold_tag}<u>{week37_41}</u>{cl_bold_tag}"
if params["code"] == "2":
week_smaller = f"{op_bold_tag}<u>{week_smaller}</u>{cl_bold_tag}"
if params["code"] == "3":
week_more_42 = f"{op_bold_tag}<u>{week_more_42}</u>{cl_bold_tag}"
opinion = gen_opinion(['13. * Для детей, умерших в возрасте от 168 час. до 1 месяца:', week37_41, '1'])
col_width = (85 * mm, 42 * mm, 6 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
opinion = gen_opinion([week_smaller, '2', week_more_42, '3'])
col_width = (57 * mm, 6 * mm, 55 * mm, 6 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
        ('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def child_death_befor_year(text, params):
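    # Item 14: children who died between 168 hours and 1 year of age (birth weight, birth order, mother's data)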
opinion = gen_opinion(['14.*Для детей, умерших в возрасте от 168 час. до 1 года:', ' масса тела ребёнка при рождении', params["weight"], ' грамм', '1'])
col_width = (82 * mm, 50 * mm, 12 * mm, 12 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
opinion = gen_opinion(['каким по счету был ребенок у матери (считая умерших и не считая мертворождённых)', params["child_count"], '', '2'])
col_width = (125 * mm, 6 * mm, 5 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
opinion = gen_opinion(['дата рождения матери', params["mother_born"], '', '3', 'возраст матери (полных лет)', params["mother_age"], '', '4'])
col_width = (40 * mm, 19 * mm, 5 * mm, 6 * mm, 45 * mm, 15 * mm, 5 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('LINEBELOW', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.5 * mm))
text.append(tbl)
opinion = gen_opinion(['фамилия матери', params["mother_family"], '', '5', ', имя', params["mother_name"], '', '6', ' , отчество (при наличии)', params["mother_patronimyc"], '', '7'])
col_width = (30 * mm, 25 * mm, 5 * mm, 6 * mm, 14 * mm, 20 * mm, 5 * mm, 6 * mm, 40 * mm, 25 * mm, 5 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('LINEBELOW', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
('LINEBELOW', (9, 0), (9, 0), 0.75, colors.black),
('GRID', (11, 0), (11, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.5 * mm))
text.append(tbl)
return text
def family_status(text, params):
params = json.loads(params)
brak, not_brak, not_known = "состоял(а) в зарегистрированном браке", "не состоял(а) в зарегистрированном браке", "неизвестно"
if params["code"] == '3':
not_known = f"{op_bold_tag}<u>{not_known}</u>{cl_bold_tag}"
elif params["code"] == '4':
brak = f"{op_bold_tag}<u>{brak}</u>{cl_bold_tag}"
elif params["code"] == '5':
not_brak = f"{op_bold_tag}<u>{not_brak}</u>{cl_bold_tag}"
opinion = gen_opinion(['15.*Семейное положение:', brak, '1', not_brak, '2', not_known, '3'])
col_width = (38 * mm, 60 * mm, 6 * mm, 60 * mm, 6 * mm, 18 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def education(text, params):
high_prof, not_high_prof, middle_prof, middle_common = "профессиональное: высшее", ", неполное высшее", ", среднее профессиональное", "общее: среднее"
params = json.loads(params)
if params["code"] == '1':
high_prof = f"{op_bold_tag}<u>{high_prof}</u>{cl_bold_tag}"
elif params["code"] == '2':
not_high_prof = f"{op_bold_tag}<u>{not_high_prof}</u>{cl_bold_tag}"
elif params["code"] == '3':
middle_prof = f"{op_bold_tag}<u>{middle_prof}</u>{cl_bold_tag}"
elif params["code"] == '5':
middle_common = f"{op_bold_tag}<u>{middle_common}</u>{cl_bold_tag}"
opinion = gen_opinion(['16.* Образование:', high_prof, '1', not_high_prof, '2', middle_prof, '3', middle_common, '4'])
col_width = (29 * mm, 42 * mm, 6 * mm, 30 * mm, 6 * mm, 41 * mm, 6 * mm, 25 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -1 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
('GRID', (8, 0), (8, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
common, start, before_school, not_has_start, not_known = "основное", ", начальное", ", дошкольное", ", не имеет начального образования", ", неизвестно"
if params["code"] == '6':
common = f"{op_bold_tag}<u>{common}</u>{cl_bold_tag}"
elif params["code"] == '7':
start = f"{op_bold_tag}<u>{start}</u>{cl_bold_tag}"
elif params["code"] == '10':
before_school = f"{op_bold_tag}<u>{before_school}</u>{cl_bold_tag}"
elif params["code"] == '11':
not_has_start = f"{op_bold_tag}<u>{not_has_start}</u>{cl_bold_tag}"
elif params["code"] == '9':
not_known = f"{op_bold_tag}<u>{not_known}</u>{cl_bold_tag}"
opinion = gen_opinion([common, '5', start, '6', before_school, '7', not_has_start, '8', not_known, '9'])
col_width = (20 * mm, 6 * mm, 20 * mm, 6 * mm, 21 * mm, 6 * mm, 50 * mm, 6 * mm, 19 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('GRID', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
('GRID', (9, 0), (9, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def work_position(text, params):
params = json.loads(params)
worked, military, pensioner, student = "работал(а)", ", проходил(а) военную или приравненную к ней службу", ", пенсионер(ка)", "студент(ка)"
if params["code"] == '5':
worked = f"{op_bold_tag}<u>{worked}</u>{cl_bold_tag}"
elif params["code"] == '17':
military = f"{op_bold_tag}<u>{military}</u>{cl_bold_tag}"
elif params["code"] == '7':
pensioner = f"{op_bold_tag}<u>{pensioner}</u>{cl_bold_tag}"
elif params["code"] == '4':
student = f"{op_bold_tag}<u>{student}</u>{cl_bold_tag}"
opinion = gen_opinion(['17. * Занятость:', worked, '1', military, '2', pensioner, '3', student, '4'])
col_width = (24 * mm, 18 * mm, 6 * mm, 80 * mm, 6 * mm, 24 * mm, 6 * mm, 20 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
('GRID', (8, 0), (8, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
not_work, others, not_known = "не работал(а)", ", прочие", ", неизвестно"
if params["code"] == '8':
not_work = f"{op_bold_tag}<u>{not_work}</u>{cl_bold_tag}"
elif params["code"] == '10':
others = f"{op_bold_tag}<u>{others}</u>{cl_bold_tag}"
elif params["code"] == '22':
not_known = f"{op_bold_tag}<u>{not_known}</u>{cl_bold_tag}"
opinion = gen_opinion([not_work, '5', others, '6', not_known, '7'])
col_width = (28 * mm, 6 * mm, 17 * mm, 6 * mm, 21 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('GRID', (5, 0), (5, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def title_med_organization(text, params):
opinion = [
[
Paragraph(f'{params["full_title"]}<br/>'
f'адрес места нахождения {params["org_address"]}<br/>'
f'Код по ОКПО {params["org_okpo"]}<br/>'
f'Номер и дата выдачи лицензии на осуществление медицинской деятельности: <br/>{params["org_license"]}<br/>', styleOrg),
Paragraph('', styleOrg),
Paragraph('Код формы по ОКУД _______<br/>Медицинская документация<br/>Учётная форма № 106/У<br/>Утверждена приказом Минздрава России <br/>от «15» апреля 2021 г. № 352н',
styleOrg),
],
]
col_width = (125 * mm, 5 * mm, 60 * mm,)
tbl_style = [
('GRID', (0, 0), (0, 0), 0.75, colors.black),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (-1, -1), 1 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def bottom_colontitul(text, params):
opinion = [[Paragraph(f'{params}', styleColontitul), ], ]
    col_width = (190 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 10 * mm),
('LEFTPADDING', (0, 0), (-1, -1), 1 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def back_size(text):
opinion = [[Paragraph('Оборотная сторона', styleColontitulBold), ], ]
col_width = (190 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (-1, -1), (-1, -1), 166 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
return text
def why_death(text, params, item_why, item_dtp, item_pregnant, item_doc):
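    # Causes-of-death block: part I (items а-г) and part II with MKB-10 codes, plus the road-accident, pregnancy and certifier items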
opinion = [
[
Paragraph(f"{item_why}. Причины смерти:", styleT),
Paragraph('Приблизительный период времени между началом патологического процесса и смертью', styleOrg),
Paragraph('Коды по МКБ', styleOrg),
],
]
col_width = (114 * mm, 36 * mm, 40 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (-1, -1), (-1, -1), 1 * mm),
('LEFTPADDING', (2, 0), (2, 0), 8 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
tbl = diagnos_tbl({"para": "I", "item": "а)", "result": params["а"]["rows"][0]})
text.append(Spacer(1, 0.3 * mm))
text.append(tbl)
tbl = about_diagnos("(болезнь или состояние, непосредственно приведшее к смерти)")
text.append(Spacer(1, 0.1 * mm))
text.append(tbl)
tbl = diagnos_tbl({"para": "", "item": "б)", "result": params["б"]["rows"][0]})
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = about_diagnos("(патологическое состояние, которое привело к возникновению причины, указанной в пункте «а»)")
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = diagnos_tbl({"para": "", "item": "в)", "result": params["в"]["rows"][0]})
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = about_diagnos("(первоначальная причина смерти указывается последней)")
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = diagnos_tbl({"para": "", "item": "г)", "result": params["г"]["rows"][0]})
text.append(Spacer(1, 0 * mm))
text.append(tbl)
tbl = about_diagnos("(внешняя причина при травмах и отравлениях)")
text.append(Spacer(1, 0 * mm))
text.append(tbl)
opinion = [
[
Paragraph('II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней, включая употребление '
'алкоголя, наркотических средств, психотропных и других токсических веществ, содержание их в крови, а также операции (название, дата)', styleColontitul),
],
]
col_width = (190 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (-1, -1), (-1, -1), 1 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.1 * mm))
text.append(tbl)
text.append(Spacer(1, 0.6 * mm))
data_ii = params["ii"]["rows"]
for k in range(len(data_ii)):
tbl = diagnos_tbl({"para": "", "item": "", "result": data_ii[k], "top_padd": -1.2 * mm})
text.append(Spacer(1, 0 * mm))
text.append(tbl)
days30, days7 = "смерть наступила - в течение 30 суток", ", из них в течение 7 суток"
dtp_death = json.loads(params["Связь смерти с ДТП"])
if dtp_death["code"] == "1":
days30 = f"{op_bold_tag}<u>{days30}</u>{cl_bold_tag}"
elif dtp_death["code"] == "2":
days7 = f"{op_bold_tag}<u>{days7}</u>{cl_bold_tag}"
opinion = gen_opinion([f'{item_dtp}.В случае смерти в результате ДТП:', days30, '1', days7, '2'])
col_width = (55 * mm, 55 * mm, 6 * mm, 40 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
pregnant, process_birth = "(независимо от срока и локализации)", ", в процессе родов"
pregnant_data = json.loads(params["Связь смерти с беременностью"])
if pregnant_data["code"] == "1":
pregnant = f"{op_bold_tag}<u>{pregnant}</u>{cl_bold_tag}"
elif pregnant_data["code"] == "2":
process_birth = f"{op_bold_tag}<u>{process_birth}</u>{cl_bold_tag}"
opinion = gen_opinion([f'{item_pregnant}.В случае смерти беременной', pregnant, '1', process_birth, '2'])
col_width = (50 * mm, 52 * mm, 6 * mm, 30 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LEFTPADDING', (1, 0), (1, 0), -2 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
final_process_birth_42days, final_process_birth_365days = "в течение 42 дней после окончания беременности, родов", ", кроме того в течение 43-365 дней после окончания беременности"
if pregnant_data["code"] == "3":
final_process_birth_42days = f"{op_bold_tag}<u>{final_process_birth_42days}</u>{cl_bold_tag}"
elif pregnant_data["code"] == "4":
final_process_birth_365days = f"{op_bold_tag}<u>{final_process_birth_365days}</u>{cl_bold_tag}"
opinion = gen_opinion([final_process_birth_42days, '3', final_process_birth_365days, '4'])
col_width = (84 * mm, 6 * mm, 98 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 4 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
opinion = gen_opinion([f'{item_doc}.Фамилия, имя, отчество (при наличии) врача (фельдшера, акушерки), заполнившего Медицинское свидетельство о смерти'])
col_width = (190 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
opinion = gen_opinion([f'{params["Заполнил"]}', 'Подпись', ''])
col_width = (140 * mm, 20 * mm, 30 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (0, 0), (0, 0), 0.75, colors.black),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def diagnos_tbl(data):
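    # One cause-of-death row: diagnosis text, approximate interval and the MKB-10 code split into per-character boxes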
description_diag = data["result"][2]
description_diag_json = None
if len(description_diag) > 1:
description_diag_json = json.loads(description_diag)
decription = ''
period = ""
top_padd = 0 * mm
mkb10 = ["", "", "", "", ""]
if len(description_diag) > 1:
decription = description_diag_json["title"]
mkb10 = list(description_diag_json["code"])
if len(list(decription)) > 72:
top_padd = -2 * mm
period = f'{data["result"][0]} {data["result"][1]}'
if data.get("top_padd", None):
top_padd = data.get("top_padd")
elements = []
for element in range(5):
try:
elements.insert(element, mkb10[element])
        except IndexError:
elements.insert(element, "")
opinion = gen_opinion_diag([data["para"], data["item"], decription, period, '', elements[0], elements[1], elements[2], '.', elements[4]])
col_width = (6 * mm, 7 * mm, 102 * mm, 36 * mm, 5 * mm, 8 * mm, 7 * mm, 7 * mm, 6 * mm, 7 * mm,)
tbl_style = [
('GRID', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
('GRID', (9, 0), (9, 0), 0.75, colors.black),
('LINEBELOW', (0, 0), (3, 0), 0.75, colors.black),
('LINEBEFORE', (3, 0), (3, 0), 0.75, colors.black),
('LINEAFTER', (3, 0), (3, 0), 0.75, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('TOPPADDING', (2, 0), (2, 0), top_padd),
('LEFTPADDING', (2, 0), (2, 0), -2 * mm),
('LEFTPADDING', (3, 0), (3, 0), 10 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style, 4 * mm)
return tbl
def about_diagnos(data):
styleMicro = deepcopy(styleT)
styleMicro.fontSize = 5.5
styleMicro.alignment = TA_CENTER
opinion = [
[
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph(f'{data}', styleMicro),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
Paragraph('', styleT),
],
]
col_width = (6 * mm, 7 * mm, 102 * mm, 36 * mm, 5 * mm, 7 * mm, 7 * mm, 7 * mm, 6 * mm, 7 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), -0.5 * mm),
('LINEBEFORE', (3, 0), (3, 0), 0.75, colors.black),
('LINEAFTER', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
return tbl
def destination_person_passport(text, data):
opinion = gen_opinion([data])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (0, 0), (-1, -1), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
    col_width = (190 * mm,)
tbl = gen_table(opinion, col_width, tbl_style, 4 * mm)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def destination_person_snils(text, data):
opinion = gen_opinion(['СНИЛС получателя (при наличии)', data])
tbl_style = [
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
]
col_width = (50 * mm, 140 * mm)
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.2 * mm))
text.append(tbl)
return text
def death_happaned(text, params):
ill, unfortunate_not_work, unfortunate_work = "от заболевания", "несчастного случая: не связанного с производством", "связанного с производством"
type_happend = json.loads(params)
if type_happend["code"] == "1":
ill = f"{op_bold_tag}<u>{ill}</u>{cl_bold_tag}"
elif type_happend["code"] == "2":
unfortunate_not_work = f"{op_bold_tag}<u>{unfortunate_not_work}</u>{cl_bold_tag}"
elif type_happend["code"] == "3":
unfortunate_work = f"{op_bold_tag}<u>{unfortunate_work}</u>{cl_bold_tag}"
opinion = gen_opinion(['18. Смерть произошла:', ill, '1', unfortunate_not_work, '2', unfortunate_work, '3'])
col_width = (34 * mm, 24 * mm, 6 * mm, 74 * mm, 6 * mm, 43 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
kill, self_kill, military, terrorist, not_know = "убийства", "самоубийства", ", в ходе действий: военных", "террористических", ", род смерти не установлен"
if type_happend["code"] == "4":
kill = f"{op_bold_tag}<u>{kill}</u>{cl_bold_tag}"
elif type_happend["code"] == "5":
self_kill = f"{op_bold_tag}<u>{self_kill}</u>{cl_bold_tag}"
elif type_happend["code"] == "6":
military = f"{op_bold_tag}<u>{military}</u>{cl_bold_tag}"
elif type_happend["code"] == "7":
terrorist = f"{op_bold_tag}<u>{terrorist}</u>{cl_bold_tag}"
elif type_happend["code"] == "8":
not_know = f"{op_bold_tag}<u>{not_know}</u>{cl_bold_tag}"
opinion = gen_opinion([kill, '4', self_kill, '5', military, '6', terrorist, '7', not_know, '8'])
col_width = (22 * mm, 6 * mm, 23 * mm, 6 * mm, 40 * mm, 6 * mm, 30 * mm, 6 * mm, 40 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
('GRID', (5, 0), (5, 0), 0.75, colors.black),
('GRID', (7, 0), (7, 0), 0.75, colors.black),
('GRID', (9, 0), (9, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def who_set_death(text, params):
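    # Item 20: who established the causes of death (certifying doctor, attending doctor, paramedic, pathologist, forensic expert)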
only_doc_death, doc_work, paramedic = "врачом, только установившем смерть", "лечащим врачом", "фельдшером (акушеркой)"
param_who_set = json.loads(params)
if param_who_set["code"] == "1":
only_doc_death = f"{op_bold_tag}<u>{only_doc_death}</u>{cl_bold_tag}"
elif param_who_set["code"] == "2" or param_who_set["code"] == "7":
doc_work = f"{op_bold_tag}<u>{doc_work}</u>{cl_bold_tag}"
elif param_who_set["code"] == "3" or param_who_set["code"] == "8" or param_who_set["code"] == "9":
paramedic = f"{op_bold_tag}<u>{paramedic}</u>{cl_bold_tag}"
opinion = gen_opinion(['20. Причины смерти установлены:', only_doc_death, '1', doc_work, '2', paramedic, '3'])
col_width = (49 * mm, 58 * mm, 6 * mm, 27 * mm, 6 * mm, 40 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
('GRID', (6, 0), (6, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.9 * mm))
text.append(tbl)
doc_anatomy, expert = "врачом-патологоанатомом", "судебно-медицинским экспертом"
if param_who_set["code"] == "4":
doc_anatomy = f"{op_bold_tag}<u>{doc_anatomy}</u>{cl_bold_tag}"
elif param_who_set["code"] == "5" or param_who_set["code"] == "7":
expert = f"{op_bold_tag}<u>{expert}</u>{cl_bold_tag}"
opinion = gen_opinion([doc_anatomy, '4', expert, '5'])
col_width = (50 * mm, 6 * mm, 50 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('RIGHTPADDING', (1, 0), (-1, -1), -2 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def doctor_fio(text, params, iss: Issledovaniya):
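    # Item 21: certifying doctor's name and position, and the grounds on which the causes of death were established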
doc_fio = params["Заполнил"]
opinion = gen_opinion(['21. Я, врач (фельдшер, акушерка)', doc_fio])
col_width = (50 * mm, 140 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 0 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
doc_position = params["Должность"]
opinion = gen_opinion(['должность', doc_position])
col_width = (25 * mm, 165 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
see_body, write_medical_dicument = 'осмотра трупа', ', записей в медицинской документации',
base_diagnos = json.loads(params["Основания для определения причины смерти"])
if base_diagnos["code"] == "1":
see_body = f"{op_bold_tag}<u>{see_body}</u>{cl_bold_tag}"
elif base_diagnos["code"] == "2":
write_medical_dicument = f"{op_bold_tag}<u>{write_medical_dicument}</u>{cl_bold_tag}"
opinion = gen_opinion(['удостоверяю, что на основании:', see_body, '1', write_medical_dicument, '2'])
col_width = (53 * mm, 26 * mm, 6 * mm, 61 * mm, 6 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('GRID', (2, 0), (2, 0), 0.75, colors.black),
('GRID', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
see_patient, open_body = 'предшествующего наблюдения за больным(ой)', ', вскрытия',
if base_diagnos["code"] == "3" or base_diagnos["code"] == "5":
see_patient = f"{op_bold_tag}<u>{see_patient}</u>{cl_bold_tag}"
elif base_diagnos["code"] == "4":
open_body = f"{op_bold_tag}<u>{open_body}</u>{cl_bold_tag}"
opinion = gen_opinion([see_patient, '3', open_body, '4', ' мною установлены причины смерти'])
col_width = (75 * mm, 6 * mm, 21 * mm, 6 * mm, 70 * mm)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('GRID', (1, 0), (1, 0), 0.75, colors.black),
('GRID', (3, 0), (3, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def hospital_manager_stamp(text, fio_manager):
opinion = gen_opinion(['', '', '', '', fio_manager])
col_width = (45 * mm, 5 * mm, 45 * mm, 5 * mm, 90 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (0, 0), (0, 0), 0.75, colors.black),
('LINEBELOW', (2, 0), (2, 0), 0.75, colors.black),
('LINEBELOW', (4, 0), (4, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 3 * mm))
text.append(tbl)
    opinion = gen_opinion(['печать', 'подпись', '(фамилия, имя, отчество (при наличии))'])
col_width = (45 * mm, 45 * mm, 100 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 15 * mm),
('LEFTPADDING', (1, 0), (1, 0), 15 * mm),
('LEFTPADDING', (2, 0), (2, 0), 15 * mm),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 0.4 * mm))
text.append(tbl)
return text
def check_person_data(text, fio_check):
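    # Date line and name of the person who verified the certificate (item 26)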
date_value = "«___» ___________ 20 ___ г."
opinion = gen_opinion([date_value, fio_check])
col_width = (60 * mm, 130 * mm,)
tbl_style = [
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('TOPPADDING', (0, 0), (-1, -1), 0 * mm),
('LEFTPADDING', (0, 0), (0, 0), 5 * mm),
('LINEBELOW', (1, 0), (1, 0), 0.75, colors.black),
]
tbl = gen_table(opinion, col_width, tbl_style)
text.append(Spacer(1, 3 * mm))
text.append(tbl)
return text
|
# pylint: disable=missing-docstring
import copy
import itertools
import functools
import pathlib
from collections import namedtuple
from contextlib import suppress
import pytest
from rollit.runtime import Runner
from rollit.runtime.towers import IncrementalTower
from rollit.util import is_valid_iterable
try:
from ruamel import yaml
except ImportError:
import yaml
def __load():
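    # Load the YAML script-test definitions from ./scripts and group them by category into a ScriptTests namedtuple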
ScriptStatement = namedtuple('Stmt', ('statement', 'result'))
_script_tests_by_category = {}
def _convert_lists(entry):
if isinstance(entry, dict):
return {k: _convert_lists(v) for k, v in entry.items()}
elif isinstance(entry, list):
return tuple(_convert_lists(i) for i in entry)
return entry
class _ScriptTest(namedtuple('_ScriptTestBase', ('statements',))):
def __new__(cls, entry, parent_categories=(), *, _copying=False):
if _copying:
return super().__new__(cls, **entry)
categories = tuple(itertools.chain(entry.get('categories', ()), parent_categories))
tests = tuple(cls(child, categories) for child in entry.get('tests', ()))
script = entry.get('script')
statements = []
for s, r in entry.get('statements', ()):
statements.append(ScriptStatement(s, _convert_lists(r)))
self = super().__new__(
cls,
statements=tuple(statements),
)
# We only want to add tests that are actual tests.
if self.statements:
for category in categories:
_script_tests_by_category.setdefault(category, []).append(self)
return self
return None
def __str__(self):
            return f'ST({" | ".join(s.statement for s in self.statements)})'
def __repr__(self):
return str(self)
def __eq__(self, actual):
pass
for path in (pathlib.Path(__file__).parent / 'scripts').iterdir():
with path.open() as f:
_ScriptTest(yaml.safe_load(f))
for cat, items in _script_tests_by_category.items():
_script_tests_by_category[cat] = tuple(items)
ScriptTests = namedtuple('ScriptTests', tuple(_script_tests_by_category.keys()))
return ScriptTests(**_script_tests_by_category)
script_tests = __load()
with suppress(Exception):
del __load
@pytest.fixture
def runner():
return Runner(dice_tower=IncrementalTower())
|
import discord
from discord.ext import commands
from discord.commands import slash_command, Option
import asyncio
from bot import GoModBot
from discord.ui import InputText, Modal
class NameModal(Modal):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_item(InputText(label="What is your name?", placeholder="John Doe"))
async def callback(self, interaction: discord.Interaction):
await interaction.response.send_message(f"Hello, {self.children[0].value}!")
class Moderation(commands.Cog):
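    # Reaction-role listeners and moderation slash commands (kick, ban, block)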
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
        user = self.bot.get_user(payload.user_id)
        channel = self.bot.get_channel(payload.channel_id)
        guild = self.bot.get_guild(payload.guild_id)
        # Check user/channel/guild before use; fetching from a None channel would raise AttributeError
        if user is None or channel is None or guild is None:
            return
        message = await channel.fetch_message(payload.message_id)
        if message is None:
            return
member = guild.get_member(user.id)
if member is None:
return
if user.bot:
return
lookup = await self.bot.db.fetch("SELECT * FROM reactroles WHERE message = $1 AND channel = $2", message.id, message.channel.id)
if lookup:
for entry in lookup:
if str(payload.emoji) == str(entry['reaction']):
role = discord.utils.get(guild.roles, id=entry['role'])
                    if role is None:
return
if role in member.roles:
pass
else:
try:
await member.add_roles(role)
except discord.Forbidden:
embed = discord.Embed(title="Urgent message", description=f"A [reaction role]({message.jump_url}) in your server ({guild.name}) is failing to add roles to members. Please check if the reaction role's role ({role.name}) is below GoMod's role and GoMod is able to add roles.", color=discord.Color.red())
await guild.owner.send(embed=embed)
@commands.Cog.listener()
async def on_raw_reaction_remove(self, payload):
        user = self.bot.get_user(payload.user_id)
        channel = self.bot.get_channel(payload.channel_id)
        guild = self.bot.get_guild(payload.guild_id)
        # Check user/channel/guild before use; fetching from a None channel would raise AttributeError
        if user is None or channel is None or guild is None:
            return
        message = await channel.fetch_message(payload.message_id)
        if message is None:
            return
member = guild.get_member(user.id)
if member is None:
return
if user.bot:
return
lookup = await self.bot.db.fetch("SELECT * FROM reactroles WHERE message = $1 AND channel = $2", message.id, message.channel.id)
if lookup:
for entry in lookup:
if str(payload.emoji) == str(entry['reaction']):
role = discord.utils.get(guild.roles, id=entry['role'])
                    if role is None:
return
if role in member.roles:
try:
await member.remove_roles(role)
except discord.Forbidden:
embed = discord.Embed(title="Urgent message", description=f"A [reaction role]({message.jump_url}) in your server ({guild.name}) is failing to remove roles from members. Please check if the reaction role's role ({role.name}) is below GoMod's role and GoMod is able to remove roles.", color=discord.Color.red())
await guild.owner.send(embed=embed)
@commands.Cog.listener()
async def on_message_delete(self, message):
lookup = await self.bot.db.fetchrow("SELECT * FROM reactroles WHERE message = $1 AND channel = $2", message.id, message.channel.id)
if lookup:
await self.bot.db.execute("DELETE FROM reactroles WHERE message = $1 AND channel = $2", message.id, message.channel.id)
# @commands.command()
# async def modaltest(self, ctx):
# class MyView(discord.ui.View):
# @discord.ui.button(label="Tell GoMod your name.", style=discord.ButtonStyle.primary)
# async def button_callback(self, button, interaction):
    #             modal = NameModal(title="Greetings.")
# await interaction.response.send_modal(modal)
# view = MyView()
# await ctx.send("Hello! I am GoMod.", view=view)
@slash_command()
async def kick(self, ctx, member: Option(discord.Member, "Member to kick"), reason: Option(str, "Reason for kicking", required=False)):
"""
Kick a member from the server.
"""
if not ctx.author.guild_permissions.kick_members:
await ctx.respond("You do not have permission to kick members.", delete_after=3)
return
if member == ctx.author:
await ctx.respond("You cannot kick yourself.", delete_after=3)
return
if len(member.roles) > 0 and member.top_role >= ctx.author.top_role:
await ctx.respond("You cannot kick members with a higher role than you.", delete_after=3)
return
try:
embed = discord.Embed(title=f"Kicked from {ctx.guild.name}", description=f"You have been kicked from {ctx.guild.name} by {ctx.author.name} with reason: {reason}", color=discord.Color.red())
await member.send(embed=embed)
        except discord.HTTPException:
            pass  # member may have DMs disabled or has blocked the bot
await ctx.guild.kick(member, reason=reason)
embed = discord.Embed(title="Kicked", description=f"{member.mention} has been kicked from {ctx.guild.name} with reason: {reason}", color=0x00b2ff)
await ctx.respond(embed=embed)
@slash_command()
async def ban(self, ctx, member: Option(discord.Member, "Member to ban"), reason: Option(str, "Reason for banning", required=False)):
"""
Bans a member from the server.
"""
if not ctx.author.guild_permissions.ban_members:
await ctx.respond("You do not have the ban members permission.", delete_after=3)
return
if member == ctx.author:
await ctx.respond("You cannot ban yourself.", delete_after=3)
return
if len(member.roles) > 0 and member.top_role >= ctx.author.top_role:
await ctx.respond("You cannot ban members with a higher role than you.", delete_after=3)
return
try:
embed = discord.Embed(title=f"Banned from {ctx.guild.name}", description=f"You have been banned from {ctx.guild.name} by {ctx.author.name} with reason: {reason}", color=discord.Color.red())
await member.send(embed=embed)
        except discord.HTTPException:
            pass  # member may have DMs disabled or has blocked the bot
await ctx.guild.ban(member, reason=reason)
embed = discord.Embed(title="Banned", description=f"{member.mention} has been banned from {ctx.guild.name} with reason: {reason}", color=0x00b2ff)
await ctx.respond(embed=embed)
@slash_command()
async def block(self, ctx, member: discord.Member):
"""
Blocks a member from the current channel.
"""
if not ctx.author.guild_permissions.manage_roles:
await ctx.respond("You do not have the manage roles permission.", delete_after=3)
return
if member == ctx.author:
await ctx.respond("You cannot block yourself.", delete_after=3)
return
if len(member.roles) > 0 and member.top_role >= ctx.author.top_role:
await ctx.respond("You cannot block members with a higher role than you.", delete_after=3)
return
await ctx.channel.set_permissions(member, add_reactions = False, send_messages = False)
embed = discord.Embed(title="Blocked", description=f"{member.mention} has been blocked from {ctx.channel.mention}", color=0x00b2ff)
await ctx.respond(embed=embed)
# @commands.command()
# @commands.has_guild_permissions(manage_messages=True, manage_channels=True)
# async def unblock(self, ctx, member: discord.Member):
# if member == ctx.author:
# await ctx.send("You cannot unblock yourself.", delete_after=3)
# return
# if member.top_role >= ctx.author.top_role:
# await ctx.send("You cannot unblock members with a higher role than you.", delete_after=3)
# return
# await ctx.channel.set_permissions(member, add_reactions = True, send_messages = True)
# embed = discord.Embed(title="Unblocked", description=f"{member.mention} has been unblocked from {ctx.channel.mention}", color=0x00b2ff)
# await ctx.send(embed=embed)
@slash_command()
async def unblock(self, ctx, member: Option(discord.Member, "Member to unblock")):
"""
Unblocks a member from the current channel.
"""
if not ctx.author.guild_permissions.manage_roles:
await ctx.respond("You do not have the manage roles permission.", delete_after=3)
return
if member == ctx.author:
await ctx.respond("You cannot unblock yourself.", delete_after=3)
return
if len(member.roles) > 0 and member.top_role >= ctx.author.top_role:
await ctx.respond("You cannot unblock members with a higher role than you.", delete_after=3)
return
await ctx.channel.set_permissions(member, add_reactions = True, send_messages = True)
embed = discord.Embed(title="Unblocked", description=f"{member.mention} has been unblocked from {ctx.channel.mention}", color=0x00b2ff)
await ctx.respond(embed=embed)
@slash_command()
async def warn(self, ctx, member: Option(discord.Member, "Member to warn"), reason: Option(str, "Reason for warning", required=False)):
"""
Warns a member.
"""
if not ctx.author.guild_permissions.manage_messages:
await ctx.respond("You do not have the manage messages permission.", delete_after=3)
return
if member == ctx.author:
await ctx.respond("You cannot warn yourself.", delete_after=3)
return
if len(member.roles) > 0 and member.top_role >= ctx.author.top_role:
await ctx.respond("You cannot warn members with a higher role than you.", delete_after=3)
return
try:
embed = discord.Embed(title=f"Warned", description=f"You have been warned from {ctx.guild.name} by {ctx.author.name} with reason: {reason}", color=discord.Color.orange())
await member.send(embed=embed)
        except discord.HTTPException:
            pass  # member may have DMs disabled or has blocked the bot
        if reason is None:
await self.bot.db.execute("INSERT INTO warns VALUES ($1, $2, $3, $4)", member.id, ctx.guild.id, ctx.author.id, "No reason given.")
reason = "no reason"
else:
await self.bot.db.execute("INSERT INTO warns VALUES ($1, $2, $3, $4)", member.id, ctx.guild.id, ctx.author.id, reason)
embed = discord.Embed(title="Warned", description=f"{member.mention} has been warned by {ctx.author.mention} for {reason}", color=0x00b2ff)
await ctx.respond(embed=embed)
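    # The warn command above and the clearwarns/warns commands below assume a
    # `warns` table; its DDL is not defined in this file. A plausible sketch,
    # matching the column order used in the INSERT and the names used in the
    # WHERE clauses, would be:
    #
    #   CREATE TABLE IF NOT EXISTS warns (
    #       userid    BIGINT,  -- warned member's ID
    #       serverid  BIGINT,  -- guild ID
    #       invokerid BIGINT,  -- moderator who issued the warning
    #       reason    TEXT     -- reason text ("No reason given." when omitted)
    #   );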
@slash_command()
async def clearwarns(self, ctx, member: Option(discord.Member, "Member to clear warnings for")):
"""
Clears all warnings for a member.
"""
if not ctx.author.guild_permissions.manage_messages:
await ctx.respond("You do not have the manage messages permission.", delete_after=3)
return
if member == ctx.author:
await ctx.respond("You cannot clear your own warnings.", delete_after=3)
return
if len(member.roles) > 0 and member.top_role >= ctx.author.top_role:
await ctx.respond("You cannot clear warnings of members with a higher role than you.", delete_after=3)
return
await self.bot.db.execute("DELETE FROM warns WHERE userid = $1 AND serverid = $2", member.id, ctx.guild.id)
embed = discord.Embed(title="Warns cleared", description=f"{member.mention}'s warnings have been cleared.", color=0x00b2ff)
await ctx.respond(embed=embed)
@slash_command()
async def purge(self, ctx, amount: Option(int, "Amount of messages to delete", min_value=1, max_value=1000)):
"""
Purges a specified amount of messages from the current channel.
"""
if not ctx.author.guild_permissions.manage_messages:
await ctx.respond("You do not have the manage messages permission.", delete_after=3)
return
        await ctx.channel.purge(limit=amount)  # slash invocations are not channel messages, so no +1 is needed
embed = discord.Embed(title="Messages purged", description=f"{amount} messages have been purged.", color=0x00b2ff)
await ctx.send(embed=embed, delete_after=3)
@slash_command()
async def warns(self, ctx, member: Option(discord.Member, "Member to view warnings for")):
"""
Lists all the warns a member has.
"""
if not ctx.author.guild_permissions.manage_messages:
await ctx.respond("You do not have the manage messages permission.", delete_after=3)
return
if member == ctx.author:
await ctx.respond("You cannot view your own warnings.", delete_after=3)
return
if len(member.roles) > 0 and member.top_role >= ctx.author.top_role:
await ctx.respond("You cannot view warnings of members with a higher role than you.", delete_after=3)
return
warns = await self.bot.db.fetch("SELECT * FROM warns WHERE userid = $1 AND serverid = $2", member.id, ctx.guild.id)
        if not warns:
embed = discord.Embed(title="No warns", description=f"{member.mention} has no warns.", color=0x00b2ff)
await ctx.respond(embed=embed)
return
embed = discord.Embed(title="Warns", description=f"{member.mention} has {len(warns)} warns.", color=0x00b2ff)
for warn in warns:
            embed.add_field(name=f"{warn['reason']}", value=f"Warned by {ctx.guild.get_member(warn['invokerid']).mention}", inline=False)
await ctx.respond(embed=embed)
@slash_command()
async def reactrole(self, ctx, channel: Option(discord.TextChannel, "The channel the message is in"), message: Option(str, "The message that will have the reaction in ID form."), emoji: Option(str, "The emoji to react with"), role: Option(discord.Role, "The role to give to the user")):
"""
Run a reaction role setup.
"""
if not ctx.author.guild_permissions.manage_roles or not ctx.author.guild_permissions.manage_messages:
await ctx.respond("You do not have the manage roles or manage messages permission.", delete_after=3)
return
try:
            message_id = int(message)
        except ValueError:
await ctx.respond("The message ID must be an integer.", delete_after=3)
return
try:
            messageobj = await channel.fetch_message(message_id)
except Exception as e:
await ctx.respond("The message ID is invalid.", delete_after=3)
print(e)
return
await self.bot.db.execute("INSERT INTO reactroles VALUES ($1, $2, $3, $4)", messageobj.id, channel.id, role.id, emoji)
        await messageobj.add_reaction(emoji)
embed = discord.Embed(title="Reaction role setup", description="Reaction role setup complete.", color=0x00b2ff)
await ctx.respond(embed=embed)
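    # Example use of /reactrole (all values below are placeholders): a moderator
    # runs
    #   /reactrole channel:#roles message:934567890123456789 emoji:✅ role:@Member
    # GoMod stores the mapping, reacts with the emoji on that message, and the
    # on_raw_reaction_add / on_raw_reaction_remove listeners above then grant or
    # remove the role as users toggle the reaction.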
# @commands.command()
# @commands.has_guild_permissions(manage_messages=True)
# async def reactrole(self, ctx):
# embed = discord.Embed(title="Reaction role setup", description="1/4\nWhat channel is the message you're using is in? (Do NOT mention the channel. Instead, use the name.\nStuck? Read our [wiki](https://github.com/Joystickplays/GoMod/wiki/Verification-systems).", color=0x00b2ff)
# msg = await ctx.send(embed=embed)
# def check(m):
# return m.channel == ctx.channel and m.author == ctx.author
# while True:
# try:
# msg = await self.bot.wait_for('message', check=check, timeout=60)
# except asyncio.TimeoutError:
# await ctx.send("Timed out.", delete_after=3)
# return
# if msg.content.lower() == "cancel":
# await ctx.send("Cancelled.", delete_after=3)
# return
# channelcheck = msg.content.replace(" ", "-")
# channelcheck2 = channelcheck.lower()
# channel = discord.utils.get(ctx.guild.text_channels, name=channelcheck2)
# if channel != None:
# break
# await ctx.send("That channel doesn't exist. Try again...", delete_after=3)
# embed = discord.Embed(title="Reaction role setup", description="2/4\nWhat is your message's ID? More on getting message IDs [here](https://support.discord.com/hc/en-us/articles/206346498-Where-can-I-find-my-User-Server-Message-ID-). You can also say \"create one\" to make GoMod create a message for you.", color=0x00b2ff)
# msg = await ctx.send(embed=embed)
# while True:
# try:
# msg = await self.bot.wait_for('message', check=check, timeout=60)
# except asyncio.TimeoutError:
# await ctx.send("Timed out.", delete_after=3)
# return
# if msg.content.lower() == "cancel":
# await ctx.send("Cancelled.", delete_after=3)
# return
# message = None
# if msg.content.lower() == "create one":
# embed = discord.Embed(title="Reaction role setup", description="3.5/4\nWhat will be the title of the message?", color=0x00b2ff)
# msg = await ctx.send(embed=embed)
# try:
# title = await self.bot.wait_for('message', check=check, timeout=60)
# except asyncio.TimeoutError:
# await ctx.send("Timed out.", delete_after=3)
# return
# if title.content.lower() == "cancel":
# await ctx.send("Cancelled.", delete_after=3)
# return
# embed = discord.Embed(title="Reaction role setup", description="3.5/4\nWhat will be the description of the message?", color=0x00b2ff)
# msg = await ctx.send(embed=embed)
# try:
# description = await self.bot.wait_for('message', check=check, timeout=60)
# except asyncio.TimeoutError:
# await ctx.send("Timed out.", delete_after=3)
# return
# if description.content.lower() == "cancel":
# await ctx.send("Cancelled.", delete_after=3)
# return
# embed = discord.Embed(title=title.content, description=description.content, color=0x00b2ff)
# message = await channel.send(embed=embed)
# break
# if message == None:
# try:
# message = await channel.fetch_message(int(msg.content))
# break
# except:
# await ctx.send("That message doesn't exist. Try again...", delete_after=3)
# while True:
# embed = discord.Embed(title="Reaction role setup", description="3/4\nWhat will be the emoji for your reaction?", color=0x00b2ff)
# msg = await ctx.send(embed=embed)
# try:
# msg = await self.bot.wait_for('message', check=check, timeout=60)
# except asyncio.TimeoutError:
# await ctx.send("Timed out.", delete_after=3)
# return
# if msg.content.lower() == "cancel":
# await ctx.send("Cancelled.", delete_after=3)
# return
# reactionname = msg.content
# try:
# reaction = await message.add_reaction(msg.content)
# break
# except:
# await ctx.send("That emoji is invalid. Try again...", delete_after=3)
# while True:
# embed = discord.Embed(title="Reaction role setup", description="4/4\nWhat role will be given to the user when they react? (Do NOT mention the role. Instead, use the name.", color=0x00b2ff)
# msg = await ctx.send(embed=embed)
# try:
# msg = await self.bot.wait_for('message', check=check, timeout=60)
# except asyncio.TimeoutError:
# await ctx.send("Timed out.", delete_after=3)
# return
# if msg.content.lower() == "cancel":
# await ctx.send("Cancelled.", delete_after=3)
# return
# role = discord.utils.get(ctx.guild.roles, name=msg.content)
# if role != None:
# break
# await ctx.send("That role doesn't exist. Try again...", delete_after=3)
# await self.bot.db.execute("INSERT INTO reactroles VALUES ($1, $2, $3, $4)", message.id, channel.id, role.id, reactionname)
# embed = discord.Embed(title="Reaction role setup", description="Reaction role setup complete.", color=0x00b2ff)
# await ctx.send(embed=embed)
# @commands.command()
# async def qasetup(self, ctx):
# lookup = await self.bot.db.fetchrow("SELECT * FROM qas WHERE guild = $1", ctx.guild.id)
# if lookup != None:
# embed = discord.Embed(title="Error", description="Question and answer are limited to one per server. If you want to change the question and answer, please delete the current one and run this command again.", color=0x00b2ff)
# await ctx.send(embed=embed)
def setup(bot: GoModBot):
bot.add_cog(Moderation(bot))
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PiT train and eval using multiple GPU without teacher model and distillation"""
import sys
import os
import time
import argparse
import random
import math
import numpy as np
import paddle
from datasets import get_dataloader
from datasets import get_dataset
from config import get_config
from config import update_config
from utils import AverageMeter
from utils import get_logger
from utils import write_log
from utils import all_reduce_mean
from utils import skip_weight_decay_fn
from mixup import Mixup
from model_ema import ModelEma
from losses import LabelSmoothingCrossEntropyLoss
from losses import SoftTargetCrossEntropyLoss
from losses import DistillationLoss
from regnet import build_regnet as build_teacher_model
from pit import build_pit as build_model
def get_arguments():
"""return argumeents, this will overwrite the config by (1) yaml file (2) argument values"""
parser = argparse.ArgumentParser('PiT')
parser.add_argument('-cfg', type=str, default=None)
parser.add_argument('-dataset', type=str, default=None)
parser.add_argument('-data_path', type=str, default=None)
parser.add_argument('-output', type=str, default=None)
parser.add_argument('-batch_size', type=int, default=None)
parser.add_argument('-batch_size_eval', type=int, default=None)
parser.add_argument('-image_size', type=int, default=None)
parser.add_argument('-accum_iter', type=int, default=None)
parser.add_argument('-pretrained', type=str, default=None)
parser.add_argument('-teacher_model_path', type=str, default=None)
parser.add_argument('-resume', type=str, default=None)
parser.add_argument('-last_epoch', type=int, default=None)
parser.add_argument('-eval', action='store_true')
parser.add_argument('-amp', action='store_true')
arguments = parser.parse_args()
return arguments
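# Example invocation (the script name, config path and data path below are
# placeholders, not taken from this repo); any argument left at None keeps the
# yaml/config value, anything else overrides it:
#
#   CUDA_VISIBLE_DEVICES=0,1,2,3 python main_multi_gpu.py \
#       -cfg ./configs/pit_ti.yaml \
#       -dataset imagenet2012 \
#       -data_path /path/to/ILSVRC2012 \
#       -batch_size 256 \
#       -amp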
def train(dataloader,
model,
optimizer,
criterion,
epoch,
total_epochs,
total_batches,
debug_steps=100,
accum_iter=1,
model_ema=None,
mixup_fn=None,
amp_grad_scaler=None,
local_logger=None,
master_logger=None):
"""Training for one epoch
Args:
dataloader: paddle.io.DataLoader, dataloader instance
model: nn.Layer, a ViT model
optimizer: nn.optimizer
criterion: nn.XXLoss
epoch: int, current epoch
total_epochs: int, total num of epochs
total_batches: int, total num of batches for one epoch
debug_steps: int, num of iters to log info, default: 100
accum_iter: int, num of iters for accumulating gradients, default: 1
model_ema: ModelEma, model moving average instance
mixup_fn: Mixup, mixup instance, default: None
amp_grad_scaler: GradScaler, if not None pass the GradScaler and enable AMP, default: None
local_logger: logger for local process/gpu, default: None
master_logger: logger for main process, default: None
Returns:
train_loss_meter.avg: float, average loss on current process/gpu
train_acc_meter.avg: float, average acc@1 on current process/gpu
master_loss_meter.avg: float, average loss on all processes/gpus
master_acc_meter.avg: float, average acc@1 on all processes/gpus
train_time: float, training time
"""
time_st = time.time()
train_loss_meter = AverageMeter()
train_acc_meter = AverageMeter()
master_loss_meter = AverageMeter()
master_acc_meter = AverageMeter()
model.train()
optimizer.clear_grad()
for batch_id, data in enumerate(dataloader):
# get data
images = data[0]
label = data[1]
label_orig = label.clone()
batch_size = images.shape[0]
if mixup_fn is not None:
images, label = mixup_fn(images, label_orig)
# forward
with paddle.amp.auto_cast(amp_grad_scaler is not None):
output = model(images)
loss = criterion(images, output, label)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
loss = loss / accum_iter
# backward and step
if amp_grad_scaler is None: # fp32
loss.backward()
if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
optimizer.step()
optimizer.clear_grad()
else: # amp
scaled_loss = amp_grad_scaler.scale(loss)
scaled_loss.backward()
if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
# amp for param group reference: https://github.com/PaddlePaddle/Paddle/issues/37188
amp_grad_scaler.step(optimizer)
amp_grad_scaler.update()
optimizer.clear_grad()
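        # With accum_iter > 1 the loss is pre-divided above and gradients from
        # accum_iter consecutive batches are summed before a single optimizer
        # step, so the effective batch size per update is
        # batch_size * accum_iter * world_size.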
if model_ema is not None and paddle.distributed.get_rank() == 0:
model_ema.update(model)
# average of output and kd_output, same as eval mode
pred = paddle.nn.functional.softmax((output[0] + output[1]) / 2)
acc = paddle.metric.accuracy(pred,
label_orig if mixup_fn else label_orig.unsqueeze(1)).item()
# sync from other gpus for overall loss and acc
master_loss = all_reduce_mean(loss_value)
master_acc = all_reduce_mean(acc)
master_batch_size = all_reduce_mean(batch_size)
master_loss_meter.update(master_loss, master_batch_size)
master_acc_meter.update(master_acc, master_batch_size)
train_loss_meter.update(loss_value, batch_size)
train_acc_meter.update(acc, batch_size)
if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader):
general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], "
f"Step[{batch_id:04d}/{total_batches:04d}], "
f"Lr: {optimizer.get_lr():04f}, ")
local_message = (general_message +
f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), "
f"Avg Acc: {train_acc_meter.avg:.4f}")
master_message = (general_message +
f"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f}), "
f"Avg Acc: {master_acc_meter.avg:.4f}")
write_log(local_logger, master_logger, local_message, master_message)
paddle.distributed.barrier()
train_time = time.time() - time_st
return (train_loss_meter.avg,
train_acc_meter.avg,
master_loss_meter.avg,
master_acc_meter.avg,
train_time)
@paddle.no_grad()
def validate(dataloader,
model,
criterion,
total_batches,
debug_steps=100,
local_logger=None,
master_logger=None):
"""Validation for the whole dataset
Args:
dataloader: paddle.io.DataLoader, dataloader instance
model: nn.Layer, a ViT model
total_batches: int, total num of batches for one epoch
debug_steps: int, num of iters to log info, default: 100
local_logger: logger for local process/gpu, default: None
master_logger: logger for main process, default: None
Returns:
val_loss_meter.avg: float, average loss on current process/gpu
val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus
val_acc5_meter.avg: float, average top5 accuracy on current processes/gpus
master_loss_meter.avg: float, average loss on all processes/gpus
master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus
master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus
val_time: float, validation time
"""
model.eval()
val_loss_meter = AverageMeter()
val_acc1_meter = AverageMeter()
val_acc5_meter = AverageMeter()
master_loss_meter = AverageMeter()
master_acc1_meter = AverageMeter()
master_acc5_meter = AverageMeter()
time_st = time.time()
for batch_id, data in enumerate(dataloader):
# get data
images = data[0]
label = data[1]
batch_size = images.shape[0]
output = model(images)
loss = criterion(output, label)
loss_value = loss.item()
pred = paddle.nn.functional.softmax(output)
acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item()
acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item()
# sync from other gpus for overall loss and acc
master_loss = all_reduce_mean(loss_value)
master_acc1 = all_reduce_mean(acc1)
master_acc5 = all_reduce_mean(acc5)
master_batch_size = all_reduce_mean(batch_size)
master_loss_meter.update(master_loss, master_batch_size)
master_acc1_meter.update(master_acc1, master_batch_size)
master_acc5_meter.update(master_acc5, master_batch_size)
val_loss_meter.update(loss_value, batch_size)
val_acc1_meter.update(acc1, batch_size)
val_acc5_meter.update(acc5, batch_size)
if batch_id % debug_steps == 0:
local_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
f"Avg Loss: {val_loss_meter.avg:.4f}, "
f"Avg Acc@1: {val_acc1_meter.avg:.4f}, "
f"Avg Acc@5: {val_acc5_meter.avg:.4f}")
master_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
f"Avg Loss: {master_loss_meter.avg:.4f}, "
f"Avg Acc@1: {master_acc1_meter.avg:.4f}, "
f"Avg Acc@5: {master_acc5_meter.avg:.4f}")
write_log(local_logger, master_logger, local_message, master_message)
paddle.distributed.barrier()
val_time = time.time() - time_st
return (val_loss_meter.avg,
val_acc1_meter.avg,
val_acc5_meter.avg,
master_loss_meter.avg,
master_acc1_meter.avg,
master_acc5_meter.avg,
val_time)
def main_worker(*args):
"""main method for each process"""
# STEP 0: Preparation
paddle.device.set_device('gpu')
paddle.distributed.init_parallel_env()
world_size = paddle.distributed.get_world_size()
local_rank = paddle.distributed.get_rank()
config = args[0]
last_epoch = config.TRAIN.LAST_EPOCH
seed = config.SEED + local_rank
paddle.seed(seed)
np.random.seed(seed)
random.seed(seed)
local_logger, master_logger = get_logger(config.SAVE)
message = (f'----- world_size = {world_size}, local_rank = {local_rank} \n'
f'----- {config}')
write_log(local_logger, master_logger, message)
# STEP 1: Create model
model = build_model(config)
# define model ema
model_ema = None
if not config.EVAL and config.TRAIN.MODEL_EMA and local_rank == 0:
model_ema = ModelEma(model, decay=config.TRAIN.MODEL_EMA_DECAY)
if config.TRAIN.MODEL_EMA_FORCE_CPU:
model_ema.to('cpu')
# STEP 2: Create train and val dataloader
if not config.EVAL:
dataset_train = args[1]
dataloader_train = get_dataloader(config, dataset_train, True, True)
total_batch_train = len(dataloader_train)
message = f'----- Total # of train batch (single gpu): {total_batch_train}'
write_log(local_logger, master_logger, message)
dataset_val = args[2]
dataloader_val = get_dataloader(config, dataset_val, False, True)
total_batch_val = len(dataloader_val)
message = f'----- Total # of val batch (single gpu): {total_batch_val}'
write_log(local_logger, master_logger, message)
# STEP 3: (Optional) Define Mixup function
mixup_fn = None
if (config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or
config.TRAIN.CUTMIX_MINMAX is not None):
mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,
cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,
cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,
prob=config.TRAIN.MIXUP_PROB,
switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,
mode=config.TRAIN.MIXUP_MODE,
                         label_smoothing=config.TRAIN.SMOOTHING)
# STEP 4: Define loss/criterion
if mixup_fn is not None:
criterion = SoftTargetCrossEntropyLoss()
elif config.TRAIN.SMOOTHING:
criterion = LabelSmoothingCrossEntropyLoss()
else:
criterion = paddle.nn.CrossEntropyLoss()
# Use CrossEntropyLoss for val
criterion_val = paddle.nn.CrossEntropyLoss()
# STEP 5: Create Teacher model and distill loss
teacher_model = None
if not config.EVAL:
if config.TRAIN.DISTILLATION_TYPE != 'none':
write_log(local_logger, master_logger,
f'----- Load teacher model: {config.TRAIN.TEACHER_MODEL}')
teacher_model = build_teacher_model()
assert os.path.isfile(config.TRAIN.TEACHER_MODEL)
teacher_model_state = paddle.load(config.TRAIN.TEACHER_MODEL)
teacher_model.set_state_dict(teacher_model_state)
teacher_model.eval()
teacher_model = paddle.DataParallel(teacher_model)
# wrap the criterion:
criterion = DistillationLoss(criterion,
teacher_model,
config.TRAIN.DISTILLATION_TYPE,
config.TRAIN.DISTILLATION_ALPHA,
config.TRAIN.DISTILLATION_TAU)
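            # Presumably DeiT-style: the wrapper keeps the base criterion on the
            # class-token output and adds a distillation term against the frozen
            # RegNet teacher's logits, weighted by DISTILLATION_ALPHA with
            # temperature DISTILLATION_TAU ('soft' = KL divergence on softened
            # logits, 'hard' = cross entropy on the teacher's argmax). See
            # losses.DistillationLoss for the actual definition.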
    # STEP 6: Define optimizer and lr_scheduler
if not config.EVAL:
# set lr according to batch size and world size
if config.TRAIN.LINEAR_SCALED_LR is not None:
effective_batch_size = config.DATA.BATCH_SIZE * config.TRAIN.ACCUM_ITER * world_size
config.TRAIN.BASE_LR = (
config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
config.TRAIN.WARMUP_START_LR = (
                config.TRAIN.WARMUP_START_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
config.TRAIN.END_LR = (
config.TRAIN.END_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
message = (f'Base lr is scaled to: {config.TRAIN.BASE_LR}, '
f'warmup start lr is scaled to: {config.TRAIN.WARMUP_START_LR}, '
                       f'end lr is scaled to: {config.TRAIN.END_LR}')
write_log(local_logger, master_logger, message)
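            # Worked example (illustrative numbers, not from any config here):
            # BASE_LR = 5e-4, BATCH_SIZE = 128 per GPU, ACCUM_ITER = 1,
            # world_size = 8, LINEAR_SCALED_LR = 512 gives an effective batch
            # size of 128 * 1 * 8 = 1024, so BASE_LR becomes
            # 5e-4 * 1024 / 512 = 1e-3; WARMUP_START_LR and END_LR are scaled
            # by the same factor.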
# define scaler for amp training
amp_grad_scaler = paddle.amp.GradScaler() if config.AMP else None
# warmup + cosine lr scheduler
if config.TRAIN.WARMUP_EPOCHS > 0:
cosine_lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=config.TRAIN.BASE_LR,
T_max=config.TRAIN.NUM_EPOCHS - config.TRAIN.WARMUP_EPOCHS,
eta_min=config.TRAIN.END_LR,
last_epoch=-1) # do not set last epoch, handled in warmup sched get_lr()
lr_scheduler = paddle.optimizer.lr.LinearWarmup(
learning_rate=cosine_lr_scheduler, # use cosine lr sched after warmup
                warmup_steps=config.TRAIN.WARMUP_EPOCHS,  # only supports positive integers
start_lr=config.TRAIN.WARMUP_START_LR,
end_lr=config.TRAIN.BASE_LR,
last_epoch=config.TRAIN.LAST_EPOCH)
else:
lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=config.TRAIN.BASE_LR,
T_max=config.TRAIN.NUM_EPOCHS,
eta_min=config.TRAIN.END_LR,
last_epoch=config.TRAIN.LAST_EPOCH)
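        # Net effect of the two branches above: with warmup, the LR ramps
        # linearly from WARMUP_START_LR to BASE_LR over WARMUP_EPOCHS scheduler
        # steps (one step per epoch here), then follows a cosine curve down to
        # END_LR over the remaining epochs; without warmup it is a plain cosine
        # decay from BASE_LR to END_LR. Illustrative values: WARMUP_START_LR=1e-6,
        # BASE_LR=1e-3, END_LR=1e-5, WARMUP_EPOCHS=5, NUM_EPOCHS=300 gives a
        # 5-epoch ramp followed by a 295-epoch cosine decay.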
# set gradient clip
if config.TRAIN.GRAD_CLIP:
clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)
else:
clip = None
# set optimizer
optimizer = paddle.optimizer.AdamW(
parameters=model.parameters(),
learning_rate=lr_scheduler, # set to scheduler
beta1=config.TRAIN.OPTIMIZER.BETAS[0],
beta2=config.TRAIN.OPTIMIZER.BETAS[1],
weight_decay=config.TRAIN.WEIGHT_DECAY,
epsilon=config.TRAIN.OPTIMIZER.EPS,
grad_clip=clip,
apply_decay_param_fun=skip_weight_decay_fn(
model, # skip bn and bias
['pos_embed', 'cls_token', 'dist_token']), # skip custom ops
)
    # STEP 7: (Optional) Load pretrained model weights for evaluation or finetuning
if config.MODEL.PRETRAINED:
assert os.path.isfile(config.MODEL.PRETRAINED) is True
model_state = paddle.load(config.MODEL.PRETRAINED)
        if 'model' in model_state:  # load state_dict with multi items: model, optimizer, and epoch
# pretrain only load model weight, opt and epoch are ignored
if 'model_ema' in model_state:
model_state = model_state['model_ema']
else:
model_state = model_state['model']
model.set_state_dict(model_state)
message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}"
write_log(local_logger, master_logger, message)
    # STEP 8: (Optional) Load model weights and status for resume training
if config.MODEL.RESUME:
assert os.path.isfile(config.MODEL.RESUME) is True
model_state = paddle.load(config.MODEL.RESUME)
        if 'model' in model_state:  # load state_dict with multi items: model, optimizer, and epoch
model.set_state_dict(model_state['model'])
if 'optimizer' in model_state:
optimizer.set_state_dict(model_state['optimizer'])
if 'epoch' in model_state:
config.TRAIN.LAST_EPOCH = model_state['epoch']
last_epoch = model_state['epoch']
if 'lr_scheduler' in model_state:
lr_scheduler.set_state_dict(model_state['lr_scheduler'])
if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None:
amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler'])
if config.TRAIN.MODEL_EMA and local_rank == 0:
model_ema.module.set_state_dict(model_state['model_ema'])
lr_scheduler.step(last_epoch + 1)
message = (f"----- Resume Training: Load model from {config.MODEL.RESUME}, w/t "
f"opt = [{"optimizer" in model_state}], "
f"lr_scheduler = [{"lr_scheduler" in model_state}], "
f"model_ema = [{"model_ema" in model_state}], "
f"epoch = [{model_state.get("epoch", -1)}], "
f"amp_grad_scaler = [{"amp_grad_scaler" in model_state}]")
write_log(local_logger, master_logger, message)
else: # direct load pdparams without other items
message = f"----- Resume Training: Load {config.MODEL.RESUME}, w/o opt/epoch/scaler"
write_log(local_logger, master_logger, message, 'warning')
model.set_state_dict(model_state)
lr_scheduler.step(last_epoch + 1)
    # STEP 9: Enable model data parallelism on multi processes
model = paddle.DataParallel(model)
    # STEP 10: (Optional) Run evaluation and return
if config.EVAL:
write_log(local_logger, master_logger, "----- Start Validation")
val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(
dataloader=dataloader_val,
model=model,
criterion=criterion_val,
total_batches=total_batch_val,
debug_steps=config.REPORT_FREQ,
local_logger=local_logger,
master_logger=master_logger)
local_message = ("----- Validation: " +
f"Validation Loss: {val_loss:.4f}, " +
f"Validation Acc@1: {val_acc1:.4f}, " +
f"Validation Acc@5: {val_acc5:.4f}, " +
f"time: {val_time:.2f}")
master_message = ("----- Validation: " +
f"Validation Loss: {avg_loss:.4f}, " +
f"Validation Acc@1: {avg_acc1:.4f}, " +
f"Validation Acc@5: {avg_acc5:.4f}, " +
f"time: {val_time:.2f}")
write_log(local_logger, master_logger, local_message, master_message)
return
    # STEP 11: Run training
write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.")
for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1):
# Train one epoch
write_log(local_logger, master_logger, f"Train epoch {epoch}. LR={optimizer.get_lr():.6e}")
train_loss, train_acc, avg_loss, avg_acc, train_time = train(
dataloader=dataloader_train,
model=model,
optimizer=optimizer,
criterion=criterion,
epoch=epoch,
total_epochs=config.TRAIN.NUM_EPOCHS,
total_batches=total_batch_train,
debug_steps=config.REPORT_FREQ,
accum_iter=config.TRAIN.ACCUM_ITER,
model_ema=model_ema,
mixup_fn=mixup_fn,
amp_grad_scaler=amp_grad_scaler,
local_logger=local_logger,
master_logger=master_logger)
# update lr
lr_scheduler.step()
general_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], "
f"Lr: {optimizer.get_lr():.4f}, "
f"time: {train_time:.2f}, ")
local_message = (general_message +
f"Train Loss: {train_loss:.4f}, "
f"Train Acc: {train_acc:.4f}")
master_message = (general_message +
f"Train Loss: {avg_loss:.4f}, "
f"Train Acc: {avg_acc:.4f}")
write_log(local_logger, master_logger, local_message, master_message)
# Evaluation (optional)
if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}')
val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(
dataloader=dataloader_val,
model=model,
criterion=criterion_val,
total_batches=total_batch_val,
debug_steps=config.REPORT_FREQ,
local_logger=local_logger,
master_logger=master_logger)
local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Validation Loss: {val_loss:.4f}, " +
f"Validation Acc@1: {val_acc1:.4f}, " +
f"Validation Acc@5: {val_acc5:.4f}, " +
f"time: {val_time:.2f}")
master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Validation Loss: {avg_loss:.4f}, " +
f"Validation Acc@1: {avg_acc1:.4f}, " +
f"Validation Acc@5: {avg_acc5:.4f}, " +
f"time: {val_time:.2f}")
write_log(local_logger, master_logger, local_message, master_message)
# Save model weights and training status
if local_rank == 0:
if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
model_path = os.path.join(
config.SAVE, f"Epoch-{epoch}-Loss-{avg_loss}.pdparams")
state_dict = dict()
state_dict['model'] = model.state_dict()
if model_ema is not None:
state_dict['model_ema'] = model_ema.state_dict()
state_dict['optimizer'] = optimizer.state_dict()
state_dict['epoch'] = epoch
if lr_scheduler is not None:
state_dict['lr_scheduler'] = lr_scheduler.state_dict()
if amp_grad_scaler is not None:
state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict()
paddle.save(state_dict, model_path)
message = (f"----- Save model: {model_path}")
write_log(local_logger, master_logger, message)
def main():
# config is updated in order: (1) default in config.py, (2) yaml file, (3) arguments
config = update_config(get_config(), get_arguments())
# set output folder
    config.SAVE = os.path.join(config.SAVE,
        f"{'eval' if config.EVAL else 'train'}-{time.strftime('%Y%m%d-%H-%M')}")
if not os.path.exists(config.SAVE):
os.makedirs(config.SAVE, exist_ok=True)
# get train dataset if in train mode and val dataset
dataset_train = get_dataset(config, is_train=True) if not config.EVAL else None
dataset_val = get_dataset(config, is_train=False)
    # distributed spawn launch: use CUDA_VISIBLE_DEVICES to set available gpus
paddle.distributed.spawn(main_worker, args=(config, dataset_train, dataset_val))
if __name__ == "__main__":
main()
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PiT train and eval using multiple GPU without teacher model and distillation"""
import sys
import os
import time
import argparse
import random
import math
import numpy as np
import paddle
from datasets import get_dataloader
from datasets import get_dataset
from config import get_config
from config import update_config
from utils import AverageMeter
from utils import get_logger
from utils import write_log
from utils import all_reduce_mean
from utils import skip_weight_decay_fn
from mixup import Mixup
from model_ema import ModelEma
from losses import LabelSmoothingCrossEntropyLoss
from losses import SoftTargetCrossEntropyLoss
from losses import DistillationLoss
from regnet import build_regnet as build_teacher_model
from pit import build_pit as build_model
def get_arguments():
"""return argumeents, this will overwrite the config by (1) yaml file (2) argument values"""
parser = argparse.ArgumentParser('PiT')
parser.add_argument('-cfg', type=str, default=None)
parser.add_argument('-dataset', type=str, default=None)
parser.add_argument('-data_path', type=str, default=None)
parser.add_argument('-output', type=str, default=None)
parser.add_argument('-batch_size', type=int, default=None)
parser.add_argument('-batch_size_eval', type=int, default=None)
parser.add_argument('-image_size', type=int, default=None)
parser.add_argument('-accum_iter', type=int, default=None)
parser.add_argument('-pretrained', type=str, default=None)
parser.add_argument('-teacher_model_path', type=str, default=None)
parser.add_argument('-resume', type=str, default=None)
parser.add_argument('-last_epoch', type=int, default=None)
parser.add_argument('-eval', action='store_true')
parser.add_argument('-amp', action='store_true')
arguments = parser.parse_args()
return arguments
def train(dataloader,
model,
optimizer,
criterion,
epoch,
total_epochs,
total_batches,
debug_steps=100,
accum_iter=1,
model_ema=None,
mixup_fn=None,
amp_grad_scaler=None,
local_logger=None,
master_logger=None):
"""Training for one epoch
Args:
dataloader: paddle.io.DataLoader, dataloader instance
model: nn.Layer, a ViT model
optimizer: nn.optimizer
criterion: nn.XXLoss
epoch: int, current epoch
total_epochs: int, total num of epochs
total_batches: int, total num of batches for one epoch
debug_steps: int, num of iters to log info, default: 100
accum_iter: int, num of iters for accumulating gradients, default: 1
model_ema: ModelEma, model moving average instance
mixup_fn: Mixup, mixup instance, default: None
amp_grad_scaler: GradScaler, if not None pass the GradScaler and enable AMP, default: None
local_logger: logger for local process/gpu, default: None
master_logger: logger for main process, default: None
Returns:
train_loss_meter.avg: float, average loss on current process/gpu
train_acc_meter.avg: float, average acc@1 on current process/gpu
master_loss_meter.avg: float, average loss on all processes/gpus
master_acc_meter.avg: float, average acc@1 on all processes/gpus
train_time: float, training time
"""
time_st = time.time()
train_loss_meter = AverageMeter()
train_acc_meter = AverageMeter()
master_loss_meter = AverageMeter()
master_acc_meter = AverageMeter()
model.train()
optimizer.clear_grad()
for batch_id, data in enumerate(dataloader):
# get data
images = data[0]
label = data[1]
label_orig = label.clone()
batch_size = images.shape[0]
if mixup_fn is not None:
images, label = mixup_fn(images, label_orig)
# forward
with paddle.amp.auto_cast(amp_grad_scaler is not None):
output = model(images)
loss = criterion(images, output, label)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
loss = loss / accum_iter
# backward and step
if amp_grad_scaler is None: # fp32
loss.backward()
if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
optimizer.step()
optimizer.clear_grad()
else: # amp
scaled_loss = amp_grad_scaler.scale(loss)
scaled_loss.backward()
if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
# amp for param group reference: https://github.com/PaddlePaddle/Paddle/issues/37188
amp_grad_scaler.step(optimizer)
amp_grad_scaler.update()
optimizer.clear_grad()
if model_ema is not None and paddle.distributed.get_rank() == 0:
model_ema.update(model)
# average of output and kd_output, same as eval mode
pred = paddle.nn.functional.softmax((output[0] + output[1]) / 2)
acc = paddle.metric.accuracy(pred,
label_orig if mixup_fn else label_orig.unsqueeze(1)).item()
# sync from other gpus for overall loss and acc
master_loss = all_reduce_mean(loss_value)
master_acc = all_reduce_mean(acc)
master_batch_size = all_reduce_mean(batch_size)
master_loss_meter.update(master_loss, master_batch_size)
master_acc_meter.update(master_acc, master_batch_size)
train_loss_meter.update(loss_value, batch_size)
train_acc_meter.update(acc, batch_size)
if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader):
general_message = (f"Epoch[{epoch:03d}/{total_epochs:03d}], "
f"Step[{batch_id:04d}/{total_batches:04d}], "
f"Lr: {optimizer.get_lr():04f}, ")
local_message = (general_message +
f"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), "
f"Avg Acc: {train_acc_meter.avg:.4f}")
master_message = (general_message +
f"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f}), "
f"Avg Acc: {master_acc_meter.avg:.4f}")
write_log(local_logger, master_logger, local_message, master_message)
paddle.distributed.barrier()
train_time = time.time() - time_st
return (train_loss_meter.avg,
train_acc_meter.avg,
master_loss_meter.avg,
master_acc_meter.avg,
train_time)
@paddle.no_grad()
def validate(dataloader,
model,
criterion,
total_batches,
debug_steps=100,
local_logger=None,
master_logger=None):
"""Validation for the whole dataset
Args:
dataloader: paddle.io.DataLoader, dataloader instance
        model: nn.Layer, a PiT model
        criterion: nn.Layer, loss function for validation
        total_batches: int, total num of batches for one epoch
debug_steps: int, num of iters to log info, default: 100
local_logger: logger for local process/gpu, default: None
master_logger: logger for main process, default: None
Returns:
val_loss_meter.avg: float, average loss on current process/gpu
        val_acc1_meter.avg: float, average top1 accuracy on current process/gpu
        val_acc5_meter.avg: float, average top5 accuracy on current process/gpu
master_loss_meter.avg: float, average loss on all processes/gpus
master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus
master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus
val_time: float, validation time
"""
model.eval()
val_loss_meter = AverageMeter()
val_acc1_meter = AverageMeter()
val_acc5_meter = AverageMeter()
master_loss_meter = AverageMeter()
master_acc1_meter = AverageMeter()
master_acc5_meter = AverageMeter()
time_st = time.time()
for batch_id, data in enumerate(dataloader):
# get data
images = data[0]
label = data[1]
batch_size = images.shape[0]
output = model(images)
loss = criterion(output, label)
loss_value = loss.item()
pred = paddle.nn.functional.softmax(output)
acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item()
acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item()
# sync from other gpus for overall loss and acc
master_loss = all_reduce_mean(loss_value)
master_acc1 = all_reduce_mean(acc1)
master_acc5 = all_reduce_mean(acc5)
master_batch_size = all_reduce_mean(batch_size)
master_loss_meter.update(master_loss, master_batch_size)
master_acc1_meter.update(master_acc1, master_batch_size)
master_acc5_meter.update(master_acc5, master_batch_size)
val_loss_meter.update(loss_value, batch_size)
val_acc1_meter.update(acc1, batch_size)
val_acc5_meter.update(acc5, batch_size)
if batch_id % debug_steps == 0:
local_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
f"Avg Loss: {val_loss_meter.avg:.4f}, "
f"Avg Acc@1: {val_acc1_meter.avg:.4f}, "
f"Avg Acc@5: {val_acc5_meter.avg:.4f}")
master_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
f"Avg Loss: {master_loss_meter.avg:.4f}, "
f"Avg Acc@1: {master_acc1_meter.avg:.4f}, "
f"Avg Acc@5: {master_acc5_meter.avg:.4f}")
write_log(local_logger, master_logger, local_message, master_message)
paddle.distributed.barrier()
val_time = time.time() - time_st
return (val_loss_meter.avg,
val_acc1_meter.avg,
val_acc5_meter.avg,
master_loss_meter.avg,
master_acc1_meter.avg,
master_acc5_meter.avg,
val_time)
def main_worker(*args):
"""main method for each process"""
# STEP 0: Preparation
paddle.device.set_device('gpu')
paddle.distributed.init_parallel_env()
world_size = paddle.distributed.get_world_size()
local_rank = paddle.distributed.get_rank()
config = args[0]
last_epoch = config.TRAIN.LAST_EPOCH
seed = config.SEED + local_rank
paddle.seed(seed)
np.random.seed(seed)
random.seed(seed)
local_logger, master_logger = get_logger(config.SAVE)
message = (f'----- world_size = {world_size}, local_rank = {local_rank} \n'
f'----- {config}')
write_log(local_logger, master_logger, message)
# STEP 1: Create model
model = build_model(config)
# define model ema
model_ema = None
if not config.EVAL and config.TRAIN.MODEL_EMA and local_rank == 0:
model_ema = ModelEma(model, decay=config.TRAIN.MODEL_EMA_DECAY)
if config.TRAIN.MODEL_EMA_FORCE_CPU:
model_ema.to('cpu')
# STEP 2: Create train and val dataloader
if not config.EVAL:
dataset_train = args[1]
dataloader_train = get_dataloader(config, dataset_train, True, True)
total_batch_train = len(dataloader_train)
message = f'----- Total # of train batch (single gpu): {total_batch_train}'
write_log(local_logger, master_logger, message)
dataset_val = args[2]
dataloader_val = get_dataloader(config, dataset_val, False, True)
total_batch_val = len(dataloader_val)
message = f'----- Total # of val batch (single gpu): {total_batch_val}'
write_log(local_logger, master_logger, message)
# STEP 3: (Optional) Define Mixup function
mixup_fn = None
if (config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or
config.TRAIN.CUTMIX_MINMAX is not None):
mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,
cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,
cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,
prob=config.TRAIN.MIXUP_PROB,
switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,
mode=config.TRAIN.MIXUP_MODE,
                         label_smoothing=config.TRAIN.SMOOTHING)
# STEP 4: Define loss/criterion
if mixup_fn is not None:
criterion = SoftTargetCrossEntropyLoss()
elif config.TRAIN.SMOOTHING:
criterion = LabelSmoothingCrossEntropyLoss()
else:
criterion = paddle.nn.CrossEntropyLoss()
# Use CrossEntropyLoss for val
criterion_val = paddle.nn.CrossEntropyLoss()
# STEP 5: Create Teacher model and distill loss
teacher_model = None
if not config.EVAL:
if config.TRAIN.DISTILLATION_TYPE != 'none':
write_log(local_logger, master_logger,
f'----- Load teacher model: {config.TRAIN.TEACHER_MODEL}')
teacher_model = build_teacher_model()
assert os.path.isfile(config.TRAIN.TEACHER_MODEL)
teacher_model_state = paddle.load(config.TRAIN.TEACHER_MODEL)
teacher_model.set_state_dict(teacher_model_state)
teacher_model.eval()
teacher_model = paddle.DataParallel(teacher_model)
# wrap the criterion:
criterion = DistillationLoss(criterion,
teacher_model,
config.TRAIN.DISTILLATION_TYPE,
config.TRAIN.DISTILLATION_ALPHA,
config.TRAIN.DISTILLATION_TAU)
    # STEP 6: Define optimizer and lr_scheduler
if not config.EVAL:
# set lr according to batch size and world size
if config.TRAIN.LINEAR_SCALED_LR is not None:
effective_batch_size = config.DATA.BATCH_SIZE * config.TRAIN.ACCUM_ITER * world_size
config.TRAIN.BASE_LR = (
config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
config.TRAIN.WARMUP_START_LR = (
            config.TRAIN.WARMUP_START_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
config.TRAIN.END_LR = (
config.TRAIN.END_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR
)
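            # Worked example (hypothetical numbers): with BATCH_SIZE=128, ACCUM_ITER=2,
            # 4 GPUs and LINEAR_SCALED_LR=512, the effective batch size is
            # 128 * 2 * 4 = 1024, so BASE_LR, WARMUP_START_LR and END_LR are each
            # multiplied by 1024 / 512 = 2.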
message = (f'Base lr is scaled to: {config.TRAIN.BASE_LR}, '
f'warmup start lr is scaled to: {config.TRAIN.WARMUP_START_LR}, '
                       f'end lr is scaled to: {config.TRAIN.END_LR}')
write_log(local_logger, master_logger, message)
# define scaler for amp training
amp_grad_scaler = paddle.amp.GradScaler() if config.AMP else None
# warmup + cosine lr scheduler
if config.TRAIN.WARMUP_EPOCHS > 0:
cosine_lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=config.TRAIN.BASE_LR,
T_max=config.TRAIN.NUM_EPOCHS - config.TRAIN.WARMUP_EPOCHS,
eta_min=config.TRAIN.END_LR,
last_epoch=-1) # do not set last epoch, handled in warmup sched get_lr()
lr_scheduler = paddle.optimizer.lr.LinearWarmup(
learning_rate=cosine_lr_scheduler, # use cosine lr sched after warmup
            warmup_steps=config.TRAIN.WARMUP_EPOCHS, # only supports positive integers
start_lr=config.TRAIN.WARMUP_START_LR,
end_lr=config.TRAIN.BASE_LR,
last_epoch=config.TRAIN.LAST_EPOCH)
else:
lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=config.TRAIN.BASE_LR,
T_max=config.TRAIN.NUM_EPOCHS,
eta_min=config.TRAIN.END_LR,
last_epoch=config.TRAIN.LAST_EPOCH)
# set gradient clip
if config.TRAIN.GRAD_CLIP:
clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)
else:
clip = None
# set optimizer
optimizer = paddle.optimizer.AdamW(
parameters=model.parameters(),
learning_rate=lr_scheduler, # set to scheduler
beta1=config.TRAIN.OPTIMIZER.BETAS[0],
beta2=config.TRAIN.OPTIMIZER.BETAS[1],
weight_decay=config.TRAIN.WEIGHT_DECAY,
epsilon=config.TRAIN.OPTIMIZER.EPS,
grad_clip=clip,
apply_decay_param_fun=skip_weight_decay_fn(
model, # skip bn and bias
['pos_embed', 'cls_token', 'dist_token']), # skip custom ops
)
    # STEP 7: (Optional) Load pretrained model weights for evaluation or finetuning
if config.MODEL.PRETRAINED:
assert os.path.isfile(config.MODEL.PRETRAINED) is True
model_state = paddle.load(config.MODEL.PRETRAINED)
        if 'model' in model_state: # load state_dict with multiple items: model, optimizer, and epoch
            # pretraining only loads the model weights; optimizer and epoch are ignored
if 'model_ema' in model_state:
model_state = model_state['model_ema']
else:
model_state = model_state['model']
model.set_state_dict(model_state)
message = f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}"
write_log(local_logger, master_logger, message)
    # STEP 8: (Optional) Load model weights and status for resume training
if config.MODEL.RESUME:
assert os.path.isfile(config.MODEL.RESUME) is True
model_state = paddle.load(config.MODEL.RESUME)
        if 'model' in model_state: # load state_dict with multiple items: model, optimizer, and epoch
model.set_state_dict(model_state['model'])
if 'optimizer' in model_state:
optimizer.set_state_dict(model_state['optimizer'])
if 'epoch' in model_state:
config.TRAIN.LAST_EPOCH = model_state['epoch']
last_epoch = model_state['epoch']
if 'lr_scheduler' in model_state:
lr_scheduler.set_state_dict(model_state['lr_scheduler'])
if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None:
amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler'])
if config.TRAIN.MODEL_EMA and local_rank == 0:
model_ema.module.set_state_dict(model_state['model_ema'])
lr_scheduler.step(last_epoch + 1)
message = (f"----- Resume Training: Load model from {config.MODEL.RESUME}, w/t "
f"opt = [{'optimizer' in model_state}], "
f"lr_scheduler = [{'lr_scheduler' in model_state}], "
f"model_ema = [{'model_ema' in model_state}], "
f"epoch = [{model_state.get('epoch', -1)}], "
f"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]")
write_log(local_logger, master_logger, message)
else: # direct load pdparams without other items
message = f"----- Resume Training: Load {config.MODEL.RESUME}, w/o opt/epoch/scaler"
write_log(local_logger, master_logger, message, 'warning')
model.set_state_dict(model_state)
lr_scheduler.step(last_epoch + 1)
    # STEP 9: Enable model data parallelism on multiple processes
model = paddle.DataParallel(model)
    # STEP 10: (Optional) Run evaluation and return
if config.EVAL:
write_log(local_logger, master_logger, "----- Start Validation")
val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(
dataloader=dataloader_val,
model=model,
criterion=criterion_val,
total_batches=total_batch_val,
debug_steps=config.REPORT_FREQ,
local_logger=local_logger,
master_logger=master_logger)
local_message = ("----- Validation: " +
f"Validation Loss: {val_loss:.4f}, " +
f"Validation Acc@1: {val_acc1:.4f}, " +
f"Validation Acc@5: {val_acc5:.4f}, " +
f"time: {val_time:.2f}")
master_message = ("----- Validation: " +
f"Validation Loss: {avg_loss:.4f}, " +
f"Validation Acc@1: {avg_acc1:.4f}, " +
f"Validation Acc@5: {avg_acc5:.4f}, " +
f"time: {val_time:.2f}")
write_log(local_logger, master_logger, local_message, master_message)
return
    # STEP 11: Run training
write_log(local_logger, master_logger, f"----- Start training from epoch {last_epoch+1}.")
for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1):
# Train one epoch
write_log(local_logger, master_logger, f"Train epoch {epoch}. LR={optimizer.get_lr():.6e}")
train_loss, train_acc, avg_loss, avg_acc, train_time = train(
dataloader=dataloader_train,
model=model,
optimizer=optimizer,
criterion=criterion,
epoch=epoch,
total_epochs=config.TRAIN.NUM_EPOCHS,
total_batches=total_batch_train,
debug_steps=config.REPORT_FREQ,
accum_iter=config.TRAIN.ACCUM_ITER,
model_ema=model_ema,
mixup_fn=mixup_fn,
amp_grad_scaler=amp_grad_scaler,
local_logger=local_logger,
master_logger=master_logger)
# update lr
lr_scheduler.step()
general_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], "
f"Lr: {optimizer.get_lr():.4f}, "
f"time: {train_time:.2f}, ")
local_message = (general_message +
f"Train Loss: {train_loss:.4f}, "
f"Train Acc: {train_acc:.4f}")
master_message = (general_message +
f"Train Loss: {avg_loss:.4f}, "
f"Train Acc: {avg_acc:.4f}")
write_log(local_logger, master_logger, local_message, master_message)
# Evaluation (optional)
if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}')
val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(
dataloader=dataloader_val,
model=model,
criterion=criterion_val,
total_batches=total_batch_val,
debug_steps=config.REPORT_FREQ,
local_logger=local_logger,
master_logger=master_logger)
local_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Validation Loss: {val_loss:.4f}, " +
f"Validation Acc@1: {val_acc1:.4f}, " +
f"Validation Acc@5: {val_acc5:.4f}, " +
f"time: {val_time:.2f}")
master_message = (f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Validation Loss: {avg_loss:.4f}, " +
f"Validation Acc@1: {avg_acc1:.4f}, " +
f"Validation Acc@5: {avg_acc5:.4f}, " +
f"time: {val_time:.2f}")
write_log(local_logger, master_logger, local_message, master_message)
# Save model weights and training status
if local_rank == 0:
if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
model_path = os.path.join(
config.SAVE, f"Epoch-{epoch}-Loss-{avg_loss}.pdparams")
state_dict = dict()
state_dict['model'] = model.state_dict()
if model_ema is not None:
state_dict['model_ema'] = model_ema.state_dict()
state_dict['optimizer'] = optimizer.state_dict()
state_dict['epoch'] = epoch
if lr_scheduler is not None:
state_dict['lr_scheduler'] = lr_scheduler.state_dict()
if amp_grad_scaler is not None:
state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict()
paddle.save(state_dict, model_path)
message = (f"----- Save model: {model_path}")
write_log(local_logger, master_logger, message)
def main():
# config is updated in order: (1) default in config.py, (2) yaml file, (3) arguments
config = update_config(get_config(), get_arguments())
# set output folder
config.SAVE = os.path.join(config.SAVE,
f"{'eval' if config.EVAL else 'train'}-{time.strftime('%Y%m%d-%H-%M')}")
if not os.path.exists(config.SAVE):
os.makedirs(config.SAVE, exist_ok=True)
# get train dataset if in train mode and val dataset
dataset_train = get_dataset(config, is_train=True) if not config.EVAL else None
dataset_val = get_dataset(config, is_train=False)
    # dist spawn launch: use CUDA_VISIBLE_DEVICES to set available gpus
paddle.distributed.spawn(main_worker, args=(config, dataset_train, dataset_val))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
import os
import re
import sys
import time
import packaging.version
import requests
PROJECT = "praw"
HEADERS = {"Authorization": f"token {os.environ.get("READTHEDOCS_TOKEN")}"}
def fetch_versions():
response = requests.get(
f"https://readthedocs.org/api/v3/projects/{PROJECT}/versions?active=true",
headers=HEADERS,
)
versions = None
if response.status_code == 200:
active_versions = response.json()
versions = [
packaging.version.parse(slug["slug"].strip("v"))
for slug in active_versions["results"]
if not slug["hidden"] and not slug["slug"] in ["stable", "latest"]
]
if versions is None:
sys.stderr.write("Failed to get current active versions\n")
return versions
def main():
with open(f"{PROJECT}/const.py") as fp:
current_version = packaging.version.parse(
re.search('__version__ = "([^"]+)"', fp.read()).group(1)
)
if current_version.is_devrelease:
current_version = packaging.version.parse(
f"{current_version.major}.{current_version.minor}.{current_version.micro - 1}"
)
versions = fetch_versions()
if versions is None:
return 1
max_retry_count = 5
retry_count = 0
while current_version not in versions:
versions = fetch_versions()
if versions is None:
return 1
if current_version in versions:
break
else:
if retry_count >= max_retry_count:
sys.stderr.write(
f"Current version {current_version!s} failed to build\n"
)
return 1
sys.stdout.write("Waiting 30 seconds for build to finish\n")
retry_count += 1
time.sleep(30)
aggregated_versions = {}
for version in versions:
aggregated_versions.setdefault(version.major, [])
aggregated_versions[version.major].append(version)
latest_major_versions = [
max(aggregated_versions[major]) for major in aggregated_versions
]
major_versions = [version.major for version in versions]
is_new_major = major_versions.count(current_version.major) == 1
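    # Illustrative example (hypothetical version numbers): if the active versions are
    # 6.5.1, 7.1.0 and 7.1.1 and the current release is 7.1.1, is_new_major is False and
    # only 7.1.0 (same major.minor, older micro) is hidden below. If instead 7.0.0 is the
    # first 7.x release, is_new_major is True and every 6.x version except the newest
    # 6.x release is hidden.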
for version in versions:
if (is_new_major and version not in latest_major_versions) or (
(version.major, version.minor)
== (
current_version.major,
current_version.minor,
)
and version.micro != current_version.micro
):
response = requests.patch(
f"https://readthedocs.org/api/v3/projects/{PROJECT}/versions/v{version}/",
json={"active": True, "hidden": True},
headers=HEADERS,
)
if response.status_code == 204:
sys.stderr.write(f"Version {version!s} was hidden successfully\n")
else:
sys.stderr.write(f"Failed to hide version {version!s}\n")
return 1
if __name__ == "__main__":
sys.exit(main())
|
#!/usr/bin/env python3
import os
import re
import sys
import time
import packaging.version
import requests
PROJECT = "praw"
HEADERS = {"Authorization": f"token {os.environ.get('READTHEDOCS_TOKEN')}"}
def fetch_versions():
response = requests.get(
f"https://readthedocs.org/api/v3/projects/{PROJECT}/versions?active=true",
headers=HEADERS,
)
versions = None
if response.status_code == 200:
active_versions = response.json()
versions = [
packaging.version.parse(slug["slug"].strip("v"))
for slug in active_versions["results"]
if not slug["hidden"] and not slug["slug"] in ["stable", "latest"]
]
if versions is None:
sys.stderr.write("Failed to get current active versions\n")
return versions
def main():
with open(f"{PROJECT}/const.py") as fp:
current_version = packaging.version.parse(
re.search('__version__ = "([^"]+)"', fp.read()).group(1)
)
if current_version.is_devrelease:
current_version = packaging.version.parse(
f"{current_version.major}.{current_version.minor}.{current_version.micro - 1}"
)
versions = fetch_versions()
if versions is None:
return 1
max_retry_count = 5
retry_count = 0
while current_version not in versions:
versions = fetch_versions()
if versions is None:
return 1
if current_version in versions:
break
else:
if retry_count >= max_retry_count:
sys.stderr.write(
f"Current version {current_version!s} failed to build\n"
)
return 1
sys.stdout.write("Waiting 30 seconds for build to finish\n")
retry_count += 1
time.sleep(30)
aggregated_versions = {}
for version in versions:
aggregated_versions.setdefault(version.major, [])
aggregated_versions[version.major].append(version)
latest_major_versions = [
max(aggregated_versions[major]) for major in aggregated_versions
]
major_versions = [version.major for version in versions]
is_new_major = major_versions.count(current_version.major) == 1
for version in versions:
if (is_new_major and version not in latest_major_versions) or (
(version.major, version.minor)
== (
current_version.major,
current_version.minor,
)
and version.micro != current_version.micro
):
response = requests.patch(
f"https://readthedocs.org/api/v3/projects/{PROJECT}/versions/v{version}/",
json={"active": True, "hidden": True},
headers=HEADERS,
)
if response.status_code == 204:
sys.stderr.write(f"Version {version!s} was hidden successfully\n")
else:
sys.stderr.write(f"Failed to hide version {version!s}\n")
return 1
if __name__ == "__main__":
sys.exit(main())
|
import logging
import os
import unittest
from typing import (
Any,
Optional,
)
from galaxy.tool_util.verify.test_data import TestDataResolver
from galaxy_test.base.env import (
setup_keep_outdir,
target_url_parts,
)
log = logging.getLogger(__name__)
class FunctionalTestCase(unittest.TestCase):
"""Base class for tests targetting actual Galaxy servers.
Subclass should override galaxy_driver_class if a Galaxy server
needs to be launched to run the test, this base class assumes a
server is already running.
"""
galaxy_driver_class: Optional[type] = None
history_id: Optional[str]
host: str
port: Optional[str]
url: str
keepOutdir: str
test_data_resolver: TestDataResolver
_test_driver: Optional[Any]
def setUp(self) -> None:
self.history_id = os.environ.get("GALAXY_TEST_HISTORY_ID", None)
self.host, self.port, self.url = target_url_parts()
server_wrapper = (
self._test_driver and self._test_driver.server_wrappers and self._test_driver.server_wrappers[0]
)
if server_wrapper:
self.host = server_wrapper.host
self.port = server_wrapper.port
self.url = f"http://{self.host}:{self.port}{server_wrapper.prefix.rstrip("/")}/"
self.test_data_resolver = TestDataResolver()
self.keepOutdir = setup_keep_outdir()
@classmethod
def setUpClass(cls):
"""Configure and start Galaxy for a test."""
cls._test_driver = None
if cls.galaxy_driver_class is not None and not os.environ.get("GALAXY_TEST_ENVIRONMENT_CONFIGURED"):
cls._test_driver = cls.galaxy_driver_class()
cls._test_driver.setup(config_object=cls)
@classmethod
def tearDownClass(cls):
"""Shutdown Galaxy server and cleanup temp directory."""
if cls._test_driver:
cls._test_driver.tear_down()
def get_filename(self, filename: str) -> str:
        # No longer used by tool tests - drop if it isn't used elsewhere.
return self.test_data_resolver.get_filename(filename)
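# A minimal subclass sketch (illustrative only; "MyGalaxyDriver" is a hypothetical
# driver class, not something provided by this module):
#
#   class DriverManagedTestCase(FunctionalTestCase):
#       galaxy_driver_class = MyGalaxyDriver  # launch a Galaxy server for these tests
#
#       def test_server_url_is_set(self):
#           assert self.url.startswith("http")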
|
import logging
import os
import unittest
from typing import (
Any,
Optional,
)
from galaxy.tool_util.verify.test_data import TestDataResolver
from galaxy_test.base.env import (
setup_keep_outdir,
target_url_parts,
)
log = logging.getLogger(__name__)
class FunctionalTestCase(unittest.TestCase):
"""Base class for tests targetting actual Galaxy servers.
Subclass should override galaxy_driver_class if a Galaxy server
needs to be launched to run the test, this base class assumes a
server is already running.
"""
galaxy_driver_class: Optional[type] = None
history_id: Optional[str]
host: str
port: Optional[str]
url: str
keepOutdir: str
test_data_resolver: TestDataResolver
_test_driver: Optional[Any]
def setUp(self) -> None:
self.history_id = os.environ.get("GALAXY_TEST_HISTORY_ID", None)
self.host, self.port, self.url = target_url_parts()
server_wrapper = (
self._test_driver and self._test_driver.server_wrappers and self._test_driver.server_wrappers[0]
)
if server_wrapper:
self.host = server_wrapper.host
self.port = server_wrapper.port
self.url = f"http://{self.host}:{self.port}{server_wrapper.prefix.rstrip('/')}/"
self.test_data_resolver = TestDataResolver()
self.keepOutdir = setup_keep_outdir()
@classmethod
def setUpClass(cls):
"""Configure and start Galaxy for a test."""
cls._test_driver = None
if cls.galaxy_driver_class is not None and not os.environ.get("GALAXY_TEST_ENVIRONMENT_CONFIGURED"):
cls._test_driver = cls.galaxy_driver_class()
cls._test_driver.setup(config_object=cls)
@classmethod
def tearDownClass(cls):
"""Shutdown Galaxy server and cleanup temp directory."""
if cls._test_driver:
cls._test_driver.tear_down()
def get_filename(self, filename: str) -> str:
        # No longer used by tool tests - drop if it isn't used elsewhere.
return self.test_data_resolver.get_filename(filename)
|
# %% [markdown]
# #
import itertools
import os
import time
from itertools import chain
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, Node, RenderTree
from joblib import Parallel, delayed
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.decomposition import PCA
from graspy.plot import heatmap, pairplot
from src.data import load_metagraph
from src.graph import MetaGraph, preprocess
from src.io import savecsv, savefig, saveskels
from src.traverse import (
cascades_from_node,
generate_cascade_tree,
generate_random_walks,
path_to_visits,
to_markov_matrix,
to_path_graph,
)
from src.visualization import (
CLASS_COLOR_DICT,
barplot_text,
draw_networkx_nice,
draw_separators,
matrixplot,
remove_shared_ax,
remove_spines,
screeplot,
sort_meta,
stacked_barplot,
)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, save_on=True, **kws)
#%% Load and preprocess the data
VERSION = "2020-03-09"
print(f"Using version {VERSION}")
plot_examples = False
plot_embed = False
plot_full_mat = False
graph_type = "Gad"
threshold = 0
weight = "weight"
mg = load_metagraph(graph_type, VERSION)
mg = preprocess(
mg,
threshold=threshold,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight=weight,
)
print(f"Preprocessed graph {graph_type} with threshold={threshold}, weight={weight}")
# TODO update this with the mixed groups
# TODO make these functional for selecting proper paths
out_classes = [
"O_dSEZ",
"O_dSEZ;CN",
"O_dSEZ;LHN",
"O_dVNC",
"O_dVNC;O_RG",
"O_dVNC;CN",
"O_RG",
"O_dUnk",
"O_RG-IPC",
"O_RG-ITP",
"O_RG-CA-LP",
]
from_groups = [
("sens-ORN",),
("sens-photoRh5", "sens-photoRh6"),
("sens-MN",),
("sens-thermo",),
("sens-vtd",),
("sens-AN",),
]
from_group_names = ["Odor", "Photo", "MN", "Temp", "VTD", "AN"]
out_groups = [
("motor-mAN", "motormVAN", "motor-mPaN"),
("O_dSEZ", "O_dVNC;O_dSEZ", "O_dSEZ;CN", "LHN;O_dSEZ"),
("O_dVNC", "O_dVNC;CN", "O_RG;O_dVNC", "O_dVNC;O_dSEZ"),
("O_RG", "O_RG-IPC", "O_RG-ITP", "O_RG-CA-LP", "O_RG;O_dVNC"),
("O_dUnk",),
]
out_group_names = ["Motor", "SEZ", "VNC", "RG", "dUnk"]
from_classes = list(chain.from_iterable(from_groups)) # make this a flat list
out_classes = list(chain.from_iterable(out_groups))
class_key = "Merge Class"
adj = nx.to_numpy_array(mg.g, weight=weight, nodelist=mg.meta.index.values)
n_verts = len(adj)
meta = mg.meta.copy()
g = mg.g.copy()
meta["idx"] = range(len(meta))
from_inds = meta[meta[class_key].isin(from_classes)]["idx"].values
out_inds = meta[meta[class_key].isin(out_classes)]["idx"].values
ind_map = dict(zip(meta.index, meta["idx"]))
g = nx.relabel_nodes(g, ind_map, copy=True)
out_ind_map = dict(zip(out_inds, range(len(out_inds))))
# %% [markdown]
# # Use a method to generate visits
path_type = "cascade"
if path_type == "cascade":
p = 0.01
not_probs = (
1 - p
) ** adj # probability of none of the synapses causing postsynaptic
probs = 1 - not_probs # probability of ANY of the synapses firing onto next
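    # Worked example (illustrative numbers): with p = 0.01 and an edge carrying 5
    # synapses, not_probs = 0.99 ** 5 ≈ 0.951, so the chance that the cascade crosses
    # that edge is probs ≈ 0.049, i.e. roughly 5%.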
elif path_type == "fancy-cascade":
alpha = 0.5
flat = np.full(adj.shape, alpha)
deg = meta["dendrite_input"].values
deg[deg == 0] = 1
flat = flat / deg[None, :]
not_probs = np.power((1 - flat), adj)
probs = 1 - not_probs
#%%
seed = 8888
max_depth = 10
n_bins = 10
n_sims = 100
method = "tree"
normalize_n_source = False
basename = f"-{graph_type}-t{threshold}-pt{path_type}-b{n_bins}-n{n_sims}-m{method}"
basename += f"-norm{normalize_n_source}"
basename += f"-plus-inverted"
np.random.seed(seed)
if method == "tree":
seeds = np.random.choice(int(1e8), size=len(from_inds), replace=False)
outs = Parallel(n_jobs=1, verbose=10)(
delayed(cascades_from_node)(
fi, probs, out_inds, max_depth, n_sims, seed, n_bins, method
)
for fi, seed in zip(from_inds, seeds)
)
elif method == "path":
outs = []
for start_ind in from_inds:
temp_hist = cascades_from_node(
start_ind, probs, out_inds, max_depth, n_sims, seed, n_bins, method
)
outs.append(temp_hist)
from_hist_mat = np.concatenate(outs, axis=-1)
###
# invert
if method == "tree":
seeds = np.random.choice(int(1e8), size=len(out_inds), replace=False)
outs = Parallel(n_jobs=1, verbose=10)(
delayed(cascades_from_node)(
fi, probs.T, from_inds, max_depth, n_sims, seed, n_bins, method
)
for fi, seed in zip(out_inds, seeds)
)
elif method == "path":
outs = []
for start_ind in from_inds:
temp_hist = cascades_from_node(
start_ind, probs.T, out_inds, max_depth, n_sims, seed, n_bins, method
)
outs.append(temp_hist)
out_hist_mat = np.concatenate(outs, axis=-1)
# generate_cascade_paths(start_ind, probs, 1, stop_inds=out_inds, max_depth=10)
# %% [markdown]
# # Sort metadata
full_hist_mat = np.concatenate((from_hist_mat, out_hist_mat), axis=1)
hist_mat = full_hist_mat
# row metadata
ids = pd.Series(index=meta["idx"], data=meta.index, name="id")
to_class = ids.map(meta["Merge Class"])
to_class.name = "to_class"
row_df = pd.concat([ids, to_class], axis=1)
# col metadata
orders = pd.Series(data=len(from_inds) * list(range(n_bins)), name="order")
from_idx = pd.Series(data=np.repeat(from_inds, n_bins), name="idx")
from_ids = from_idx.map(ids)
from_ids.name = "id"
from_class = from_ids.map(meta["Merge Class"])
from_class.name = "class"
from_col_df = pd.concat([orders, from_idx, from_ids, from_class], axis=1)
orders = pd.Series(data=len(out_inds) * list(range(n_bins)), name="order")
out_idx = pd.Series(data=np.repeat(out_inds, n_bins), name="idx")
out_ids = out_idx.map(ids)
out_ids.name = "id"
out_class = out_ids.map(meta["Merge Class"])
out_class.name = "class"
out_col_df = pd.concat([orders, out_idx, out_ids, out_class], axis=1)
col_df = pd.concat([from_col_df, out_col_df], axis=0, ignore_index=True)
# %% [markdown]
# #
log_mat = np.log10(hist_mat + 1)
if plot_full_mat:
shape = log_mat.shape
figsize = (10, 20)
fig, ax = plt.subplots(1, 1, figsize=figsize)
matrixplot(
log_mat,
ax=ax,
col_meta=col_df,
col_sort_class=["from_class"],
row_meta=row_df,
row_sort_class=["to_class"],
plot_type="scattermap",
sizes=(0.5, 0.5),
tick_rot=45,
)
stashfig("log-full-scatter" + basename)
fig, ax = plt.subplots(1, 1, figsize=figsize)
matrixplot(
log_mat,
ax=ax,
col_meta=col_df,
col_sort_class=["from_class"],
row_colors=CLASS_COLOR_DICT,
row_meta=row_df,
row_sort_class=["to_class"],
plot_type="heatmap",
sizes=(0.5, 0.5),
tick_rot=45,
)
stashfig("log-full-heat" + basename)
# %% [markdown]
# # Screeplots
if plot_embed:
screeplot(hist_mat.astype(float), title="Raw hist mat (full)")
stashfig("scree-raw-mat" + basename)
screeplot(log_mat, title="Log hist mat (full)")
stashfig("scree-log-mat" + basename)
# %% [markdown]
# # Pairplots
if plot_embed:
pca = PCA(n_components=6)
embed = pca.fit_transform(log_mat)
loadings = pca.components_.T
pg = pairplot(
embed,
labels=to_class.values,
palette=CLASS_COLOR_DICT,
height=5,
title="Node response embedding (log)",
)
pg._legend.remove()
stashfig("node-pca-log" + basename)
pg = pairplot(
loadings,
labels=from_class.values,
height=5,
title="Source class embedding (log)",
)
stashfig("source-pca-log" + basename)
pca = PCA(n_components=6)
embed = pca.fit_transform(hist_mat.astype(float))
loadings = pca.components_.T
pg = pairplot(
embed,
labels=to_class.values,
palette=CLASS_COLOR_DICT,
height=5,
title="Node response embedding (raw)",
)
pg._legend.remove()
stashfig("node-pca-log" + basename)
pg = pairplot(
loadings,
labels=from_class.values,
height=5,
title="Source class embedding (raw)",
)
stashfig("source-pca-log" + basename)
# %% [markdown]
# # Collapse that matrix
hist_mat = full_hist_mat
collapsed_hist = []
collapsed_col_df = []
groups = from_groups + out_groups
names = from_group_names + out_group_names
for fg, fg_name in zip(groups, names):
from_df = col_df[col_df["class"].isin(fg)]
n_in_group = len(from_df)
for order in from_df["order"].unique():
inds = from_df[from_df["order"] == order].index
col = hist_mat[:, inds].sum(axis=1)
if normalize_n_source:
col = col.astype(float)
col /= n_in_group
collapsed_hist.append(col)
row = {"order": order, "class": fg_name}
collapsed_col_df.append(row)
collapsed_col_df = pd.DataFrame(collapsed_col_df)
collapsed_hist = np.array(collapsed_hist).T
log_collapsed_hist = np.log10(collapsed_hist + 1)
# %% [markdown]
# #
if plot_embed:
pca = PCA(n_components=6)
embed = pca.fit_transform(log_collapsed_hist)
loadings = pca.components_.T
pg = pairplot(
embed,
labels=to_class.values,
palette=CLASS_COLOR_DICT,
height=5,
title="Collapsed node response embedding (log)",
)
pg._legend.remove()
stashfig("coll-node-pca-log" + basename)
pg = pairplot(
loadings,
labels=collapsed_col_df["from_class"].values,
height=5,
title="Collapsed source class embedding (log)",
)
stashfig("coll-source-pca-log" + basename)
pca = PCA(n_components=6)
embed = pca.fit_transform(collapsed_hist.astype(float))
loadings = pca.components_.T
pg = pairplot(
embed,
labels=to_class.values,
palette=CLASS_COLOR_DICT,
height=5,
title="Collapsed node response embedding (raw)",
)
pg._legend.remove()
stashfig("coll-node-pca-log" + basename)
pg = pairplot(
loadings,
labels=collapsed_col_df["from_class"].values,
height=5,
title="Collapsed source class embedding (raw)",
)
stashfig("coll-source-pca-log" + basename)
# %% [markdown]
# # Compute mean visit over all sources, for plotting
def mean_visit(row):
n_groups = len(row) // n_bins
s = 0
for i in range(n_groups):
group = row[i * n_bins : (i + 1) * n_bins]
for j, val in enumerate(group):
s += j * val
s /= row.sum()
return s
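# Illustrative example: with n_bins = 2 and row = [3, 1, 0, 2] (two source groups of two
# bins each), the hop-weighted sum is 0*3 + 1*1 + 0*0 + 1*2 = 3 and row.sum() = 6, so the
# mean visit order is 0.5.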
visits = []
for r in collapsed_hist:
mv = mean_visit(r)
visits.append(mv)
visits = np.array(visits)
visits[np.isnan(visits)] = n_bins + 1
row_df["visit_order"] = visits
mean_visit_order = row_df.groupby(["to_class"])["visit_order"].mean()
row_df["group_visit_order"] = row_df["to_class"].map(mean_visit_order)
row_df["n_visit"] = collapsed_hist.sum(axis=1)
# %% [markdown]
# #
fig, ax = plt.subplots(1, 1, figsize=(15, 15))
sns.set_context("talk", font_scale=0.8)
gridline_kws = dict(color="grey", linestyle="--", alpha=0.7, linewidth=0.3)
matrixplot(
log_collapsed_hist,
ax=ax,
col_meta=collapsed_col_df,
col_sort_class=["class"],
row_meta=row_df,
row_sort_class=["to_class"],
row_colors=CLASS_COLOR_DICT,
row_class_order="group_visit_order",
row_item_order=["visit_order"],
plot_type="heatmap",
tick_rot=0,
row_ticks=False,
gridline_kws=gridline_kws,
)
stashfig("collapsed-log-heat" + basename)
# %% [markdown]
# #
sns.set_context("talk", font_scale=1)
gridline_kws = dict(color="grey", linestyle="--", alpha=0.7, linewidth=0.3)
fig, ax = plt.subplots(1, 1, figsize=(25, 15))
ax, divider, top_cax, left_cax = matrixplot(
log_collapsed_hist.T,
ax=ax,
row_meta=collapsed_col_df,
row_sort_class=["class"],
col_meta=row_df,
col_sort_class=["to_class"],
col_colors=CLASS_COLOR_DICT,
col_class_order="group_visit_order",
col_item_order=["visit_order"],
plot_type="heatmap",
tick_rot=45,
col_ticks=False,
gridline_kws=gridline_kws,
)
cax = divider.append_axes("right", size="1%", pad=0.02, sharey=ax)
remove_shared_ax(cax)
sns.heatmap(
collapsed_col_df["order"][:, None], ax=cax, cbar=False, cmap="RdBu", center=0
)
cax.set_xticks([])
cax.set_yticks([])
cax.set_ylabel(r"Hops $\to$", rotation=-90, ha="center", va="center", labelpad=20)
cax.yaxis.set_label_position("right")
top_cax.set_yticks([0.5])
top_cax.set_yticklabels(["Class"], va="center")
ax.set_xlabel("Neuron")
ax.set_ylabel("Source class")
stashfig("collapsed-log-heat-transpose" + basename, dpi=200)
fig, ax = plt.subplots(1, 1, figsize=(25, 15))
ax, divider, top_cax, left_cax = matrixplot(
log_collapsed_hist.T,
ax=ax,
row_meta=collapsed_col_df,
row_sort_class=["class"],
col_meta=row_df,
col_sort_class=["to_class"],
col_colors=CLASS_COLOR_DICT,
col_class_order="group_visit_order",
col_item_order=["visit_order"],
plot_type="heatmap",
tick_rot=45,
col_ticks=True,
gridline_kws=gridline_kws,
)
cax = divider.append_axes("right", size="1%", pad=0.02, sharey=ax)
remove_shared_ax(cax)
sns.heatmap(
collapsed_col_df["order"][:, None], ax=cax, cbar=False, cmap="RdBu", center=0
)
cax.set_xticks([])
cax.set_yticks([])
cax.set_ylabel(r"Hops $\to$", rotation=-90, ha="center", va="center", labelpad=20)
cax.yaxis.set_label_position("right")
top_cax.set_yticks([0.5])
top_cax.set_yticklabels(["Class"], va="center")
ax.set_xlabel("Neuron")
ax.set_ylabel("Source class")
stashfig("collapsed-log-heat-transpose-labeled" + basename, dpi=200)
# %% [markdown]
# # clustermap the matrix
sns.set_context("talk", font_scale=1)
linkage = "average"
metric = "euclidean"
colors = np.vectorize(CLASS_COLOR_DICT.get)(row_df["to_class"])
perm_inds, sort_collapsed_col_df = sort_meta(
collapsed_col_df, sort_class=["from_class"]
)
sort_log_collapsed_hist = log_collapsed_hist[:, perm_inds]
cg = sns.clustermap(
data=sort_log_collapsed_hist.T,
col_cluster=True,
row_cluster=False,
col_colors=colors,
cmap="RdBu_r",
center=0,
cbar_pos=None,
method=linkage,
metric=metric,
)
ax = cg.ax_heatmap
draw_separators(
ax,
ax_type="y",
sort_meta=sort_collapsed_col_df,
sort_class=["from_class"],
tick_rot=0,
)
ax.xaxis.set_ticks([])
# ax.set_ylabel(r"Visits over time $\to$")
ax.set_xlabel("Neuron")
ax.yaxis.tick_left()
# ax.set_yticklabels(ax.get_yticklabels(), ha="left")
stashfig("collapsed-log-clustermap" + basename)
# stashfig("collapsed-log-clustermap" + basename, fmt="pdf")
# %% [markdown]
# # Do some plotting for illustration only
if plot_examples:
sns.set_context("talk")
sns.set_palette("Set1")
examples = [742, 605, 743, 2282, 596, 2367, 1690, 2313]
for target_ind in examples:
row = collapsed_hist[target_ind, :]
perm_inds, sort_col_df = sort_meta(collapsed_col_df, sort_class=["from_class"])
sort_row = row[perm_inds]
fig, ax = plt.subplots(1, 1)
xs = np.arange(len(sort_row)) + 0.5
divider = make_axes_locatable(ax)
bot_cax = divider.append_axes("bottom", size="3%", pad=0.02, sharex=ax)
remove_shared_ax(bot_cax)
ax.bar(x=xs, height=sort_row, width=0.8)
draw_separators(
ax, sort_meta=sort_col_df, sort_class=["from_class"], tick_rot=0
)
ax.set_xlim(0, len(xs))
ax.set_ylabel("# hits @ time")
sns.heatmap(
collapsed_col_df["order"][None, :],
ax=bot_cax,
cbar=False,
cmap="RdBu",
center=0,
)
bot_cax.set_xticks([])
bot_cax.set_yticks([])
bot_cax.set_xlabel(r"Hops $\to$", x=0.1, ha="left", labelpad=-22)
bot_cax.set_xticks([20.5, 24.5, 28.5])
bot_cax.set_xticklabels([1, 5, 9], rotation=0)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
target_skid = meta.iloc[target_ind, :].name
ax.set_title(
f"Response for cell {target_skid} ({meta[meta["idx"] == target_ind]["Merge Class"].values[0]})"
)
stashfig(f"{target_skid}-response-hist" + basename)
|
# %% [markdown]
# #
import itertools
import os
import time
from itertools import chain
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, Node, RenderTree
from joblib import Parallel, delayed
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.decomposition import PCA
from graspy.plot import heatmap, pairplot
from src.data import load_metagraph
from src.graph import MetaGraph, preprocess
from src.io import savecsv, savefig, saveskels
from src.traverse import (
cascades_from_node,
generate_cascade_tree,
generate_random_walks,
path_to_visits,
to_markov_matrix,
to_path_graph,
)
from src.visualization import (
CLASS_COLOR_DICT,
barplot_text,
draw_networkx_nice,
draw_separators,
matrixplot,
remove_shared_ax,
remove_spines,
screeplot,
sort_meta,
stacked_barplot,
)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, save_on=True, **kws)
#%% Load and preprocess the data
VERSION = "2020-03-09"
print(f"Using version {VERSION}")
plot_examples = False
plot_embed = False
plot_full_mat = False
graph_type = "Gad"
threshold = 0
weight = "weight"
mg = load_metagraph(graph_type, VERSION)
mg = preprocess(
mg,
threshold=threshold,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight=weight,
)
print(f"Preprocessed graph {graph_type} with threshold={threshold}, weight={weight}")
# TODO update this with the mixed groups
# TODO make these functional for selecting proper paths
out_classes = [
"O_dSEZ",
"O_dSEZ;CN",
"O_dSEZ;LHN",
"O_dVNC",
"O_dVNC;O_RG",
"O_dVNC;CN",
"O_RG",
"O_dUnk",
"O_RG-IPC",
"O_RG-ITP",
"O_RG-CA-LP",
]
from_groups = [
("sens-ORN",),
("sens-photoRh5", "sens-photoRh6"),
("sens-MN",),
("sens-thermo",),
("sens-vtd",),
("sens-AN",),
]
from_group_names = ["Odor", "Photo", "MN", "Temp", "VTD", "AN"]
out_groups = [
("motor-mAN", "motormVAN", "motor-mPaN"),
("O_dSEZ", "O_dVNC;O_dSEZ", "O_dSEZ;CN", "LHN;O_dSEZ"),
("O_dVNC", "O_dVNC;CN", "O_RG;O_dVNC", "O_dVNC;O_dSEZ"),
("O_RG", "O_RG-IPC", "O_RG-ITP", "O_RG-CA-LP", "O_RG;O_dVNC"),
("O_dUnk",),
]
out_group_names = ["Motor", "SEZ", "VNC", "RG", "dUnk"]
from_classes = list(chain.from_iterable(from_groups)) # make this a flat list
out_classes = list(chain.from_iterable(out_groups))
class_key = "Merge Class"
adj = nx.to_numpy_array(mg.g, weight=weight, nodelist=mg.meta.index.values)
n_verts = len(adj)
meta = mg.meta.copy()
g = mg.g.copy()
meta["idx"] = range(len(meta))
from_inds = meta[meta[class_key].isin(from_classes)]["idx"].values
out_inds = meta[meta[class_key].isin(out_classes)]["idx"].values
ind_map = dict(zip(meta.index, meta["idx"]))
g = nx.relabel_nodes(g, ind_map, copy=True)
out_ind_map = dict(zip(out_inds, range(len(out_inds))))
# %% [markdown]
# # Use a method to generate visits
path_type = "cascade"
if path_type == "cascade":
p = 0.01
not_probs = (
1 - p
) ** adj # probability of none of the synapses causing postsynaptic
probs = 1 - not_probs # probability of ANY of the synapses firing onto next
elif path_type == "fancy-cascade":
alpha = 0.5
flat = np.full(adj.shape, alpha)
deg = meta["dendrite_input"].values
deg[deg == 0] = 1
flat = flat / deg[None, :]
not_probs = np.power((1 - flat), adj)
probs = 1 - not_probs
#%%
seed = 8888
max_depth = 10
n_bins = 10
n_sims = 100
method = "tree"
normalize_n_source = False
basename = f"-{graph_type}-t{threshold}-pt{path_type}-b{n_bins}-n{n_sims}-m{method}"
basename += f"-norm{normalize_n_source}"
basename += f"-plus-inverted"
np.random.seed(seed)
if method == "tree":
seeds = np.random.choice(int(1e8), size=len(from_inds), replace=False)
outs = Parallel(n_jobs=1, verbose=10)(
delayed(cascades_from_node)(
fi, probs, out_inds, max_depth, n_sims, seed, n_bins, method
)
for fi, seed in zip(from_inds, seeds)
)
elif method == "path":
outs = []
for start_ind in from_inds:
temp_hist = cascades_from_node(
start_ind, probs, out_inds, max_depth, n_sims, seed, n_bins, method
)
outs.append(temp_hist)
from_hist_mat = np.concatenate(outs, axis=-1)
###
# invert
if method == "tree":
seeds = np.random.choice(int(1e8), size=len(out_inds), replace=False)
outs = Parallel(n_jobs=1, verbose=10)(
delayed(cascades_from_node)(
fi, probs.T, from_inds, max_depth, n_sims, seed, n_bins, method
)
for fi, seed in zip(out_inds, seeds)
)
elif method == "path":
outs = []
for start_ind in from_inds:
temp_hist = cascades_from_node(
start_ind, probs.T, out_inds, max_depth, n_sims, seed, n_bins, method
)
outs.append(temp_hist)
out_hist_mat = np.concatenate(outs, axis=-1)
# generate_cascade_paths(start_ind, probs, 1, stop_inds=out_inds, max_depth=10)
# %% [markdown]
# # Sort metadata
full_hist_mat = np.concatenate((from_hist_mat, out_hist_mat), axis=1)
hist_mat = full_hist_mat
# row metadata
ids = pd.Series(index=meta["idx"], data=meta.index, name="id")
to_class = ids.map(meta["Merge Class"])
to_class.name = "to_class"
row_df = pd.concat([ids, to_class], axis=1)
# col metadata
orders = pd.Series(data=len(from_inds) * list(range(n_bins)), name="order")
from_idx = pd.Series(data=np.repeat(from_inds, n_bins), name="idx")
from_ids = from_idx.map(ids)
from_ids.name = "id"
from_class = from_ids.map(meta["Merge Class"])
from_class.name = "class"
from_col_df = pd.concat([orders, from_idx, from_ids, from_class], axis=1)
orders = pd.Series(data=len(out_inds) * list(range(n_bins)), name="order")
out_idx = pd.Series(data=np.repeat(out_inds, n_bins), name="idx")
out_ids = out_idx.map(ids)
out_ids.name = "id"
out_class = out_ids.map(meta["Merge Class"])
out_class.name = "class"
out_col_df = pd.concat([orders, out_idx, out_ids, out_class], axis=1)
col_df = pd.concat([from_col_df, out_col_df], axis=0, ignore_index=True)
# %% [markdown]
# #
log_mat = np.log10(hist_mat + 1)
if plot_full_mat:
shape = log_mat.shape
figsize = (10, 20)
fig, ax = plt.subplots(1, 1, figsize=figsize)
matrixplot(
log_mat,
ax=ax,
col_meta=col_df,
col_sort_class=["from_class"],
row_meta=row_df,
row_sort_class=["to_class"],
plot_type="scattermap",
sizes=(0.5, 0.5),
tick_rot=45,
)
stashfig("log-full-scatter" + basename)
fig, ax = plt.subplots(1, 1, figsize=figsize)
matrixplot(
log_mat,
ax=ax,
col_meta=col_df,
col_sort_class=["from_class"],
row_colors=CLASS_COLOR_DICT,
row_meta=row_df,
row_sort_class=["to_class"],
plot_type="heatmap",
sizes=(0.5, 0.5),
tick_rot=45,
)
stashfig("log-full-heat" + basename)
# %% [markdown]
# # Screeplots
if plot_embed:
screeplot(hist_mat.astype(float), title="Raw hist mat (full)")
stashfig("scree-raw-mat" + basename)
screeplot(log_mat, title="Log hist mat (full)")
stashfig("scree-log-mat" + basename)
# %% [markdown]
# # Pairplots
if plot_embed:
pca = PCA(n_components=6)
embed = pca.fit_transform(log_mat)
loadings = pca.components_.T
pg = pairplot(
embed,
labels=to_class.values,
palette=CLASS_COLOR_DICT,
height=5,
title="Node response embedding (log)",
)
pg._legend.remove()
stashfig("node-pca-log" + basename)
pg = pairplot(
loadings,
labels=from_class.values,
height=5,
title="Source class embedding (log)",
)
stashfig("source-pca-log" + basename)
pca = PCA(n_components=6)
embed = pca.fit_transform(hist_mat.astype(float))
loadings = pca.components_.T
pg = pairplot(
embed,
labels=to_class.values,
palette=CLASS_COLOR_DICT,
height=5,
title="Node response embedding (raw)",
)
pg._legend.remove()
stashfig("node-pca-log" + basename)
pg = pairplot(
loadings,
labels=from_class.values,
height=5,
title="Source class embedding (raw)",
)
stashfig("source-pca-log" + basename)
# %% [markdown]
# # Collapse that matrix
hist_mat = full_hist_mat
collapsed_hist = []
collapsed_col_df = []
groups = from_groups + out_groups
names = from_group_names + out_group_names
for fg, fg_name in zip(groups, names):
from_df = col_df[col_df["class"].isin(fg)]
n_in_group = len(from_df)
for order in from_df["order"].unique():
inds = from_df[from_df["order"] == order].index
col = hist_mat[:, inds].sum(axis=1)
if normalize_n_source:
col = col.astype(float)
col /= n_in_group
collapsed_hist.append(col)
row = {"order": order, "class": fg_name}
collapsed_col_df.append(row)
collapsed_col_df = pd.DataFrame(collapsed_col_df)
collapsed_hist = np.array(collapsed_hist).T
log_collapsed_hist = np.log10(collapsed_hist + 1)
# %% [markdown]
# #
if plot_embed:
pca = PCA(n_components=6)
embed = pca.fit_transform(log_collapsed_hist)
loadings = pca.components_.T
pg = pairplot(
embed,
labels=to_class.values,
palette=CLASS_COLOR_DICT,
height=5,
title="Collapsed node response embedding (log)",
)
pg._legend.remove()
stashfig("coll-node-pca-log" + basename)
pg = pairplot(
loadings,
labels=collapsed_col_df["from_class"].values,
height=5,
title="Collapsed source class embedding (log)",
)
stashfig("coll-source-pca-log" + basename)
pca = PCA(n_components=6)
embed = pca.fit_transform(collapsed_hist.astype(float))
loadings = pca.components_.T
pg = pairplot(
embed,
labels=to_class.values,
palette=CLASS_COLOR_DICT,
height=5,
title="Collapsed node response embedding (raw)",
)
pg._legend.remove()
stashfig("coll-node-pca-log" + basename)
pg = pairplot(
loadings,
labels=collapsed_col_df["from_class"].values,
height=5,
title="Collapsed source class embedding (raw)",
)
stashfig("coll-source-pca-log" + basename)
# %% [markdown]
# # Compute mean visit over all sources, for plotting
def mean_visit(row):
n_groups = len(row) // n_bins
s = 0
for i in range(n_groups):
group = row[i * n_bins : (i + 1) * n_bins]
for j, val in enumerate(group):
s += j * val
s /= row.sum()
return s
visits = []
for r in collapsed_hist:
mv = mean_visit(r)
visits.append(mv)
visits = np.array(visits)
visits[np.isnan(visits)] = n_bins + 1
row_df["visit_order"] = visits
mean_visit_order = row_df.groupby(["to_class"])["visit_order"].mean()
row_df["group_visit_order"] = row_df["to_class"].map(mean_visit_order)
row_df["n_visit"] = collapsed_hist.sum(axis=1)
# %% [markdown]
# #
fig, ax = plt.subplots(1, 1, figsize=(15, 15))
sns.set_context("talk", font_scale=0.8)
gridline_kws = dict(color="grey", linestyle="--", alpha=0.7, linewidth=0.3)
matrixplot(
log_collapsed_hist,
ax=ax,
col_meta=collapsed_col_df,
col_sort_class=["class"],
row_meta=row_df,
row_sort_class=["to_class"],
row_colors=CLASS_COLOR_DICT,
row_class_order="group_visit_order",
row_item_order=["visit_order"],
plot_type="heatmap",
tick_rot=0,
row_ticks=False,
gridline_kws=gridline_kws,
)
stashfig("collapsed-log-heat" + basename)
# %% [markdown]
# #
sns.set_context("talk", font_scale=1)
gridline_kws = dict(color="grey", linestyle="--", alpha=0.7, linewidth=0.3)
fig, ax = plt.subplots(1, 1, figsize=(25, 15))
ax, divider, top_cax, left_cax = matrixplot(
log_collapsed_hist.T,
ax=ax,
row_meta=collapsed_col_df,
row_sort_class=["class"],
col_meta=row_df,
col_sort_class=["to_class"],
col_colors=CLASS_COLOR_DICT,
col_class_order="group_visit_order",
col_item_order=["visit_order"],
plot_type="heatmap",
tick_rot=45,
col_ticks=False,
gridline_kws=gridline_kws,
)
cax = divider.append_axes("right", size="1%", pad=0.02, sharey=ax)
remove_shared_ax(cax)
sns.heatmap(
collapsed_col_df["order"][:, None], ax=cax, cbar=False, cmap="RdBu", center=0
)
cax.set_xticks([])
cax.set_yticks([])
cax.set_ylabel(r"Hops $\to$", rotation=-90, ha="center", va="center", labelpad=20)
cax.yaxis.set_label_position("right")
top_cax.set_yticks([0.5])
top_cax.set_yticklabels(["Class"], va="center")
ax.set_xlabel("Neuron")
ax.set_ylabel("Source class")
stashfig("collapsed-log-heat-transpose" + basename, dpi=200)
fig, ax = plt.subplots(1, 1, figsize=(25, 15))
ax, divider, top_cax, left_cax = matrixplot(
log_collapsed_hist.T,
ax=ax,
row_meta=collapsed_col_df,
row_sort_class=["class"],
col_meta=row_df,
col_sort_class=["to_class"],
col_colors=CLASS_COLOR_DICT,
col_class_order="group_visit_order",
col_item_order=["visit_order"],
plot_type="heatmap",
tick_rot=45,
col_ticks=True,
gridline_kws=gridline_kws,
)
cax = divider.append_axes("right", size="1%", pad=0.02, sharey=ax)
remove_shared_ax(cax)
sns.heatmap(
collapsed_col_df["order"][:, None], ax=cax, cbar=False, cmap="RdBu", center=0
)
cax.set_xticks([])
cax.set_yticks([])
cax.set_ylabel(r"Hops $\to$", rotation=-90, ha="center", va="center", labelpad=20)
cax.yaxis.set_label_position("right")
top_cax.set_yticks([0.5])
top_cax.set_yticklabels(["Class"], va="center")
ax.set_xlabel("Neuron")
ax.set_ylabel("Source class")
stashfig("collapsed-log-heat-transpose-labeled" + basename, dpi=200)
# %% [markdown]
# # clustermap the matrix
sns.set_context("talk", font_scale=1)
linkage = "average"
metric = "euclidean"
colors = np.vectorize(CLASS_COLOR_DICT.get)(row_df["to_class"])
perm_inds, sort_collapsed_col_df = sort_meta(
collapsed_col_df, sort_class=["from_class"]
)
sort_log_collapsed_hist = log_collapsed_hist[:, perm_inds]
cg = sns.clustermap(
data=sort_log_collapsed_hist.T,
col_cluster=True,
row_cluster=False,
col_colors=colors,
cmap="RdBu_r",
center=0,
cbar_pos=None,
method=linkage,
metric=metric,
)
ax = cg.ax_heatmap
draw_separators(
ax,
ax_type="y",
sort_meta=sort_collapsed_col_df,
sort_class=["from_class"],
tick_rot=0,
)
ax.xaxis.set_ticks([])
# ax.set_ylabel(r"Visits over time $\to$")
ax.set_xlabel("Neuron")
ax.yaxis.tick_left()
# ax.set_yticklabels(ax.get_yticklabels(), ha="left")
stashfig("collapsed-log-clustermap" + basename)
# stashfig("collapsed-log-clustermap" + basename, fmt="pdf")
# %% [markdown]
# # Do some plotting for illustration only
if plot_examples:
sns.set_context("talk")
sns.set_palette("Set1")
examples = [742, 605, 743, 2282, 596, 2367, 1690, 2313]
for target_ind in examples:
row = collapsed_hist[target_ind, :]
perm_inds, sort_col_df = sort_meta(collapsed_col_df, sort_class=["from_class"])
sort_row = row[perm_inds]
fig, ax = plt.subplots(1, 1)
xs = np.arange(len(sort_row)) + 0.5
divider = make_axes_locatable(ax)
bot_cax = divider.append_axes("bottom", size="3%", pad=0.02, sharex=ax)
remove_shared_ax(bot_cax)
ax.bar(x=xs, height=sort_row, width=0.8)
draw_separators(
ax, sort_meta=sort_col_df, sort_class=["from_class"], tick_rot=0
)
ax.set_xlim(0, len(xs))
ax.set_ylabel("# hits @ time")
sns.heatmap(
collapsed_col_df["order"][None, :],
ax=bot_cax,
cbar=False,
cmap="RdBu",
center=0,
)
bot_cax.set_xticks([])
bot_cax.set_yticks([])
bot_cax.set_xlabel(r"Hops $\to$", x=0.1, ha="left", labelpad=-22)
bot_cax.set_xticks([20.5, 24.5, 28.5])
bot_cax.set_xticklabels([1, 5, 9], rotation=0)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
target_skid = meta.iloc[target_ind, :].name
ax.set_title(
f"Response for cell {target_skid} ({meta[meta['idx'] == target_ind]['Merge Class'].values[0]})"
)
stashfig(f"{target_skid}-response-hist" + basename)
|
# Import
import traceback
from datetime import datetime
import aiohttp
import discord
from discord import Webhook, AsyncWebhookAdapter
from discord.ext import commands
import psutil
# Framework
import Framework
# Cog Initialising
class HANDLER(commands.Cog):
def __init__(self, client):
self.client = client
# Tell
@commands.command()
@commands.is_owner()
async def tell(self, ctx, *, message):
await ctx.message.delete()
embed = discord.Embed(title="", colour=Framework.Farbe.Red, description=f"{message}")
await ctx.send(embed=embed)
# ERROR_HANDLER
@commands.Cog.listener()
@Framework.Wrappers.TimeLogger
async def on_command_error(self, ctx, error):
if isinstance(error, commands.DisabledCommand):
embed = discord.Embed(
                title=f"{Framework.YAML.GET('Embed', 'HTitle')}",
colour=Framework.Farbe.Red,
description='Dieser Command ist Deaktiviert.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.NoPrivateMessage):
embed = discord.Embed(
                title=f"{Framework.YAML.GET('Embed', 'HTitle')}",
colour=Framework.Farbe.Red,
description='Du darfst diesen Command nicht in Privatnachrichten nutzen.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
        elif isinstance(error, (commands.BadArgument, commands.ArgumentParsingError, commands.BadBoolArgument)):
embed = discord.Embed(
                title=f"{Framework.YAML.GET('Embed', 'HTitle')}",
colour=Framework.Farbe.Red,
description=f'Dein angegebenes Argument ist fehlerhaft.\n`{error}`'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
        elif isinstance(error, (commands.MissingRequiredArgument, commands.TooManyArguments)):
embed = discord.Embed(
                title=f"{Framework.YAML.GET('Embed', 'HTitle')}",
colour=Framework.Farbe.Red,
description=f'Dein angegebenes Argument ist fehlerhaft.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
        elif isinstance(error, (commands.MissingPermissions, commands.BotMissingPermissions)):
embed = discord.Embed(
                title=f"{Framework.YAML.GET('Embed', 'HTitle')}",
colour=Framework.Farbe.Red,
description=f'Du besitzt nicht die benötigten Rechte ({error.missing_perms}), andernfalls besitze ich nicht die benötigten Rechte!'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.NotOwner):
embed = discord.Embed(
title='Hey! Was machst du da?',
colour=Framework.Farbe.Red,
description='Du kannst mich mit diesem Befehl __stark beschädigen__!'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(
                title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
                description=f'Du **musst {"%.2f" % round(error.retry_after, 2)}sek. warten**, bevor du den Command erneut benutzen kannst'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.CommandNotFound):
return
elif isinstance(error, commands.CheckFailure):
embed = discord.Embed(
title='Hey! Was machst du da?',
colour=Framework.Farbe.Red,
description=f'Du erfüllst nicht die benötigten Rechte.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.CommandInvokeError):
if isinstance(error.original, Framework.CreditError):
embed = discord.Embed(
title='',
colour=Framework.Farbe.Red,
description=f'{error.__context__}'
)
embed.set_author(name="Credit Bank", icon_url=Framework.YAML.GET("Bilder", "Credits"))
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error.original, Framework.MusicError):
embed = discord.Embed(
                    title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Etwas in der Rubrik: `Enhanced Music` ist schiefgelaufen. Versuche es erneut.\n`{error}`'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error.original, Framework.UccountError):
embed = discord.Embed(
title='-Uccount-',
colour=Framework.Farbe.Red,
description=f'{error.__context__}'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error.original, Framework.YAMLError):
embed = discord.Embed(
title='-YAML-',
colour=Framework.Farbe.Red,
description=f'{error.__context__}'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
else:
embed = discord.Embed(
title='ACHTUNG!',
colour=Framework.Farbe.Red,
                    description='In diesem Command ist ein schwerwiegender Fehler aufgetreten!\nIch habe die Fehlermeldung an das Development Team weitergeleitet. '
'Tritt dieser Fehler in den nächsten Tagen erneut auf, '
'kontaktiere **dringend** den Support: **!s**'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
async with aiohttp.ClientSession() as session:
url = Framework.YAML.GET("Variables", "ClientSide", "Webhooks", "System")
webhook = Webhook.from_url(url, adapter=AsyncWebhookAdapter(session))
trace = traceback.format_exception(None, error, error.__traceback__)
if '\nThe above exception was the direct cause of the following exception:\n\n' in trace:
trace = trace[:trace.index(
'\nThe above exception was the direct cause of the following exception:\n\n')]
traceback_text = "\n".join(trace)
else:
                        traceback_text = "".join(trace)  # keep it a string for the length trimming below
y = len(traceback_text)
while y >= 1400:
traceback_text = traceback_text[:-1]
y = len(traceback_text)
print(traceback_text)
try:
Server = ctx.guild.name
except:
Server = None
try:
Channel = ctx.channel.name
except:
Channel = None
erembed = discord.Embed(
title="\u200b\nEin Fehler ist aufgetreten!\n\u200b",
colour=Framework.Farbe.Red,
description=f"**Ausgeführt von:**\n`{ctx.author} | {ctx.author.id}`\n\n"
f"**Command Information:**\n"
f"Executed on Server: `{Server}`\n"
f"Executed in Channel: `{Channel}`\n"
f"Cog: `{ctx.cog}`\n"
f"Command: `{self.client.command_prefix}{ctx.command.name} {ctx.command.signature}`\n"
f"_Executed:_ `{ctx.message.content}`\n\n"
f"**Error:**\n"
f"`{error}`\n\n"
f"**Analytics:**\n"
f"CPU: `{psutil.cpu_percent(interval=1, percpu=True)}`\n"
f"RAM: `{psutil.virtual_memory().percent}`\n\n"
f"**Traceback:**```py\n{str(traceback_text)}\n```",
timestamp=datetime.utcnow()
)
embed.set_footer(text='\u200b', icon_url=Framework.YAML.GET("Bilder", "Clock"))
await webhook.send(username="System Benachrichtigung", avatar_url=self.client.user.avatar_url,
embed=erembed)
await Framework.Messaging.Universal_send(ctx, embed, 15)
else:
if type(error) in [AttributeError, ValueError, KeyError]:
embed = discord.Embed(
                    title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Ein `invoke_error` ist aufgetreten!\nException: `{error}`'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
else:
embed = discord.Embed(
title='ACHTUNG!',
colour=Framework.Farbe.Red,
                    description='In diesem Command ist ein schwerwiegender Fehler aufgetreten!\nIch habe die Fehlermeldung an das Development Team weitergeleitet. '
'Tritt dieser Fehler in den nächsten Tagen erneut auf, '
'kontaktiere **dringend** den Support: **!s**'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
async with aiohttp.ClientSession() as session:
url = Framework.YAML.GET("Variables", "ClientSide", "Webhooks", "System")
webhook = Webhook.from_url(url, adapter=AsyncWebhookAdapter(session))
trace = traceback.format_exception(None, error, error.__traceback__)
if '\nThe above exception was the direct cause of the following exception:\n\n' in trace:
trace = trace[
:trace.index(
'\nThe above exception was the direct cause of the following exception:\n\n')]
traceback_text = "\n".join(trace)
else:
                        traceback_text = "".join(trace)  # keep it a string for the length trimming below
y = len(traceback_text)
while y >= 1400:
traceback_text = traceback_text[:-1]
y = len(traceback_text)
print(traceback_text)
try:
Server = ctx.guild.name
except:
Server = None
try:
Channel = ctx.channel.name
except:
Channel = None
erembed = discord.Embed(
title="\u200b\nEin Fehler ist aufgetreten!\n\u200b",
colour=Framework.Farbe.Red,
description=f"**Ausgeführt von:**\n`{ctx.author} | {ctx.author.id}`\n\n"
f"**Command Information:**\n"
f"Executed on Server: `{Server}`\n"
f"Executed in Channel: `{Channel}`\n"
f"Cog: `{ctx.cog}`\n"
f"Command: `{self.client.command_prefix}{ctx.command.name} {ctx.command.signature}`\n"
f"_Executed:_ `{ctx.message.content}`\n\n"
f"**Error:**\n"
f"`{error}`\n\n"
f"**Analytics:**\n"
f"CPU: `{psutil.cpu_percent(interval=1, percpu=True)}`\n"
f"RAM: `{psutil.virtual_memory().percent}`\n\n"
f"**Traceback:**```py\n{str(traceback_text)}\n```",
timestamp=datetime.utcnow()
)
embed.set_footer(text='\u200b', icon_url=Framework.YAML.GET("Bilder", "Clock"))
await webhook.send(username="System Benachrichtigung", avatar_url=self.client.user.avatar_url,
embed=erembed)
await Framework.Messaging.Universal_send(ctx, embed, 15)
# COMMAND_HANDLER
@commands.command(aliases=["ds"])
@commands.is_owner()
async def disable_commands(self, ctx, *, command_name):
if command_name is not None:
command = self.client.get_command(command_name)
if command is None:
embed = discord.Embed(
                    title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Dieser Command existiert nicht.\nÜberprüfe ihn auf Rechtschreibfehler.\nDeine Angabe: **{command_name}**'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed)
elif command == ctx.command:
embed = discord.Embed(
                    title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Du darfst diesen Command nicht Deaktivieren!'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed)
else:
command.enabled = not command.enabled
choice = "Aktiviert" if command.enabled else "Deaktiviert"
choice_colour = Framework.Farbe.Lp_Green if command.enabled else Framework.Farbe.Dp_Red
embed = discord.Embed(
title=f'{choice}',
colour=choice_colour,
description=f'Der Command: **{command}** wurde erfolgreich {choice}.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed)
# Cog Finishing
def setup(client):
client.add_cog(HANDLER(client))
|
# Import
import traceback
from datetime import datetime
import aiohttp
import discord
from discord import Webhook, AsyncWebhookAdapter
from discord.ext import commands
import psutil
# Framework
import Framework
# Cog Initialising
class HANDLER(commands.Cog):
def __init__(self, client):
self.client = client
# Tell
@commands.command()
@commands.is_owner()
async def tell(self, ctx, *, message):
await ctx.message.delete()
embed = discord.Embed(title="", colour=Framework.Farbe.Red, description=f"{message}")
await ctx.send(embed=embed)
# ERROR_HANDLER
@commands.Cog.listener()
@Framework.Wrappers.TimeLogger
async def on_command_error(self, ctx, error):
if isinstance(error, commands.DisabledCommand):
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description='Dieser Command ist Deaktiviert.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.NoPrivateMessage):
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description='Du darfst diesen Command nicht in Privatnachrichten nutzen.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
        elif isinstance(error, (commands.BadArgument, commands.ArgumentParsingError, commands.BadBoolArgument)):
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Dein angegebenes Argument ist fehlerhaft.\n`{error}`'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
        elif isinstance(error, (commands.MissingRequiredArgument, commands.TooManyArguments)):
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Dein angegebenes Argument ist fehlerhaft.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
        elif isinstance(error, (commands.MissingPermissions, commands.BotMissingPermissions)):
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Du besitzt nicht die benötigten Rechte ({error.missing_perms}), andernfalls besitze ich nicht die benötigten Rechte!'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.NotOwner):
embed = discord.Embed(
title='Hey! Was machst du da?',
colour=Framework.Farbe.Red,
description='Du kannst mich mit diesem Befehl __stark beschädigen__!'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Du **musst {"%.2f" % round(error.retry_after, 2)}sek. warten**, bevor du den Command erneut benutzen kannst'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.CommandNotFound):
return
elif isinstance(error, commands.CheckFailure):
embed = discord.Embed(
title='Hey! Was machst du da?',
colour=Framework.Farbe.Red,
description=f'Du erfüllst nicht die benötigten Rechte.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error, commands.CommandInvokeError):
if isinstance(error.original, Framework.CreditError):
embed = discord.Embed(
title='',
colour=Framework.Farbe.Red,
description=f'{error.__context__}'
)
embed.set_author(name="Credit Bank", icon_url=Framework.YAML.GET("Bilder", "Credits"))
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error.original, Framework.MusicError):
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Etwas in der Rubrik: `Enhanced Music` ist schiefgelaufen. Versuche es erneut.\n`{error}`'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error.original, Framework.UccountError):
embed = discord.Embed(
title='-Uccount-',
colour=Framework.Farbe.Red,
description=f'{error.__context__}'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
elif isinstance(error.original, Framework.YAMLError):
embed = discord.Embed(
title='-YAML-',
colour=Framework.Farbe.Red,
description=f'{error.__context__}'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
else:
embed = discord.Embed(
title='ACHTUNG!',
colour=Framework.Farbe.Red,
                    description='In diesem Command ist ein schwerwiegender Fehler aufgetreten!\nIch habe die Fehlermeldung an das Development Team weitergeleitet. '
'Tritt dieser Fehler in den nächsten Tagen erneut auf, '
'kontaktiere **dringend** den Support: **!s**'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
async with aiohttp.ClientSession() as session:
url = Framework.YAML.GET("Variables", "ClientSide", "Webhooks", "System")
webhook = Webhook.from_url(url, adapter=AsyncWebhookAdapter(session))
trace = traceback.format_exception(None, error, error.__traceback__)
if '\nThe above exception was the direct cause of the following exception:\n\n' in trace:
trace = trace[:trace.index(
'\nThe above exception was the direct cause of the following exception:\n\n')]
traceback_text = "\n".join(trace)
else:
                        traceback_text = "".join(trace)  # keep it a string for the length trimming below
y = len(traceback_text)
while y >= 1400:
traceback_text = traceback_text[:-1]
y = len(traceback_text)
print(traceback_text)
try:
Server = ctx.guild.name
except:
Server = None
try:
Channel = ctx.channel.name
except:
Channel = None
erembed = discord.Embed(
title="\u200b\nEin Fehler ist aufgetreten!\n\u200b",
colour=Framework.Farbe.Red,
description=f"**Ausgeführt von:**\n`{ctx.author} | {ctx.author.id}`\n\n"
f"**Command Information:**\n"
f"Executed on Server: `{Server}`\n"
f"Executed in Channel: `{Channel}`\n"
f"Cog: `{ctx.cog}`\n"
f"Command: `{self.client.command_prefix}{ctx.command.name} {ctx.command.signature}`\n"
f"_Executed:_ `{ctx.message.content}`\n\n"
f"**Error:**\n"
f"`{error}`\n\n"
f"**Analytics:**\n"
f"CPU: `{psutil.cpu_percent(interval=1, percpu=True)}`\n"
f"RAM: `{psutil.virtual_memory().percent}`\n\n"
f"**Traceback:**```py\n{str(traceback_text)}\n```",
timestamp=datetime.utcnow()
)
embed.set_footer(text='\u200b', icon_url=Framework.YAML.GET("Bilder", "Clock"))
await webhook.send(username="System Benachrichtigung", avatar_url=self.client.user.avatar_url,
embed=erembed)
await Framework.Messaging.Universal_send(ctx, embed, 15)
else:
if type(error) in [AttributeError, ValueError, KeyError]:
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Ein `invoke_error` ist aufgetreten!\nException: `{error}`'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed, 15)
else:
embed = discord.Embed(
title='ACHTUNG!',
colour=Framework.Farbe.Red,
                    description='In diesem Command ist ein schwerwiegender Fehler aufgetreten!\nIch habe die Fehlermeldung an das Development Team weitergeleitet. '
'Tritt dieser Fehler in den nächsten Tagen erneut auf, '
'kontaktiere **dringend** den Support: **!s**'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
async with aiohttp.ClientSession() as session:
url = Framework.YAML.GET("Variables", "ClientSide", "Webhooks", "System")
webhook = Webhook.from_url(url, adapter=AsyncWebhookAdapter(session))
trace = traceback.format_exception(None, error, error.__traceback__)
if '\nThe above exception was the direct cause of the following exception:\n\n' in trace:
trace = trace[
:trace.index(
'\nThe above exception was the direct cause of the following exception:\n\n')]
traceback_text = "\n".join(trace)
else:
                        traceback_text = "".join(trace)  # keep it a string for the length trimming below
y = len(traceback_text)
while y >= 1400:
traceback_text = traceback_text[:-1]
y = len(traceback_text)
print(traceback_text)
try:
Server = ctx.guild.name
except:
Server = None
try:
Channel = ctx.channel.name
except:
Channel = None
erembed = discord.Embed(
title="\u200b\nEin Fehler ist aufgetreten!\n\u200b",
colour=Framework.Farbe.Red,
description=f"**Ausgeführt von:**\n`{ctx.author} | {ctx.author.id}`\n\n"
f"**Command Information:**\n"
f"Executed on Server: `{Server}`\n"
f"Executed in Channel: `{Channel}`\n"
f"Cog: `{ctx.cog}`\n"
f"Command: `{self.client.command_prefix}{ctx.command.name} {ctx.command.signature}`\n"
f"_Executed:_ `{ctx.message.content}`\n\n"
f"**Error:**\n"
f"`{error}`\n\n"
f"**Analytics:**\n"
f"CPU: `{psutil.cpu_percent(interval=1, percpu=True)}`\n"
f"RAM: `{psutil.virtual_memory().percent}`\n\n"
f"**Traceback:**```py\n{str(traceback_text)}\n```",
timestamp=datetime.utcnow()
)
embed.set_footer(text='\u200b', icon_url=Framework.YAML.GET("Bilder", "Clock"))
await webhook.send(username="System Benachrichtigung", avatar_url=self.client.user.avatar_url,
embed=erembed)
await Framework.Messaging.Universal_send(ctx, embed, 15)
# COMMAND_HANDLER
@commands.command(aliases=["ds"])
@commands.is_owner()
async def disable_commands(self, ctx, *, command_name):
if command_name is not None:
command = self.client.get_command(command_name)
if command is None:
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Dieser Command existiert nicht.\nÜberprüfe ihn auf Rechtschreibfehler.\nDeine Angabe: **{command_name}**'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed)
elif command == ctx.command:
embed = discord.Embed(
title=f'{Framework.YAML.GET("Embed", "HTitle")}',
colour=Framework.Farbe.Red,
description=f'Du darfst diesen Command nicht Deaktivieren!'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed)
else:
command.enabled = not command.enabled
choice = "Aktiviert" if command.enabled else "Deaktiviert"
choice_colour = Framework.Farbe.Lp_Green if command.enabled else Framework.Farbe.Dp_Red
embed = discord.Embed(
title=f'{choice}',
colour=choice_colour,
description=f'Der Command: **{command}** wurde erfolgreich {choice}.'
)
embed.set_thumbnail(url=self.client.user.avatar_url)
await Framework.Messaging.Universal_send(ctx, embed)
# Cog Finishing
def setup(client):
client.add_cog(HANDLER(client))
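# Usage sketch (illustrative, not part of the cog): with the discord.py 1.x-style
# extension API used above, a bot entry point would typically load this file as an
# extension; the module path "cogs.handler" and TOKEN below are assumptions.
#
#     bot = commands.Bot(command_prefix="!")
#     bot.load_extension("cogs.handler")  # invokes setup(client) defined above
#     bot.run(TOKEN)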
|
import argparse
import requests
from cromwell_tools.cromwell_api import CromwellAPI
from cromwell_tools.cromwell_auth import CromwellAuth
from cromwell_tools.diag import task_runtime
from cromwell_tools import __version__
diagnostic_index = {
'task_runtime': task_runtime.run
}
def parser(arguments=None):
    # TODO: dynamically walk through the commands and automatically create parsers here
main_parser = argparse.ArgumentParser()
# Check the installed version of Cromwell-tools
main_parser.add_argument(
'-V', '--version', action='version', version=f'%(prog)s {__version__}'
)
subparsers = main_parser.add_subparsers(help='sub-command help', dest='command')
# sub-commands of cromwell-tools
submit = subparsers.add_parser(
'submit', help='submit help', description='Submit a WDL workflow on Cromwell.'
)
wait = subparsers.add_parser(
'wait',
help='wait help',
description='Wait for one or more running workflow to finish.',
)
status = subparsers.add_parser(
'status',
help='status help',
description='Get the status of one or more workflows.',
)
abort = subparsers.add_parser(
'abort',
help='abort help',
description='Request Cromwell to abort a running workflow by UUID.',
)
release_hold = subparsers.add_parser(
'release_hold',
help='release_hold help',
description='Request Cromwell to release the hold on a workflow.',
)
metadata = subparsers.add_parser(
'metadata',
help='metadata help',
description='Retrieve the workflow and call-level metadata for a specified workflow by UUID.',
)
query = subparsers.add_parser(
'query',
help='query help',
description='[NOT IMPLEMENTED IN CLI] Query for workflows.',
)
health = subparsers.add_parser(
'health',
help='health help',
description='Check that cromwell is running and that provided authentication is valid.',
)
task_runtime = subparsers.add_parser(
'task_runtime',
help='task_runtime help',
description='Output tsv breakdown of task runtimes by execution event categories',
)
# cromwell url and authentication arguments apply to all sub-commands
cromwell_sub_commands = (
submit,
wait,
status,
abort,
release_hold,
metadata,
query,
health,
task_runtime,
)
auth_args = {
'url': 'The URL to the Cromwell server. e.g. "https://cromwell.server.org/"',
'username': 'Cromwell username for HTTPBasicAuth.',
'password': 'Cromwell password for HTTPBasicAuth.',
'secrets_file': 'Path to the JSON file containing username, password, and url fields.',
'service_account_key': 'Path to the JSON key file for authenticating with CaaS.',
}
def add_auth_args(subcommand_parser):
for arg_dest, help_text in auth_args.items():
subcommand_parser.add_argument(
'--{arg}'.format(arg=arg_dest.replace('_', '-')),
dest=arg_dest,
default=None,
type=str,
help=help_text,
)
# TODO: this should be a group which is called authentication
for p in cromwell_sub_commands:
add_auth_args(p)
# submit arguments
submit.add_argument(
'-w',
'--wdl-file',
dest='wdl_file',
type=str,
required=True,
help='Path to the workflow source file to submit for execution.',
)
submit.add_argument(
'-i',
'--inputs-files',
dest='inputs_files',
nargs='+',
type=str,
required=True,
help='Path(s) to the input file(s) containing input data in JSON format, separated by space.',
)
submit.add_argument(
'-d',
'--deps-file',
dest='dependencies',
nargs='+',
type=str,
help='Path to the Zip file containing dependencies, or a list of raw dependency files to '
'be zipped together separated by space.',
)
submit.add_argument(
'-o',
'--options-file',
dest='options_file',
type=str,
help='Path to the Cromwell configs JSON file.',
)
# TODO: add a mutually exclusive group to make it easy to add labels for users
submit.add_argument(
'-l',
'--label-file',
dest='label_file',
type=str,
default=None,
help='Path to the JSON file containing a collection of key/value pairs for workflow labels.',
)
submit.add_argument(
'-c',
'--collection-name',
dest='collection_name',
type=str,
default=None,
help='Collection in SAM that the workflow should belong to, if use CaaS.',
)
submit.add_argument(
'--on-hold',
dest='on_hold',
type=bool,
default=False,
help='Whether to submit the workflow in "On Hold" status.',
)
submit.add_argument(
'--validate-labels',
dest='validate_labels',
type=bool,
default=False,
help='Whether to validate cromwell labels.',
)
# wait arguments
wait.add_argument('workflow_ids', nargs='+')
wait.add_argument(
'--timeout-minutes',
dest='timeout_minutes',
type=int,
default=120,
help='number of minutes to wait before timeout.',
)
wait.add_argument(
'--poll-interval-seconds',
dest='poll_interval_seconds',
type=int,
default=30,
help='seconds between polling cromwell for workflow status.',
)
wait.add_argument(
'--silent',
dest='verbose',
action='store_false',
help='whether to silently print verbose workflow information while polling cromwell.',
)
# status arguments
status.add_argument(
'--uuid',
required=True,
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# abort arguments
abort.add_argument(
'--uuid',
required=True,
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# release_hold arguments
release_hold.add_argument(
'--uuid',
required=True,
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# metadata arguments
metadata.add_argument(
'--uuid',
required=True,
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# TODO: add a mutually exclusive group to make it fail early
metadata.add_argument(
'--includeKey',
nargs='+',
default=None,
help='When specified key(s) to include from the metadata. Matches any key starting with the value. May not be used with excludeKey.',
)
metadata.add_argument(
'--excludeKey',
nargs='+',
default=None,
help='When specified key(s) to exclude from the metadata. Matches any key starting with the value. May not be used with includeKey.',
)
metadata.add_argument(
'--expandSubWorkflows',
default=False,
help='When true, metadata for sub workflows will be fetched and inserted automatically in the metadata response.',
)
either_runtime = task_runtime.add_mutually_exclusive_group(required=True)
either_runtime.add_argument(
'--metadata',
dest='metadata',
help='Metadata json file to calculate cost on',
)
either_runtime.add_argument(
'--uuid',
dest='uuid',
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# query arguments
# TODO: implement CLI entry for query API.
# group all of the arguments
args = vars(main_parser.parse_args(arguments))
# TODO: see if this can be moved or if the commands can be populated from above
if args['command'] in (
'submit',
'wait',
'status',
'abort',
'release_hold',
'health',
'metadata',
'task_runtime',
):
auth_arg_dict = {k: args.get(k) for k in auth_args.keys()}
auth = CromwellAuth.harmonize_credentials(**auth_arg_dict)
args['auth'] = auth
for k in auth_args:
if k in args:
del args[k]
command = getattr(CromwellAPI, args['command'], False)
if not command:
try:
command = diagnostic_index[args['command']]
except KeyError:
            raise KeyError(f"{args['command']} is not a valid command.")
del args['command']
return command, args
# this should just getattr from CromwellAPI and call the func with args.
# TODO: refactor this module into class-based parsers
def main(arguments=None):
command, args = parser(arguments)
result = command(**args)
if isinstance(result, requests.Response):
print(result.text)
else:
print(result)
|
import argparse
import requests
from cromwell_tools.cromwell_api import CromwellAPI
from cromwell_tools.cromwell_auth import CromwellAuth
from cromwell_tools.diag import task_runtime
from cromwell_tools import __version__
diagnostic_index = {
'task_runtime': task_runtime.run
}
def parser(arguments=None):
    # TODO: dynamically walk through the commands and automatically create parsers here
main_parser = argparse.ArgumentParser()
# Check the installed version of Cromwell-tools
main_parser.add_argument(
'-V', '--version', action='version', version=f'%(prog)s {__version__}'
)
subparsers = main_parser.add_subparsers(help='sub-command help', dest='command')
# sub-commands of cromwell-tools
submit = subparsers.add_parser(
'submit', help='submit help', description='Submit a WDL workflow on Cromwell.'
)
wait = subparsers.add_parser(
'wait',
help='wait help',
description='Wait for one or more running workflow to finish.',
)
status = subparsers.add_parser(
'status',
help='status help',
description='Get the status of one or more workflows.',
)
abort = subparsers.add_parser(
'abort',
help='abort help',
description='Request Cromwell to abort a running workflow by UUID.',
)
release_hold = subparsers.add_parser(
'release_hold',
help='release_hold help',
description='Request Cromwell to release the hold on a workflow.',
)
metadata = subparsers.add_parser(
'metadata',
help='metadata help',
description='Retrieve the workflow and call-level metadata for a specified workflow by UUID.',
)
query = subparsers.add_parser(
'query',
help='query help',
description='[NOT IMPLEMENTED IN CLI] Query for workflows.',
)
health = subparsers.add_parser(
'health',
help='health help',
description='Check that cromwell is running and that provided authentication is valid.',
)
task_runtime = subparsers.add_parser(
'task_runtime',
help='task_runtime help',
description='Output tsv breakdown of task runtimes by execution event categories',
)
# cromwell url and authentication arguments apply to all sub-commands
cromwell_sub_commands = (
submit,
wait,
status,
abort,
release_hold,
metadata,
query,
health,
task_runtime,
)
auth_args = {
'url': 'The URL to the Cromwell server. e.g. "https://cromwell.server.org/"',
'username': 'Cromwell username for HTTPBasicAuth.',
'password': 'Cromwell password for HTTPBasicAuth.',
'secrets_file': 'Path to the JSON file containing username, password, and url fields.',
'service_account_key': 'Path to the JSON key file for authenticating with CaaS.',
}
def add_auth_args(subcommand_parser):
for arg_dest, help_text in auth_args.items():
subcommand_parser.add_argument(
'--{arg}'.format(arg=arg_dest.replace('_', '-')),
dest=arg_dest,
default=None,
type=str,
help=help_text,
)
# TODO: this should be a group which is called authentication
for p in cromwell_sub_commands:
add_auth_args(p)
# submit arguments
submit.add_argument(
'-w',
'--wdl-file',
dest='wdl_file',
type=str,
required=True,
help='Path to the workflow source file to submit for execution.',
)
submit.add_argument(
'-i',
'--inputs-files',
dest='inputs_files',
nargs='+',
type=str,
required=True,
help='Path(s) to the input file(s) containing input data in JSON format, separated by space.',
)
submit.add_argument(
'-d',
'--deps-file',
dest='dependencies',
nargs='+',
type=str,
help='Path to the Zip file containing dependencies, or a list of raw dependency files to '
'be zipped together separated by space.',
)
submit.add_argument(
'-o',
'--options-file',
dest='options_file',
type=str,
help='Path to the Cromwell configs JSON file.',
)
# TODO: add a mutually exclusive group to make it easy to add labels for users
submit.add_argument(
'-l',
'--label-file',
dest='label_file',
type=str,
default=None,
help='Path to the JSON file containing a collection of key/value pairs for workflow labels.',
)
submit.add_argument(
'-c',
'--collection-name',
dest='collection_name',
type=str,
default=None,
help='Collection in SAM that the workflow should belong to, if use CaaS.',
)
submit.add_argument(
'--on-hold',
dest='on_hold',
type=bool,
default=False,
help='Whether to submit the workflow in "On Hold" status.',
)
submit.add_argument(
'--validate-labels',
dest='validate_labels',
type=bool,
default=False,
help='Whether to validate cromwell labels.',
)
# wait arguments
wait.add_argument('workflow_ids', nargs='+')
wait.add_argument(
'--timeout-minutes',
dest='timeout_minutes',
type=int,
default=120,
help='number of minutes to wait before timeout.',
)
wait.add_argument(
'--poll-interval-seconds',
dest='poll_interval_seconds',
type=int,
default=30,
help='seconds between polling cromwell for workflow status.',
)
wait.add_argument(
'--silent',
dest='verbose',
action='store_false',
help='whether to silently print verbose workflow information while polling cromwell.',
)
# status arguments
status.add_argument(
'--uuid',
required=True,
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# abort arguments
abort.add_argument(
'--uuid',
required=True,
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# release_hold arguments
release_hold.add_argument(
'--uuid',
required=True,
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# metadata arguments
metadata.add_argument(
'--uuid',
required=True,
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# TODO: add a mutually exclusive group to make it fail early
metadata.add_argument(
'--includeKey',
nargs='+',
default=None,
help='When specified key(s) to include from the metadata. Matches any key starting with the value. May not be used with excludeKey.',
)
metadata.add_argument(
'--excludeKey',
nargs='+',
default=None,
help='When specified key(s) to exclude from the metadata. Matches any key starting with the value. May not be used with includeKey.',
)
metadata.add_argument(
'--expandSubWorkflows',
default=False,
help='When true, metadata for sub workflows will be fetched and inserted automatically in the metadata response.',
)
either_runtime = task_runtime.add_mutually_exclusive_group(required=True)
either_runtime.add_argument(
'--metadata',
dest='metadata',
help='Metadata json file to calculate cost on',
)
either_runtime.add_argument(
'--uuid',
dest='uuid',
help='A Cromwell workflow UUID, which is the workflow identifier.',
)
# query arguments
# TODO: implement CLI entry for query API.
# group all of the arguments
args = vars(main_parser.parse_args(arguments))
# TODO: see if this can be moved or if the commands can be populated from above
if args['command'] in (
'submit',
'wait',
'status',
'abort',
'release_hold',
'health',
'metadata',
'task_runtime',
):
auth_arg_dict = {k: args.get(k) for k in auth_args.keys()}
auth = CromwellAuth.harmonize_credentials(**auth_arg_dict)
args['auth'] = auth
for k in auth_args:
if k in args:
del args[k]
command = getattr(CromwellAPI, args['command'], False)
if not command:
try:
command = diagnostic_index[args['command']]
except KeyError:
raise KeyError(f"{args['command']} is not a valid command.")
del args['command']
return command, args
# this should just getattr from CromwellAPI and call the func with args.
# TODO: refactor this module into class-based parsers
def main(arguments=None):
command, args = parser(arguments)
result = command(**args)
if isinstance(result, requests.Response):
print(result.text)
else:
print(result)
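# Usage sketch: assuming this module is wired up as a console entry point named
# "cromwell-tools" (the name is an assumption), the sub-commands defined above can be
# exercised like so; passing the same tokens to main() directly drives the same parser:
#
#     cromwell-tools health --url https://cromwell.example.org
#     cromwell-tools status --url https://cromwell.example.org --uuid <workflow-uuid>
#
#     main(["health", "--url", "https://cromwell.example.org"])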
|
# -----------------------------------------------------------------------------
# Builder
# -----------------------------------------------------------------------------
# Team: DataHub
# -----------------------------------------------------------------------------
# Author: Maxime Sirois
# -----------------------------------------------------------------------------
"""Build the project
Takes various actions to build the project from the templates and parameters.
"""
# -----------------------------------------------------------------------------
from jinja2 import Template
import logging
from pathlib import Path
import subprocess
from . import constants as C
from . import utils
logger = logging.getLogger(__name__)
class Builder:
def __init__(self, properties, parameters=None):
self.parameters = parameters
self.properties = properties
def start(self):
self._build_project()
def _build_project(self):
"""
Build the Project in the target location.
"""
location = self.properties.location
for folder, body in self.parameters.items():
_create_folder_and_files(Path(location), folder, body)
def add_new_module(self):
location = self.properties.location
module_file_name = utils.slugify(self.properties.module_name) + '.py'
param = {
"template": "./lib/module.py",
"header": "./header_module.txt",
"template_string": {
"module_name": self.properties.module_name,
"team_name": self.properties.team_name,
"author_name": self.properties.author_name,
"module_name_styled": self.properties.module_name_styled
}
}
_create_file(module_file_name, param, location)
def _generate_file(content, location):
with open(location, 'w+') as f_out:
f_out.write(content)
if not content.endswith('\n'):
f_out.write('\n')
def _add_header(content, header_template):
header_content = header_template.read_text()
return header_content + '\n\n\n' + content
def _create_file(file_name, parameters, location):
content = ''
if parameters.get('template'):
template_name = parameters['template']
logging.info(f" - {file_name} :: template :: {template_name}")
# Get template
file_template = Path(C._TEMPLATE_FOLDER, template_name)
template_content = file_template.read_text()
# Check if there is a header to paste before content
if parameters.get('header'):
header_template = Path(C._TEMPLATE_FOLDER, parameters['header'])
template_content = _add_header(template_content, header_template)
# Change values of template
t = Template(template_content)
content = t.render(**parameters.get('template_string', {}))
# Generate the File (with Header if applicable)
_generate_file(content, location / file_name)
def _create_folder_and_files(location, folder, body):
location = location / folder
if folder == 'venv':
if body.get('exe'):
            logging.info(f">>>> Creating VirtualEnv from {body.get('exe')}")
            cmd = f"\"{body.get('exe')}\" \"{location}\""
subprocess.check_call(cmd, shell=True)
logging.info(f">>>> Pip Install Requirements.txt")
pip_exe = location / 'Scripts' / 'pip.exe'
requirements = location / '..' / 'requirements.txt'
cmd_pip = f'"{pip_exe}" install -r "{requirements}"'
subprocess.check_call(cmd_pip, shell=True)
return
else:
logging.info(f">> Creating {folder} @ {str(location)}")
location.mkdir()
if body.get('files'):
for file_name, file_param in body['files'].items():
_create_file(file_name, file_param, location)
if body.get('folders'):
for folder, body_ in body['folders'].items():
_create_folder_and_files(location, folder, body_)
|
# -----------------------------------------------------------------------------
# Builder
# -----------------------------------------------------------------------------
# Team: DataHub
# -----------------------------------------------------------------------------
# Author: Maxime Sirois
# -----------------------------------------------------------------------------
"""Build the project
Takes various actions to build the project from the templates and parameters.
"""
# -----------------------------------------------------------------------------
from jinja2 import Template
import logging
from pathlib import Path
import subprocess
from . import constants as C
from . import utils
logger = logging.getLogger(__name__)
class Builder:
def __init__(self, properties, parameters=None):
self.parameters = parameters
self.properties = properties
def start(self):
self._build_project()
def _build_project(self):
"""
Build the Project in the target location.
"""
location = self.properties.location
for folder, body in self.parameters.items():
_create_folder_and_files(Path(location), folder, body)
def add_new_module(self):
location = self.properties.location
module_file_name = utils.slugify(self.properties.module_name) + '.py'
param = {
"template": "./lib/module.py",
"header": "./header_module.txt",
"template_string": {
"module_name": self.properties.module_name,
"team_name": self.properties.team_name,
"author_name": self.properties.author_name,
"module_name_styled": self.properties.module_name_styled
}
}
_create_file(module_file_name, param, location)
def _generate_file(content, location):
with open(location, 'w+') as f_out:
f_out.write(content)
if not content.endswith('\n'):
f_out.write('\n')
def _add_header(content, header_template):
header_content = header_template.read_text()
return header_content + '\n\n\n' + content
def _create_file(file_name, parameters, location):
content = ''
if parameters.get('template'):
template_name = parameters['template']
logging.info(f" - {file_name} :: template :: {template_name}")
# Get template
file_template = Path(C._TEMPLATE_FOLDER, template_name)
template_content = file_template.read_text()
# Check if there is a header to paste before content
if parameters.get('header'):
header_template = Path(C._TEMPLATE_FOLDER, parameters['header'])
template_content = _add_header(template_content, header_template)
# Change values of template
t = Template(template_content)
content = t.render(**parameters.get('template_string', {}))
# Generate the File (with Header if applicable)
_generate_file(content, location / file_name)
def _create_folder_and_files(location, folder, body):
location = location / folder
if folder == 'venv':
if body.get('exe'):
logging.info(f">>>> Creating VirtualEnv from {body.get('exe')}")
cmd = f"\"{body.get('exe')}\" \"{location}\""
subprocess.check_call(cmd, shell=True)
logging.info(f">>>> Pip Install Requirements.txt")
pip_exe = location / 'Scripts' / 'pip.exe'
requirements = location / '..' / 'requirements.txt'
cmd_pip = f'"{pip_exe}" install -r "{requirements}"'
subprocess.check_call(cmd_pip, shell=True)
return
else:
logging.info(f">> Creating {folder} @ {str(location)}")
location.mkdir()
if body.get('files'):
for file_name, file_param in body['files'].items():
_create_file(file_name, file_param, location)
if body.get('folders'):
for folder, body_ in body['folders'].items():
_create_folder_and_files(location, folder, body_)
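# Illustrative sketch of the `parameters` structure these helpers walk (inferred from
# _create_folder_and_files and _create_file above); the template paths and the
# virtualenv executable below are placeholders, not part of this module:
#
#     parameters = {
#         "my_project": {
#             "files": {
#                 "README.md": {"template": "./readme.md"},
#             },
#             "folders": {
#                 "lib": {
#                     "files": {
#                         "main.py": {
#                             "template": "./lib/module.py",
#                             "header": "./header_module.txt",
#                             "template_string": {"module_name": "main"},
#                         },
#                     },
#                 },
#             },
#         },
#         "venv": {"exe": "C:/Python/Scripts/virtualenv.exe"},
#     }
#
#     Builder(properties, parameters).start()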
|
from copy import deepcopy
import asyncio
import json
import pandas as pd
import streamlit as st
from structlog import get_logger
from helpers import (
fromtimestamp,
show_weather,
WeatherItem,
gather_one_call_weather_data,
clean_time,
)
log = get_logger()
st.set_page_config(
layout="wide",
page_title="Peak Weather: 4,000 Footers",
page_icon=":mountain:",
)
@st.cache(ttl=60 * 60)
def load_data(lat_lon_pairs: list) -> list:
"""Function to fetch Open Weather data and cache results
Args:
lat_lon_pairs (list): Destinations to get data for
Returns:
list: List of dictionaries which are json responses from open weather
"""
log.info("Start Load Data")
data = asyncio.run(gather_one_call_weather_data(lat_lon_pairs))
log.info("Returning Load Data")
return data
@st.cache()
def load_metadata() -> pd.DataFrame:
"""Function to read mountain lat, lon, and other metadata and cache results
Returns:
pd.DataFrame: df containing information for 48 mountains
"""
df = pd.read_csv("./data/mountains.csv")
df = df.sort_values("name")
return df
def get_mtn_anchor(mountain: str) -> str:
anchor = mountain.lower().replace(" ", "-")
return f"[{mountain}](#{anchor})"
def main():
"""Main Streamlit App Entrypoint"""
st.title(
":sunny::mountain::rainbow: Peak Weather of the 4,000 Footers :rainbow::mountain::sunny:"
)
st.header(":umbrella: You can't stop the rain, but you can spot it! :umbrella:")
with st.expander("Expand for Basic App Information:"):
st.markdown(
"""\
# Peak Weather: New Hampshire's 4,000 Footers
Built to give you a dashboard view of the next few hours' forecast for New Hampshire's 48 4,000 ft mountains.
Gonna rain on the Kinsmans?
Is it snowing on Washington?
Should I hike Owl's Head?
Powered by [Streamlit](https://docs.streamlit.io/) + [Open Weather API](https://openweathermap.org/api).
            Specifically, Streamlit runs the web interactions and OpenWeather provides the data.
Built with :heart: from [Gar's Bar](https://tech.gerardbentley.com) by Gerard Bentley
"""
)
with st.spinner("Loading Mountain List"):
base_mountains = load_metadata()
with st.expander("Expand for Basic Mountain Information: "):
st.dataframe(base_mountains)
with st.spinner("Fetching Weather Data"):
lat_lon_pairs = zip(base_mountains.lat, base_mountains.lon)
cached_responses = load_data(lat_lon_pairs)
weather_responses = deepcopy(cached_responses)
first_response = weather_responses[0]
log.info("Weather Response", first_response=first_response)
if "current" not in first_response:
st.error(
"""\
### Oof...
Open Weather API can't be reached for data at the moment.
Apologies, feel free to check back soon."""
        )
        # The rest of the page indexes first_response["current"]; stop rendering here
        # to avoid a KeyError when the API could not be reached.
        st.stop()
st.write(
        f"## Time: {fromtimestamp(first_response['current']['dt']).strftime('%I:%M:%S %p, %b %d %Y')}"
)
table = []
table.append("| Mountains | | |")
table.append("|---|---|---|")
for left, middle, right in zip(
base_mountains.name[::3], base_mountains.name[1::3], base_mountains.name[2::3]
):
table.append(
f"| {get_mtn_anchor(left)} | {get_mtn_anchor(middle)} | {get_mtn_anchor(right)} |"
)
st.markdown("\n".join(table))
for mountain, response in zip(base_mountains.name, weather_responses):
st.write("-" * 88)
st.write(f"#### {mountain}")
        st.write(f"({response['lat']}, {response['lon']})")
        st.write(f"Weather {clean_time(response['current']['dt'])}: ")
current_temperature = round(response["current"]["temp"], 1)
st.metric("Temp (F)", current_temperature, 0.0)
for weather in response["current"]["weather"]:
weather_item = WeatherItem(**weather)
show_weather(weather_item)
with st.expander("Expand for future forecast:"):
for col, entry in zip(st.columns(5), response["hourly"][1:]):
                col.write(f"{clean_time(entry['dt'])}")
temperature = round(entry["temp"], 1)
col.metric(
"Temp (F)", temperature, round(temperature - current_temperature, 1)
)
for weather in entry["weather"]:
weather_item = WeatherItem(**weather)
show_weather(weather_item, col)
current_temperature = temperature
alerts = response.get("alerts")
if alerts is not None:
for alert in alerts:
body = (
                    f"### Alert From {alert['sender_name']}: {alert['event']}",
                    f"Duration: {fromtimestamp(alert['start'])} - {fromtimestamp(alert['end'])}",
                    alert["description"],
                    f"Tags: {'; '.join(alert['tags'])}",
)
st.warning("\n".join(body))
if __name__ == "__main__":
main()
|
from copy import deepcopy
import asyncio
import json
import pandas as pd
import streamlit as st
from structlog import get_logger
from helpers import (
fromtimestamp,
show_weather,
WeatherItem,
gather_one_call_weather_data,
clean_time,
)
log = get_logger()
st.set_page_config(
layout="wide",
page_title="Peak Weather: 4,000 Footers",
page_icon=":mountain:",
)
@st.cache(ttl=60 * 60)
def load_data(lat_lon_pairs: list) -> list:
"""Function to fetch Open Weather data and cache results
Args:
lat_lon_pairs (list): Destinations to get data for
Returns:
list: List of dictionaries which are json responses from open weather
"""
log.info("Start Load Data")
data = asyncio.run(gather_one_call_weather_data(lat_lon_pairs))
log.info("Returning Load Data")
return data
@st.cache()
def load_metadata() -> pd.DataFrame:
"""Function to read mountain lat, lon, and other metadata and cache results
Returns:
pd.DataFrame: df containing information for 48 mountains
"""
df = pd.read_csv("./data/mountains.csv")
df = df.sort_values("name")
return df
def get_mtn_anchor(mountain: str) -> str:
anchor = mountain.lower().replace(" ", "-")
return f"[{mountain}](#{anchor})"
def main():
"""Main Streamlit App Entrypoint"""
st.title(
":sunny::mountain::rainbow: Peak Weather of the 4,000 Footers :rainbow::mountain::sunny:"
)
st.header(":umbrella: You can't stop the rain, but you can spot it! :umbrella:")
with st.expander("Expand for Basic App Information:"):
st.markdown(
"""\
# Peak Weather: New Hampshire's 4,000 Footers
Built to give you a dashboard view of the next few hours' forecast for New Hampshire's 48 4,000 ft mountains.
Gonna rain on the Kinsmans?
Is it snowing on Washington?
Should I hike Owl's Head?
Powered by [Streamlit](https://docs.streamlit.io/) + [Open Weather API](https://openweathermap.org/api).
            Specifically, Streamlit runs the web interactions and OpenWeather provides the data.
Built with :heart: from [Gar's Bar](https://tech.gerardbentley.com) by Gerard Bentley
"""
)
with st.spinner("Loading Mountain List"):
base_mountains = load_metadata()
with st.expander("Expand for Basic Mountain Information: "):
st.dataframe(base_mountains)
with st.spinner("Fetching Weather Data"):
lat_lon_pairs = zip(base_mountains.lat, base_mountains.lon)
cached_responses = load_data(lat_lon_pairs)
weather_responses = deepcopy(cached_responses)
first_response = weather_responses[0]
log.info("Weather Response", first_response=first_response)
if "current" not in first_response:
st.error(
"""\
### Oof...
Open Weather API can't be reached for data at the moment.
Apologies, feel free to check back soon."""
        )
        # The rest of the page indexes first_response["current"]; stop rendering here
        # to avoid a KeyError when the API could not be reached.
        st.stop()
st.write(
f"## Time: {fromtimestamp(first_response['current']['dt']).strftime('%I:%M:%S %p, %b %d %Y')}"
)
table = []
table.append("| Mountains | | |")
table.append("|---|---|---|")
for left, middle, right in zip(
base_mountains.name[::3], base_mountains.name[1::3], base_mountains.name[2::3]
):
table.append(
f"| {get_mtn_anchor(left)} | {get_mtn_anchor(middle)} | {get_mtn_anchor(right)} |"
)
st.markdown("\n".join(table))
for mountain, response in zip(base_mountains.name, weather_responses):
st.write("-" * 88)
st.write(f"#### {mountain}")
st.write(f"({response['lat']}, {response['lon']})")
st.write(f"Weather {clean_time(response['current']['dt'])}: ")
current_temperature = round(response["current"]["temp"], 1)
st.metric("Temp (F)", current_temperature, 0.0)
for weather in response["current"]["weather"]:
weather_item = WeatherItem(**weather)
show_weather(weather_item)
with st.expander("Expand for future forecast:"):
for col, entry in zip(st.columns(5), response["hourly"][1:]):
col.write(f"{clean_time(entry['dt'])}")
temperature = round(entry["temp"], 1)
col.metric(
"Temp (F)", temperature, round(temperature - current_temperature, 1)
)
for weather in entry["weather"]:
weather_item = WeatherItem(**weather)
show_weather(weather_item, col)
current_temperature = temperature
alerts = response.get("alerts")
if alerts is not None:
for alert in alerts:
body = (
f"### Alert From {alert['sender_name']}: {alert['event']}",
f"Duration: {fromtimestamp(alert['start'])} - {fromtimestamp(alert['end'])}",
alert["description"],
f"Tags: {'; '.join(alert['tags'])}",
)
st.warning("\n".join(body))
if __name__ == "__main__":
main()
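# Run sketch: as a Streamlit app this file is meant to be launched via the Streamlit
# CLI rather than plain Python (the filename below is a placeholder for however this
# module is saved):
#
#     streamlit run streamlit_app.py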
|
import redis
from typing import Tuple, Union, List
class Redis():
def __init__(self, host: str = 'localhost', port: int = 6379, user: str = '', password: str = '') -> None:
self._host = host
self._port = port
self._user = user
self._password = password
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
@property
def host(self):
return self._host
@host.setter
def host(self, value):
        assert value != '', 'El parametro <VALUE> no fue definido correctamente en el setter'
self._host = value
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
@property
def port(self):
return self._port
@port.setter
def port(self, value):
        assert value != None, 'El parametro <VALUE> no fue definido correctamente en el setter'
self._port = value
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
@property
def user(self):
return self._user
@user.setter
def user(self, value):
self._user = value
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
@property
def password(self):
return self._password
@password.setter
def password(self, value):
self._password = value
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
def get_keys(self) -> List[str]:
keys = self.client.keys()
        return [f'{k+1}) {v.decode("utf-8")}' for k, v in enumerate(keys)]
def create_var(self, key: str, value: str, type: str = 'var') -> Tuple[str, str]:
assert key != '', 'El parametro <KEY> no ha sido validado correctamente'
assert value != '', 'El parametro <VALUE> no ha sido validado correctamente'
if type == 'var':
self.client.set(key, value)
elif type == 'list':
self.client.lpush(key, *value.split('\n'))
else:
raise Exception()
return (key, value)
def delete_var(self, key:str) -> None:
ret = self.client.delete(key)
if ret == 0:
raise Exception()
    def get_var(self, key: str):
var = None
assert key != '', 'El parametro <KEY> no ha sido validado correctamente'
try:
var = self.client.get(key)
except:
var = self.client.lrange(key, 0, -1)
var = '\n'.join([i.decode("utf-8") for i in var])
assert var != None, f'No se encuentra ningun dato con la <KEY> <{key}>'
return var
|
import redis
from typing import Tuple, Union, List
class Redis():
def __init__(self, host: str = 'localhost', port: int = 6379, user: str = '', password: str = '') -> None:
self._host = host
self._port = port
self._user = user
self._password = password
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
@property
def host(self):
return self._host
@host.setter
def host(self, value):
        assert value != '', 'El parametro <VALUE> no fue definido correctamente en el setter'
self._host = value
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
@property
def port(self):
return self._port
@port.setter
def port(self, value):
        assert value != None, 'El parametro <VALUE> no fue definido correctamente en el setter'
self._port = value
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
@property
def user(self):
return self._user
@user.setter
def user(self, value):
self._user = value
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
@property
def password(self):
return self._password
@password.setter
def password(self, value):
self._password = value
self.client = redis.StrictRedis(self._host, self._port, db=0, username=self._user, password=self._password)
def get_keys(self) -> List[str]:
keys = self.client.keys()
return [f'{k+1}) {v.decode("utf-8")}' for k, v in enumerate(keys)]
def create_var(self, key: str, value: str, type: str = 'var') -> Tuple[str, str]:
assert key != '', 'El parametro <KEY> no ha sido validado correctamente'
assert value != '', 'El parametro <VALUE> no ha sido validado correctamente'
if type == 'var':
self.client.set(key, value)
elif type == 'list':
self.client.lpush(key, *value.split('\n'))
else:
raise Exception()
return (key, value)
def delete_var(self, key:str) -> None:
ret = self.client.delete(key)
if ret == 0:
raise Exception()
    def get_var(self, key: str):
var = None
assert key != '', 'El parametro <KEY> no ha sido validado correctamente'
try:
var = self.client.get(key)
except:
var = self.client.lrange(key, 0, -1)
var = '\n'.join([i.decode("utf-8") for i in var])
assert var != None, f'No se encuentra ningun dato con la <KEY> <{key}>'
return var
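# Usage sketch (assumes a Redis server reachable on localhost:6379 without auth):
#
#     r = Redis()
#     r.create_var('saludo', 'hola')                   # plain string key
#     r.create_var('lista', 'uno\ndos\ntres', 'list')  # list key, one element per line
#     print(r.get_var('saludo'))
#     print(r.get_keys())
#     r.delete_var('saludo')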
|
# DataManager -> responsible for talking to the Google Sheets API.
# FlightSearch -> responsible for talking to the Flight Search API.
# FlightData -> responsible for structuring the flight data
# NotificationManager -> responsible for sending notifications with the deal flight details
from data_manager import DataManager
from flight_search import FlightSearch
from notification_manager import NotificationManager
from utils import format_notification
dm = DataManager()
fs = FlightSearch()
nm = NotificationManager()
sheet_data = dm.get_google_sheet_curret_data()
for data_row in sheet_data:
if data_row["iataCode"] == "":
data_row["iataCode"] = fs.get_iata_codes(data_row["city"])
dm.update_iata(data_row)
flight_info = fs.get_flights_data(data_row["iataCode"])
if flight_info is None:
continue
if data_row["lowestPrice"] >= flight_info.price:
        msg = f"\n({flight_info.city_from.split('-')[0].strip()}) -> ({flight_info.city_to.split('-')[0].strip()})"
print(msg)
if flight_info.stop_overs > 0:
            msg = f"Flight from {flight_info.city_from.split('-')[0].strip()} to {flight_info.city_to.split('-')[0].strip()} has 1 stop over, via {flight_info.via_city}."
print(msg)
nm.send_text(format_notification(flight_info))
nm.send_emails(format_notification(flight_info))
|
# DataManager -> responsible for talking to the Google Sheets API.
# FlightSearch -> responsible for talking to the Flight Search API.
# FlightData -> responsible for structuring the flight data
# NotificationManager -> responsible for sending notifications with the deal flight details
from data_manager import DataManager
from flight_search import FlightSearch
from notification_manager import NotificationManager
from utils import format_notification
dm = DataManager()
fs = FlightSearch()
nm = NotificationManager()
sheet_data = dm.get_google_sheet_curret_data()
for data_row in sheet_data:
if data_row["iataCode"] == "":
data_row["iataCode"] = fs.get_iata_codes(data_row["city"])
dm.update_iata(data_row)
flight_info = fs.get_flights_data(data_row["iataCode"])
if flight_info is None:
continue
if data_row["lowestPrice"] >= flight_info.price:
msg = f"\n({flight_info.city_from.split('-')[0].strip()}) -> ({flight_info.city_to.split('-')[0].strip()})"
print(msg)
if flight_info.stop_overs > 0:
msg = f"Flight from {flight_info.city_from.split('-')[0].strip()} to {flight_info.city_to.split('-')[0].strip()} has 1 stop over, via {flight_info.via_city}."
print(msg)
nm.send_text(format_notification(flight_info))
nm.send_emails(format_notification(flight_info))
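# Illustrative sketch only: fs.get_flights_data() must return an object exposing the
# attributes used above (price, city_from, city_to, stop_overs, via_city). A minimal
# compatible container could look like this hypothetical dataclass; the example city
# string follows the "City - Airport" shape implied by the split("-") calls above.
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class FlightData:
#         price: float
#         city_from: str   # e.g. "Paris - Charles de Gaulle"
#         city_to: str
#         stop_overs: int = 0
#         via_city: str = ""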
|
import difflib
import re
from typing import Optional, Union
from discord.utils import escape_markdown
def wrap_in_code(value: str, *, block: Optional[Union[bool, str]] = None):
value = value.replace("`", "\u200b`\u200b")
value = value.replace("\u200b\u200b", "\u200b")
if block is None:
return "``" + value + "``"
lang = "" if block is True else block
    return f"```{lang}\n" + value + "\n```"  # use the normalized language tag (block=True means no tag)
def escape(text: str):
return escape_markdown(re.sub(r"<(a?:\w+:\d+)>", "<\u200b\\1>", text))
def cut_words(text: str, max_len: int, *, end: str = "..."):
words = [""] + re.split(r"(\s+)", text)
result = ""
if len(words[1] + end) > max_len:
return words[1][: max_len - len(end)] + end
for last_sep, word in zip(words[::2], words[1::2]):
if len(result + last_sep + word + end) > max_len:
return result + end
result += last_sep + word
return result
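# e.g. (illustrative): cut_words("hello wonderful world", 15) -> "hello...", since
# appending " wonderful" would push the length past max_len once the end marker is counted.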
def diff_message(
a: str,
b: str,
*,
max_len: Optional[int] = None,
group_sep: str = "**...**",
cutoff_end: str = " **... [cut off]**",
):
a_words = a.split()
b_words = b.split()
matcher = difflib.SequenceMatcher(autojunk=False)
matcher.set_seqs(a_words, b_words)
groups = []
start = f"{group_sep} "
end = f" {group_sep}"
for group in matcher.get_grouped_opcodes():
parts = []
for op, i1, i2, j1, j2 in group:
if min(i1, j1) == 0:
start = ""
if i2 == len(a_words) or j2 == len(b_words):
end = ""
if op == "delete" or op == "replace":
parts.append(f"~~{escape(" ".join(a_words[i1:i2]))}~~")
if op == "insert" or op == "replace":
parts.append(f"__{escape(" ".join(b_words[j1:j2]))}__")
if op == "equal":
parts.append(escape(" ".join(a_words[i1:i2])))
groups.append(" ".join(parts))
res = start + f" {group_sep} ".join(groups) + end
if max_len:
res = cut_words(res, max_len, end=cutoff_end)
return res
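# Usage sketch (illustrative addition, not part of the original module): deleted
# words are wrapped in ~~strikethrough~~ and inserted words in __underline__,
# with unchanged context kept in between.
if __name__ == "__main__":
    # Expected to print something close to: the quick ~~brown~~ __red__ fox
    print(diff_message("the quick brown fox", "the quick red fox"))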
|
import difflib
import re
from typing import Optional, Union
from discord.utils import escape_markdown
def wrap_in_code(value: str, *, block: Optional[Union[bool, str]] = None):
value = value.replace("`", "\u200b`\u200b")
value = value.replace("\u200b\u200b", "\u200b")
if block is None:
return "``" + value + "``"
lang = "" if block is True else block
return f"```{block}\n" + value + "\n```"
def escape(text: str):
return escape_markdown(re.sub(r"<(a?:\w+:\d+)>", "<\u200b\\1>", text))
def cut_words(text: str, max_len: int, *, end: str = "..."):
words = [""] + re.split(r"(\s+)", text)
result = ""
if len(words[1] + end) > max_len:
return words[1][: max_len - len(end)] + end
for last_sep, word in zip(words[::2], words[1::2]):
if len(result + last_sep + word + end) > max_len:
return result + end
result += last_sep + word
return result
def diff_message(
a: str,
b: str,
*,
max_len: Optional[int] = None,
group_sep: str = "**...**",
cutoff_end: str = " **... [cut off]**",
):
a_words = a.split()
b_words = b.split()
matcher = difflib.SequenceMatcher(autojunk=False)
matcher.set_seqs(a_words, b_words)
groups = []
start = f"{group_sep} "
end = f" {group_sep}"
for group in matcher.get_grouped_opcodes():
parts = []
for op, i1, i2, j1, j2 in group:
if min(i1, j1) == 0:
start = ""
if i2 == len(a_words) or j2 == len(b_words):
end = ""
if op == "delete" or op == "replace":
parts.append(f"~~{escape(' '.join(a_words[i1:i2]))}~~")
if op == "insert" or op == "replace":
parts.append(f"__{escape(' '.join(b_words[j1:j2]))}__")
if op == "equal":
parts.append(escape(" ".join(a_words[i1:i2])))
groups.append(" ".join(parts))
res = start + f" {group_sep} ".join(groups) + end
if max_len:
res = cut_words(res, max_len, end=cutoff_end)
return res
|
import os
import sys
import copy
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from sklearn.model_selection import train_test_split
import utils
from debug import ipsh
sys.path.insert(0, '_data_main')
try:
from _data_main.fair_adult_data import *
except:
print('[ENV WARNING] fair_adult_data not available')
try:
from _data_main.fair_compas_data import *
except:
print('[ENV WARNING] fair_compas_data not available')
try:
from _data_main.process_credit_data import *
except:
print('[ENV WARNING] process_credit_data not available')
try:
from _data_main.process_german_data import *
except:
print('[ENV WARNING] process_german_data not available')
try:
from _data_main.process_synthetic_data import *
except:
print('[ENV WARNING] process_synthetic_data not available')
try:
from _data_main.process_mortgage_data import *
except:
print('[ENV WARNING] process_mortgage_data not available')
try:
from _data_main.process_twomoon_data import *
except:
print('[ENV WARNING] process_twomoon_data not available')
try:
from _data_main.process_test_data import *
except:
print('[ENV WARNING] process_test_data not available')
VALID_ATTRIBUTE_DATA_TYPES = { \
'numeric-int', \
'numeric-real', \
'binary', \
'categorical', \
'sub-categorical', \
'ordinal', \
'sub-ordinal'}
VALID_ATTRIBUTE_NODE_TYPES = { \
'meta', \
'input', \
'output'}
VALID_ACTIONABILITY_TYPES = { \
'none', \
'any', \
'same-or-increase', \
'same-or-decrease'}
VALID_MUTABILITY_TYPES = { \
True, \
False}
from random import seed
RANDOM_SEED = 54321
seed(RANDOM_SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(RANDOM_SEED)
class Dataset(object):
# TODO: getOneHotEquivalent can be a class method, and this object can store
# both one-hot and non-hot versions!
def __init__(self, data_frame, attributes, is_one_hot, dataset_name):
self.dataset_name = dataset_name
self.is_one_hot = is_one_hot
attributes_long = attributes
data_frame_long = data_frame
self.data_frame_long = data_frame_long # i.e., data_frame is indexed by attr_name_long
self.attributes_long = attributes_long # i.e., attributes is indexed by attr_name_long
attributes_kurz = dict((attributes[key].attr_name_kurz, value) for (key, value) in attributes_long.items())
data_frame_kurz = copy.deepcopy(data_frame_long)
data_frame_kurz.columns = self.getAllAttributeNames('kurz')
self.data_frame_kurz = data_frame_kurz # i.e., data_frame is indexed by attr_name_kurz
self.attributes_kurz = attributes_kurz # i.e., attributes is indexed by attr_name_kurz
# assert that data_frame and attributes match on variable names (long)
assert len(np.setdiff1d(
data_frame.columns.values,
np.array(self.getAllAttributeNames('long'))
)) == 0
# assert attribute type matches what is in the data frame
for attr_name in np.setdiff1d(
self.getInputAttributeNames('long'),
self.getRealBasedAttributeNames('long'),
):
unique_values = np.unique(data_frame_long[attr_name].to_numpy())
# all non-numerical-real values should be integer or {0,1}
for value in unique_values:
assert value == np.floor(value)
if is_one_hot and attributes_long[attr_name].attr_type != 'numeric-int': # binary, sub-categorical, sub-ordinal
try:
assert \
np.array_equal(unique_values, [0,1]) or \
np.array_equal(unique_values, [1,2]) or \
np.array_equal(unique_values, [1]) # the first sub-ordinal attribute is always 1
# race (binary) in compass is encoded as {1,2}
except:
ipsh()
# # assert attributes and is_one_hot agree on one-hot-ness (i.e., if is_one_hot,
# # then at least one attribute should be encoded as one-hot (w/ parent reference))
# tmp_is_one_hot = False
# for attr_name in attributes.keys():
# attr_obj = attributes[attr_name]
# # this simply checks to make sure that at least one elem is one-hot encoded
# if attr_obj.parent_name_long != -1 or attr_obj.parent_name_kurz != -1:
# tmp_is_one_hot = True
# # TODO: assert only if there is a cat/ord variable!
# assert is_one_hot == tmp_is_one_hot, "Dataset object and actual attributes don't agree on one-hot"
self.assertSiblingsShareAttributes('long')
self.assertSiblingsShareAttributes('kurz')
def getAttributeNames(self, allowed_node_types, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check attr_name
for attr_name in self.attributes_long.keys():
attr_obj = self.attributes_long[attr_name]
if attr_obj.node_type not in allowed_node_types:
continue
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getAllAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'meta', 'input', 'output'}, long_or_kurz)
def getInputOutputAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'input', 'output'}, long_or_kurz)
def getMetaInputAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'meta', 'input'}, long_or_kurz)
def getMetaAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'meta'}, long_or_kurz)
def getInputAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'input'}, long_or_kurz)
def getOutputAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'output'}, long_or_kurz)
def getBinaryAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check binary
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.node_type == 'input' and attr_obj.attr_type == 'binary':
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getActionableAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check actionability
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.node_type == 'input' and attr_obj.actionability != 'none':
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getNonActionableAttributeNames(self, long_or_kurz = 'kurz'):
a = self.getInputAttributeNames(long_or_kurz)
b = self.getActionableAttributeNames(long_or_kurz)
return np.setdiff1d(a,b)
def getMutableAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check mutability
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.node_type == 'input' and attr_obj.mutability != False:
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getNonMutableAttributeNames(self, long_or_kurz = 'kurz'):
a = self.getInputAttributeNames(long_or_kurz)
b = self.getMutableAttributeNames(long_or_kurz)
return np.setdiff1d(a,b)
def getIntegerBasedAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check attr_type
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.attr_type == 'numeric-int':
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getRealBasedAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check attr_type
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.attr_type == 'numeric-real':
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def assertSiblingsShareAttributes(self, long_or_kurz = 'kurz'):
# assert elems of dictOfSiblings share attr_type, node_type, parent, actionability, and mutability
dict_of_siblings = self.getDictOfSiblings(long_or_kurz)
for parent_name in dict_of_siblings['cat'].keys():
siblings = dict_of_siblings['cat'][parent_name]
assert len(siblings) > 1
for sibling in siblings:
if long_or_kurz == 'long':
self.attributes_long[sibling].attr_type = self.attributes_long[siblings[0]].attr_type
self.attributes_long[sibling].node_type = self.attributes_long[siblings[0]].node_type
self.attributes_long[sibling].actionability = self.attributes_long[siblings[0]].actionability
self.attributes_long[sibling].mutability = self.attributes_long[siblings[0]].mutability
self.attributes_long[sibling].parent_name_long = self.attributes_long[siblings[0]].parent_name_long
self.attributes_long[sibling].parent_name_kurz = self.attributes_long[siblings[0]].parent_name_kurz
elif long_or_kurz == 'kurz':
self.attributes_kurz[sibling].attr_type = self.attributes_kurz[siblings[0]].attr_type
self.attributes_kurz[sibling].node_type = self.attributes_kurz[siblings[0]].node_type
self.attributes_kurz[sibling].actionability = self.attributes_kurz[siblings[0]].actionability
self.attributes_kurz[sibling].mutability = self.attributes_kurz[siblings[0]].mutability
self.attributes_kurz[sibling].parent_name_long = self.attributes_kurz[siblings[0]].parent_name_long
self.attributes_kurz[sibling].parent_name_kurz = self.attributes_kurz[siblings[0]].parent_name_kurz
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
def getSiblingsFor(self, attr_name_long_or_kurz):
# If attr_name_long is given, we will return siblings_long (the same length)
# but not siblings_kurz. Same for the opposite direction.
assert \
'cat' in attr_name_long_or_kurz or 'ord' in attr_name_long_or_kurz, \
'attr_name must include either `cat` or `ord`.'
if attr_name_long_or_kurz in self.getInputOutputAttributeNames('long'):
attr_name_long = attr_name_long_or_kurz
dict_of_siblings_long = self.getDictOfSiblings('long')
for parent_name_long in dict_of_siblings_long['cat']:
siblings_long = dict_of_siblings_long['cat'][parent_name_long]
if attr_name_long_or_kurz in siblings_long:
return siblings_long
for parent_name_long in dict_of_siblings_long['ord']:
siblings_long = dict_of_siblings_long['ord'][parent_name_long]
if attr_name_long_or_kurz in siblings_long:
return siblings_long
elif attr_name_long_or_kurz in self.getInputOutputAttributeNames('kurz'):
attr_name_kurz = attr_name_long_or_kurz
dict_of_siblings_kurz = self.getDictOfSiblings('kurz')
for parent_name_kurz in dict_of_siblings_kurz['cat']:
siblings_kurz = dict_of_siblings_kurz['cat'][parent_name_kurz]
if attr_name_long_or_kurz in siblings_kurz:
return siblings_kurz
for parent_name_kurz in dict_of_siblings_kurz['ord']:
siblings_kurz = dict_of_siblings_kurz['ord'][parent_name_kurz]
if attr_name_long_or_kurz in siblings_kurz:
return siblings_kurz
else:
raise Exception(f'{attr_name_long_or_kurz} not recognized as a valid `attr_name_long_or_kurz`.')
def getDictOfSiblings(self, long_or_kurz = 'kurz'):
if long_or_kurz == 'long':
dict_of_siblings_long = {}
dict_of_siblings_long['cat'] = {}
dict_of_siblings_long['ord'] = {}
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.attr_type == 'sub-categorical':
if attr_obj.parent_name_long not in dict_of_siblings_long['cat'].keys():
dict_of_siblings_long['cat'][attr_obj.parent_name_long] = [] # initiate key-value pair
dict_of_siblings_long['cat'][attr_obj.parent_name_long].append(attr_obj.attr_name_long)
elif attr_obj.attr_type == 'sub-ordinal':
if attr_obj.parent_name_long not in dict_of_siblings_long['ord'].keys():
dict_of_siblings_long['ord'][attr_obj.parent_name_long] = [] # initiate key-value pair
dict_of_siblings_long['ord'][attr_obj.parent_name_long].append(attr_obj.attr_name_long)
# sort sub-arrays
for key in dict_of_siblings_long['cat'].keys():
dict_of_siblings_long['cat'][key] = sorted(dict_of_siblings_long['cat'][key], key = lambda x : int(x.split('_')[-1]))
for key in dict_of_siblings_long['ord'].keys():
dict_of_siblings_long['ord'][key] = sorted(dict_of_siblings_long['ord'][key], key = lambda x : int(x.split('_')[-1]))
return dict_of_siblings_long
elif long_or_kurz == 'kurz':
dict_of_siblings_kurz = {}
dict_of_siblings_kurz['cat'] = {}
dict_of_siblings_kurz['ord'] = {}
for attr_name_kurz in self.getInputAttributeNames('kurz'):
attr_obj = self.attributes_kurz[attr_name_kurz]
if attr_obj.attr_type == 'sub-categorical':
if attr_obj.parent_name_kurz not in dict_of_siblings_kurz['cat'].keys():
dict_of_siblings_kurz['cat'][attr_obj.parent_name_kurz] = [] # initiate key-value pair
dict_of_siblings_kurz['cat'][attr_obj.parent_name_kurz].append(attr_obj.attr_name_kurz)
elif attr_obj.attr_type == 'sub-ordinal':
if attr_obj.parent_name_kurz not in dict_of_siblings_kurz['ord'].keys():
dict_of_siblings_kurz['ord'][attr_obj.parent_name_kurz] = [] # initiate key-value pair
dict_of_siblings_kurz['ord'][attr_obj.parent_name_kurz].append(attr_obj.attr_name_kurz)
# sort sub-arrays
for key in dict_of_siblings_kurz['cat'].keys():
dict_of_siblings_kurz['cat'][key] = sorted(dict_of_siblings_kurz['cat'][key], key = lambda x : int(x.split('_')[-1]))
for key in dict_of_siblings_kurz['ord'].keys():
dict_of_siblings_kurz['ord'][key] = sorted(dict_of_siblings_kurz['ord'][key], key = lambda x : int(x.split('_')[-1]))
return dict_of_siblings_kurz
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
def getOneHotAttributesNames(self, long_or_kurz = 'kurz'):
tmp = self.getDictOfSiblings(long_or_kurz)
names = []
for key1 in tmp.keys():
for key2 in tmp[key1].keys():
names.extend(tmp[key1][key2])
return np.array(names)
def getNonHotAttributesNames(self, long_or_kurz = 'kurz'):
a = self.getInputAttributeNames(long_or_kurz)
b = self.getOneHotAttributesNames(long_or_kurz)
return np.setdiff1d(a,b)
def getVariableRanges(self):
return dict(zip(
self.getInputAttributeNames('kurz'),
[
self.attributes_kurz[attr_name_kurz].upper_bound -
self.attributes_kurz[attr_name_kurz].lower_bound
for attr_name_kurz in self.getInputAttributeNames('kurz')
],
))
def printDataset(self, long_or_kurz = 'kurz'):
if long_or_kurz == 'long':
for attr_name_long in self.attributes_long:
print(self.attributes_long[attr_name_long].__dict__)
elif long_or_kurz == 'kurz':
for attr_name_kurz in self.attributes_kurz:
print(self.attributes_kurz[attr_name_kurz].__dict__)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
# (2020.04.15) perhaps we need a memoize here... but I tried calling this function
# multiple times in a row from another file and it always returned the same slice
# of data... weird.
def getTrainTestSplit(self, preprocessing = None, with_meta = False, balanced = True):
# When working only with normalized data in [0, 1], data ranges must change to [0, 1] as well
    # otherwise, in computing normalized distance we will normalize with initial ranges again!
# pseudonym (2020.05.17) does this work with cat/ord and sub-cat/sub-ord data???
def setBoundsToZeroOne():
for attr_name_kurz in self.getNonHotAttributesNames('kurz'):
attr_obj = self.attributes_kurz[attr_name_kurz]
attr_obj.lower_bound = 0.0
attr_obj.upper_bound = 1.0
attr_obj = self.attributes_long[attr_obj.attr_name_long]
attr_obj.lower_bound = 0.0
attr_obj.upper_bound = 1.0
# Normalize data: bring everything to [0, 1] - implemented for when feeding the model to DiCE
def normalizeData(X_train, X_test):
for attr_name_kurz in self.getNonHotAttributesNames('kurz'):
attr_obj = self.attributes_kurz[attr_name_kurz]
lower_bound = attr_obj.lower_bound
        upper_bound = attr_obj.upper_bound
X_train[attr_name_kurz] = (X_train[attr_name_kurz] - lower_bound) / (upper_bound - lower_bound)
X_test[attr_name_kurz] = (X_test[attr_name_kurz] - lower_bound) / (upper_bound - lower_bound)
setBoundsToZeroOne()
return X_train, X_test
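      # e.g. (illustrative): a feature with bounds [10, 60] and value 35 maps to
      # (35 - 10) / (60 - 10) == 0.5, and the stored bounds are reset to [0, 1] so
      # later range-based distances are not renormalized with the original ranges.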
# TODO: This should be used with caution... it messes things up in MACE as ranges
# will differ between factual and counterfactual domains
def standardizeData(X_train, X_test):
x_mean = X_train.mean()
x_std = X_train.std()
for index in x_std.index:
if '_ord_' in index or '_cat_' in index:
x_mean[index] = 0
x_std[index] = 1
X_train = (X_train - x_mean) / x_std
X_test = (X_test - x_mean) / x_std
return X_train, X_test
def getBalancedDataFrame(data_frame, output_col):
# assert only two classes in label (maybe relax later??)
unique_labels = np.unique(data_frame[output_col])
assert \
np.array_equal(
unique_labels,
np.array([0, 1]) # only allowing {0, 1} labels,
) or \
np.array_equal(
unique_labels,
np.array([-1, 1]) # only allowing {-1, 1} labels,
), \
        f'expected unique labels to be [0, 1] or [-1, 1], but got {unique_labels}'
# get balanced dataframe (take minimum of the count, then round down to nearest 250)
unique_values_and_count = data_frame[output_col].value_counts()
number_of_subsamples_in_each_class = unique_values_and_count.min() // 250 * 250
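      # e.g. class counts {0: 1123, 1: 897} give min() == 897 and
      # 897 // 250 * 250 == 750, so 750 rows are sampled from each class below
      # (illustrative numbers only).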
data_frame = pd.concat([
data_frame[data_frame.loc[:,output_col] == unique_labels[0]].sample(number_of_subsamples_in_each_class, random_state = RANDOM_SEED),
data_frame[data_frame.loc[:,output_col] == unique_labels[1]].sample(number_of_subsamples_in_each_class, random_state = RANDOM_SEED),
]).sample(frac = 1, random_state = RANDOM_SEED)
# data_frame = pd.concat([
# data_frame[data_frame.loc[:,output_col] == 0],
# data_frame[data_frame.loc[:,output_col] == 1],
# ]).sample(frac = 1, random_state = RANDOM_SEED)
return data_frame
meta_cols = self.getMetaAttributeNames()
input_cols = self.getInputAttributeNames()
output_col = self.getOutputAttributeNames()[0]
data_frame = copy.deepcopy(self.data_frame_kurz)
if balanced:
data_frame = getBalancedDataFrame(data_frame, self.getOutputAttributeNames()[0])
if with_meta:
all_data = data_frame.loc[:,np.array((input_cols, meta_cols)).flatten()]
all_true_labels = data_frame.loc[:,output_col]
if preprocessing is not None:
assert with_meta == False, 'This feature is not built yet...'
X_train, X_test, y_train, y_test = train_test_split(
all_data,
all_true_labels,
train_size=.7,
random_state = RANDOM_SEED)
# ordering of next two lines matters (shouldn't overwrite input_cols); silly code... :|
U_train = X_train[self.getMetaAttributeNames()]
U_test = X_test[self.getMetaAttributeNames()]
X_train = X_train[self.getInputAttributeNames()]
X_test = X_test[self.getInputAttributeNames()]
y_train = y_train # noop
y_test = y_test # noop
return X_train, X_test, U_train, U_test, y_train, y_test
else:
all_data = data_frame.loc[:,input_cols]
all_true_labels = data_frame.loc[:,output_col]
X_train, X_test, y_train, y_test = train_test_split(
all_data,
all_true_labels,
train_size=.7,
random_state = RANDOM_SEED)
# TODO (2020.05.18): this should be updated so as NOT to update meta variables
if preprocessing == 'standardize':
X_train, X_test = standardizeData(X_train, X_test)
elif preprocessing == 'normalize':
X_train, X_test = normalizeData(X_train, X_test)
return X_train, X_test, y_train, y_test
def getOriginalDataFrame(self, num_samples, with_meta = False, with_label = False, balanced = True, data_split = 'train_and_test'):
if with_meta:
X_train, X_test, U_train, U_test, y_train, y_test = self.getTrainTestSplit(with_meta = True, balanced = balanced)
else:
X_train, X_test, y_train, y_test = self.getTrainTestSplit(with_meta = False, balanced = balanced)
# order of if/elif is important
if with_meta and with_label:
data_train = pd.concat([X_train, U_train, y_train], axis = 1)
data_test = pd.concat([X_test, U_test, y_test], axis = 1)
elif with_meta:
data_train = pd.concat([X_train, U_train], axis = 1)
data_test = pd.concat([X_test, U_test], axis = 1)
elif with_label:
data_train = pd.concat([X_train, y_train], axis = 1)
data_test = pd.concat([X_test, y_test], axis = 1)
else:
data_train = X_train
data_test = X_test
if data_split == 'train_and_test':
data_all = pd.concat([data_train, data_test], axis = 0)
elif data_split == 'train_only':
data_all = data_train
elif data_split == 'test_only':
data_all = data_test
else:
raise NotImplementedError
return data_all[:num_samples]
class DatasetAttribute(object):
def __init__(
self,
attr_name_long,
attr_name_kurz,
attr_type,
node_type,
actionability,
mutability,
parent_name_long,
parent_name_kurz,
lower_bound,
upper_bound):
if attr_type not in VALID_ATTRIBUTE_DATA_TYPES:
raise Exception("`attr_type` must be one of %r." % VALID_ATTRIBUTE_DATA_TYPES)
if node_type not in VALID_ATTRIBUTE_NODE_TYPES:
raise Exception("`node_type` must be one of %r." % VALID_ATTRIBUTE_NODE_TYPES)
if actionability not in VALID_ACTIONABILITY_TYPES:
raise Exception("`actionability` must be one of %r." % VALID_ACTIONABILITY_TYPES)
if mutability not in VALID_MUTABILITY_TYPES:
raise Exception("`mutability` must be one of %r." % VALID_MUTABILITY_TYPES)
if lower_bound > upper_bound:
raise Exception("`lower_bound` must be <= `upper_bound`")
if attr_type in {'sub-categorical', 'sub-ordinal'}:
assert parent_name_long != -1, 'Parent ID set for non-hot attribute.'
assert parent_name_kurz != -1, 'Parent ID set for non-hot attribute.'
if attr_type == 'sub-categorical':
assert lower_bound == 0
assert upper_bound == 1
if attr_type == 'sub-ordinal':
# the first elem in thermometer is always on, but the rest may be on or off
assert lower_bound == 0 or lower_bound == 1
assert upper_bound == 1
else:
assert parent_name_long == -1, 'Parent ID set for non-hot attribute.'
assert parent_name_kurz == -1, 'Parent ID set for non-hot attribute.'
if attr_type in {'categorical', 'ordinal'}:
assert lower_bound == 1 # setOneHotValue & setThermoValue assume this in their logic
if attr_type in {'binary', 'categorical', 'sub-categorical'}: # not 'ordinal' or 'sub-ordinal'
# IMPORTANT: surprisingly, it is OK if all sub-ordinal variables share actionability
# think about it, if each sub- variable is same-or-increase, along with
# the constraints that x0_ord_1 >= x0_ord_2, all variables can only stay
# the same or increase. It works :)
assert actionability in {'none', 'any'}, f"{attr_type}"s actionability can only be in {"none", "any"}, not `{actionability}`."
if node_type != 'input':
assert actionability == 'none', f'{node_type} attribute is not actionable.'
assert mutability == False, f'{node_type} attribute is not mutable.'
# We have introduced 3 types of variables: (actionable and mutable, non-actionable but mutable, immutable and non-actionable)
if actionability != 'none':
assert mutability == True
# TODO: above/below seem contradictory... (2020.04.14)
if mutability == False:
assert actionability == 'none'
if parent_name_long == -1 or parent_name_kurz == -1:
assert parent_name_long == parent_name_kurz == -1
self.attr_name_long = attr_name_long
self.attr_name_kurz = attr_name_kurz
self.attr_type = attr_type
self.node_type = node_type
self.actionability = actionability
self.mutability = mutability
self.parent_name_long = parent_name_long
self.parent_name_kurz = parent_name_kurz
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def loadDataset(dataset_name, return_one_hot, load_from_cache = False, debug_flag = True, index_offset = 0, meta_param = None):
def getInputOutputColumns(data_frame):
all_data_frame_cols = data_frame.columns.values
input_cols = [x for x in all_data_frame_cols if 'label' not in x.lower()]
output_cols = [x for x in all_data_frame_cols if 'label' in x.lower()]
assert len(output_cols) == 1
return input_cols, output_cols[0]
one_hot_string = 'one_hot' if return_one_hot else 'non_hot'
save_file_path = os.path.join(
os.path.dirname(__file__),
f'_data_main/_cached/{dataset_name}_{one_hot_string}'
)
if load_from_cache:
if debug_flag: print(f'[INFO] Attempting to load saved dataset (`{dataset_name}`) from cache...\t', end = '')
try:
tmp = pickle.load(open(save_file_path, 'rb'))
if debug_flag: print('done.')
return tmp
except:
if debug_flag: print('failed. Re-creating dataset...')
if dataset_name == 'adult':
data_frame_non_hot = load_adult_data_new()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'Sex':
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'Age':
attr_type = 'binary' # 'numeric-int'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'NativeCountry': #~ RACE
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'WorkClass':
attr_type = 'categorical'
actionability = 'any'
mutability = True
# elif col_name == 'EducationNumber':
# attr_type = 'numeric-int'
# actionability = 'any'
# mutability = True
elif col_name == 'EducationLevel':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'MaritalStatus':
attr_type = 'categorical'
actionability = 'any'
mutability = True
elif col_name == 'Occupation':
attr_type = 'categorical'
actionability = 'any'
mutability = True
# elif col_name == 'Relationship':
# attr_type = 'categorical'
# actionability = 'any'
# mutability = True
# elif col_name == 'CapitalGain':
# attr_type = 'numeric-real'
# actionability = 'any'
# mutability = True
# elif col_name == 'CapitalLoss':
# attr_type = 'numeric-real'
# actionability = 'any'
# mutability = True
elif col_name == 'HoursPerWeek':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'german':
data_frame_non_hot = load_german_data()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'Sex': # TODO: make sex and race immutable in all datasets!
attr_type = 'binary'
actionability = 'any'
mutability = True
elif col_name == 'Age':
attr_type = 'numeric-int' # 'numeric-real'
actionability = 'same-or-increase'
mutability = True
elif col_name == 'Credit':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'LoanDuration':
attr_type = 'numeric-int'
actionability = 'none'
mutability = True
# elif col_name == 'CheckingAccountBalance':
# attr_type = 'ordinal' # 'numeric-real'
# actionability = 'any'
# mutability = True
# elif col_name == 'SavingsAccountBalance':
# attr_type = 'ordinal'
# actionability = 'any'
# mutability = True
# elif col_name == 'HousingStatus':
# attr_type = 'ordinal'
# actionability = 'any'
# mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'credit':
data_frame_non_hot = load_credit_data()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'isMale':
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'isMarried':
attr_type = 'binary'
actionability = 'any'
mutability = True
elif col_name == 'AgeGroup':
attr_type = 'ordinal'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'EducationLevel':
attr_type = 'ordinal'
actionability = 'any'
mutability = True
elif col_name == 'MaxBillAmountOverLast6Months':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'MaxPaymentAmountOverLast6Months':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'MonthsWithZeroBalanceOverLast6Months':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'MonthsWithLowSpendingOverLast6Months':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'MonthsWithHighSpendingOverLast6Months':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'MostRecentBillAmount':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'MostRecentPaymentAmount':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'TotalOverdueCounts':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'TotalMonthsOverdue':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'HasHistoryOfOverduePayments':
attr_type = 'binary'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'compass':
data_frame_non_hot = load_compas_data_new()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'AgeGroup':
attr_type = 'ordinal'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'Race':
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'Sex':
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'PriorsCount':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'ChargeDegree':
attr_type = 'binary'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'synthetic':
variable_type = 'real'
# variable_type = 'integer'
scm_class = meta_param
data_frame_non_hot = load_synthetic_data(scm_class, variable_type)
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
# ordering of next two lines matters (shouldn't overwrite input_cols); silly code... :|
meta_cols = [col_name for col_name in input_cols if 'u' in col_name]
input_cols = [col_name for col_name in input_cols if 'x' in col_name] # endogenous variables must start with `x`
if 'fair' in scm_class:
# fair experiments (other than adult) rely on labels being in {-1/+1}
# TODO (lowpri): can we change this?? can sklearn svm and lr predict 0,1 instead of -1/+1??
data_frame_non_hot[output_col] = data_frame_non_hot[output_col] * 2 - 1
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'
node_type = 'input'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = col_name,
attr_type = attr_type,
node_type = node_type,
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(meta_cols):
attr_type = 'numeric-real'
node_type = 'meta'
actionability = 'none'
mutability = False
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = col_name,
attr_type = attr_type,
node_type = node_type,
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'mortgage':
data_frame_non_hot = load_mortgage_data()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'x0':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'x1':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'twomoon':
variable_type = 'real'
# variable_type = 'integer'
data_frame_non_hot = load_twomoon_data(variable_type)
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'x0':
attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'x1':
attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'test':
data_frame_non_hot = load_test_data()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'x0':
attr_type = 'categorical'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
else:
raise Exception(f'{dataset_name} not recognized as a valid dataset.')
if return_one_hot:
data_frame, attributes = getOneHotEquivalent(data_frame_non_hot, attributes_non_hot)
else:
data_frame, attributes = data_frame_non_hot, attributes_non_hot
# save then return
dataset_obj = Dataset(data_frame, attributes, return_one_hot, dataset_name)
# if not loading from cache, we always overwrite the cache
pickle.dump(dataset_obj, open(save_file_path, 'wb'))
return dataset_obj
# TODO: consider moving into Dataset class with getOneHot and getNonHot methods
def getOneHotEquivalent(data_frame_non_hot, attributes_non_hot):
# TODO: see how we can switch between feature_names = col names for kurz and long (also maybe ordered)
data_frame = copy.deepcopy(data_frame_non_hot)
attributes = copy.deepcopy(attributes_non_hot)
def setOneHotValue(val):
return np.append(np.append(
np.zeros(val - 1),
np.ones(1)),
np.zeros(num_unique_values - val)
)
def setThermoValue(val):
return np.append(
np.ones(val),
np.zeros(num_unique_values - val)
)
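  # Worked example (comment added for clarity): with num_unique_values == 4,
  #   setOneHotValue(2) -> [0., 1., 0., 0.]   (a single 1 at position `val`)
  #   setThermoValue(2) -> [1., 1., 0., 0.]   (thermometer: the first `val` slots are 1)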
for col_name in data_frame.columns.values:
if attributes[col_name].attr_type not in {'categorical', 'ordinal'}:
continue
old_col_name_long = col_name
new_col_names_long = []
new_col_names_kurz = []
old_attr_name_long = attributes[old_col_name_long].attr_name_long
old_attr_name_kurz = attributes[old_col_name_long].attr_name_kurz
old_attr_type = attributes[old_col_name_long].attr_type
old_node_type = attributes[old_col_name_long].node_type
old_actionability = attributes[old_col_name_long].actionability
old_mutability = attributes[old_col_name_long].mutability
old_lower_bound = attributes[old_col_name_long].lower_bound
old_upper_bound = attributes[old_col_name_long].upper_bound
num_unique_values = int(old_upper_bound - old_lower_bound + 1)
assert old_col_name_long == old_attr_name_long
new_attr_type = 'sub-' + old_attr_type
new_node_type = old_node_type
new_actionability = old_actionability
new_mutability = old_mutability
new_parent_name_long = old_attr_name_long
new_parent_name_kurz = old_attr_name_kurz
if attributes[col_name].attr_type == 'categorical': # do not do this for 'binary'!
new_col_names_long = [f'{old_attr_name_long}_cat_{i}' for i in range(num_unique_values)]
new_col_names_kurz = [f'{old_attr_name_kurz}_cat_{i}' for i in range(num_unique_values)]
print(f'Replacing column {col_name} with {{{', '.join(new_col_names_long)}}}')
tmp = np.array(list(map(setOneHotValue, list(data_frame[col_name].astype(int).values))))
data_frame_dummies = pd.DataFrame(data=tmp, columns=new_col_names_long)
elif attributes[col_name].attr_type == 'ordinal':
new_col_names_long = [f'{old_attr_name_long}_ord_{i}' for i in range(num_unique_values)]
new_col_names_kurz = [f'{old_attr_name_kurz}_ord_{i}' for i in range(num_unique_values)]
print(f'Replacing column {col_name} with {{{', '.join(new_col_names_long)}}}')
tmp = np.array(list(map(setThermoValue, list(data_frame[col_name].astype(int).values))))
data_frame_dummies = pd.DataFrame(data=tmp, columns=new_col_names_long)
# Update data_frame
data_frame = pd.concat([data_frame.drop(columns = old_col_name_long), data_frame_dummies], axis=1)
# Update attributes
del attributes[old_col_name_long]
for col_idx in range(len(new_col_names_long)):
new_col_name_long = new_col_names_long[col_idx]
new_col_name_kurz = new_col_names_kurz[col_idx]
attributes[new_col_name_long] = DatasetAttribute(
attr_name_long = new_col_name_long,
attr_name_kurz = new_col_name_kurz,
attr_type = new_attr_type,
node_type = new_node_type,
actionability = new_actionability,
mutability = new_mutability,
parent_name_long = new_parent_name_long,
parent_name_kurz = new_parent_name_kurz,
lower_bound = data_frame[new_col_name_long].min(),
upper_bound = data_frame[new_col_name_long].max())
return data_frame, attributes
|
import os
import sys
import copy
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from sklearn.model_selection import train_test_split
import utils
from debug import ipsh
sys.path.insert(0, '_data_main')
try:
from _data_main.fair_adult_data import *
except:
print('[ENV WARNING] fair_adult_data not available')
try:
from _data_main.fair_compas_data import *
except:
print('[ENV WARNING] fair_compas_data not available')
try:
from _data_main.process_credit_data import *
except:
print('[ENV WARNING] process_credit_data not available')
try:
from _data_main.process_german_data import *
except:
print('[ENV WARNING] process_german_data not available')
try:
from _data_main.process_synthetic_data import *
except:
print('[ENV WARNING] process_synthetic_data not available')
try:
from _data_main.process_mortgage_data import *
except:
print('[ENV WARNING] process_mortgage_data not available')
try:
from _data_main.process_twomoon_data import *
except:
print('[ENV WARNING] process_twomoon_data not available')
try:
from _data_main.process_test_data import *
except:
print('[ENV WARNING] process_test_data not available')
VALID_ATTRIBUTE_DATA_TYPES = { \
'numeric-int', \
'numeric-real', \
'binary', \
'categorical', \
'sub-categorical', \
'ordinal', \
'sub-ordinal'}
VALID_ATTRIBUTE_NODE_TYPES = { \
'meta', \
'input', \
'output'}
VALID_ACTIONABILITY_TYPES = { \
'none', \
'any', \
'same-or-increase', \
'same-or-decrease'}
VALID_MUTABILITY_TYPES = { \
True, \
False}
from random import seed
RANDOM_SEED = 54321
seed(RANDOM_SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(RANDOM_SEED)
class Dataset(object):
# TODO: getOneHotEquivalent can be a class method, and this object can store
# both one-hot and non-hot versions!
def __init__(self, data_frame, attributes, is_one_hot, dataset_name):
self.dataset_name = dataset_name
self.is_one_hot = is_one_hot
attributes_long = attributes
data_frame_long = data_frame
self.data_frame_long = data_frame_long # i.e., data_frame is indexed by attr_name_long
self.attributes_long = attributes_long # i.e., attributes is indexed by attr_name_long
attributes_kurz = dict((attributes[key].attr_name_kurz, value) for (key, value) in attributes_long.items())
data_frame_kurz = copy.deepcopy(data_frame_long)
data_frame_kurz.columns = self.getAllAttributeNames('kurz')
self.data_frame_kurz = data_frame_kurz # i.e., data_frame is indexed by attr_name_kurz
self.attributes_kurz = attributes_kurz # i.e., attributes is indexed by attr_name_kurz
# assert that data_frame and attributes match on variable names (long)
assert len(np.setdiff1d(
data_frame.columns.values,
np.array(self.getAllAttributeNames('long'))
)) == 0
# assert attribute type matches what is in the data frame
for attr_name in np.setdiff1d(
self.getInputAttributeNames('long'),
self.getRealBasedAttributeNames('long'),
):
unique_values = np.unique(data_frame_long[attr_name].to_numpy())
# all non-numerical-real values should be integer or {0,1}
for value in unique_values:
assert value == np.floor(value)
if is_one_hot and attributes_long[attr_name].attr_type != 'numeric-int': # binary, sub-categorical, sub-ordinal
try:
assert \
np.array_equal(unique_values, [0,1]) or \
np.array_equal(unique_values, [1,2]) or \
np.array_equal(unique_values, [1]) # the first sub-ordinal attribute is always 1
# race (binary) in compass is encoded as {1,2}
except:
ipsh()
# # assert attributes and is_one_hot agree on one-hot-ness (i.e., if is_one_hot,
# # then at least one attribute should be encoded as one-hot (w/ parent reference))
# tmp_is_one_hot = False
# for attr_name in attributes.keys():
# attr_obj = attributes[attr_name]
# # this simply checks to make sure that at least one elem is one-hot encoded
# if attr_obj.parent_name_long != -1 or attr_obj.parent_name_kurz != -1:
# tmp_is_one_hot = True
# # TODO: assert only if there is a cat/ord variable!
# assert is_one_hot == tmp_is_one_hot, "Dataset object and actual attributes don't agree on one-hot"
self.assertSiblingsShareAttributes('long')
self.assertSiblingsShareAttributes('kurz')
def getAttributeNames(self, allowed_node_types, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check attr_name
for attr_name in self.attributes_long.keys():
attr_obj = self.attributes_long[attr_name]
if attr_obj.node_type not in allowed_node_types:
continue
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getAllAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'meta', 'input', 'output'}, long_or_kurz)
def getInputOutputAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'input', 'output'}, long_or_kurz)
def getMetaInputAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'meta', 'input'}, long_or_kurz)
def getMetaAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'meta'}, long_or_kurz)
def getInputAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'input'}, long_or_kurz)
def getOutputAttributeNames(self, long_or_kurz = 'kurz'):
return self.getAttributeNames({'output'}, long_or_kurz)
def getBinaryAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check binary
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.node_type == 'input' and attr_obj.attr_type == 'binary':
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getActionableAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check actionability
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.node_type == 'input' and attr_obj.actionability != 'none':
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getNonActionableAttributeNames(self, long_or_kurz = 'kurz'):
a = self.getInputAttributeNames(long_or_kurz)
b = self.getActionableAttributeNames(long_or_kurz)
return np.setdiff1d(a,b)
def getMutableAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check mutability
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.node_type == 'input' and attr_obj.mutability != False:
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getNonMutableAttributeNames(self, long_or_kurz = 'kurz'):
a = self.getInputAttributeNames(long_or_kurz)
b = self.getMutableAttributeNames(long_or_kurz)
return np.setdiff1d(a,b)
def getIntegerBasedAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check attr_type
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.attr_type == 'numeric-int':
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def getRealBasedAttributeNames(self, long_or_kurz = 'kurz'):
names = []
# We must loop through all attributes and check attr_type
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.attr_type == 'numeric-real':
if long_or_kurz == 'long':
names.append(attr_obj.attr_name_long)
elif long_or_kurz == 'kurz':
names.append(attr_obj.attr_name_kurz)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
return np.array(names)
def assertSiblingsShareAttributes(self, long_or_kurz = 'kurz'):
# assert elems of dictOfSiblings share attr_type, node_type, parent, actionability, and mutability
dict_of_siblings = self.getDictOfSiblings(long_or_kurz)
for parent_name in dict_of_siblings['cat'].keys():
siblings = dict_of_siblings['cat'][parent_name]
assert len(siblings) > 1
for sibling in siblings:
if long_or_kurz == 'long':
self.attributes_long[sibling].attr_type = self.attributes_long[siblings[0]].attr_type
self.attributes_long[sibling].node_type = self.attributes_long[siblings[0]].node_type
self.attributes_long[sibling].actionability = self.attributes_long[siblings[0]].actionability
self.attributes_long[sibling].mutability = self.attributes_long[siblings[0]].mutability
self.attributes_long[sibling].parent_name_long = self.attributes_long[siblings[0]].parent_name_long
self.attributes_long[sibling].parent_name_kurz = self.attributes_long[siblings[0]].parent_name_kurz
elif long_or_kurz == 'kurz':
self.attributes_kurz[sibling].attr_type = self.attributes_kurz[siblings[0]].attr_type
self.attributes_kurz[sibling].node_type = self.attributes_kurz[siblings[0]].node_type
self.attributes_kurz[sibling].actionability = self.attributes_kurz[siblings[0]].actionability
self.attributes_kurz[sibling].mutability = self.attributes_kurz[siblings[0]].mutability
self.attributes_kurz[sibling].parent_name_long = self.attributes_kurz[siblings[0]].parent_name_long
self.attributes_kurz[sibling].parent_name_kurz = self.attributes_kurz[siblings[0]].parent_name_kurz
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
def getSiblingsFor(self, attr_name_long_or_kurz):
# If attr_name_long is given, we will return siblings_long (the same length)
# but not siblings_kurz. Same for the opposite direction.
assert \
'cat' in attr_name_long_or_kurz or 'ord' in attr_name_long_or_kurz, \
'attr_name must include either `cat` or `ord`.'
if attr_name_long_or_kurz in self.getInputOutputAttributeNames('long'):
attr_name_long = attr_name_long_or_kurz
dict_of_siblings_long = self.getDictOfSiblings('long')
for parent_name_long in dict_of_siblings_long['cat']:
siblings_long = dict_of_siblings_long['cat'][parent_name_long]
if attr_name_long_or_kurz in siblings_long:
return siblings_long
for parent_name_long in dict_of_siblings_long['ord']:
siblings_long = dict_of_siblings_long['ord'][parent_name_long]
if attr_name_long_or_kurz in siblings_long:
return siblings_long
elif attr_name_long_or_kurz in self.getInputOutputAttributeNames('kurz'):
attr_name_kurz = attr_name_long_or_kurz
dict_of_siblings_kurz = self.getDictOfSiblings('kurz')
for parent_name_kurz in dict_of_siblings_kurz['cat']:
siblings_kurz = dict_of_siblings_kurz['cat'][parent_name_kurz]
if attr_name_long_or_kurz in siblings_kurz:
return siblings_kurz
for parent_name_kurz in dict_of_siblings_kurz['ord']:
siblings_kurz = dict_of_siblings_kurz['ord'][parent_name_kurz]
if attr_name_long_or_kurz in siblings_kurz:
return siblings_kurz
else:
raise Exception(f'{attr_name_long_or_kurz} not recognized as a valid `attr_name_long_or_kurz`.')
def getDictOfSiblings(self, long_or_kurz = 'kurz'):
if long_or_kurz == 'long':
dict_of_siblings_long = {}
dict_of_siblings_long['cat'] = {}
dict_of_siblings_long['ord'] = {}
for attr_name_long in self.getInputAttributeNames('long'):
attr_obj = self.attributes_long[attr_name_long]
if attr_obj.attr_type == 'sub-categorical':
if attr_obj.parent_name_long not in dict_of_siblings_long['cat'].keys():
dict_of_siblings_long['cat'][attr_obj.parent_name_long] = [] # initiate key-value pair
dict_of_siblings_long['cat'][attr_obj.parent_name_long].append(attr_obj.attr_name_long)
elif attr_obj.attr_type == 'sub-ordinal':
if attr_obj.parent_name_long not in dict_of_siblings_long['ord'].keys():
dict_of_siblings_long['ord'][attr_obj.parent_name_long] = [] # initiate key-value pair
dict_of_siblings_long['ord'][attr_obj.parent_name_long].append(attr_obj.attr_name_long)
# sort sub-arrays
for key in dict_of_siblings_long['cat'].keys():
dict_of_siblings_long['cat'][key] = sorted(dict_of_siblings_long['cat'][key], key = lambda x : int(x.split('_')[-1]))
for key in dict_of_siblings_long['ord'].keys():
dict_of_siblings_long['ord'][key] = sorted(dict_of_siblings_long['ord'][key], key = lambda x : int(x.split('_')[-1]))
return dict_of_siblings_long
elif long_or_kurz == 'kurz':
dict_of_siblings_kurz = {}
dict_of_siblings_kurz['cat'] = {}
dict_of_siblings_kurz['ord'] = {}
for attr_name_kurz in self.getInputAttributeNames('kurz'):
attr_obj = self.attributes_kurz[attr_name_kurz]
if attr_obj.attr_type == 'sub-categorical':
if attr_obj.parent_name_kurz not in dict_of_siblings_kurz['cat'].keys():
dict_of_siblings_kurz['cat'][attr_obj.parent_name_kurz] = [] # initiate key-value pair
dict_of_siblings_kurz['cat'][attr_obj.parent_name_kurz].append(attr_obj.attr_name_kurz)
elif attr_obj.attr_type == 'sub-ordinal':
if attr_obj.parent_name_kurz not in dict_of_siblings_kurz['ord'].keys():
dict_of_siblings_kurz['ord'][attr_obj.parent_name_kurz] = [] # initiate key-value pair
dict_of_siblings_kurz['ord'][attr_obj.parent_name_kurz].append(attr_obj.attr_name_kurz)
# sort sub-arrays
for key in dict_of_siblings_kurz['cat'].keys():
dict_of_siblings_kurz['cat'][key] = sorted(dict_of_siblings_kurz['cat'][key], key = lambda x : int(x.split('_')[-1]))
for key in dict_of_siblings_kurz['ord'].keys():
dict_of_siblings_kurz['ord'][key] = sorted(dict_of_siblings_kurz['ord'][key], key = lambda x : int(x.split('_')[-1]))
return dict_of_siblings_kurz
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
def getOneHotAttributesNames(self, long_or_kurz = 'kurz'):
tmp = self.getDictOfSiblings(long_or_kurz)
names = []
for key1 in tmp.keys():
for key2 in tmp[key1].keys():
names.extend(tmp[key1][key2])
return np.array(names)
def getNonHotAttributesNames(self, long_or_kurz = 'kurz'):
a = self.getInputAttributeNames(long_or_kurz)
b = self.getOneHotAttributesNames(long_or_kurz)
return np.setdiff1d(a,b)
def getVariableRanges(self):
return dict(zip(
self.getInputAttributeNames('kurz'),
[
self.attributes_kurz[attr_name_kurz].upper_bound -
self.attributes_kurz[attr_name_kurz].lower_bound
for attr_name_kurz in self.getInputAttributeNames('kurz')
],
))
def printDataset(self, long_or_kurz = 'kurz'):
if long_or_kurz == 'long':
for attr_name_long in self.attributes_long:
print(self.attributes_long[attr_name_long].__dict__)
elif long_or_kurz == 'kurz':
for attr_name_kurz in self.attributes_kurz:
print(self.attributes_kurz[attr_name_kurz].__dict__)
else:
raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')
# (2020.04.15) perhaps we need a memoize here... but I tried calling this function
# multiple times in a row from another file and it always returned the same slice
# of data... weird.
def getTrainTestSplit(self, preprocessing = None, with_meta = False, balanced = True):
# When working only with normalized data in [0, 1], data ranges must change to [0, 1] as well
# otherwise, when computing normalized distances we would normalize with the initial ranges again!
# pseudonym (2020.05.17) does this work with cat/ord and sub-cat/sub-ord data???
def setBoundsToZeroOne():
for attr_name_kurz in self.getNonHotAttributesNames('kurz'):
attr_obj = self.attributes_kurz[attr_name_kurz]
attr_obj.lower_bound = 0.0
attr_obj.upper_bound = 1.0
attr_obj = self.attributes_long[attr_obj.attr_name_long]
attr_obj.lower_bound = 0.0
attr_obj.upper_bound = 1.0
# Normalize data: bring everything to [0, 1] - implemented for when feeding the model to DiCE
def normalizeData(X_train, X_test):
for attr_name_kurz in self.getNonHotAttributesNames('kurz'):
attr_obj = self.attributes_kurz[attr_name_kurz]
lower_bound = attr_obj.lower_bound
upper_bound = attr_obj.upper_bound
X_train[attr_name_kurz] = (X_train[attr_name_kurz] - lower_bound) / (upper_bound - lower_bound)
X_test[attr_name_kurz] = (X_test[attr_name_kurz] - lower_bound) / (upper_bound - lower_bound)
setBoundsToZeroOne()
return X_train, X_test
# TODO: This should be used with caution... it messes things up in MACE as ranges
# will differ between factual and counterfactual domains
def standardizeData(X_train, X_test):
x_mean = X_train.mean()
x_std = X_train.std()
for index in x_std.index:
if '_ord_' in index or '_cat_' in index:
x_mean[index] = 0
x_std[index] = 1
X_train = (X_train - x_mean) / x_std
X_test = (X_test - x_mean) / x_std
return X_train, X_test
def getBalancedDataFrame(data_frame, output_col):
# assert only two classes in label (maybe relax later??)
unique_labels = np.unique(data_frame[output_col])
assert \
np.array_equal(
unique_labels,
np.array([0, 1]) # only allowing {0, 1} labels,
) or \
np.array_equal(
unique_labels,
np.array([-1, 1]) # only allowing {-1, 1} labels,
), \
f'expected unique labels to be [0, 1] or [-1, 1], but got {unique_labels}'
# get balanced dataframe (take minimum of the count, then round down to nearest 250)
unique_values_and_count = data_frame[output_col].value_counts()
number_of_subsamples_in_each_class = unique_values_and_count.min() // 250 * 250
data_frame = pd.concat([
data_frame[data_frame.loc[:,output_col] == unique_labels[0]].sample(number_of_subsamples_in_each_class, random_state = RANDOM_SEED),
data_frame[data_frame.loc[:,output_col] == unique_labels[1]].sample(number_of_subsamples_in_each_class, random_state = RANDOM_SEED),
]).sample(frac = 1, random_state = RANDOM_SEED)
# data_frame = pd.concat([
# data_frame[data_frame.loc[:,output_col] == 0],
# data_frame[data_frame.loc[:,output_col] == 1],
# ]).sample(frac = 1, random_state = RANDOM_SEED)
return data_frame
meta_cols = self.getMetaAttributeNames()
input_cols = self.getInputAttributeNames()
output_col = self.getOutputAttributeNames()[0]
data_frame = copy.deepcopy(self.data_frame_kurz)
if balanced:
data_frame = getBalancedDataFrame(data_frame, self.getOutputAttributeNames()[0])
if with_meta:
all_data = data_frame.loc[:,np.array((input_cols, meta_cols)).flatten()]
all_true_labels = data_frame.loc[:,output_col]
if preprocessing is not None:
assert with_meta == False, 'This feature is not built yet...'
X_train, X_test, y_train, y_test = train_test_split(
all_data,
all_true_labels,
train_size=.7,
random_state = RANDOM_SEED)
# ordering matters below: extract the meta columns (U) before restricting X to the input columns
U_train = X_train[self.getMetaAttributeNames()]
U_test = X_test[self.getMetaAttributeNames()]
X_train = X_train[self.getInputAttributeNames()]
X_test = X_test[self.getInputAttributeNames()]
y_train = y_train # noop
y_test = y_test # noop
return X_train, X_test, U_train, U_test, y_train, y_test
else:
all_data = data_frame.loc[:,input_cols]
all_true_labels = data_frame.loc[:,output_col]
X_train, X_test, y_train, y_test = train_test_split(
all_data,
all_true_labels,
train_size=.7,
random_state = RANDOM_SEED)
# TODO (2020.05.18): this should be updated so as NOT to update meta variables
if preprocessing == 'standardize':
X_train, X_test = standardizeData(X_train, X_test)
elif preprocessing == 'normalize':
X_train, X_test = normalizeData(X_train, X_test)
return X_train, X_test, y_train, y_test
def getOriginalDataFrame(self, num_samples, with_meta = False, with_label = False, balanced = True, data_split = 'train_and_test'):
if with_meta:
X_train, X_test, U_train, U_test, y_train, y_test = self.getTrainTestSplit(with_meta = True, balanced = balanced)
else:
X_train, X_test, y_train, y_test = self.getTrainTestSplit(with_meta = False, balanced = balanced)
# order of if/elif is important
if with_meta and with_label:
data_train = pd.concat([X_train, U_train, y_train], axis = 1)
data_test = pd.concat([X_test, U_test, y_test], axis = 1)
elif with_meta:
data_train = pd.concat([X_train, U_train], axis = 1)
data_test = pd.concat([X_test, U_test], axis = 1)
elif with_label:
data_train = pd.concat([X_train, y_train], axis = 1)
data_test = pd.concat([X_test, y_test], axis = 1)
else:
data_train = X_train
data_test = X_test
if data_split == 'train_and_test':
data_all = pd.concat([data_train, data_test], axis = 0)
elif data_split == 'train_only':
data_all = data_train
elif data_split == 'test_only':
data_all = data_test
else:
raise NotImplementedError
return data_all[:num_samples]
class DatasetAttribute(object):
def __init__(
self,
attr_name_long,
attr_name_kurz,
attr_type,
node_type,
actionability,
mutability,
parent_name_long,
parent_name_kurz,
lower_bound,
upper_bound):
if attr_type not in VALID_ATTRIBUTE_DATA_TYPES:
raise Exception("`attr_type` must be one of %r." % VALID_ATTRIBUTE_DATA_TYPES)
if node_type not in VALID_ATTRIBUTE_NODE_TYPES:
raise Exception("`node_type` must be one of %r." % VALID_ATTRIBUTE_NODE_TYPES)
if actionability not in VALID_ACTIONABILITY_TYPES:
raise Exception("`actionability` must be one of %r." % VALID_ACTIONABILITY_TYPES)
if mutability not in VALID_MUTABILITY_TYPES:
raise Exception("`mutability` must be one of %r." % VALID_MUTABILITY_TYPES)
if lower_bound > upper_bound:
raise Exception("`lower_bound` must be <= `upper_bound`")
if attr_type in {'sub-categorical', 'sub-ordinal'}:
assert parent_name_long != -1, 'Parent ID must be set for sub- (one-hot) attribute.'
assert parent_name_kurz != -1, 'Parent ID must be set for sub- (one-hot) attribute.'
if attr_type == 'sub-categorical':
assert lower_bound == 0
assert upper_bound == 1
if attr_type == 'sub-ordinal':
# the first elem in thermometer is always on, but the rest may be on or off
assert lower_bound == 0 or lower_bound == 1
assert upper_bound == 1
else:
assert parent_name_long == -1, 'Parent ID set for non-hot attribute.'
assert parent_name_kurz == -1, 'Parent ID set for non-hot attribute.'
if attr_type in {'categorical', 'ordinal'}:
assert lower_bound == 1 # setOneHotValue & setThermoValue assume this in their logic
if attr_type in {'binary', 'categorical', 'sub-categorical'}: # not 'ordinal' or 'sub-ordinal'
# IMPORTANT: surprisingly, it is OK if all sub-ordinal variables share actionability
# think about it, if each sub- variable is same-or-increase, along with
# the constraints that x0_ord_1 >= x0_ord_2, all variables can only stay
# the same or increase. It works :)
assert actionability in {'none', 'any'}, f"{attr_type}'s actionability can only be in {'none', 'any'}, not `{actionability}`."
if node_type != 'input':
assert actionability == 'none', f'{node_type} attribute is not actionable.'
assert mutability == False, f'{node_type} attribute is not mutable.'
# We have introduced 3 types of variables: (actionable and mutable, non-actionable but mutable, immutable and non-actionable)
if actionability != 'none':
assert mutability == True
# TODO: above/below seem contradictory... (2020.04.14)
if mutability == False:
assert actionability == 'none'
if parent_name_long == -1 or parent_name_kurz == -1:
assert parent_name_long == parent_name_kurz == -1
self.attr_name_long = attr_name_long
self.attr_name_kurz = attr_name_kurz
self.attr_type = attr_type
self.node_type = node_type
self.actionability = actionability
self.mutability = mutability
self.parent_name_long = parent_name_long
self.parent_name_kurz = parent_name_kurz
self.lower_bound = lower_bound
self.upper_bound = upper_bound
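# Illustrative sketch only (never called): building a couple of DatasetAttribute
# objects by hand to show how the validation above is satisfied. The attribute
# names and bounds below are hypothetical; in practice attributes are created
# inside loadDataset from the raw data.
def _example_dataset_attributes():
    age = DatasetAttribute(
        attr_name_long = 'Age',
        attr_name_kurz = 'x0',
        attr_type = 'numeric-int',
        node_type = 'input',
        actionability = 'any',
        mutability = True,
        parent_name_long = -1,
        parent_name_kurz = -1,
        lower_bound = 18,
        upper_bound = 90)
    # a one-hot child column whose parent is a 'categorical' attribute
    work_class_cat_0 = DatasetAttribute(
        attr_name_long = 'WorkClass_cat_0',
        attr_name_kurz = 'x1_cat_0',
        attr_type = 'sub-categorical',
        node_type = 'input',
        actionability = 'any',
        mutability = True,
        parent_name_long = 'WorkClass',
        parent_name_kurz = 'x1',
        lower_bound = 0,
        upper_bound = 1)
    return age, work_class_cat_0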
def loadDataset(dataset_name, return_one_hot, load_from_cache = False, debug_flag = True, index_offset = 0, meta_param = None):
def getInputOutputColumns(data_frame):
all_data_frame_cols = data_frame.columns.values
input_cols = [x for x in all_data_frame_cols if 'label' not in x.lower()]
output_cols = [x for x in all_data_frame_cols if 'label' in x.lower()]
assert len(output_cols) == 1
return input_cols, output_cols[0]
one_hot_string = 'one_hot' if return_one_hot else 'non_hot'
save_file_path = os.path.join(
os.path.dirname(__file__),
f'_data_main/_cached/{dataset_name}_{one_hot_string}'
)
if load_from_cache:
if debug_flag: print(f'[INFO] Attempting to load saved dataset (`{dataset_name}`) from cache...\t', end = '')
try:
tmp = pickle.load(open(save_file_path, 'rb'))
if debug_flag: print('done.')
return tmp
except Exception:
if debug_flag: print('failed. Re-creating dataset...')
if dataset_name == 'adult':
data_frame_non_hot = load_adult_data_new()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'Sex':
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'Age':
attr_type = 'binary' # 'numeric-int'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'NativeCountry': #~ RACE
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'WorkClass':
attr_type = 'categorical'
actionability = 'any'
mutability = True
# elif col_name == 'EducationNumber':
# attr_type = 'numeric-int'
# actionability = 'any'
# mutability = True
elif col_name == 'EducationLevel':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'MaritalStatus':
attr_type = 'categorical'
actionability = 'any'
mutability = True
elif col_name == 'Occupation':
attr_type = 'categorical'
actionability = 'any'
mutability = True
# elif col_name == 'Relationship':
# attr_type = 'categorical'
# actionability = 'any'
# mutability = True
# elif col_name == 'CapitalGain':
# attr_type = 'numeric-real'
# actionability = 'any'
# mutability = True
# elif col_name == 'CapitalLoss':
# attr_type = 'numeric-real'
# actionability = 'any'
# mutability = True
elif col_name == 'HoursPerWeek':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'german':
data_frame_non_hot = load_german_data()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'Sex': # TODO: make sex and race immutable in all datasets!
attr_type = 'binary'
actionability = 'any'
mutability = True
elif col_name == 'Age':
attr_type = 'numeric-int' # 'numeric-real'
actionability = 'same-or-increase'
mutability = True
elif col_name == 'Credit':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'LoanDuration':
attr_type = 'numeric-int'
actionability = 'none'
mutability = True
# elif col_name == 'CheckingAccountBalance':
# attr_type = 'ordinal' # 'numeric-real'
# actionability = 'any'
# mutability = True
# elif col_name == 'SavingsAccountBalance':
# attr_type = 'ordinal'
# actionability = 'any'
# mutability = True
# elif col_name == 'HousingStatus':
# attr_type = 'ordinal'
# actionability = 'any'
# mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'credit':
data_frame_non_hot = load_credit_data()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'isMale':
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'isMarried':
attr_type = 'binary'
actionability = 'any'
mutability = True
elif col_name == 'AgeGroup':
attr_type = 'ordinal'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'EducationLevel':
attr_type = 'ordinal'
actionability = 'any'
mutability = True
elif col_name == 'MaxBillAmountOverLast6Months':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'MaxPaymentAmountOverLast6Months':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'MonthsWithZeroBalanceOverLast6Months':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'MonthsWithLowSpendingOverLast6Months':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'MonthsWithHighSpendingOverLast6Months':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'MostRecentBillAmount':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'MostRecentPaymentAmount':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'TotalOverdueCounts':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'TotalMonthsOverdue':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'HasHistoryOfOverduePayments':
attr_type = 'binary'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'compass':
data_frame_non_hot = load_compas_data_new()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'AgeGroup':
attr_type = 'ordinal'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'Race':
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'Sex':
attr_type = 'binary'
actionability = 'any' # 'none'
mutability = True
elif col_name == 'PriorsCount':
attr_type = 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'ChargeDegree':
attr_type = 'binary'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'synthetic':
variable_type = 'real'
# variable_type = 'integer'
scm_class = meta_param
data_frame_non_hot = load_synthetic_data(scm_class, variable_type)
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
# ordering of next two lines matters (shouldn't overwrite input_cols); silly code... :|
meta_cols = [col_name for col_name in input_cols if 'u' in col_name]
input_cols = [col_name for col_name in input_cols if 'x' in col_name] # endogenous variables must start with `x`
if 'fair' in scm_class:
# fair experiments (other than adult) rely on labels being in {-1/+1}
# TODO (lowpri): can we change this?? can sklearn svm and lr predict 0,1 instead of -1/+1??
data_frame_non_hot[output_col] = data_frame_non_hot[output_col] * 2 - 1
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'
node_type = 'input'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = col_name,
attr_type = attr_type,
node_type = node_type,
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(meta_cols):
attr_type = 'numeric-real'
node_type = 'meta'
actionability = 'none'
mutability = False
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = col_name,
attr_type = attr_type,
node_type = node_type,
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'mortgage':
data_frame_non_hot = load_mortgage_data()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'x0':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
elif col_name == 'x1':
attr_type = 'numeric-real'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'twomoon':
variable_type = 'real'
# variable_type = 'integer'
data_frame_non_hot = load_twomoon_data(variable_type)
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'x0':
attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'
actionability = 'any'
mutability = True
elif col_name == 'x1':
attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
elif dataset_name == 'test':
data_frame_non_hot = load_test_data()
data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)
attributes_non_hot = {}
input_cols, output_col = getInputOutputColumns(data_frame_non_hot)
col_name = output_col
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = 'y',
attr_type = 'binary',
node_type = 'output',
actionability = 'none',
mutability = False,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
for col_idx, col_name in enumerate(input_cols):
if col_name == 'x0':
attr_type = 'categorical'
actionability = 'any'
mutability = True
attributes_non_hot[col_name] = DatasetAttribute(
attr_name_long = col_name,
attr_name_kurz = f'x{col_idx + index_offset}',
attr_type = attr_type,
node_type = 'input',
actionability = actionability,
mutability = mutability,
parent_name_long = -1,
parent_name_kurz = -1,
lower_bound = data_frame_non_hot[col_name].min(),
upper_bound = data_frame_non_hot[col_name].max())
else:
raise Exception(f'{dataset_name} not recognized as a valid dataset.')
if return_one_hot:
data_frame, attributes = getOneHotEquivalent(data_frame_non_hot, attributes_non_hot)
else:
data_frame, attributes = data_frame_non_hot, attributes_non_hot
# save then return
dataset_obj = Dataset(data_frame, attributes, return_one_hot, dataset_name)
# if not loading from cache, we always overwrite the cache
pickle.dump(dataset_obj, open(save_file_path, 'wb'))
return dataset_obj
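# Minimal usage sketch (never called here): load one of the supported datasets
# and pull a train/test split from the resulting Dataset object. This assumes
# the raw data files needed by load_adult_data_new are available locally.
def _example_load_dataset():
    dataset_obj = loadDataset('adult', return_one_hot = True, load_from_cache = False)
    X_train, X_test, y_train, y_test = dataset_obj.getTrainTestSplit(preprocessing = 'normalize')
    return X_train.shape, X_test.shape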
# TODO: consider moving into Dataset class with getOneHot and getNonHot methods
def getOneHotEquivalent(data_frame_non_hot, attributes_non_hot):
# TODO: see how we can switch between feature_names = col names for kurz and long (also maybe ordered)
data_frame = copy.deepcopy(data_frame_non_hot)
attributes = copy.deepcopy(attributes_non_hot)
def setOneHotValue(val):
return np.append(np.append(
np.zeros(val - 1),
np.ones(1)),
np.zeros(num_unique_values - val)
)
def setThermoValue(val):
return np.append(
np.ones(val),
np.zeros(num_unique_values - val)
)
for col_name in data_frame.columns.values:
if attributes[col_name].attr_type not in {'categorical', 'ordinal'}:
continue
old_col_name_long = col_name
new_col_names_long = []
new_col_names_kurz = []
old_attr_name_long = attributes[old_col_name_long].attr_name_long
old_attr_name_kurz = attributes[old_col_name_long].attr_name_kurz
old_attr_type = attributes[old_col_name_long].attr_type
old_node_type = attributes[old_col_name_long].node_type
old_actionability = attributes[old_col_name_long].actionability
old_mutability = attributes[old_col_name_long].mutability
old_lower_bound = attributes[old_col_name_long].lower_bound
old_upper_bound = attributes[old_col_name_long].upper_bound
num_unique_values = int(old_upper_bound - old_lower_bound + 1)
assert old_col_name_long == old_attr_name_long
new_attr_type = 'sub-' + old_attr_type
new_node_type = old_node_type
new_actionability = old_actionability
new_mutability = old_mutability
new_parent_name_long = old_attr_name_long
new_parent_name_kurz = old_attr_name_kurz
if attributes[col_name].attr_type == 'categorical': # do not do this for 'binary'!
new_col_names_long = [f'{old_attr_name_long}_cat_{i}' for i in range(num_unique_values)]
new_col_names_kurz = [f'{old_attr_name_kurz}_cat_{i}' for i in range(num_unique_values)]
print(f'Replacing column {col_name} with {{{", ".join(new_col_names_long)}}}')
tmp = np.array(list(map(setOneHotValue, list(data_frame[col_name].astype(int).values))))
data_frame_dummies = pd.DataFrame(data=tmp, columns=new_col_names_long)
elif attributes[col_name].attr_type == 'ordinal':
new_col_names_long = [f'{old_attr_name_long}_ord_{i}' for i in range(num_unique_values)]
new_col_names_kurz = [f'{old_attr_name_kurz}_ord_{i}' for i in range(num_unique_values)]
print(f'Replacing column {col_name} with {{{", ".join(new_col_names_long)}}}')
tmp = np.array(list(map(setThermoValue, list(data_frame[col_name].astype(int).values))))
data_frame_dummies = pd.DataFrame(data=tmp, columns=new_col_names_long)
# Update data_frame
data_frame = pd.concat([data_frame.drop(columns = old_col_name_long), data_frame_dummies], axis=1)
# Update attributes
del attributes[old_col_name_long]
for col_idx in range(len(new_col_names_long)):
new_col_name_long = new_col_names_long[col_idx]
new_col_name_kurz = new_col_names_kurz[col_idx]
attributes[new_col_name_long] = DatasetAttribute(
attr_name_long = new_col_name_long,
attr_name_kurz = new_col_name_kurz,
attr_type = new_attr_type,
node_type = new_node_type,
actionability = new_actionability,
mutability = new_mutability,
parent_name_long = new_parent_name_long,
parent_name_kurz = new_parent_name_kurz,
lower_bound = data_frame[new_col_name_long].min(),
upper_bound = data_frame[new_col_name_long].max())
return data_frame, attributes
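# Illustrative sketches only (not used by the pipeline above): the nested helpers
# in getOneHotEquivalent encode a 1-indexed level `val` out of `num_unique_values`
# either as one-hot (exactly one position set) or as a thermometer (all positions
# up to `val` set). These standalone re-implementations mirror that logic and rely
# on this module's existing `np` (numpy) import.
def _one_hot_sketch(val, num_unique_values):
    # e.g. val = 3, num_unique_values = 5 --> [0, 0, 1, 0, 0]
    return np.append(np.append(np.zeros(val - 1), np.ones(1)), np.zeros(num_unique_values - val))
def _thermometer_sketch(val, num_unique_values):
    # e.g. val = 3, num_unique_values = 5 --> [1, 1, 1, 0, 0]
    return np.append(np.ones(val), np.zeros(num_unique_values - val))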
|
import synapse.exc as s_exc
import synapse.lib.gis as s_gis
import synapse.lib.layer as s_layer
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.grammar as s_grammar
units = {
'mm': 1,
'millimeter': 1,
'millimeters': 1,
'cm': 10,
'centimeter': 10,
'centimeters': 10,
# international foot
'foot': 304.8,
'feet': 304.8,
'm': 1000,
'meter': 1000,
'meters': 1000,
# international mile
'mile': 1609344,
'miles': 1609344,
'km': 1000000,
'kilometer': 1000000,
'kilometers': 1000000,
# international yard
'yard': 914.4,
'yards': 914.4,
}
distrepr = (
(1000000.0, 'km'),
(1000.0, 'm'),
(10.0, 'cm'),
)
geojsonschema = {
'definitions': {
'BoundingBox': {'type': 'array', 'minItems': 4, 'items': {'type': 'number'}},
'PointCoordinates': {'type': 'array', 'minItems': 2, 'items': {'type': 'number'}},
'LineStringCoordinates': {'type': 'array', 'minItems': 2, 'items': {'$ref': '#/definitions/PointCoordinates'}},
'LinearRingCoordinates': {'type': 'array', 'minItems': 4, 'items': {'$ref': '#/definitions/PointCoordinates'}},
'PolygonCoordinates': {'type': 'array', 'items': {'$ref': '#/definitions/LinearRingCoordinates'}},
'Point': {
'title': 'GeoJSON Point',
'type': 'object',
'required': ['type', 'coordinates'],
'properties': {
'type': {'type': 'string', 'enum': ['Point']},
'coordinates': {'$ref': '#/definitions/PointCoordinates'},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
'LineString': {
'title': 'GeoJSON LineString',
'type': 'object',
'required': ['type', 'coordinates'],
'properties': {
'type': {'type': 'string', 'enum': ['LineString']},
'coordinates': {'$ref': '#/definitions/LineStringCoordinates'},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
'Polygon': {
'title': 'GeoJSON Polygon',
'type': 'object',
'required': ['type', 'coordinates'],
'properties': {
'type': {'type': 'string', 'enum': ['Polygon']},
'coordinates': {'$ref': '#/definitions/PolygonCoordinates'},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
'MultiPoint': {
'title': 'GeoJSON MultiPoint',
'type': 'object',
'required': ['type', 'coordinates'],
'properties': {
'type': {'type': 'string', 'enum': ['MultiPoint']},
'coordinates': {'type': 'array', 'items': {'$ref': '#/definitions/PointCoordinates'}},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
'MultiLineString': {
'title': 'GeoJSON MultiLineString',
'type': 'object',
'required': ['type', 'coordinates'],
'properties': {
'type': {'type': 'string', 'enum': ['MultiLineString']},
'coordinates': {'type': 'array', 'items': {'$ref': '#/definitions/LineStringCoordinates'}},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
'MultiPolygon': {
'title': 'GeoJSON MultiPolygon',
'type': 'object',
'required': ['type', 'coordinates'],
'properties': {
'type': {'type': 'string', 'enum': ['MultiPolygon']},
'coordinates': {'type': 'array', 'items': {'$ref': '#/definitions/PolygonCoordinates'}},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
'GeometryCollection': {
'title': 'GeoJSON GeometryCollection',
'type': 'object',
'required': ['type', 'geometries'],
'properties': {
'type': {'type': 'string', 'enum': ['GeometryCollection']},
'geometries': {'type': 'array', 'items': {'oneOf': [
{'$ref': '#/definitions/Point'},
{'$ref': '#/definitions/LineString'},
{'$ref': '#/definitions/Polygon'},
{'$ref': '#/definitions/MultiPoint'},
{'$ref': '#/definitions/MultiLineString'},
{'$ref': '#/definitions/MultiPolygon'},
]}},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
'Feature': {
'title': 'GeoJSON Feature',
'type': 'object',
'required': ['type', 'properties', 'geometry'],
'properties': {
'type': {'type': 'string', 'enum': ['Feature']},
'geometry': {'oneOf': [
{'type': 'null'},
{'$ref': '#/definitions/Point'},
{'$ref': '#/definitions/LineString'},
{'$ref': '#/definitions/Polygon'},
{'$ref': '#/definitions/MultiPoint'},
{'$ref': '#/definitions/MultiLineString'},
{'$ref': '#/definitions/MultiPolygon'},
{'$ref': '#/definitions/GeometryCollection'},
]},
'properties': {'oneOf': [{'type': 'null'}, {'type': 'object'}]},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
'FeatureCollection': {
'title': 'GeoJSON FeatureCollection',
'type': 'object',
'required': ['type', 'features'],
'properties': {
'type': {'type': 'string', 'enum': ['FeatureCollection']},
'features': {'type': 'array', 'items': {'$ref': '#/definitions/Feature'}},
'bbox': {'$ref': '#/definitions/BoundingBox'},
},
},
},
'oneOf': [
{'$ref': '#/definitions/Point'},
{'$ref': '#/definitions/LineString'},
{'$ref': '#/definitions/Polygon'},
{'$ref': '#/definitions/MultiPoint'},
{'$ref': '#/definitions/MultiLineString'},
{'$ref': '#/definitions/MultiPolygon'},
{'$ref': '#/definitions/GeometryCollection'},
{'$ref': '#/definitions/Feature'},
{'$ref': '#/definitions/FeatureCollection'},
],
}
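# Illustrative example only (not referenced by the model below): a minimal GeoJSON
# Point value that conforms to the schema above. Coordinates are ordered
# [longitude, latitude] per the GeoJSON specification; the values are made up.
_geojson_point_example = {
    'type': 'Point',
    'coordinates': [-74.0060, 40.7128],
}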
class Dist(s_types.Int):
def postTypeInit(self):
s_types.Int.postTypeInit(self)
self.setNormFunc(int, self._normPyInt)
self.setNormFunc(str, self._normPyStr)
self.baseoff = self.opts.get('baseoff', 0)
def _normPyInt(self, valu):
return valu, {}
def _normPyStr(self, text):
try:
valu, off = s_grammar.parse_float(text, 0)
except Exception:
raise s_exc.BadTypeValu(valu=text, name=self.name,
mesg='Dist requires a valid float and dist '
'unit, no valid float found') from None
unit, off = s_grammar.nom(text, off, s_grammar.alphaset)
mult = units.get(unit.lower())
if mult is None:
raise s_exc.BadTypeValu(valu=text, name=self.name,
mesg='invalid/unknown dist unit: %s' % (unit,))
norm = int(valu * mult) + self.baseoff
if norm < 0:
mesg = 'A geo:dist may not be negative.'
raise s_exc.BadTypeValu(mesg=mesg, name=self.name, valu=text)
return norm, {}
def repr(self, norm):
valu = norm - self.baseoff
text = None
absv = abs(valu)
for base, unit in distrepr:
if absv >= base:
size = absv / base
text = '%s %s' % (size, unit)
break
if text is None:
text = '%d mm' % (absv,)
if valu < 0:
text = f'-{text}'
return text
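# Illustrative sketch only (independent of the Dist type above): normalize a
# distance string into millimeters using the `units` table, the same base unit
# Dist stores. The regex parsing here is a simplification of the grammar-driven
# parsing in Dist._normPyStr and is not used anywhere else in this module.
import re
def _dist_to_mm_sketch(text):
    match = re.match(r'\s*([0-9.]+)\s*([a-zA-Z]+)\s*$', text)
    if match is None:
        raise ValueError(f'cannot parse distance: {text!r}')
    valu, unit = float(match.group(1)), match.group(2).lower()
    mult = units.get(unit)
    if mult is None:
        raise ValueError(f'unknown distance unit: {unit!r}')
    return int(valu * mult)
# e.g. _dist_to_mm_sketch('10 km') == 10000000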
class LatLong(s_types.Type):
stortype = s_layer.STOR_TYPE_LATLONG
def postTypeInit(self):
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(list, self._normPyTuple)
self.setNormFunc(tuple, self._normPyTuple)
self.setCmprCtor('near=', self._cmprNear)
self.storlifts.update({
'near=': self._storLiftNear,
})
def _normCmprValu(self, valu):
latlong, dist = valu
rlatlong = self.modl.type('geo:latlong').norm(latlong)[0]
rdist = self.modl.type('geo:dist').norm(dist)[0]
return rlatlong, rdist
def _cmprNear(self, valu):
latlong, dist = self._normCmprValu(valu)
def cmpr(valu):
if s_gis.haversine(valu, latlong) <= dist:
return True
return False
return cmpr
def _storLiftNear(self, cmpr, valu):
latlong = self.norm(valu[0])[0]
dist = self.modl.type('geo:dist').norm(valu[1])[0]
return ((cmpr, (latlong, dist), self.stortype),)
def _normPyStr(self, valu):
valu = tuple(valu.strip().split(','))
return self._normPyTuple(valu)
def _normPyTuple(self, valu):
if len(valu) != 2:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Valu must contain valid latitude,longitude')
try:
latv = self.modl.type('geo:latitude').norm(valu[0])[0]
lonv = self.modl.type('geo:longitude').norm(valu[1])[0]
except Exception as e:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg=str(e)) from None
return (latv, lonv), {'subs': {'lat': latv, 'lon': lonv}}
def repr(self, norm):
return f'{norm[0]},{norm[1]}'
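# Illustrative sketch only (independent of the LatLong type above): the near=
# comparator keeps a (lat, lon) value when its great-circle distance to a
# reference point falls within a given radius. The helper below computes a
# haversine distance in millimeters (geo:dist's base unit) using a mean Earth
# radius of 6371.0088 km; it is a stand-in for synapse.lib.gis.haversine,
# not a copy of it.
import math
def _haversine_mm_sketch(latlong1, latlong2):
    lat1, lon1 = (math.radians(v) for v in latlong1)
    lat2, lon2 = (math.radians(v) for v in latlong2)
    dlat, dlon = lat2 - lat1, lon2 - lon1
    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
    return 2 * 6371008800 * math.asin(math.sqrt(a))
def _near_sketch(valu, latlong, dist_mm):
    # True when `valu` lies within `dist_mm` of the reference point `latlong`.
    return _haversine_mm_sketch(valu, latlong) <= dist_mm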
class GeoModule(s_module.CoreModule):
def getModelDefs(self):
return (
('geo', {
'ctors': (
('geo:dist', 'synapse.models.geospace.Dist', {}, {
'doc': 'A geographic distance (base unit is mm).', 'ex': '10 km'
}),
('geo:latlong', 'synapse.models.geospace.LatLong', {}, {
'doc': 'A Lat/Long string specifying a point on Earth.',
'ex': '-12.45,56.78'
}),
),
'types': (
('geo:nloc', ('comp', {'fields': (('ndef', 'ndef'), ('latlong', 'geo:latlong'), ('time', 'time'))}), {
'doc': 'Records a node latitude/longitude in space-time.'
}),
('geo:json', ('data', {'schema': geojsonschema}), {
'doc': 'GeoJSON structured JSON data.',
}),
('geo:place', ('guid', {}), {
'doc': 'A GUID for a geographic place.'}),
('geo:address', ('str', {'lower': 1, 'onespace': 1, 'strip': True}), {
'doc': 'A street/mailing address string.',
}),
('geo:longitude', ('float', {'min': -180.0, 'max': 180.0,
'minisvalid': False, 'maxisvalid': True}), {
'ex': '31.337',
'doc': 'A longitude in floating point notation.',
}),
('geo:latitude', ('float', {'min': -90.0, 'max': 90.0,
'minisvalid': True, 'maxisvalid': True}), {
'ex': '31.337',
'doc': 'A latitude in floating point notation.',
}),
('geo:bbox', ('comp', {'sepr': ',', 'fields': (
('xmin', 'geo:longitude'),
('xmax', 'geo:longitude'),
('ymin', 'geo:latitude'),
('ymax', 'geo:latitude'))}), {
'doc': 'A geospatial bounding box in (xmin, xmax, ymin, ymax) format.',
}),
('geo:altitude', ('geo:dist', {'baseoff': 6371008800}), {
'doc': "A negative or positive offset from Mean Sea Level (6,371.0088km from Earth's core)."
}),
),
'forms': (
('geo:nloc', {}, (
('ndef', ('ndef', {}), {'ro': True,
'doc': 'The node with location in geospace and time.'}),
('ndef:form', ('str', {}), {'ro': True,
'doc': 'The form of node referenced by the ndef.'}),
('latlong', ('geo:latlong', {}), {'ro': True,
'doc': 'The latitude/longitude the node was observed.'}),
('time', ('time', {}), {'ro': True,
'doc': 'The time the node was observed at location.'}),
('place', ('geo:place', {}), {
'doc': 'The place corresponding to the latlong property.'}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the node.'}),
)),
('geo:place', {}, (
('name', ('str', {'lower': 1, 'onespace': 1}), {
'doc': 'The name of the place.'}),
('parent', ('geo:place', {}), {
'doc': 'A parent place, possibly from reverse geocoding.'}),
('desc', ('str', {}), {
'doc': 'A long form description of the place.'}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the node.'}),
('address', ('geo:address', {}), {
'doc': 'The street/mailing address for the place.'}),
('geojson', ('geo:json', {}), {
'doc': 'A GeoJSON representation of the place.'}),
('latlong', ('geo:latlong', {}), {
'doc': 'The lat/long position for the place.'}),
('bbox', ('geo:bbox', {}), {
'doc': 'A bounding box which encompasses the place.'}),
('radius', ('geo:dist', {}), {
'doc': 'An approximate radius to use for bounding box calculation.'}),
('photo', ('file:bytes', {}), {
'doc': 'The image file to use as the primary image of the place.'}),
)),
)
}),
)
|
# Use snippet 'summarize_a_survey_module' to output a table and a graph of
# participant counts by response for one question_concept_id
# The snippet assumes that a dataframe containing survey questions and answers already exists
# The snippet also assumes that setup has been run
# Update the next 3 lines
survey_df = YOUR_DATASET_NAME_survey_df
question_concept_id = 1585940
denominator = None # e.g: 200000
####################################################################################
# DON'T CHANGE FROM HERE
####################################################################################
def summarize_a_question_concept_id(df, question_concept_id, denominator=None):
df = df.loc[df['question_concept_id'] == question_concept_id].copy()
new_df = df.groupby(['answer_concept_id', 'answer'])['person_id']\
.nunique()\
.reset_index()\
.rename(columns=dict(person_id='n_participant'))\
.assign(answer_concept_id = lambda x: np.int32(x.answer_concept_id))
if denominator:
new_df['response_rate'] = round(100*new_df['n_participant']/denominator,2)
if question_concept_id in df['question_concept_id'].unique():
print(f"Distribution of response to {df.loc[df['question_concept_id'] == question_concept_id, 'question'].unique()[0]}")
# show table
display(new_df)
# show graph
display(ggplot(data=new_df) +
geom_bar(aes(x='answer', y='n_participant'), stat='identity') +
coord_flip() +
labs(y="Participant count", x="") +
theme_bw())
else:
print("There is an error with your question_concept_id")
summarize_a_question_concept_id(survey_df, question_concept_id, denominator)
|
from __future__ import absolute_import, print_function, unicode_literals
import elliottlib
from elliottlib import constants, logutil, Runtime, bzutil, openshiftclient, errata
LOGGER = logutil.getLogger(__name__)
from elliottlib.cli import cli_opts
from elliottlib.cli.common import cli, use_default_advisory_option, find_default_advisory
from elliottlib.exceptions import ElliottFatalError
from elliottlib.util import green_prefix, green_print, red_print
import click
pass_runtime = click.make_pass_decorator(Runtime)
import datetime
import re
@cli.command("find-bugs", short_help="Find or add MODIFIED/VERIFIED bugs to ADVISORY")
@click.option("--add", "-a", 'advisory',
default=False, metavar='ADVISORY',
help="Add found bugs to ADVISORY. Applies to bug flags as well (by default only a list of discovered bugs is displayed)")
@use_default_advisory_option
@click.option("--mode",
required=True,
type=click.Choice(['list', 'sweep', 'diff', 'qe']),
default='list',
help='Mode to use to find bugs')
@click.option("--status", 'status',
multiple=True,
required=False,
default=['MODIFIED', 'VERIFIED', 'ON_QA'],
type=click.Choice(constants.VALID_BUG_STATES),
help="Status of the bugs")
@click.option("--id", metavar='BUGID', default=None,
multiple=True, required=False,
help="Bugzilla IDs to add, required for LIST mode.")
@click.option("--cve-trackers",
required=False,
is_flag=True,
help='Include CVE trackers in sweep mode')
@click.option("--from-diff", "--between",
required=False,
nargs=2,
help="Two payloads to compare against")
@click.option("--flag", metavar='FLAG',
required=False, multiple=True,
help="Optional flag to apply to found bugs [MULTIPLE]")
@click.option("--report",
required=False,
is_flag=True,
help="Output a detailed report of the found bugs")
@click.option("--into-default-advisories",
is_flag=True,
help='attaches bugs found to their correct default advisories, e.g. operator-related bugs go to "extras" instead of the default "image", bugs filtered into "none" are not attached at all.')
@click.option("--noop", "--dry-run",
is_flag=True,
default=False,
help="Don't change anything")
@pass_runtime
def find_bugs_cli(runtime, advisory, default_advisory_type, mode, status, id, cve_trackers, from_diff, flag, report, into_default_advisories, noop):
"""Find Red Hat Bugzilla bugs or add them to ADVISORY. Bugs can be
"swept" into the advisory either automatically (--mode sweep), or by
manually specifying one or more bugs using --mode list and the --id option.
Use cases are described below:
Note: Using --id without --add is basically pointless
SWEEP: For this use-case the --group option MUST be provided. The
--group automatically determines the correct target-releases to search
for bugs claimed to be fixed, but not yet attached to advisories.
LIST: The --group option is not required if you are specifying bugs
manually. Provide one or more --id's for manual bug addition. In LIST
mode you must provide a list of IDs to attach with the --id option.
DIFF: For this use case, you must provide the --between option using two
URLs to payloads.
QE: Find MODIFIED bugs for the target-releases, and set them to ON_QA.
The --group option MUST be provided. Cannot be used in combination
    with --add, --use-default-advisory, or --into-default-advisories.
Using --use-default-advisory without a value set for the matching key
in the build-data will cause an error and elliott will exit in a
non-zero state. Use of this option silently overrides providing an
advisory with the --add option.
Automatically add bugs with target-release matching 3.7.Z or 3.7.0
to advisory 123456:
\b
$ elliott --group openshift-3.7 find-bugs --mode sweep --add 123456
List bugs that WOULD be added to an advisory and have set the bro_ok flag on them (NOOP):
\b
$ elliott --group openshift-3.7 find-bugs --mode sweep --flag bro_ok
Attach bugs to their correct default advisories, e.g. operator-related bugs go to "extras" instead of the default "image":
\b
$ elliott --group=openshift-4.4 find-bugs --mode=sweep --into-default-advisories
Add two bugs to advisory 123456. Note that --group is not required
because we're not auto searching:
\b
$ elliott find-bugs --mode list --id 8675309 --id 7001337 --add 123456
Automatically find bugs for openshift-4.1 and attach them to the
rpm advisory defined in ocp-build-data:
\b
$ elliott --group=openshift-4.1 --mode sweep --use-default-advisory rpm
Find bugs for 4.6 that are in MODIFIED state, and set them to ON_QA:
\b
$ elliott --group=openshift-4.6 --mode qe
"""
if mode != 'list' and len(id) > 0:
raise click.BadParameter("Combining the automatic and manual bug attachment options is not supported")
if mode == 'list' and len(id) == 0:
raise click.BadParameter("When using mode=list, you must provide a list of bug IDs")
    if mode == 'diff' and not len(from_diff) == 2:
        raise click.BadParameter("If using mode=diff, you must provide two payloads to compare")
if sum(map(bool, [advisory, default_advisory_type, into_default_advisories])) > 1:
raise click.BadParameter("Use only one of --use-default-advisory, --add, or --into-default-advisories")
if mode == 'qe' and sum(map(bool, [advisory, default_advisory_type, into_default_advisories])) > 0:
raise click.BadParameter("--mode=qe does not operate on an advisory. Do not specify any of `--use-default-advisory`, `--add`, or `--into-default-advisories`")
runtime.initialize()
bz_data = runtime.gitdata.load_data(key='bugzilla').data
bzapi = bzutil.get_bzapi(bz_data)
if default_advisory_type is not None:
advisory = find_default_advisory(runtime, default_advisory_type)
if mode == 'sweep' or mode == 'qe':
if mode == 'qe':
status = ['MODIFIED']
green_prefix(f"Searching for bugs with status {" ".join(status)} and target release(s):")
click.echo(" {tr}".format(tr=", ".join(bz_data['target_release'])))
bugs = bzutil.search_for_bugs(bz_data, status, filter_out_security_bugs=not(cve_trackers), verbose=runtime.debug)
elif mode == 'list':
bugs = [bzapi.getbug(i) for i in cli_opts.id_convert(id)]
elif mode == 'diff':
click.echo(runtime.working_dir)
bug_id_strings = openshiftclient.get_bug_list(runtime.working_dir, from_diff[0], from_diff[1])
bugs = [bzapi.getbug(i) for i in bug_id_strings]
    # Some bugs should go to CPaaS, so we should ignore them
m = re.match(r"rhaos-(\d+).(\d+)", runtime.branch) # extract OpenShift version from the branch name. there should be a better way...
if not m:
raise ElliottFatalError(f"Unable to determine OpenShift version from branch name {runtime.branch}.")
major_version = int(m[1])
minor_version = int(m[2])
def _filter_bugs(bugs): # returns a list of bugs that should be processed
r = []
ignored_repos = set() # GitHub repos that should be ignored
if major_version == 4 and minor_version == 5:
# per https://issues.redhat.com/browse/ART-997: these repos should have their release-4.5 branches ignored by ART:
ignored_repos = {
"https://github.com/openshift/aws-ebs-csi-driver",
"https://github.com/openshift/aws-ebs-csi-driver-operator",
"https://github.com/openshift/cloud-provider-openstack",
"https://github.com/openshift/csi-driver-nfs",
"https://github.com/openshift/csi-driver-manila-operator"
}
for bug in bugs:
external_links = [ext["type"]["full_url"].replace("%id%", ext["ext_bz_bug_id"]) for ext in bug.external_bugs] # https://github.com/python-bugzilla/python-bugzilla/blob/7aa70edcfea9b524cd8ac51a891b6395ca40dc87/bugzilla/_cli.py#L750
public_links = [runtime.get_public_upstream(url)[0] for url in external_links] # translate openshift-priv org to openshift org when comparing to filter (i.e. prow may link to a PR on the private org).
# if a bug has 1 or more public_links, we should ignore the bug if ALL of the public_links are ANY of `ignored_repos`
if public_links and all(map(lambda url: any(map(lambda repo: url != repo and url.startswith(repo), ignored_repos)), public_links)):
continue
r.append(bug)
return r
if len(id) == 0: # unless --id is given, we should ignore bugs that don't belong to ART. e.g. some bugs should go to CPaaS
filtered_bugs = _filter_bugs(bugs)
green_prefix(f"Found {len(filtered_bugs)} bugs ({len(bugs) - len(filtered_bugs)} ignored):")
bugs = filtered_bugs
else:
green_prefix("Found {} bugs:".format(len(bugs)))
click.echo(" {}".format(", ".join([str(b.bug_id) for b in bugs])))
if mode == 'qe':
for bug in bugs:
bzutil.set_state(bug, 'ON_QA', noop=noop)
if len(flag) > 0:
for bug in bugs:
for f in flag:
if noop:
click.echo(f'Would have updated bug {bug.id} by setting flag {f}')
continue
bug.updateflags({f: "+"})
if report:
green_print("{:<8s} {:<25s} {:<12s} {:<7s} {:<10s} {:60s}".format("ID", "COMPONENT", "STATUS", "SCORE", "AGE", "SUMMARY"))
for bug in bugs:
created_date = datetime.datetime.strptime(str(bug.creation_time), '%Y%m%dT%H:%M:%S')
days_ago = (datetime.datetime.today() - created_date).days
click.echo("{:<8d} {:<25s} {:<12s} {:<7s} {:<3d} days {:60s} ".format(bug.id,
bug.component,
bug.status,
bug.cf_pm_score if hasattr(bug, "cf_pm_score") else '?',
days_ago,
bug.summary[:60]))
if advisory and not default_advisory_type: # `--add ADVISORY_NUMBER` should respect the user's wish and attach all available bugs to whatever advisory is specified.
errata.add_bugs_with_retry(advisory, bugs, noop=noop)
return
# If --use-default-advisory or --into-default-advisories is given, we need to determine which bugs should be swept into which advisory.
# Otherwise we don't need to sweep bugs at all.
if not (into_default_advisories or default_advisory_type):
return
impetus_bugs = {} # key is impetus ("rpm", "image", "extras"), value is a set of bug IDs.
# @lmeyer: simple and stupid would still be keeping the logic in python, possibly with config flags for branched logic. until that logic becomes too ugly to keep in python, i suppose..
if major_version < 4: # for 3.x, all bugs should go to the rpm advisory
impetus_bugs["rpm"] = set(bugs)
else: # for 4.x
# optional operators bugs should be swept to the "extras" advisory, while other bugs should be swept to "image" advisory.
# a way to identify operator-related bugs is by its "Component" value. temporarily hardcode here until we need to move it to ocp-build-data.
extra_components = {"Logging", "Service Brokers", "Metering Operator", "Node Feature Discovery Operator"} # we will probably find more
impetus_bugs["extras"] = {b for b in bugs if b.component in extra_components}
impetus_bugs["image"] = {b for b in bugs if b.component not in extra_components}
if default_advisory_type and impetus_bugs.get(default_advisory_type):
errata.add_bugs_with_retry(advisory, impetus_bugs[default_advisory_type], noop=noop)
elif into_default_advisories:
for impetus, bugs in impetus_bugs.items():
if bugs:
errata.add_bugs_with_retry(runtime.group_config.advisories[impetus], bugs, noop=noop)
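# Illustrative sketch only (not used by the CLI): a plain-string walkthrough of the
# public-link check applied inside _filter_bugs above. The repo URL is taken from the
# ART-997 ignore list; the PR link is invented for this example.
def _example_public_link_filter():
    ignored_repos = {"https://github.com/openshift/csi-driver-nfs"}
    # A PR under an ignored repo matches: it starts with the repo URL but is not
    # the repo URL itself, so a bug whose only public link is this PR is skipped.
    public_links = ["https://github.com/openshift/csi-driver-nfs/pull/42"]
    return bool(public_links) and all(
        any(url != repo and url.startswith(repo) for repo in ignored_repos)
        for url in public_links
    )  # True -> the sweep would ignore the bug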
|
from __future__ import absolute_import, print_function, unicode_literals
import elliottlib
from elliottlib import constants, logutil, Runtime, bzutil, openshiftclient, errata
LOGGER = logutil.getLogger(__name__)
from elliottlib.cli import cli_opts
from elliottlib.cli.common import cli, use_default_advisory_option, find_default_advisory
from elliottlib.exceptions import ElliottFatalError
from elliottlib.util import green_prefix, green_print, red_print
import click
pass_runtime = click.make_pass_decorator(Runtime)
import datetime
import re
@cli.command("find-bugs", short_help="Find or add MODIFED/VERIFIED bugs to ADVISORY")
@click.option("--add", "-a", 'advisory',
default=False, metavar='ADVISORY',
help="Add found bugs to ADVISORY. Applies to bug flags as well (by default only a list of discovered bugs are displayed)")
@use_default_advisory_option
@click.option("--mode",
required=True,
type=click.Choice(['list', 'sweep', 'diff', 'qe']),
default='list',
help='Mode to use to find bugs')
@click.option("--status", 'status',
multiple=True,
required=False,
default=['MODIFIED', 'VERIFIED', 'ON_QA'],
type=click.Choice(constants.VALID_BUG_STATES),
help="Status of the bugs")
@click.option("--id", metavar='BUGID', default=None,
multiple=True, required=False,
help="Bugzilla IDs to add, required for LIST mode.")
@click.option("--cve-trackers",
required=False,
is_flag=True,
help='Include CVE trackers in sweep mode')
@click.option("--from-diff", "--between",
required=False,
nargs=2,
help="Two payloads to compare against")
@click.option("--flag", metavar='FLAG',
required=False, multiple=True,
help="Optional flag to apply to found bugs [MULTIPLE]")
@click.option("--report",
required=False,
is_flag=True,
help="Output a detailed report of the found bugs")
@click.option("--into-default-advisories",
is_flag=True,
help='attaches bugs found to their correct default advisories, e.g. operator-related bugs go to "extras" instead of the default "image", bugs filtered into "none" are not attached at all.')
@click.option("--noop", "--dry-run",
is_flag=True,
default=False,
help="Don't change anything")
@pass_runtime
def find_bugs_cli(runtime, advisory, default_advisory_type, mode, status, id, cve_trackers, from_diff, flag, report, into_default_advisories, noop):
"""Find Red Hat Bugzilla bugs or add them to ADVISORY. Bugs can be
"swept" into the advisory either automatically (--mode sweep), or by
manually specifying one or more bugs using --mode list and the --id option.
Use cases are described below:
Note: Using --id without --add is basically pointless
SWEEP: For this use-case the --group option MUST be provided. The
--group automatically determines the correct target-releases to search
for bugs claimed to be fixed, but not yet attached to advisories.
LIST: The --group option is not required if you are specifying bugs
manually. Provide one or more --id's for manual bug addition. In LIST
mode you must provide a list of IDs to attach with the --id option.
DIFF: For this use case, you must provide the --between option using two
URLs to payloads.
QE: Find MODIFIED bugs for the target-releases, and set them to ON_QA.
The --group option MUST be provided. Cannot be used in combination
    with --add, --use-default-advisory, or --into-default-advisories.
Using --use-default-advisory without a value set for the matching key
in the build-data will cause an error and elliott will exit in a
non-zero state. Use of this option silently overrides providing an
advisory with the --add option.
Automatically add bugs with target-release matching 3.7.Z or 3.7.0
to advisory 123456:
\b
$ elliott --group openshift-3.7 find-bugs --mode sweep --add 123456
List bugs that WOULD be added to an advisory and have set the bro_ok flag on them (NOOP):
\b
$ elliott --group openshift-3.7 find-bugs --mode sweep --flag bro_ok
Attach bugs to their correct default advisories, e.g. operator-related bugs go to "extras" instead of the default "image":
\b
$ elliott --group=openshift-4.4 find-bugs --mode=sweep --into-default-advisories
Add two bugs to advisory 123456. Note that --group is not required
because we're not auto searching:
\b
$ elliott find-bugs --mode list --id 8675309 --id 7001337 --add 123456
Automatically find bugs for openshift-4.1 and attach them to the
rpm advisory defined in ocp-build-data:
\b
$ elliott --group=openshift-4.1 --mode sweep --use-default-advisory rpm
Find bugs for 4.6 that are in MODIFIED state, and set them to ON_QA:
\b
$ elliott --group=openshift-4.6 --mode qe
"""
if mode != 'list' and len(id) > 0:
raise click.BadParameter("Combining the automatic and manual bug attachment options is not supported")
if mode == 'list' and len(id) == 0:
raise click.BadParameter("When using mode=list, you must provide a list of bug IDs")
    if mode == 'diff' and not len(from_diff) == 2:
        raise click.BadParameter("If using mode=diff, you must provide two payloads to compare")
if sum(map(bool, [advisory, default_advisory_type, into_default_advisories])) > 1:
raise click.BadParameter("Use only one of --use-default-advisory, --add, or --into-default-advisories")
if mode == 'qe' and sum(map(bool, [advisory, default_advisory_type, into_default_advisories])) > 0:
raise click.BadParameter("--mode=qe does not operate on an advisory. Do not specify any of `--use-default-advisory`, `--add`, or `--into-default-advisories`")
runtime.initialize()
bz_data = runtime.gitdata.load_data(key='bugzilla').data
bzapi = bzutil.get_bzapi(bz_data)
if default_advisory_type is not None:
advisory = find_default_advisory(runtime, default_advisory_type)
if mode == 'sweep' or mode == 'qe':
if mode == 'qe':
status = ['MODIFIED']
green_prefix(f"Searching for bugs with status {' '.join(status)} and target release(s):")
click.echo(" {tr}".format(tr=", ".join(bz_data['target_release'])))
bugs = bzutil.search_for_bugs(bz_data, status, filter_out_security_bugs=not(cve_trackers), verbose=runtime.debug)
elif mode == 'list':
bugs = [bzapi.getbug(i) for i in cli_opts.id_convert(id)]
elif mode == 'diff':
click.echo(runtime.working_dir)
bug_id_strings = openshiftclient.get_bug_list(runtime.working_dir, from_diff[0], from_diff[1])
bugs = [bzapi.getbug(i) for i in bug_id_strings]
    # Some bugs should go to CPaaS, so we should ignore them
m = re.match(r"rhaos-(\d+).(\d+)", runtime.branch) # extract OpenShift version from the branch name. there should be a better way...
if not m:
raise ElliottFatalError(f"Unable to determine OpenShift version from branch name {runtime.branch}.")
major_version = int(m[1])
minor_version = int(m[2])
def _filter_bugs(bugs): # returns a list of bugs that should be processed
r = []
ignored_repos = set() # GitHub repos that should be ignored
if major_version == 4 and minor_version == 5:
# per https://issues.redhat.com/browse/ART-997: these repos should have their release-4.5 branches ignored by ART:
ignored_repos = {
"https://github.com/openshift/aws-ebs-csi-driver",
"https://github.com/openshift/aws-ebs-csi-driver-operator",
"https://github.com/openshift/cloud-provider-openstack",
"https://github.com/openshift/csi-driver-nfs",
"https://github.com/openshift/csi-driver-manila-operator"
}
for bug in bugs:
external_links = [ext["type"]["full_url"].replace("%id%", ext["ext_bz_bug_id"]) for ext in bug.external_bugs] # https://github.com/python-bugzilla/python-bugzilla/blob/7aa70edcfea9b524cd8ac51a891b6395ca40dc87/bugzilla/_cli.py#L750
public_links = [runtime.get_public_upstream(url)[0] for url in external_links] # translate openshift-priv org to openshift org when comparing to filter (i.e. prow may link to a PR on the private org).
# if a bug has 1 or more public_links, we should ignore the bug if ALL of the public_links are ANY of `ignored_repos`
if public_links and all(map(lambda url: any(map(lambda repo: url != repo and url.startswith(repo), ignored_repos)), public_links)):
continue
r.append(bug)
return r
if len(id) == 0: # unless --id is given, we should ignore bugs that don't belong to ART. e.g. some bugs should go to CPaaS
filtered_bugs = _filter_bugs(bugs)
green_prefix(f"Found {len(filtered_bugs)} bugs ({len(bugs) - len(filtered_bugs)} ignored):")
bugs = filtered_bugs
else:
green_prefix("Found {} bugs:".format(len(bugs)))
click.echo(" {}".format(", ".join([str(b.bug_id) for b in bugs])))
if mode == 'qe':
for bug in bugs:
bzutil.set_state(bug, 'ON_QA', noop=noop)
if len(flag) > 0:
for bug in bugs:
for f in flag:
if noop:
click.echo(f'Would have updated bug {bug.id} by setting flag {f}')
continue
bug.updateflags({f: "+"})
if report:
green_print("{:<8s} {:<25s} {:<12s} {:<7s} {:<10s} {:60s}".format("ID", "COMPONENT", "STATUS", "SCORE", "AGE", "SUMMARY"))
for bug in bugs:
created_date = datetime.datetime.strptime(str(bug.creation_time), '%Y%m%dT%H:%M:%S')
days_ago = (datetime.datetime.today() - created_date).days
click.echo("{:<8d} {:<25s} {:<12s} {:<7s} {:<3d} days {:60s} ".format(bug.id,
bug.component,
bug.status,
bug.cf_pm_score if hasattr(bug, "cf_pm_score") else '?',
days_ago,
bug.summary[:60]))
if advisory and not default_advisory_type: # `--add ADVISORY_NUMBER` should respect the user's wish and attach all available bugs to whatever advisory is specified.
errata.add_bugs_with_retry(advisory, bugs, noop=noop)
return
# If --use-default-advisory or --into-default-advisories is given, we need to determine which bugs should be swept into which advisory.
# Otherwise we don't need to sweep bugs at all.
if not (into_default_advisories or default_advisory_type):
return
impetus_bugs = {} # key is impetus ("rpm", "image", "extras"), value is a set of bug IDs.
# @lmeyer: simple and stupid would still be keeping the logic in python, possibly with config flags for branched logic. until that logic becomes too ugly to keep in python, i suppose..
if major_version < 4: # for 3.x, all bugs should go to the rpm advisory
impetus_bugs["rpm"] = set(bugs)
else: # for 4.x
# optional operators bugs should be swept to the "extras" advisory, while other bugs should be swept to "image" advisory.
# a way to identify operator-related bugs is by its "Component" value. temporarily hardcode here until we need to move it to ocp-build-data.
extra_components = {"Logging", "Service Brokers", "Metering Operator", "Node Feature Discovery Operator"} # we will probably find more
impetus_bugs["extras"] = {b for b in bugs if b.component in extra_components}
impetus_bugs["image"] = {b for b in bugs if b.component not in extra_components}
if default_advisory_type and impetus_bugs.get(default_advisory_type):
errata.add_bugs_with_retry(advisory, impetus_bugs[default_advisory_type], noop=noop)
elif into_default_advisories:
for impetus, bugs in impetus_bugs.items():
if bugs:
errata.add_bugs_with_retry(runtime.group_config.advisories[impetus], bugs, noop=noop)
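# Illustrative sketch only (not used by the CLI): how the 4.x impetus bucketing above
# splits bugs between the "extras" and "image" advisories by Bugzilla component.
# FakeBug and the bug data below are invented for this example.
def _example_impetus_bucketing():
    from collections import namedtuple
    FakeBug = namedtuple("FakeBug", "bug_id component")
    bugs = [FakeBug(1, "Logging"), FakeBug(2, "Networking")]
    extra_components = {"Logging", "Service Brokers", "Metering Operator",
                        "Node Feature Discovery Operator"}
    extras = {b for b in bugs if b.component in extra_components}      # -> bug 1
    images = {b for b in bugs if b.component not in extra_components}  # -> bug 2
    return extras, images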
|
import json
import click
from isic_cli.cli.context import IsicContext
@click.group(short_help='Manage authentication with the ISIC Archive.')
@click.pass_obj
def user(ctx):
pass
@user.command()
@click.pass_obj
def login(obj: IsicContext):
"""Login to the ISIC Archive."""
if obj.user:
        click.echo(f'Hello {obj.user["email"]}!')
else:
obj.oauth.login()
click.echo('Success!')
@user.command()
@click.pass_obj
def logout(obj: IsicContext):
"""Logout of the ISIC Archive."""
obj.oauth.logout()
@user.command(hidden=True)
@click.pass_obj
def print_token(obj: IsicContext):
obj.oauth._load()
click.echo(json.dumps(obj.oauth._session.token, indent=4))
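# Illustrative sketch only: exercising the `login` command with click's test runner.
# `_FakeContext` is a stand-in invented for this example; the real IsicContext is
# constructed by the CLI entry point and also carries the OAuth client.
def _example_invoke_login():
    from click.testing import CliRunner

    class _FakeContext:
        user = {'email': 'jane@example.com'}  # pretend we are already logged in
        oauth = None  # not touched on this code path

    result = CliRunner().invoke(user, ['login'], obj=_FakeContext())
    return result.output  # expected to be something like "Hello jane@example.com!\n"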
|
import json
import click
from isic_cli.cli.context import IsicContext
@click.group(short_help='Manage authentication with the ISIC Archive.')
@click.pass_obj
def user(ctx):
pass
@user.command()
@click.pass_obj
def login(obj: IsicContext):
"""Login to the ISIC Archive."""
if obj.user:
click.echo(f'Hello {obj.user["email"]}!')
else:
obj.oauth.login()
click.echo('Success!')
@user.command()
@click.pass_obj
def logout(obj: IsicContext):
"""Logout of the ISIC Archive."""
obj.oauth.logout()
@user.command(hidden=True)
@click.pass_obj
def print_token(obj: IsicContext):
obj.oauth._load()
click.echo(json.dumps(obj.oauth._session.token, indent=4))
|
#!/usr/bin/env python
import os
import subprocess
import sys
# Required third-party imports, must be specified in pyproject.toml.
import packaging.version
import setuptools
def process_options():
"""
Determine all runtime options, returning a dictionary of the results. The
keys are:
'rootdir': str
The root directory of the setup. Almost certainly the directory
that this setup.py file is contained in.
'release': bool
Is this a release build (True) or a local development build (False)
"""
options = {}
options['rootdir'] = os.path.dirname(os.path.abspath(__file__))
options = _determine_version(options)
return options
def _determine_version(options):
"""
Adds the 'short_version', 'version' and 'release' options.
Read from the VERSION file to discover the version. This should be a
    single-line file containing a valid Python package public identifier (see PEP
440), for example
4.5.2rc2
5.0.0
5.1.1a1
We do that here rather than in setup.cfg so we can apply the local
versioning number as well.
"""
version_filename = os.path.join(options['rootdir'], 'VERSION')
with open(version_filename, "r") as version_file:
version_string = version_file.read().strip()
version = packaging.version.parse(version_string)
if isinstance(version, packaging.version.LegacyVersion):
raise ValueError("invalid version: " + version_string)
options['short_version'] = str(version.public)
options['release'] = not version.is_devrelease
if not options['release']:
# Put the version string into canonical form, if it wasn't already.
version_string = str(version)
version_string += "+"
try:
git_out = subprocess.run(
('git', 'rev-parse', '--verify', '--short=7', 'HEAD'),
check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
)
git_hash = git_out.stdout.decode(sys.stdout.encoding).strip()
version_string += git_hash or "nogit"
# CalledProcessError is for if the git command fails for internal
# reasons (e.g. we're not in a git repository), OSError is for if
# something goes wrong when trying to run git (e.g. it's not installed,
# or a permission error).
except (subprocess.CalledProcessError, OSError):
version_string += "nogit"
options['version'] = version_string
return options
def create_version_py_file(options):
"""
Generate and write out the file version.py, which is used to produce the
'__version__' information for the module. This function will overwrite an
existing file at that location.
"""
filename = os.path.join(
options['rootdir'], 'src', 'qutip_tensorflow', 'version.py',
)
content = "\n".join([
"# This file is automatically generated during package setup.",
f"short_version = '{options["short_version"]}'",
f"version = '{options["version"]}'",
f"release = {options["release"]}",
])
with open(filename, 'w') as file:
print(content, file=file)
if __name__ == "__main__":
options = process_options()
create_version_py_file(options)
# Most of the kwargs to setup are defined in setup.cfg; the only ones we
# keep here are ones that we have done some compile-time processing on.
setuptools.setup(
version=options['version'],
)
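# Illustrative sketch only (never called by the build): how packaging.version drives
# the release/dev decision in _determine_version. The version strings are examples.
def _version_parsing_example():
    release = packaging.version.parse("5.0.0")
    dev = packaging.version.parse("5.1.1.dev3")
    assert not release.is_devrelease   # a final release: no local "+<githash>" suffix
    assert dev.is_devrelease           # a dev build: "+<githash>" (or "+nogit") appended
    assert dev.public == "5.1.1.dev3"  # the PEP 440 public identifier
    return release, dev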
|
#!/usr/bin/env python
import os
import subprocess
import sys
# Required third-party imports, must be specified in pyproject.toml.
import packaging.version
import setuptools
def process_options():
"""
Determine all runtime options, returning a dictionary of the results. The
keys are:
'rootdir': str
The root directory of the setup. Almost certainly the directory
that this setup.py file is contained in.
'release': bool
Is this a release build (True) or a local development build (False)
"""
options = {}
options['rootdir'] = os.path.dirname(os.path.abspath(__file__))
options = _determine_version(options)
return options
def _determine_version(options):
"""
Adds the 'short_version', 'version' and 'release' options.
Read from the VERSION file to discover the version. This should be a
    single-line file containing a valid Python package public identifier (see PEP
440), for example
4.5.2rc2
5.0.0
5.1.1a1
We do that here rather than in setup.cfg so we can apply the local
versioning number as well.
"""
version_filename = os.path.join(options['rootdir'], 'VERSION')
with open(version_filename, "r") as version_file:
version_string = version_file.read().strip()
version = packaging.version.parse(version_string)
if isinstance(version, packaging.version.LegacyVersion):
raise ValueError("invalid version: " + version_string)
options['short_version'] = str(version.public)
options['release'] = not version.is_devrelease
if not options['release']:
# Put the version string into canonical form, if it wasn't already.
version_string = str(version)
version_string += "+"
try:
git_out = subprocess.run(
('git', 'rev-parse', '--verify', '--short=7', 'HEAD'),
check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
)
git_hash = git_out.stdout.decode(sys.stdout.encoding).strip()
version_string += git_hash or "nogit"
# CalledProcessError is for if the git command fails for internal
# reasons (e.g. we're not in a git repository), OSError is for if
# something goes wrong when trying to run git (e.g. it's not installed,
# or a permission error).
except (subprocess.CalledProcessError, OSError):
version_string += "nogit"
options['version'] = version_string
return options
def create_version_py_file(options):
"""
Generate and write out the file version.py, which is used to produce the
'__version__' information for the module. This function will overwrite an
existing file at that location.
"""
filename = os.path.join(
options['rootdir'], 'src', 'qutip_tensorflow', 'version.py',
)
content = "\n".join([
"# This file is automatically generated during package setup.",
f"short_version = '{options['short_version']}'",
f"version = '{options['version']}'",
f"release = {options['release']}",
])
with open(filename, 'w') as file:
print(content, file=file)
if __name__ == "__main__":
options = process_options()
create_version_py_file(options)
# Most of the kwargs to setup are defined in setup.cfg; the only ones we
# keep here are ones that we have done some compile-time processing on.
setuptools.setup(
version=options['version'],
)
|
from typing import Dict, Union, List, Optional
import ray
from ray._raylet import ObjectRef
from ray._raylet import PlacementGroupID
from ray._private.utils import hex_to_binary
from ray.util.annotations import PublicAPI, DeveloperAPI
from ray.ray_constants import to_memory_units
from ray._private.client_mode_hook import client_mode_should_convert
from ray._private.client_mode_hook import client_mode_wrap
bundle_reservation_check = None
BUNDLE_RESOURCE_LABEL = "bundle"
# We need to import this method to use for ready API.
# But ray.remote is only available in runtime, and
# if we define this method inside ready method, this function is
# exported whenever ready is called, which can impact performance,
# https://github.com/ray-project/ray/issues/6240.
def _export_bundle_reservation_check_method_if_needed():
global bundle_reservation_check
if bundle_reservation_check:
return
@ray.remote(num_cpus=0)
def bundle_reservation_check_func(placement_group):
return placement_group
bundle_reservation_check = bundle_reservation_check_func
@PublicAPI
class PlacementGroup:
"""A handle to a placement group."""
@staticmethod
def empty() -> "PlacementGroup":
return PlacementGroup(PlacementGroupID.nil())
def __init__(self, id: PlacementGroupID, bundle_cache: Optional[List[Dict]] = None):
self.id = id
self.bundle_cache = bundle_cache
@property
def is_empty(self):
return self.id.is_nil()
def ready(self) -> ObjectRef:
"""Returns an ObjectRef to check ready status.
This API runs a small dummy task to wait for placement group creation.
        It is compatible with ray.get and ray.wait.
Example:
>>> import ray
>>> from ray.util.placement_group import PlacementGroup
>>> pg = PlacementGroup([{"CPU": 1}]) # doctest: +SKIP
>>> ray.get(pg.ready()) # doctest: +SKIP
>>> pg = PlacementGroup([{"CPU": 1}]) # doctest: +SKIP
>>> ray.wait([pg.ready()], timeout=0) # doctest: +SKIP
"""
self._fill_bundle_cache_if_needed()
_export_bundle_reservation_check_method_if_needed()
assert len(self.bundle_cache) != 0, (
"ready() cannot be called on placement group object with a "
"bundle length == 0, current bundle length: "
f"{len(self.bundle_cache)}"
)
return bundle_reservation_check.options(
placement_group=self, resources={BUNDLE_RESOURCE_LABEL: 0.001}
).remote(self)
def wait(self, timeout_seconds: Union[float, int]) -> bool:
"""Wait for the placement group to be ready within the specified time.
Args:
timeout_seconds(float|int): Timeout in seconds.
Return:
True if the placement group is created. False otherwise.
"""
return _call_placement_group_ready(self.id, timeout_seconds)
@property
def bundle_specs(self) -> List[Dict]:
"""List[Dict]: Return bundles belonging to this placement group."""
self._fill_bundle_cache_if_needed()
return self.bundle_cache
@property
def bundle_count(self) -> int:
self._fill_bundle_cache_if_needed()
return len(self.bundle_cache)
def _fill_bundle_cache_if_needed(self) -> None:
if not self.bundle_cache:
self.bundle_cache = _get_bundle_cache(self.id)
@client_mode_wrap
def _call_placement_group_ready(pg_id: PlacementGroupID, timeout_seconds: int) -> bool:
worker = ray.worker.global_worker
worker.check_connected()
return worker.core_worker.wait_placement_group_ready(pg_id, timeout_seconds)
@client_mode_wrap
def _get_bundle_cache(pg_id: PlacementGroupID) -> List[Dict]:
worker = ray.worker.global_worker
worker.check_connected()
return list(ray.state.state.placement_group_table(pg_id)["bundles"].values())
@PublicAPI
@client_mode_wrap
def placement_group(
bundles: List[Dict[str, float]],
strategy: str = "PACK",
name: str = "",
lifetime=None,
) -> PlacementGroup:
"""Asynchronously creates a PlacementGroup.
Args:
bundles(List[Dict]): A list of bundles which
represent the resources requirements.
strategy(str): The strategy to create the placement group.
- "PACK": Packs Bundles into as few nodes as possible.
- "SPREAD": Places Bundles across distinct nodes as even as possible.
- "STRICT_PACK": Packs Bundles into one node. The group is
not allowed to span multiple nodes.
- "STRICT_SPREAD": Packs Bundles across distinct nodes.
name(str): The name of the placement group.
        lifetime(str): Either `None`, which means the placement group will
            fate share with its creator and will be deleted once its creator
            is dead, or "detached", which means the placement group will
            live as a global object independent of the creator.
Raises:
ValueError if bundle type is not a list.
ValueError if empty bundle or empty resource bundles are given.
ValueError if the wrong lifetime arguments are given.
Return:
PlacementGroup: Placement group object.
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(bundles, list):
raise ValueError("The type of bundles must be list, got {}".format(bundles))
# Validate bundles
for bundle in bundles:
if len(bundle) == 0 or all(
resource_value == 0 for resource_value in bundle.values()
):
raise ValueError(
"Bundles cannot be an empty dictionary or "
f"resources with only 0 values. Bundles: {bundles}"
)
if "memory" in bundle.keys() and bundle["memory"] > 0:
# Make sure the memory resource can be
# transformed to memory unit.
to_memory_units(bundle["memory"], True)
if lifetime is None:
detached = False
elif lifetime == "detached":
detached = True
else:
raise ValueError(
"placement group `lifetime` argument must be either `None` or 'detached'"
)
placement_group_id = worker.core_worker.create_placement_group(
name, bundles, strategy, detached
)
return PlacementGroup(placement_group_id)
@PublicAPI
@client_mode_wrap
def remove_placement_group(placement_group: PlacementGroup) -> None:
"""Asynchronously remove placement group.
Args:
placement_group (PlacementGroup): The placement group to delete.
"""
assert placement_group is not None
worker = ray.worker.global_worker
worker.check_connected()
worker.core_worker.remove_placement_group(placement_group.id)
@PublicAPI
@client_mode_wrap
def get_placement_group(placement_group_name: str) -> PlacementGroup:
"""Get a placement group object with a global name.
Returns:
None if can't find a placement group with the given name.
The placement group object otherwise.
"""
if not placement_group_name:
raise ValueError("Please supply a non-empty value to get_placement_group")
worker = ray.worker.global_worker
worker.check_connected()
placement_group_info = ray.state.state.get_placement_group_by_name(
placement_group_name, worker.namespace
)
if placement_group_info is None:
raise ValueError(f"Failed to look up actor with name: {placement_group_name}")
else:
return PlacementGroup(
PlacementGroupID(hex_to_binary(placement_group_info["placement_group_id"]))
)
@DeveloperAPI
@client_mode_wrap
def placement_group_table(placement_group: PlacementGroup = None) -> dict:
"""Get the state of the placement group from GCS.
Args:
placement_group (PlacementGroup): placement group to see
states.
"""
worker = ray.worker.global_worker
worker.check_connected()
placement_group_id = placement_group.id if (placement_group is not None) else None
return ray.state.state.placement_group_table(placement_group_id)
@PublicAPI
def get_current_placement_group() -> Optional[PlacementGroup]:
"""Get the current placement group which a task or actor is using.
It returns None if there's no current placement group for the worker.
For example, if you call this method in your driver, it returns None
(because drivers never belong to any placement group).
Examples:
>>> import ray
>>> from ray.util.placement_group import PlacementGroup
>>> from ray.util.placement_group import get_current_placement_group
>>> @ray.remote # doctest: +SKIP
... def f(): # doctest: +SKIP
... # This will return the placement group the task f belongs to.
... # It means this pg will be identical to the pg created below.
... pg = get_current_placement_group() # doctest: +SKIP
>>> pg = PlacementGroup([{"CPU": 2}]) # doctest: +SKIP
>>> f.options(placement_group=pg).remote() # doctest: +SKIP
>>> # New script.
>>> ray.init() # doctest: +SKIP
>>> # New script doesn't belong to any placement group,
>>> # so it returns None.
>>> assert get_current_placement_group() is None # doctest: +SKIP
Return:
PlacementGroup: Placement group object.
None if the current task or actor wasn't
created with any placement group.
"""
if client_mode_should_convert(auto_init=True):
# Client mode is only a driver.
return None
worker = ray.worker.global_worker
worker.check_connected()
pg_id = worker.placement_group_id
if pg_id.is_nil():
return None
return PlacementGroup(pg_id)
def check_placement_group_index(
placement_group: PlacementGroup, bundle_index: int
) -> None:
assert placement_group is not None
if placement_group.id.is_nil():
if bundle_index != -1:
raise ValueError(
"If placement group is not set, "
"the value of bundle index must be -1."
)
elif bundle_index >= placement_group.bundle_count or bundle_index < -1:
raise ValueError(
f"placement group bundle index {bundle_index} "
f"is invalid. Valid placement group indexes: "
f"0-{placement_group.bundle_count}"
)
def _validate_resource_shape(
placement_group, resources, placement_resources, task_or_actor_repr
):
def valid_resource_shape(resources, bundle_specs):
"""
        Return True if the resource shape fits into at least one
        bundle spec; otherwise return False.
"""
for bundle in bundle_specs:
fit_in_bundle = True
for resource, requested_val in resources.items():
# Skip "bundle" resource as it is automatically added
# to all nodes with bundles by the placement group.
if resource == BUNDLE_RESOURCE_LABEL:
continue
if bundle.get(resource, 0) < requested_val:
fit_in_bundle = False
break
if fit_in_bundle:
# If resource request fits in any bundle, it is valid.
return True
return False
bundles = placement_group.bundle_specs
resources_valid = valid_resource_shape(resources, bundles)
placement_resources_valid = valid_resource_shape(placement_resources, bundles)
if not resources_valid:
raise ValueError(
f"Cannot schedule {task_or_actor_repr} with "
"the placement group because the resource request "
f"{resources} cannot fit into any bundles for "
f"the placement group, {bundles}."
)
if not placement_resources_valid:
# Happens for the default actor case.
# placement_resources is not an exposed concept to users,
# so we should write more specialized error messages.
raise ValueError(
f"Cannot schedule {task_or_actor_repr} with "
"the placement group because the actor requires "
f"{placement_resources.get("CPU", 0)} CPU for "
"creation, but it cannot "
f"fit into any bundles for the placement group, "
f"{bundles}. Consider "
"creating a placement group with CPU resources."
)
def configure_placement_group_based_on_context(
placement_group_capture_child_tasks: bool,
bundle_index: int,
resources: Dict,
placement_resources: Dict,
task_or_actor_repr: str,
placement_group: Union[PlacementGroup, str, None] = "default",
) -> PlacementGroup:
"""Configure the placement group based on the given context.
Based on the given context, this API returns the placement group instance
for task/actor scheduling.
Params:
placement_group_capture_child_tasks: Whether or not the
placement group needs to be captured from the global
context.
bundle_index: The bundle index for tasks/actor scheduling.
resources: The scheduling resources.
placement_resources: The scheduling placement resources for
actors.
task_or_actor_repr: The repr of task or actor
function/class descriptor.
placement_group: The placement group instance.
- "default": Default placement group argument. Currently,
                the default behavior is to capture the parent task's
placement group if placement_group_capture_child_tasks
is set.
- None: means placement group is explicitly not configured.
- Placement group instance: In this case, do nothing.
Returns:
Placement group instance based on the given context.
Raises:
ValueError: If the bundle index is invalid for the placement group
or the requested resources shape doesn't fit to any
bundles.
"""
# Validate inputs.
assert placement_group_capture_child_tasks is not None
assert resources is not None
# Validate and get the PlacementGroup instance.
# Placement group could be None, default, or placement group.
# Default behavior is "do not capture child tasks".
if placement_group != "default":
if not placement_group:
placement_group = PlacementGroup.empty()
elif placement_group == "default":
if placement_group_capture_child_tasks:
placement_group = get_current_placement_group()
else:
placement_group = PlacementGroup.empty()
if not placement_group:
placement_group = PlacementGroup.empty()
assert isinstance(placement_group, PlacementGroup)
# Validate the index.
check_placement_group_index(placement_group, bundle_index)
# Validate the shape.
if not placement_group.is_empty:
_validate_resource_shape(
placement_group, resources, placement_resources, task_or_actor_repr
)
return placement_group
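# Illustrative sketch only (not part of this module's API): a minimal end-to-end use
# of the helpers above. It assumes ray.init() has already been called on a cluster
# with at least one free CPU; the task body is invented for the example.
def _placement_group_usage_sketch():
    # Reserve a single bundle with one CPU, packed onto one node.
    pg = placement_group([{"CPU": 1}], strategy="STRICT_PACK")
    # Block until the bundle has actually been reserved.
    ray.get(pg.ready())

    @ray.remote(num_cpus=1)
    def f():
        return "scheduled inside bundle 0"

    # Pin the task to bundle 0 of the reserved group.
    result = ray.get(
        f.options(placement_group=pg, placement_group_bundle_index=0).remote()
    )
    # Release the reserved resources when done.
    remove_placement_group(pg)
    return result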
|
from typing import Dict, Union, List, Optional
import ray
from ray._raylet import ObjectRef
from ray._raylet import PlacementGroupID
from ray._private.utils import hex_to_binary
from ray.util.annotations import PublicAPI, DeveloperAPI
from ray.ray_constants import to_memory_units
from ray._private.client_mode_hook import client_mode_should_convert
from ray._private.client_mode_hook import client_mode_wrap
bundle_reservation_check = None
BUNDLE_RESOURCE_LABEL = "bundle"
# We need to import this method to use for ready API.
# But ray.remote is only available in runtime, and
# if we define this method inside ready method, this function is
# exported whenever ready is called, which can impact performance,
# https://github.com/ray-project/ray/issues/6240.
def _export_bundle_reservation_check_method_if_needed():
global bundle_reservation_check
if bundle_reservation_check:
return
@ray.remote(num_cpus=0)
def bundle_reservation_check_func(placement_group):
return placement_group
bundle_reservation_check = bundle_reservation_check_func
@PublicAPI
class PlacementGroup:
"""A handle to a placement group."""
@staticmethod
def empty() -> "PlacementGroup":
return PlacementGroup(PlacementGroupID.nil())
def __init__(self, id: PlacementGroupID, bundle_cache: Optional[List[Dict]] = None):
self.id = id
self.bundle_cache = bundle_cache
@property
def is_empty(self):
return self.id.is_nil()
def ready(self) -> ObjectRef:
"""Returns an ObjectRef to check ready status.
This API runs a small dummy task to wait for placement group creation.
        It is compatible with ray.get and ray.wait.
Example:
>>> import ray
>>> from ray.util.placement_group import PlacementGroup
>>> pg = PlacementGroup([{"CPU": 1}]) # doctest: +SKIP
>>> ray.get(pg.ready()) # doctest: +SKIP
>>> pg = PlacementGroup([{"CPU": 1}]) # doctest: +SKIP
>>> ray.wait([pg.ready()], timeout=0) # doctest: +SKIP
"""
self._fill_bundle_cache_if_needed()
_export_bundle_reservation_check_method_if_needed()
assert len(self.bundle_cache) != 0, (
"ready() cannot be called on placement group object with a "
"bundle length == 0, current bundle length: "
f"{len(self.bundle_cache)}"
)
return bundle_reservation_check.options(
placement_group=self, resources={BUNDLE_RESOURCE_LABEL: 0.001}
).remote(self)
def wait(self, timeout_seconds: Union[float, int]) -> bool:
"""Wait for the placement group to be ready within the specified time.
Args:
timeout_seconds(float|int): Timeout in seconds.
Return:
True if the placement group is created. False otherwise.
"""
return _call_placement_group_ready(self.id, timeout_seconds)
@property
def bundle_specs(self) -> List[Dict]:
"""List[Dict]: Return bundles belonging to this placement group."""
self._fill_bundle_cache_if_needed()
return self.bundle_cache
@property
def bundle_count(self) -> int:
self._fill_bundle_cache_if_needed()
return len(self.bundle_cache)
def _fill_bundle_cache_if_needed(self) -> None:
if not self.bundle_cache:
self.bundle_cache = _get_bundle_cache(self.id)
@client_mode_wrap
def _call_placement_group_ready(pg_id: PlacementGroupID, timeout_seconds: int) -> bool:
worker = ray.worker.global_worker
worker.check_connected()
return worker.core_worker.wait_placement_group_ready(pg_id, timeout_seconds)
@client_mode_wrap
def _get_bundle_cache(pg_id: PlacementGroupID) -> List[Dict]:
worker = ray.worker.global_worker
worker.check_connected()
return list(ray.state.state.placement_group_table(pg_id)["bundles"].values())
@PublicAPI
@client_mode_wrap
def placement_group(
bundles: List[Dict[str, float]],
strategy: str = "PACK",
name: str = "",
lifetime=None,
) -> PlacementGroup:
"""Asynchronously creates a PlacementGroup.
Args:
bundles(List[Dict]): A list of bundles which
represent the resources requirements.
strategy(str): The strategy to create the placement group.
- "PACK": Packs Bundles into as few nodes as possible.
- "SPREAD": Places Bundles across distinct nodes as even as possible.
- "STRICT_PACK": Packs Bundles into one node. The group is
not allowed to span multiple nodes.
- "STRICT_SPREAD": Packs Bundles across distinct nodes.
name(str): The name of the placement group.
        lifetime(str): Either `None`, which means the placement group will
            fate share with its creator and will be deleted once its creator
            is dead, or "detached", which means the placement group will
            live as a global object independent of the creator.
Raises:
ValueError if bundle type is not a list.
ValueError if empty bundle or empty resource bundles are given.
ValueError if the wrong lifetime arguments are given.
Return:
PlacementGroup: Placement group object.
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(bundles, list):
raise ValueError("The type of bundles must be list, got {}".format(bundles))
# Validate bundles
for bundle in bundles:
if len(bundle) == 0 or all(
resource_value == 0 for resource_value in bundle.values()
):
raise ValueError(
"Bundles cannot be an empty dictionary or "
f"resources with only 0 values. Bundles: {bundles}"
)
if "memory" in bundle.keys() and bundle["memory"] > 0:
# Make sure the memory resource can be
# transformed to memory unit.
to_memory_units(bundle["memory"], True)
if lifetime is None:
detached = False
elif lifetime == "detached":
detached = True
else:
raise ValueError(
"placement group `lifetime` argument must be either `None` or 'detached'"
)
placement_group_id = worker.core_worker.create_placement_group(
name, bundles, strategy, detached
)
return PlacementGroup(placement_group_id)
@PublicAPI
@client_mode_wrap
def remove_placement_group(placement_group: PlacementGroup) -> None:
"""Asynchronously remove placement group.
Args:
placement_group (PlacementGroup): The placement group to delete.
"""
assert placement_group is not None
worker = ray.worker.global_worker
worker.check_connected()
worker.core_worker.remove_placement_group(placement_group.id)
@PublicAPI
@client_mode_wrap
def get_placement_group(placement_group_name: str) -> PlacementGroup:
"""Get a placement group object with a global name.
Returns:
None if can't find a placement group with the given name.
The placement group object otherwise.
"""
if not placement_group_name:
raise ValueError("Please supply a non-empty value to get_placement_group")
worker = ray.worker.global_worker
worker.check_connected()
placement_group_info = ray.state.state.get_placement_group_by_name(
placement_group_name, worker.namespace
)
if placement_group_info is None:
raise ValueError(f"Failed to look up actor with name: {placement_group_name}")
else:
return PlacementGroup(
PlacementGroupID(hex_to_binary(placement_group_info["placement_group_id"]))
)
@DeveloperAPI
@client_mode_wrap
def placement_group_table(placement_group: PlacementGroup = None) -> dict:
"""Get the state of the placement group from GCS.
Args:
placement_group (PlacementGroup): placement group to see
states.
"""
worker = ray.worker.global_worker
worker.check_connected()
placement_group_id = placement_group.id if (placement_group is not None) else None
return ray.state.state.placement_group_table(placement_group_id)
@PublicAPI
def get_current_placement_group() -> Optional[PlacementGroup]:
"""Get the current placement group which a task or actor is using.
It returns None if there's no current placement group for the worker.
For example, if you call this method in your driver, it returns None
(because drivers never belong to any placement group).
Examples:
>>> import ray
>>> from ray.util.placement_group import PlacementGroup
>>> from ray.util.placement_group import get_current_placement_group
>>> @ray.remote # doctest: +SKIP
... def f(): # doctest: +SKIP
... # This will return the placement group the task f belongs to.
... # It means this pg will be identical to the pg created below.
... pg = get_current_placement_group() # doctest: +SKIP
>>> pg = PlacementGroup([{"CPU": 2}]) # doctest: +SKIP
>>> f.options(placement_group=pg).remote() # doctest: +SKIP
>>> # New script.
>>> ray.init() # doctest: +SKIP
>>> # New script doesn't belong to any placement group,
>>> # so it returns None.
>>> assert get_current_placement_group() is None # doctest: +SKIP
Return:
PlacementGroup: Placement group object.
None if the current task or actor wasn't
created with any placement group.
"""
if client_mode_should_convert(auto_init=True):
# Client mode is only a driver.
return None
worker = ray.worker.global_worker
worker.check_connected()
pg_id = worker.placement_group_id
if pg_id.is_nil():
return None
return PlacementGroup(pg_id)
def check_placement_group_index(
placement_group: PlacementGroup, bundle_index: int
) -> None:
assert placement_group is not None
if placement_group.id.is_nil():
if bundle_index != -1:
raise ValueError(
"If placement group is not set, "
"the value of bundle index must be -1."
)
elif bundle_index >= placement_group.bundle_count or bundle_index < -1:
raise ValueError(
f"placement group bundle index {bundle_index} "
f"is invalid. Valid placement group indexes: "
f"0-{placement_group.bundle_count}"
)
def _validate_resource_shape(
placement_group, resources, placement_resources, task_or_actor_repr
):
def valid_resource_shape(resources, bundle_specs):
"""
        Return True if the resource shape fits into at least one
        bundle spec; otherwise return False.
"""
for bundle in bundle_specs:
fit_in_bundle = True
for resource, requested_val in resources.items():
# Skip "bundle" resource as it is automatically added
# to all nodes with bundles by the placement group.
if resource == BUNDLE_RESOURCE_LABEL:
continue
if bundle.get(resource, 0) < requested_val:
fit_in_bundle = False
break
if fit_in_bundle:
# If resource request fits in any bundle, it is valid.
return True
return False
bundles = placement_group.bundle_specs
resources_valid = valid_resource_shape(resources, bundles)
placement_resources_valid = valid_resource_shape(placement_resources, bundles)
if not resources_valid:
raise ValueError(
f"Cannot schedule {task_or_actor_repr} with "
"the placement group because the resource request "
f"{resources} cannot fit into any bundles for "
f"the placement group, {bundles}."
)
if not placement_resources_valid:
# Happens for the default actor case.
# placement_resources is not an exposed concept to users,
# so we should write more specialized error messages.
raise ValueError(
f"Cannot schedule {task_or_actor_repr} with "
"the placement group because the actor requires "
f"{placement_resources.get('CPU', 0)} CPU for "
"creation, but it cannot "
f"fit into any bundles for the placement group, "
f"{bundles}. Consider "
"creating a placement group with CPU resources."
)
def configure_placement_group_based_on_context(
placement_group_capture_child_tasks: bool,
bundle_index: int,
resources: Dict,
placement_resources: Dict,
task_or_actor_repr: str,
placement_group: Union[PlacementGroup, str, None] = "default",
) -> PlacementGroup:
"""Configure the placement group based on the given context.
Based on the given context, this API returns the placement group instance
for task/actor scheduling.
Params:
placement_group_capture_child_tasks: Whether or not the
placement group needs to be captured from the global
context.
bundle_index: The bundle index for tasks/actor scheduling.
resources: The scheduling resources.
placement_resources: The scheduling placement resources for
actors.
task_or_actor_repr: The repr of task or actor
function/class descriptor.
placement_group: The placement group instance.
- "default": Default placement group argument. Currently,
                the default behavior is to capture the parent task's
placement group if placement_group_capture_child_tasks
is set.
- None: means placement group is explicitly not configured.
- Placement group instance: In this case, do nothing.
Returns:
Placement group instance based on the given context.
Raises:
ValueError: If the bundle index is invalid for the placement group
or the requested resources shape doesn't fit to any
bundles.
"""
# Validate inputs.
assert placement_group_capture_child_tasks is not None
assert resources is not None
# Validate and get the PlacementGroup instance.
# Placement group could be None, default, or placement group.
# Default behavior is "do not capture child tasks".
if placement_group != "default":
if not placement_group:
placement_group = PlacementGroup.empty()
elif placement_group == "default":
if placement_group_capture_child_tasks:
placement_group = get_current_placement_group()
else:
placement_group = PlacementGroup.empty()
if not placement_group:
placement_group = PlacementGroup.empty()
assert isinstance(placement_group, PlacementGroup)
# Validate the index.
check_placement_group_index(placement_group, bundle_index)
# Validate the shape.
if not placement_group.is_empty:
_validate_resource_shape(
placement_group, resources, placement_resources, task_or_actor_repr
)
return placement_group
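# Illustrative sketch only: the "explicitly disabled" branch of
# configure_placement_group_based_on_context above. Passing placement_group=None yields
# the empty sentinel group; the argument values below are invented for the example.
def _example_configure_none():
    # None means "no placement group": the helper returns the empty sentinel
    # group, bundle_index must stay at -1, and resource-shape validation is skipped.
    pg = configure_placement_group_based_on_context(
        placement_group_capture_child_tasks=False,
        bundle_index=-1,
        resources={"CPU": 1},
        placement_resources={"CPU": 1},
        task_or_actor_repr="example_task",
        placement_group=None,
    )
    return pg.is_empty  # True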
|
# -*- coding: utf-8 -*-
#
# conda-forge documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 1 01:44:13 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import csv
import os
import sys
import datetime
import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'myst_parser',
'sphinx.ext.todo',
'sphinxcontrib.fulltoc',
'sphinxcontrib.newsfeed',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'conda-forge'
copyright = u'2016-%s, conda-forge' % datetime.datetime.now().strftime("%Y")
author = u'conda-forge'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = datetime.datetime.now().strftime("%Y.%m")
# The full version, including alpha/beta/rc tags.
release = datetime.datetime.now().strftime("%Y.%m.%d")
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# ---- Options for link validation --------
anchor_check_fps = [
r'https://conda-forge.org/status/#armosxaddition$',
r'https://github.com/conda-forge/conda-smithy/blob/main/CHANGELOG.rst#v3130$',
r'https://github.com/.*#L\d+-L\d+$',
r'https://github.com/conda-forge/miniforge/#download$',
r'https://github.com/conda-incubator/grayskull#introduction$',
]
linkcheck_exclude_documents = [r'.*/minutes/.*']
linkcheck_ignore = [
r'https://anaconda.org/?$', # 403 forbidden
r'https://cloudflare.com/learning/cdn/what-is-a-cdn/?$', # 403 forbidden
r'https://gitter.im/conda-forge/core$', # private team
r'https://polys.me/?$', # 403 forbidden
] + anchor_check_fps
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'cloud'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'sidebar_localtoc_title': 'Overview',
'externalrefs': False,
'relbarbgcolor': '#000000',
'footerbgcolor': '#FFFFFF',
'sectionbgcolor': '#cd5c5c',
'linkcolor': 'rgb(31, 158, 111)',
'sidebarlinkcolor': 'rgb(31, 158, 111)',
'codebgcolor': '#F2F2F2',
'sidebarbgcolor': '#F2F2F2',
'logotarget': '../../../index',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [csp.get_theme_dir()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'conda-forge v0.0.1a1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo_black_on_trans.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# For 'zh', users can customize the `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'conda-forgedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'conda-forge.tex', u'conda-forge Documentation',
u'conda-forge', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'conda-forge', u'conda-forge Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'conda-forge', u'conda-forge Documentation',
author, 'conda-forge', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def rstjinja(app, docname, source):
def get_formated_names(path_file):
with open(path_file, "r") as csv_file:
dict_csv = csv.DictReader(csv_file)
sorted_csv = sorted(dict_csv, key=lambda d: d["name"])
return "\n".join(
f"* `{m["name"]},"
f" @{m["github_username"]}"
f" <https://github.com/{m["github_username"]}>`__" for m in sorted_csv
)
if (
app.builder.format != "html"
or os.path.basename(docname) != "governance"
):
return
src = source[0]
current_file = os.path.dirname(__file__)
context = app.config.html_context
context["core_members"] = get_formated_names(
os.path.join(current_file, "core.csv")
)
context["emeritus_members"] = get_formated_names(
os.path.join(current_file, "emeritus.csv")
)
rendered = app.builder.templates.render_string(src, context)
source[0] = rendered
def setup(app):
app.connect("source-read", rstjinja)
|
# -*- coding: utf-8 -*-
#
# conda-forge documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 1 01:44:13 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import csv
import os
import sys
import datetime
import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'myst_parser',
'sphinx.ext.todo',
'sphinxcontrib.fulltoc',
'sphinxcontrib.newsfeed',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'conda-forge'
copyright = u'2016-%s, conda-forge' % datetime.datetime.now().strftime("%Y")
author = u'conda-forge'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = datetime.datetime.now().strftime("%Y.%m")
# The full version, including alpha/beta/rc tags.
release = datetime.datetime.now().strftime("%Y.%m.%d")
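# Worked example (values depend on the build date): with the two strftime
# calls above, a build on 1 June 2016 would yield version == "2016.06" and
# release == "2016.06.01".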
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# ---- Options for link validation --------
anchor_check_fps = [
r'https://conda-forge.org/status/#armosxaddition$',
r'https://github.com/conda-forge/conda-smithy/blob/main/CHANGELOG.rst#v3130$',
r'https://github.com/.*#L\d+-L\d+$',
r'https://github.com/conda-forge/miniforge/#download$',
r'https://github.com/conda-incubator/grayskull#introduction$',
]
linkcheck_exclude_documents = [r'.*/minutes/.*']
linkcheck_ignore = [
r'https://anaconda.org/?$', # 403 forbidden
r'https://cloudflare.com/learning/cdn/what-is-a-cdn/?$', # 403 forbidden
r'https://gitter.im/conda-forge/core$', # private team
r'https://polys.me/?$', # 403 forbidden
] + anchor_check_fps
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'cloud'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'sidebar_localtoc_title': 'Overview',
'externalrefs': False,
'relbarbgcolor': '#000000',
'footerbgcolor': '#FFFFFF',
'sectionbgcolor': '#cd5c5c',
'linkcolor': 'rgb(31, 158, 111)',
'sidebarlinkcolor': 'rgb(31, 158, 111)',
'codebgcolor': '#F2F2F2',
'sidebarbgcolor': '#F2F2F2',
'logotarget': '../../../index',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [csp.get_theme_dir()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'conda-forge v0.0.1a1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo_black_on_trans.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# For 'zh', users can customize the `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'conda-forgedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'conda-forge.tex', u'conda-forge Documentation',
u'conda-forge', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'conda-forge', u'conda-forge Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'conda-forge', u'conda-forge Documentation',
author, 'conda-forge', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def rstjinja(app, docname, source):
def get_formated_names(path_file):
with open(path_file, "r") as csv_file:
dict_csv = csv.DictReader(csv_file)
sorted_csv = sorted(dict_csv, key=lambda d: d["name"])
return "\n".join(
f"* `{m['name']},"
f" @{m['github_username']}"
f" <https://github.com/{m['github_username']}>`__" for m in sorted_csv
)
if (
app.builder.format != "html"
or os.path.basename(docname) != "governance"
):
return
src = source[0]
current_file = os.path.dirname(__file__)
context = app.config.html_context
context["core_members"] = get_formated_names(
os.path.join(current_file, "core.csv")
)
context["emeritus_members"] = get_formated_names(
os.path.join(current_file, "emeritus.csv")
)
rendered = app.builder.templates.render_string(src, context)
source[0] = rendered
def setup(app):
app.connect("source-read", rstjinja)
|
"""Derive the license information and publish in docs."""
import functools
import json
import pathlib
import pkg_resources
import string
import subprocess # nosec
from typing import List, Tuple
__all__ = ['dependency_tree_console_text', 'direct_dependencies_table', 'indirect_dependencies_table']
ENCODING = 'utf-8'
TP_PATH = pathlib.Path('docs', 'third-party')
TABLE_KEYS = (('Name', 'URL'), 'Version', 'License', 'Author', 'Description')
HEADER_LABELS = ('Name', 'Version', 'License', 'Author', 'Description (from packaging data)')
FALLBACK_URLS = {
'typing-extensions': 'https://github.com/python/typing/blob/master/typing_extensions/README.rst',
}
TARGET = """\
__version__ = '$version$+parent.$revision$'\
"""
@functools.lru_cache()
def _fetch_direct_dependency_names():
with pathlib.Path('requirements.txt').open() as requirements_txt:
install_requires = [
str(requirement)
for requirement
in pkg_resources.parse_requirements(requirements_txt)
]
return install_requires
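# Illustrative behaviour (hypothetical requirements.txt content, not part of
# this snippet): a line such as "atlassian-python-api>=3.14.1" is parsed by
# pkg_resources into a requirement whose str() form is returned unchanged;
# the caller below then keeps only letters, '-' and '_', recovering the bare
# package name "atlassian-python-api" to hand to pip-licenses.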
def _generate_dependency_information() -> None:
"""Use pip-licenses for creation of diverse databases and graphs."""
install_requires = _fetch_direct_dependency_names()
tokens = set(list(string.ascii_letters + '-_'))
direct_names = [''.join(c for c in term if c in tokens) for term in install_requires]
direct_vector = [
'pip-licenses', '--format', 'json', '-p', *direct_names,
'--with-authors', '--with-description', '--with-urls', '--with-license-file', '--with-notice-file',
'--output-file', str(TP_PATH / 'direct-dependency-licenses.json')]
noise = subprocess.run(direct_vector, capture_output=True, encoding=ENCODING, text=True).stdout.strip() # nosec
if not noise.startswith('created path: ') or not noise.endswith('direct-dependency-licenses.json'):
raise RuntimeError(noise)
indirect_names = [ # TODO(sthagen) these indirect deps may diverge ...
'appdirs',
'attrs',
'cattrs',
'certifi',
'charset-normalizer',
'click',
'deprecated',
'idna',
'oauthlib',
'requests',
'requests-oauthlib',
'six',
'typing-extensions',
'url-normalize',
'urllib3',
'wrapt',
]
full_vector = [
'pip-licenses', '--format', 'json', '-p', *direct_names, *indirect_names,
'--with-authors', '--with-description', '--with-urls', '--with-license-file', '--with-notice-file',
'--output-file', str(TP_PATH / 'all-dependency-licenses.json')]
noise = subprocess.run(full_vector, capture_output=True, encoding=ENCODING, text=True).stdout.strip() # nosec
if not noise.startswith('created path: ') or not noise.endswith('all-dependency-licenses.json'):
raise RuntimeError(noise)
"""
pipdeptree --packages antlr4-python3-runtime,atlassian-python-api,cogapp,jmespath,pydantic,requests-cache,typer --graph-output svg > docs/third-party/package-dependency-tree.svg
pipdeptree --packages antlr4-python3-runtime,atlassian-python-api,cogapp,jmespath,pydantic,requests-cache,typer --json-tree --warn silence > docs/third-party/package-dependency-tree.json
"""
base_vector = ['pipdeptree', '--packages', ','.join(direct_names)]
jobs = (
(TP_PATH / 'package-dependency-tree.dot.txt', base_vector + ['--graph-output', 'dot']),
(TP_PATH / 'package-dependency-tree.svg', base_vector + ['--graph-output', 'svg']),
(TP_PATH / 'package-dependency-tree.json', base_vector + ['--json-tree', '--warn', 'silence']),
(TP_PATH / 'package-dependency-tree.console.txt', base_vector + ['--warn', 'silence']),
)
for target, vector in jobs:
plot = subprocess.run(vector, capture_output=True, encoding=ENCODING, text=True).stdout.strip() # nosec
target.write_text(plot, encoding=ENCODING)
@functools.lru_cache()
def _fetch_dependencies(direct_only: bool = True):
db = 'direct-dependency-licenses.json' if direct_only else 'all-dependency-licenses.json'
dep_json_path = pathlib.Path('docs', 'third-party') / db
with open(dep_json_path, 'rt', encoding=ENCODING) as handle:
data = json.load(handle)
return data
def _markdown_table(table: List[Tuple[str, str, str, str, str]], header_labels=HEADER_LABELS) -> str:
"""Create the gfm table as string."""
columns = header_labels
col_wid = {key: len(key) for key in columns}
for slot, record in enumerate(table):
for key, cell in zip(columns, record):
col_wid[key] = max(len(cell), col_wid[key])
header_cells = [key.ljust(col_wid[key]) for key in columns]
header = f'| {' | '.join(header_cells)} |'
separator_cells = ['-' * (col_wid[key] + 1) for key in columns]
separator = f'|:{'|:'.join(separator_cells)}|'
rows = [f'| {' | '.join(str(v).ljust(col_wid[k]) for k, v in zip(columns, line))} |' for line in table]
return '\n'.join([header] + [separator] + rows)
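# Shape of the rendered table (illustrative only; the real column widths are
# padded to the longest cell in each column):
#
#   | Name | Version | License | Author | Description (from packaging data) |
#   |:-----|:--------|:--------|:-------|:----------------------------------|
#   | ...  | ...     | ...     | ...    | ...                               |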
def _extract_rows(data):
rows = []
for record in data:
nam = record['Name']
url = record.get('URL', '')
if url == 'UNKNOWN':
url = FALLBACK_URLS.get(nam, '')
nam_e = f'[{nam}]({url})' if url else nam
ver = record['Version']
ver_sion = f'[{ver}](https://pypi.org/project/{nam}/{ver}/)'
lic = record['License']
aut = record['Author']
des = record['Description']
rows.append((nam_e, ver_sion, lic, aut, des))
rows.sort()
return rows
def direct_dependencies_table() -> None:
"""Fill in the data from the direct dependencies."""
_generate_dependency_information()
print(_markdown_table(_extract_rows(_fetch_dependencies(direct_only=True))))
def indirect_dependencies_table() -> None:
"""Fill in the data from the indirect dependencies."""
direct_data = _fetch_dependencies(direct_only=True)
direct_names = tuple(record['Name'] for record in direct_data)
indirect_only_data = [rec for rec in _fetch_dependencies(direct_only=False) if rec['Name'] not in direct_names]
print(_markdown_table(_extract_rows(indirect_only_data)))
def dependency_tree_console_text():
"""Fill in the pipdeptree console output minus any warnings."""
console_tree = (TP_PATH / 'package-dependency-tree.console.txt').read_text(encoding=ENCODING).strip()
fence = '````'
print(f'{fence}console')
print(console_tree)
print(fence)
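# Usage note (an assumption -- the docs that consume this module are not part
# of this snippet): the three public functions print Markdown/console blocks
# to stdout, which suggests they are driven by cogapp, e.g. from a hypothetical
# docs page:
#
#   <!--[[[cog
#   import gen_licenses
#   gen_licenses.direct_dependencies_table()
#   ]]]-->
#   <!--[[[end]]]-->
#
# where `gen_licenses` stands in for whatever module name the docs actually
# import this file under.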
|
"""Derive the license information and publish in docs."""
import functools
import json
import pathlib
import pkg_resources
import string
import subprocess # nosec
from typing import List, Tuple
__all__ = ['dependency_tree_console_text', 'direct_dependencies_table', 'indirect_dependencies_table']
ENCODING = 'utf-8'
TP_PATH = pathlib.Path('docs', 'third-party')
TABLE_KEYS = (('Name', 'URL'), 'Version', 'License', 'Author', 'Description')
HEADER_LABELS = ('Name', 'Version', 'License', 'Author', 'Description (from packaging data)')
FALLBACK_URLS = {
'typing-extensions': 'https://github.com/python/typing/blob/master/typing_extensions/README.rst',
}
TARGET = """\
__version__ = '$version$+parent.$revision$'\
"""
@functools.lru_cache()
def _fetch_direct_dependency_names():
with pathlib.Path('requirements.txt').open() as requirements_txt:
install_requires = [
str(requirement)
for requirement
in pkg_resources.parse_requirements(requirements_txt)
]
return install_requires
def _generate_dependency_information() -> None:
"""Use pip-licenses for creation of diverse databases and graphs."""
install_requires = _fetch_direct_dependency_names()
tokens = set(list(string.ascii_letters + '-_'))
direct_names = [''.join(c for c in term if c in tokens) for term in install_requires]
direct_vector = [
'pip-licenses', '--format', 'json', '-p', *direct_names,
'--with-authors', '--with-description', '--with-urls', '--with-license-file', '--with-notice-file',
'--output-file', str(TP_PATH / 'direct-dependency-licenses.json')]
noise = subprocess.run(direct_vector, capture_output=True, encoding=ENCODING, text=True).stdout.strip() # nosec
if not noise.startswith('created path: ') or not noise.endswith('direct-dependency-licenses.json'):
raise RuntimeError(noise)
indirect_names = [ # TODO(sthagen) these indirect deps may diverge ...
'appdirs',
'attrs',
'cattrs',
'certifi',
'charset-normalizer',
'click',
'deprecated',
'idna',
'oauthlib',
'requests',
'requests-oauthlib',
'six',
'typing-extensions',
'url-normalize',
'urllib3',
'wrapt',
]
full_vector = [
'pip-licenses', '--format', 'json', '-p', *direct_names, *indirect_names,
'--with-authors', '--with-description', '--with-urls', '--with-license-file', '--with-notice-file',
'--output-file', str(TP_PATH / 'all-dependency-licenses.json')]
noise = subprocess.run(full_vector, capture_output=True, encoding=ENCODING, text=True).stdout.strip() # nosec
if not noise.startswith('created path: ') or not noise.endswith('all-dependency-licenses.json'):
raise RuntimeError(noise)
"""
pipdeptree --packages antlr4-python3-runtime,atlassian-python-api,cogapp,jmespath,pydantic,requests-cache,typer --graph-output svg > docs/third-party/package-dependency-tree.svg
pipdeptree --packages antlr4-python3-runtime,atlassian-python-api,cogapp,jmespath,pydantic,requests-cache,typer --json-tree --warn silence > docs/third-party/package-dependency-tree.json
"""
base_vector = ['pipdeptree', '--packages', ','.join(direct_names)]
jobs = (
(TP_PATH / 'package-dependency-tree.dot.txt', base_vector + ['--graph-output', 'dot']),
(TP_PATH / 'package-dependency-tree.svg', base_vector + ['--graph-output', 'svg']),
(TP_PATH / 'package-dependency-tree.json', base_vector + ['--json-tree', '--warn', 'silence']),
(TP_PATH / 'package-dependency-tree.console.txt', base_vector + ['--warn', 'silence']),
)
for target, vector in jobs:
plot = subprocess.run(vector, capture_output=True, encoding=ENCODING, text=True).stdout.strip() # nosec
target.write_text(plot, encoding=ENCODING)
@functools.lru_cache()
def _fetch_dependencies(direct_only: bool = True):
db = 'direct-dependency-licenses.json' if direct_only else 'all-dependency-licenses.json'
dep_json_path = pathlib.Path('docs', 'third-party') / db
with open(dep_json_path, 'rt', encoding=ENCODING) as handle:
data = json.load(handle)
return data
def _markdown_table(table: List[Tuple[str, str, str, str, str]], header_labels=HEADER_LABELS) -> str:
"""Create the gfm table as string."""
columns = header_labels
col_wid = {key: len(key) for key in columns}
for slot, record in enumerate(table):
for key, cell in zip(columns, record):
col_wid[key] = max(len(cell), col_wid[key])
header_cells = [key.ljust(col_wid[key]) for key in columns]
header = f'| {" | ".join(header_cells)} |'
separator_cells = ['-' * (col_wid[key] + 1) for key in columns]
separator = f'|:{"|:".join(separator_cells)}|'
rows = [f'| {" | ".join(str(v).ljust(col_wid[k]) for k, v in zip(columns, line))} |' for line in table]
return '\n'.join([header] + [separator] + rows)
def _extract_rows(data):
rows = []
for record in data:
nam = record['Name']
url = record.get('URL', '')
if url == 'UNKNOWN':
url = FALLBACK_URLS.get(nam, '')
nam_e = f'[{nam}]({url})' if url else nam
ver = record['Version']
ver_sion = f'[{ver}](https://pypi.org/project/{nam}/{ver}/)'
lic = record['License']
aut = record['Author']
des = record['Description']
rows.append((nam_e, ver_sion, lic, aut, des))
rows.sort()
return rows
def direct_dependencies_table() -> None:
"""Fill in the data from the direct dependencies."""
_generate_dependency_information()
print(_markdown_table(_extract_rows(_fetch_dependencies(direct_only=True))))
def indirect_dependencies_table() -> None:
"""Fill in the data from the indirect dependencies."""
direct_data = _fetch_dependencies(direct_only=True)
direct_names = tuple(record['Name'] for record in direct_data)
indirect_only_data = [rec for rec in _fetch_dependencies(direct_only=False) if rec['Name'] not in direct_names]
print(_markdown_table(_extract_rows(indirect_only_data)))
def dependency_tree_console_text():
"""Fill in the pipdeptree console output minus any warnings."""
console_tree = (TP_PATH / 'package-dependency-tree.console.txt').read_text(encoding=ENCODING).strip()
fence = '````'
print(f'{fence}console')
print(console_tree)
print(fence)
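# Note on the four-backtick fence above (presumably chosen so that a literal
# ``` inside the captured pipdeptree text cannot close the block early); the
# emitted snippet ends up looking roughly like:
#
#   ````console
#   atlassian-python-api==3.20.1
#     - requests [required: Any, installed: 2.26.0]
#   ````
#
# Package names and versions here are purely illustrative.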
|
#
# This file is part of LiteX.
#
# This file is Copyright (c) 2013-2014 Sebastien Bourdeauducq <sb@m-labs.hk>
# This file is Copyright (c) 2014-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# This file is Copyright (c) 2018 Dolu1990 <charles.papon.90@gmail.com>
# This file is Copyright (c) 2019 Gabriel L. Somlo <gsomlo@gmail.com>
# This file is Copyright (c) 2018 Jean-François Nguyen <jf@lambdaconcept.fr>
# This file is Copyright (c) 2019 Antmicro <www.antmicro.com>
# This file is Copyright (c) 2013 Robert Jordens <jordens@gmail.com>
# This file is Copyright (c) 2018 Sean Cross <sean@xobs.io>
# This file is Copyright (c) 2018 Sergiusz Bazanski <q3k@q3k.org>
# This file is Copyright (c) 2016-2018 Tim 'mithro' Ansell <me@mith.ro>
# This file is Copyright (c) 2015 whitequark <whitequark@whitequark.org>
# This file is Copyright (c) 2018 William D. Jones <thor0505@comcast.net>
# This file is Copyright (c) 2020 Piotr Esden-Tempski <piotr@esden.net>
# SPDX-License-Identifier: BSD-2-Clause
import os
import json
import time
import datetime
import inspect
from shutil import which
from sysconfig import get_platform
from migen import *
from litex.soc.interconnect.csr import CSRStatus
from litex.build.tools import generated_banner
from litex.soc.doc.rst import reflow
from litex.soc.doc.module import gather_submodules, ModuleNotDocumented, DocumentedModule, DocumentedInterrupts
from litex.soc.doc.csr import DocumentedCSRRegion
from litex.soc.interconnect.csr import _CompoundCSR
# CPU files ----------------------------------------------------------------------------------------
def get_cpu_mak(cpu, compile_software):
# Select between CLANG and GCC.
clang = os.getenv("CLANG", "")
if clang != "":
clang = bool(int(clang))
else:
clang = None
if cpu.clang_triple is None:
if clang:
raise ValueError(cpu.name + " is not supported with CLANG.")
else:
clang = False
else:
# Default to gcc unless told otherwise.
if clang is None:
clang = False
assert isinstance(clang, bool)
if clang:
triple = cpu.clang_triple
flags = cpu.clang_flags
else:
triple = cpu.gcc_triple
flags = cpu.gcc_flags
# Select triple when more than one.
def select_triple(triple):
r = None
if not isinstance(triple, tuple):
triple = (triple,)
override = os.getenv("LITEX_ENV_CC_TRIPLE")
if override:
triple = (override,) + triple
p = get_platform()
for i in range(len(triple)):
t = triple[i]
# Use native toolchain if host and target platforms are the same.
if t == 'riscv64-unknown-elf' and p == 'linux-riscv64':
r = '--native--'
break
if which(t+"-gcc"):
r = t
break
if r is None:
if not compile_software:
return "--not-found--"
msg = "Unable to find any of the cross compilation toolchains:\n"
for i in range(len(triple)):
msg += "- " + triple[i] + "\n"
raise OSError(msg)
return r
    # Return information.
return [
("TRIPLE", select_triple(triple)),
("CPU", cpu.name),
("CPUFLAGS", flags),
("CPUENDIANNESS", cpu.endianness),
("CLANG", str(int(clang))),
("CPU_DIRECTORY", os.path.dirname(inspect.getfile(cpu.__class__))),
]
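# Illustrative result (assuming a little-endian VexRiscv CPU and a riscv64
# cross gcc on PATH; the exact values depend on the CPU class and the
# environment):
#
#   [("TRIPLE", "riscv64-unknown-elf"), ("CPU", "vexriscv"),
#    ("CPUFLAGS", "..."), ("CPUENDIANNESS", "little"), ("CLANG", "0"),
#    ("CPU_DIRECTORY", "<path to the CPU's Python package>")]
#
# Setting the CLANG or LITEX_ENV_CC_TRIPLE environment variables overrides
# the defaults chosen above.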
def get_linker_output_format(cpu):
return f"OUTPUT_FORMAT(\"{cpu.linker_output_format}\")\n"
def get_linker_regions(regions):
r = "MEMORY {\n"
for name, region in regions.items():
r += f"\t{name} : ORIGIN = 0x{region.origin:08x}, LENGTH = 0x{region.length:08x}\n"
r += "}\n"
return r
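# Illustrative output (assuming a regions dict whose values expose `origin`
# and `length`, e.g. {"sram": <region origin=0x01000000 length=0x2000>}):
#
#   MEMORY {
#       sram : ORIGIN = 0x01000000, LENGTH = 0x00002000
#   }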
# C Export -----------------------------------------------------------------------------------------
def get_git_header():
from litex.build.tools import get_migen_git_revision, get_litex_git_revision
r = generated_banner("//")
r += "#ifndef __GENERATED_GIT_H\n#define __GENERATED_GIT_H\n\n"
r += f"#define MIGEN_GIT_SHA1 \"{get_migen_git_revision()}\"\n"
r += f"#define LITEX_GIT_SHA1 \"{get_litex_git_revision()}\"\n"
r += "#endif\n"
return r
def get_mem_header(regions):
r = generated_banner("//")
r += "#ifndef __GENERATED_MEM_H\n#define __GENERATED_MEM_H\n\n"
for name, region in regions.items():
r += f"#ifndef {name.upper()}_BASE\n"
r += f"#define {name.upper()}_BASE 0x{region.origin:08x}L\n"
r += f"#define {name.upper()}_SIZE 0x{region.length:08x}\n"
r += "#endif\n\n"
r += "#ifndef MEM_REGIONS\n"
r += "#define MEM_REGIONS \"";
for name, region in regions.items():
r += f"{name.upper()} {" "*(8-len(name))} 0x{region.origin:08x} 0x{region.size:x} \\n"
r = r[:-2]
r += "\"\n"
r += "#endif\n"
r += "#endif\n"
return r
def get_soc_header(constants, with_access_functions=True):
r = generated_banner("//")
r += "#ifndef __GENERATED_SOC_H\n#define __GENERATED_SOC_H\n"
funcs = ""
for name, value in constants.items():
if value is None:
r += "#define "+name+"\n"
continue
if isinstance(value, str):
value = "\"" + value + "\""
ctype = "const char *"
else:
value = str(value)
ctype = "int"
r += "#define "+name+" "+value+"\n"
if with_access_functions:
funcs += "static inline "+ctype+" "+name.lower()+"_read(void) {\n"
funcs += "\treturn "+value+";\n}\n"
if with_access_functions:
r += "\n#ifndef __ASSEMBLER__\n"
r += funcs
r += "#endif // !__ASSEMBLER__\n"
r += "\n#endif\n"
return r
def _get_rw_functions_c(reg_name, reg_base, nwords, busword, alignment, read_only, with_access_functions):
r = ""
addr_str = f"CSR_{reg_name.upper()}_ADDR"
size_str = f"CSR_{reg_name.upper()}_SIZE"
r += f"#define {addr_str} (CSR_BASE + {hex(reg_base)}L)\n"
r += f"#define {size_str} {nwords}\n"
size = nwords*busword//8
if size > 8:
# Downstream should select appropriate `csr_[rd|wr]_buf_uintX()` pair!
return r
elif size > 4:
ctype = "uint64_t"
elif size > 2:
ctype = "uint32_t"
elif size > 1:
ctype = "uint16_t"
else:
ctype = "uint8_t"
    stride = alignment//8
if with_access_functions:
r += f"static inline {ctype} {reg_name}_read(void) {{\n"
if nwords > 1:
r += f"\t{ctype} r = csr_read_simple(CSR_BASE + {reg_base}L);\n"
for sub in range(1, nwords):
r += f"\tr <<= {busword};\n"
r += f"\tr |= csr_read_simple(CSR_BASE + {hex(reg_base+sub*stride)}L);\n"
r += "\treturn r;\n}\n"
else:
r += f"\treturn csr_read_simple(CSR_BASE + {hex(reg_base)}L);\n}}\n"
if not read_only:
r += f"static inline void {reg_name}_write({ctype} v) {{\n"
for sub in range(nwords):
shift = (nwords-sub-1)*busword
if shift:
v_shift = "v >> {}".format(shift)
else:
v_shift = "v"
r += f"\tcsr_write_simple({v_shift}, CSR_BASE + {hex(reg_base+sub*stride)}L);\n"
r += "}\n"
return r
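# Illustrative C output for a hypothetical single-word read/write register
# "ctrl_scratch" at offset 0x4 with busword=32 and alignment=32:
#
#   #define CSR_CTRL_SCRATCH_ADDR (CSR_BASE + 0x4L)
#   #define CSR_CTRL_SCRATCH_SIZE 1
#   static inline uint32_t ctrl_scratch_read(void) {
#   	return csr_read_simple(CSR_BASE + 0x4L);
#   }
#   static inline void ctrl_scratch_write(uint32_t v) {
#   	csr_write_simple(v, CSR_BASE + 0x4L);
#   }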
def get_csr_header(regions, constants, csr_base=None, with_access_functions=True):
alignment = constants.get("CONFIG_CSR_ALIGNMENT", 32)
r = generated_banner("//")
if with_access_functions: # FIXME
r += "#include <generated/soc.h>\n"
r += "#ifndef __GENERATED_CSR_H\n#define __GENERATED_CSR_H\n"
if with_access_functions:
r += "#include <stdint.h>\n"
r += "#include <system.h>\n"
r += "#ifndef CSR_ACCESSORS_DEFINED\n"
r += "#include <hw/common.h>\n"
r += "#endif /* ! CSR_ACCESSORS_DEFINED */\n"
csr_base = csr_base if csr_base is not None else regions[next(iter(regions))].origin
r += "#ifndef CSR_BASE\n"
r += f"#define CSR_BASE {hex(csr_base)}L\n"
r += "#endif\n"
for name, region in regions.items():
origin = region.origin - csr_base
r += "\n/* "+name+" */\n"
r += f"#define CSR_{name.upper()}_BASE (CSR_BASE + {hex(origin)}L)\n"
if not isinstance(region.obj, Memory):
for csr in region.obj:
nr = (csr.size + region.busword - 1)//region.busword
r += _get_rw_functions_c(name + "_" + csr.name, origin, nr, region.busword, alignment,
getattr(csr, "read_only", False), with_access_functions)
origin += alignment//8*nr
if hasattr(csr, "fields"):
for field in csr.fields.fields:
offset = str(field.offset)
size = str(field.size)
r += f"#define CSR_{name.upper()}_{csr.name.upper()}_{field.name.upper()}_OFFSET {offset}\n"
r += f"#define CSR_{name.upper()}_{csr.name.upper()}_{field.name.upper()}_SIZE {size}\n"
if with_access_functions and csr.size <= 32: # FIXME: Implement extract/read functions for csr.size > 32-bit.
reg_name = name + "_" + csr.name.lower()
field_name = reg_name + "_" + field.name.lower()
r += "static inline uint32_t " + field_name + "_extract(uint32_t oldword) {\n"
r += "\tuint32_t mask = ((1 << " + size + ")-1);\n"
r += "\treturn ( (oldword >> " + offset + ") & mask );\n}\n"
r += "static inline uint32_t " + field_name + "_read(void) {\n"
r += "\tuint32_t word = " + reg_name + "_read();\n"
r += "\treturn " + field_name + "_extract(word);\n"
r += "}\n"
if not getattr(csr, "read_only", False):
r += "static inline uint32_t " + field_name + "_replace(uint32_t oldword, uint32_t plain_value) {\n"
r += "\tuint32_t mask = ((1 << " + size + ")-1);\n"
r += "\treturn (oldword & (~(mask << " + offset + "))) | (mask & plain_value)<< " + offset + " ;\n}\n"
r += "static inline void " + field_name + "_write(uint32_t plain_value) {\n"
r += "\tuint32_t oldword = " + reg_name + "_read();\n"
r += "\tuint32_t newword = " + field_name + "_replace(oldword, plain_value);\n"
r += "\t" + reg_name + "_write(newword);\n"
r += "}\n"
r += "\n#endif\n"
return r
# JSON Export --------------------------------------------------------------------------------------
def get_csr_json(csr_regions={}, constants={}, mem_regions={}):
alignment = constants.get("CONFIG_CSR_ALIGNMENT", 32)
d = {
"csr_bases": {},
"csr_registers": {},
"constants": {},
"memories": {},
}
for name, region in csr_regions.items():
d["csr_bases"][name] = region.origin
region_origin = region.origin
if not isinstance(region.obj, Memory):
for csr in region.obj:
_size = (csr.size + region.busword - 1)//region.busword
_type = "rw"
if isinstance(csr, CSRStatus) and not hasattr(csr, "r"):
_type = "ro"
d["csr_registers"][name + "_" + csr.name] = {
"addr": region_origin,
"size": _size,
"type": _type
}
region_origin += alignment//8*_size
for name, value in constants.items():
d["constants"][name.lower()] = value.lower() if isinstance(value, str) else value
for name, region in mem_regions.items():
d["memories"][name.lower()] = {
"base": region.origin,
"size": region.length,
"type": region.type,
}
return json.dumps(d, indent=4)
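# Illustrative shape of the emitted JSON (addresses and sizes are plain
# integers in the real output; names and values here are hypothetical):
#
#   {
#       "csr_bases": {"ctrl": 2181038080},
#       "csr_registers": {"ctrl_scratch": {"addr": 2181038084, "size": 1, "type": "rw"}},
#       "constants": {"config_clock_frequency": 100000000},
#       "memories": {"sram": {"base": 16777216, "size": 8192, "type": "cached"}}
#   }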
# CSV Export --------------------------------------------------------------------------------------
def get_csr_csv(csr_regions={}, constants={}, mem_regions={}):
d = json.loads(get_csr_json(csr_regions, constants, mem_regions))
r = generated_banner("#")
for name, value in d["csr_bases"].items():
r += "csr_base,{},0x{:08x},,\n".format(name, value)
for name in d["csr_registers"].keys():
r += "csr_register,{},0x{:08x},{},{}\n".format(name,
d["csr_registers"][name]["addr"],
d["csr_registers"][name]["size"],
d["csr_registers"][name]["type"])
for name, value in d["constants"].items():
r += "constant,{},{},,\n".format(name, value)
for name in d["memories"].keys():
r += "memory_region,{},0x{:08x},{:d},{:s}\n".format(name,
d["memories"][name]["base"],
d["memories"][name]["size"],
d["memories"][name]["type"],
)
return r
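# Illustrative rows (after the generated banner; names and values are
# hypothetical, trailing commas pad the unused columns):
#
#   csr_base,ctrl,0x82000000,,
#   csr_register,ctrl_scratch,0x82000004,1,rw
#   constant,config_clock_frequency,100000000,,
#   memory_region,sram,0x01000000,8192,cached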
# SVD Export --------------------------------------------------------------------------------------
def get_csr_svd(soc, vendor="litex", name="soc", description=None):
def sub_csr_bit_range(busword, csr, offset):
nwords = (csr.size + busword - 1)//busword
i = nwords - offset - 1
nbits = min(csr.size - i*busword, busword) - 1
name = (csr.name + str(i) if nwords > 1 else csr.name).upper()
origin = i*busword
return (origin, nbits, name)
def print_svd_register(csr, csr_address, description, length, svd):
svd.append(' <register>')
svd.append(' <name>{}</name>'.format(csr.short_numbered_name))
if description is not None:
svd.append(' <description><![CDATA[{}]]></description>'.format(description))
svd.append(' <addressOffset>0x{:04x}</addressOffset>'.format(csr_address))
svd.append(' <resetValue>0x{:02x}</resetValue>'.format(csr.reset_value))
svd.append(' <size>{}</size>'.format(length))
# svd.append(' <access>{}</access>'.format(csr.access)) # 'access' is a lie: "read-only" registers can legitimately change state based on a write, and is in fact used to handle the "pending" field in events
csr_address = csr_address + 4
svd.append(' <fields>')
if hasattr(csr, "fields") and len(csr.fields) > 0:
for field in csr.fields:
svd.append(' <field>')
svd.append(' <name>{}</name>'.format(field.name))
svd.append(' <msb>{}</msb>'.format(field.offset +
field.size - 1))
svd.append(' <bitRange>[{}:{}]</bitRange>'.format(
field.offset + field.size - 1, field.offset))
svd.append(' <lsb>{}</lsb>'.format(field.offset))
svd.append(' <description><![CDATA[{}]]></description>'.format(
reflow(field.description)))
svd.append(' </field>')
else:
field_size = csr.size
field_name = csr.short_name.lower()
# Strip off "ev_" from eventmanager fields
if field_name == "ev_enable":
field_name = "enable"
elif field_name == "ev_pending":
field_name = "pending"
elif field_name == "ev_status":
field_name = "status"
svd.append(' <field>')
svd.append(' <name>{}</name>'.format(field_name))
svd.append(' <msb>{}</msb>'.format(field_size - 1))
svd.append(' <bitRange>[{}:{}]</bitRange>'.format(field_size - 1, 0))
svd.append(' <lsb>{}</lsb>'.format(0))
svd.append(' </field>')
svd.append(' </fields>')
svd.append(' </register>')
interrupts = {}
for csr, irq in sorted(soc.irq.locs.items()):
interrupts[csr] = irq
documented_regions = []
for region_name, region in soc.csr.regions.items():
documented_regions.append(DocumentedCSRRegion(
name = region_name,
region = region,
csr_data_width = soc.csr.data_width)
)
svd = []
svd.append('<?xml version="1.0" encoding="utf-8"?>')
svd.append('')
svd.append('<device schemaVersion="1.1" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance" xs:noNamespaceSchemaLocation="CMSIS-SVD.xsd" >')
svd.append(' <vendor>{}</vendor>'.format(vendor))
svd.append(' <name>{}</name>'.format(name.upper()))
if description is not None:
svd.append(' <description><![CDATA[{}]]></description>'.format(reflow(description)))
else:
fmt = "%Y-%m-%d %H:%M:%S"
build_time = datetime.datetime.fromtimestamp(time.time()).strftime(fmt)
svd.append(' <description><![CDATA[{}]]></description>'.format(reflow("Litex SoC " + build_time)))
svd.append('')
svd.append(' <addressUnitBits>8</addressUnitBits>')
svd.append(' <width>32</width>')
svd.append(' <size>32</size>')
svd.append(' <access>read-write</access>')
svd.append(' <resetValue>0x00000000</resetValue>')
svd.append(' <resetMask>0xFFFFFFFF</resetMask>')
svd.append('')
svd.append(' <peripherals>')
for region in documented_regions:
csr_address = 0
svd.append(' <peripheral>')
svd.append(' <name>{}</name>'.format(region.name.upper()))
svd.append(' <baseAddress>0x{:08X}</baseAddress>'.format(region.origin))
svd.append(' <groupName>{}</groupName>'.format(region.name.upper()))
if len(region.sections) > 0:
svd.append(' <description><![CDATA[{}]]></description>'.format(
reflow(region.sections[0].body())))
svd.append(' <registers>')
for csr in region.csrs:
description = None
if hasattr(csr, "description"):
description = csr.description
if isinstance(csr, _CompoundCSR) and len(csr.simple_csrs) > 1:
is_first = True
for i in range(len(csr.simple_csrs)):
(start, length, name) = sub_csr_bit_range(
region.busword, csr, i)
if length > 0:
bits_str = "Bits {}-{} of `{}`.".format(
start, start+length, csr.name)
else:
bits_str = "Bit {} of `{}`.".format(
start, csr.name)
if is_first:
if description is not None:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str + " " + description, length, svd)
else:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str, length, svd)
is_first = False
else:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str, length, svd)
csr_address = csr_address + 4
else:
length = ((csr.size + region.busword - 1) //
region.busword) * region.busword
print_svd_register(
csr, csr_address, description, length, svd)
csr_address = csr_address + 4
svd.append(' </registers>')
svd.append(' <addressBlock>')
svd.append(' <offset>0</offset>')
svd.append(' <size>0x{:x}</size>'.format(csr_address))
svd.append(' <usage>registers</usage>')
svd.append(' </addressBlock>')
if region.name in interrupts:
svd.append(' <interrupt>')
svd.append(' <name>{}</name>'.format(region.name))
svd.append(' <value>{}</value>'.format(interrupts[region.name]))
svd.append(' </interrupt>')
svd.append(' </peripheral>')
svd.append(' </peripherals>')
svd.append(' <vendorExtensions>')
if len(soc.mem_regions) > 0:
svd.append(' <memoryRegions>')
for region_name, region in soc.mem_regions.items():
svd.append(' <memoryRegion>')
svd.append(' <name>{}</name>'.format(region_name.upper()))
svd.append(' <baseAddress>0x{:08X}</baseAddress>'.format(region.origin))
svd.append(' <size>0x{:08X}</size>'.format(region.size))
svd.append(' </memoryRegion>')
svd.append(' </memoryRegions>')
svd.append(' <constants>')
for name, value in soc.constants.items():
svd.append(' <constant name="{}" value="{}" />'.format(name, value))
svd.append(' </constants>')
svd.append(' </vendorExtensions>')
svd.append('</device>')
return "\n".join(svd)
# Memory.x Export ----------------------------------------------------------------------------------
def get_memory_x(soc):
r = get_linker_regions(soc.mem_regions)
r += '\n'
r += 'REGION_ALIAS("REGION_TEXT", spiflash);\n'
r += 'REGION_ALIAS("REGION_RODATA", spiflash);\n'
r += 'REGION_ALIAS("REGION_DATA", sram);\n'
r += 'REGION_ALIAS("REGION_BSS", sram);\n'
r += 'REGION_ALIAS("REGION_HEAP", sram);\n'
r += 'REGION_ALIAS("REGION_STACK", sram);\n\n'
r += '/* CPU reset location. */\n'
r += '_stext = {:#08x};\n'.format(soc.cpu.reset_address)
return r
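# Formatting note (worked example): '{:#08x}' counts the '0x' prefix towards
# the 8-character field width, so a reset address of 0x20000 is emitted as
# '_stext = 0x020000;' (six hex digits) rather than '_stext = 0x00020000;'.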
|
#
# This file is part of LiteX.
#
# This file is Copyright (c) 2013-2014 Sebastien Bourdeauducq <sb@m-labs.hk>
# This file is Copyright (c) 2014-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# This file is Copyright (c) 2018 Dolu1990 <charles.papon.90@gmail.com>
# This file is Copyright (c) 2019 Gabriel L. Somlo <gsomlo@gmail.com>
# This file is Copyright (c) 2018 Jean-François Nguyen <jf@lambdaconcept.fr>
# This file is Copyright (c) 2019 Antmicro <www.antmicro.com>
# This file is Copyright (c) 2013 Robert Jordens <jordens@gmail.com>
# This file is Copyright (c) 2018 Sean Cross <sean@xobs.io>
# This file is Copyright (c) 2018 Sergiusz Bazanski <q3k@q3k.org>
# This file is Copyright (c) 2016-2018 Tim 'mithro' Ansell <me@mith.ro>
# This file is Copyright (c) 2015 whitequark <whitequark@whitequark.org>
# This file is Copyright (c) 2018 William D. Jones <thor0505@comcast.net>
# This file is Copyright (c) 2020 Piotr Esden-Tempski <piotr@esden.net>
# SPDX-License-Identifier: BSD-2-Clause
import os
import json
import time
import datetime
import inspect
from shutil import which
from sysconfig import get_platform
from migen import *
from litex.soc.interconnect.csr import CSRStatus
from litex.build.tools import generated_banner
from litex.soc.doc.rst import reflow
from litex.soc.doc.module import gather_submodules, ModuleNotDocumented, DocumentedModule, DocumentedInterrupts
from litex.soc.doc.csr import DocumentedCSRRegion
from litex.soc.interconnect.csr import _CompoundCSR
# CPU files ----------------------------------------------------------------------------------------
def get_cpu_mak(cpu, compile_software):
# Select between CLANG and GCC.
clang = os.getenv("CLANG", "")
if clang != "":
clang = bool(int(clang))
else:
clang = None
if cpu.clang_triple is None:
if clang:
raise ValueError(cpu.name + " is not supported with CLANG.")
else:
clang = False
else:
# Default to gcc unless told otherwise.
if clang is None:
clang = False
assert isinstance(clang, bool)
if clang:
triple = cpu.clang_triple
flags = cpu.clang_flags
else:
triple = cpu.gcc_triple
flags = cpu.gcc_flags
# Select triple when more than one.
def select_triple(triple):
r = None
if not isinstance(triple, tuple):
triple = (triple,)
override = os.getenv("LITEX_ENV_CC_TRIPLE")
if override:
triple = (override,) + triple
p = get_platform()
for i in range(len(triple)):
t = triple[i]
# Use native toolchain if host and target platforms are the same.
if t == 'riscv64-unknown-elf' and p == 'linux-riscv64':
r = '--native--'
break
if which(t+"-gcc"):
r = t
break
if r is None:
if not compile_software:
return "--not-found--"
msg = "Unable to find any of the cross compilation toolchains:\n"
for i in range(len(triple)):
msg += "- " + triple[i] + "\n"
raise OSError(msg)
return r
    # Return information.
return [
("TRIPLE", select_triple(triple)),
("CPU", cpu.name),
("CPUFLAGS", flags),
("CPUENDIANNESS", cpu.endianness),
("CLANG", str(int(clang))),
("CPU_DIRECTORY", os.path.dirname(inspect.getfile(cpu.__class__))),
]
def get_linker_output_format(cpu):
return f"OUTPUT_FORMAT(\"{cpu.linker_output_format}\")\n"
def get_linker_regions(regions):
r = "MEMORY {\n"
for name, region in regions.items():
r += f"\t{name} : ORIGIN = 0x{region.origin:08x}, LENGTH = 0x{region.length:08x}\n"
r += "}\n"
return r
# C Export -----------------------------------------------------------------------------------------
def get_git_header():
from litex.build.tools import get_migen_git_revision, get_litex_git_revision
r = generated_banner("//")
r += "#ifndef __GENERATED_GIT_H\n#define __GENERATED_GIT_H\n\n"
r += f"#define MIGEN_GIT_SHA1 \"{get_migen_git_revision()}\"\n"
r += f"#define LITEX_GIT_SHA1 \"{get_litex_git_revision()}\"\n"
r += "#endif\n"
return r
def get_mem_header(regions):
r = generated_banner("//")
r += "#ifndef __GENERATED_MEM_H\n#define __GENERATED_MEM_H\n\n"
for name, region in regions.items():
r += f"#ifndef {name.upper()}_BASE\n"
r += f"#define {name.upper()}_BASE 0x{region.origin:08x}L\n"
r += f"#define {name.upper()}_SIZE 0x{region.length:08x}\n"
r += "#endif\n\n"
r += "#ifndef MEM_REGIONS\n"
r += "#define MEM_REGIONS \"";
for name, region in regions.items():
r += f"{name.upper()} {' '*(8-len(name))} 0x{region.origin:08x} 0x{region.size:x} \\n"
r = r[:-2]
r += "\"\n"
r += "#endif\n"
r += "#endif\n"
return r
def get_soc_header(constants, with_access_functions=True):
r = generated_banner("//")
r += "#ifndef __GENERATED_SOC_H\n#define __GENERATED_SOC_H\n"
funcs = ""
for name, value in constants.items():
if value is None:
r += "#define "+name+"\n"
continue
if isinstance(value, str):
value = "\"" + value + "\""
ctype = "const char *"
else:
value = str(value)
ctype = "int"
r += "#define "+name+" "+value+"\n"
if with_access_functions:
funcs += "static inline "+ctype+" "+name.lower()+"_read(void) {\n"
funcs += "\treturn "+value+";\n}\n"
if with_access_functions:
r += "\n#ifndef __ASSEMBLER__\n"
r += funcs
r += "#endif // !__ASSEMBLER__\n"
r += "\n#endif\n"
return r
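# Illustrative output for constants {"CONFIG_CLOCK_FREQUENCY": 100000000,
# "CONFIG_CPU_NOP": "nop"} with access functions enabled (hypothetical
# values):
#
#   #define CONFIG_CLOCK_FREQUENCY 100000000
#   #define CONFIG_CPU_NOP "nop"
#
#   #ifndef __ASSEMBLER__
#   static inline int config_clock_frequency_read(void) {
#   	return 100000000;
#   }
#   static inline const char * config_cpu_nop_read(void) {
#   	return "nop";
#   }
#   #endif // !__ASSEMBLER__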
def _get_rw_functions_c(reg_name, reg_base, nwords, busword, alignment, read_only, with_access_functions):
r = ""
addr_str = f"CSR_{reg_name.upper()}_ADDR"
size_str = f"CSR_{reg_name.upper()}_SIZE"
r += f"#define {addr_str} (CSR_BASE + {hex(reg_base)}L)\n"
r += f"#define {size_str} {nwords}\n"
size = nwords*busword//8
if size > 8:
# Downstream should select appropriate `csr_[rd|wr]_buf_uintX()` pair!
return r
elif size > 4:
ctype = "uint64_t"
elif size > 2:
ctype = "uint32_t"
elif size > 1:
ctype = "uint16_t"
else:
ctype = "uint8_t"
    stride = alignment//8
if with_access_functions:
r += f"static inline {ctype} {reg_name}_read(void) {{\n"
if nwords > 1:
r += f"\t{ctype} r = csr_read_simple(CSR_BASE + {reg_base}L);\n"
for sub in range(1, nwords):
r += f"\tr <<= {busword};\n"
r += f"\tr |= csr_read_simple(CSR_BASE + {hex(reg_base+sub*stride)}L);\n"
r += "\treturn r;\n}\n"
else:
r += f"\treturn csr_read_simple(CSR_BASE + {hex(reg_base)}L);\n}}\n"
if not read_only:
r += f"static inline void {reg_name}_write({ctype} v) {{\n"
for sub in range(nwords):
shift = (nwords-sub-1)*busword
if shift:
v_shift = "v >> {}".format(shift)
else:
v_shift = "v"
r += f"\tcsr_write_simple({v_shift}, CSR_BASE + {hex(reg_base+sub*stride)}L);\n"
r += "}\n"
return r
def get_csr_header(regions, constants, csr_base=None, with_access_functions=True):
alignment = constants.get("CONFIG_CSR_ALIGNMENT", 32)
r = generated_banner("//")
if with_access_functions: # FIXME
r += "#include <generated/soc.h>\n"
r += "#ifndef __GENERATED_CSR_H\n#define __GENERATED_CSR_H\n"
if with_access_functions:
r += "#include <stdint.h>\n"
r += "#include <system.h>\n"
r += "#ifndef CSR_ACCESSORS_DEFINED\n"
r += "#include <hw/common.h>\n"
r += "#endif /* ! CSR_ACCESSORS_DEFINED */\n"
csr_base = csr_base if csr_base is not None else regions[next(iter(regions))].origin
r += "#ifndef CSR_BASE\n"
r += f"#define CSR_BASE {hex(csr_base)}L\n"
r += "#endif\n"
for name, region in regions.items():
origin = region.origin - csr_base
r += "\n/* "+name+" */\n"
r += f"#define CSR_{name.upper()}_BASE (CSR_BASE + {hex(origin)}L)\n"
if not isinstance(region.obj, Memory):
for csr in region.obj:
nr = (csr.size + region.busword - 1)//region.busword
r += _get_rw_functions_c(name + "_" + csr.name, origin, nr, region.busword, alignment,
getattr(csr, "read_only", False), with_access_functions)
origin += alignment//8*nr
if hasattr(csr, "fields"):
for field in csr.fields.fields:
offset = str(field.offset)
size = str(field.size)
r += f"#define CSR_{name.upper()}_{csr.name.upper()}_{field.name.upper()}_OFFSET {offset}\n"
r += f"#define CSR_{name.upper()}_{csr.name.upper()}_{field.name.upper()}_SIZE {size}\n"
if with_access_functions and csr.size <= 32: # FIXME: Implement extract/read functions for csr.size > 32-bit.
reg_name = name + "_" + csr.name.lower()
field_name = reg_name + "_" + field.name.lower()
r += "static inline uint32_t " + field_name + "_extract(uint32_t oldword) {\n"
r += "\tuint32_t mask = ((1 << " + size + ")-1);\n"
r += "\treturn ( (oldword >> " + offset + ") & mask );\n}\n"
r += "static inline uint32_t " + field_name + "_read(void) {\n"
r += "\tuint32_t word = " + reg_name + "_read();\n"
r += "\treturn " + field_name + "_extract(word);\n"
r += "}\n"
if not getattr(csr, "read_only", False):
r += "static inline uint32_t " + field_name + "_replace(uint32_t oldword, uint32_t plain_value) {\n"
r += "\tuint32_t mask = ((1 << " + size + ")-1);\n"
r += "\treturn (oldword & (~(mask << " + offset + "))) | (mask & plain_value)<< " + offset + " ;\n}\n"
r += "static inline void " + field_name + "_write(uint32_t plain_value) {\n"
r += "\tuint32_t oldword = " + reg_name + "_read();\n"
r += "\tuint32_t newword = " + field_name + "_replace(oldword, plain_value);\n"
r += "\t" + reg_name + "_write(newword);\n"
r += "}\n"
r += "\n#endif\n"
return r
# JSON Export --------------------------------------------------------------------------------------
def get_csr_json(csr_regions={}, constants={}, mem_regions={}):
alignment = constants.get("CONFIG_CSR_ALIGNMENT", 32)
d = {
"csr_bases": {},
"csr_registers": {},
"constants": {},
"memories": {},
}
for name, region in csr_regions.items():
d["csr_bases"][name] = region.origin
region_origin = region.origin
if not isinstance(region.obj, Memory):
for csr in region.obj:
_size = (csr.size + region.busword - 1)//region.busword
_type = "rw"
if isinstance(csr, CSRStatus) and not hasattr(csr, "r"):
_type = "ro"
d["csr_registers"][name + "_" + csr.name] = {
"addr": region_origin,
"size": _size,
"type": _type
}
region_origin += alignment//8*_size
for name, value in constants.items():
d["constants"][name.lower()] = value.lower() if isinstance(value, str) else value
for name, region in mem_regions.items():
d["memories"][name.lower()] = {
"base": region.origin,
"size": region.length,
"type": region.type,
}
return json.dumps(d, indent=4)
# CSV Export --------------------------------------------------------------------------------------
def get_csr_csv(csr_regions={}, constants={}, mem_regions={}):
d = json.loads(get_csr_json(csr_regions, constants, mem_regions))
r = generated_banner("#")
for name, value in d["csr_bases"].items():
r += "csr_base,{},0x{:08x},,\n".format(name, value)
for name in d["csr_registers"].keys():
r += "csr_register,{},0x{:08x},{},{}\n".format(name,
d["csr_registers"][name]["addr"],
d["csr_registers"][name]["size"],
d["csr_registers"][name]["type"])
for name, value in d["constants"].items():
r += "constant,{},{},,\n".format(name, value)
for name in d["memories"].keys():
r += "memory_region,{},0x{:08x},{:d},{:s}\n".format(name,
d["memories"][name]["base"],
d["memories"][name]["size"],
d["memories"][name]["type"],
)
return r
# SVD Export --------------------------------------------------------------------------------------
def get_csr_svd(soc, vendor="litex", name="soc", description=None):
def sub_csr_bit_range(busword, csr, offset):
nwords = (csr.size + busword - 1)//busword
i = nwords - offset - 1
nbits = min(csr.size - i*busword, busword) - 1
name = (csr.name + str(i) if nwords > 1 else csr.name).upper()
origin = i*busword
return (origin, nbits, name)
def print_svd_register(csr, csr_address, description, length, svd):
svd.append(' <register>')
svd.append(' <name>{}</name>'.format(csr.short_numbered_name))
if description is not None:
svd.append(' <description><![CDATA[{}]]></description>'.format(description))
svd.append(' <addressOffset>0x{:04x}</addressOffset>'.format(csr_address))
svd.append(' <resetValue>0x{:02x}</resetValue>'.format(csr.reset_value))
svd.append(' <size>{}</size>'.format(length))
# svd.append(' <access>{}</access>'.format(csr.access)) # 'access' is a lie: "read-only" registers can legitimately change state based on a write, and is in fact used to handle the "pending" field in events
csr_address = csr_address + 4
svd.append(' <fields>')
if hasattr(csr, "fields") and len(csr.fields) > 0:
for field in csr.fields:
svd.append(' <field>')
svd.append(' <name>{}</name>'.format(field.name))
svd.append(' <msb>{}</msb>'.format(field.offset +
field.size - 1))
svd.append(' <bitRange>[{}:{}]</bitRange>'.format(
field.offset + field.size - 1, field.offset))
svd.append(' <lsb>{}</lsb>'.format(field.offset))
svd.append(' <description><![CDATA[{}]]></description>'.format(
reflow(field.description)))
svd.append(' </field>')
else:
field_size = csr.size
field_name = csr.short_name.lower()
# Strip off "ev_" from eventmanager fields
if field_name == "ev_enable":
field_name = "enable"
elif field_name == "ev_pending":
field_name = "pending"
elif field_name == "ev_status":
field_name = "status"
svd.append(' <field>')
svd.append(' <name>{}</name>'.format(field_name))
svd.append(' <msb>{}</msb>'.format(field_size - 1))
svd.append(' <bitRange>[{}:{}]</bitRange>'.format(field_size - 1, 0))
svd.append(' <lsb>{}</lsb>'.format(0))
svd.append(' </field>')
svd.append(' </fields>')
svd.append(' </register>')
interrupts = {}
for csr, irq in sorted(soc.irq.locs.items()):
interrupts[csr] = irq
documented_regions = []
for region_name, region in soc.csr.regions.items():
documented_regions.append(DocumentedCSRRegion(
name = region_name,
region = region,
csr_data_width = soc.csr.data_width)
)
svd = []
svd.append('<?xml version="1.0" encoding="utf-8"?>')
svd.append('')
svd.append('<device schemaVersion="1.1" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance" xs:noNamespaceSchemaLocation="CMSIS-SVD.xsd" >')
svd.append(' <vendor>{}</vendor>'.format(vendor))
svd.append(' <name>{}</name>'.format(name.upper()))
if description is not None:
svd.append(' <description><![CDATA[{}]]></description>'.format(reflow(description)))
else:
fmt = "%Y-%m-%d %H:%M:%S"
build_time = datetime.datetime.fromtimestamp(time.time()).strftime(fmt)
svd.append(' <description><![CDATA[{}]]></description>'.format(reflow("Litex SoC " + build_time)))
svd.append('')
svd.append(' <addressUnitBits>8</addressUnitBits>')
svd.append(' <width>32</width>')
svd.append(' <size>32</size>')
svd.append(' <access>read-write</access>')
svd.append(' <resetValue>0x00000000</resetValue>')
svd.append(' <resetMask>0xFFFFFFFF</resetMask>')
svd.append('')
svd.append(' <peripherals>')
for region in documented_regions:
csr_address = 0
svd.append(' <peripheral>')
svd.append(' <name>{}</name>'.format(region.name.upper()))
svd.append(' <baseAddress>0x{:08X}</baseAddress>'.format(region.origin))
svd.append(' <groupName>{}</groupName>'.format(region.name.upper()))
if len(region.sections) > 0:
svd.append(' <description><![CDATA[{}]]></description>'.format(
reflow(region.sections[0].body())))
svd.append(' <registers>')
for csr in region.csrs:
description = None
if hasattr(csr, "description"):
description = csr.description
if isinstance(csr, _CompoundCSR) and len(csr.simple_csrs) > 1:
is_first = True
for i in range(len(csr.simple_csrs)):
(start, length, name) = sub_csr_bit_range(
region.busword, csr, i)
if length > 0:
bits_str = "Bits {}-{} of `{}`.".format(
start, start+length, csr.name)
else:
bits_str = "Bit {} of `{}`.".format(
start, csr.name)
if is_first:
if description is not None:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str + " " + description, length, svd)
else:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str, length, svd)
is_first = False
else:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str, length, svd)
csr_address = csr_address + 4
else:
length = ((csr.size + region.busword - 1) //
region.busword) * region.busword
print_svd_register(
csr, csr_address, description, length, svd)
csr_address = csr_address + 4
svd.append(' </registers>')
svd.append(' <addressBlock>')
svd.append(' <offset>0</offset>')
svd.append(' <size>0x{:x}</size>'.format(csr_address))
svd.append(' <usage>registers</usage>')
svd.append(' </addressBlock>')
if region.name in interrupts:
svd.append(' <interrupt>')
svd.append(' <name>{}</name>'.format(region.name))
svd.append(' <value>{}</value>'.format(interrupts[region.name]))
svd.append(' </interrupt>')
svd.append(' </peripheral>')
svd.append(' </peripherals>')
svd.append(' <vendorExtensions>')
if len(soc.mem_regions) > 0:
svd.append(' <memoryRegions>')
for region_name, region in soc.mem_regions.items():
svd.append(' <memoryRegion>')
svd.append(' <name>{}</name>'.format(region_name.upper()))
svd.append(' <baseAddress>0x{:08X}</baseAddress>'.format(region.origin))
svd.append(' <size>0x{:08X}</size>'.format(region.size))
svd.append(' </memoryRegion>')
svd.append(' </memoryRegions>')
svd.append(' <constants>')
for name, value in soc.constants.items():
svd.append(' <constant name="{}" value="{}" />'.format(name, value))
svd.append(' </constants>')
svd.append(' </vendorExtensions>')
svd.append('</device>')
return "\n".join(svd)
# Memory.x Export ----------------------------------------------------------------------------------
def get_memory_x(soc):
r = get_linker_regions(soc.mem_regions)
r += '\n'
r += 'REGION_ALIAS("REGION_TEXT", spiflash);\n'
r += 'REGION_ALIAS("REGION_RODATA", spiflash);\n'
r += 'REGION_ALIAS("REGION_DATA", sram);\n'
r += 'REGION_ALIAS("REGION_BSS", sram);\n'
r += 'REGION_ALIAS("REGION_HEAP", sram);\n'
r += 'REGION_ALIAS("REGION_STACK", sram);\n\n'
r += '/* CPU reset location. */\n'
r += '_stext = {:#08x};\n'.format(soc.cpu.reset_address)
return r
|
"""Window object represented in layout."""
from bui.layout.attr import Attr
from bui.layout.component import Component
class Window(Component):
"""
Window tag, to encompass widget tags.
The window tag is the only one that is truly mandatory in your
[layout](../overview.md). It is used to describe both a window and
dialog. It will contain all your widgets (graphical elements).
```
<window>
...
</window>
```
## Attributes
| Name | Required | Description | Example |
| ------------ | -------- | ------------------------ | ----------- |
| `rows` | No | The number of rows in | `<window |
| | | the window grid. | rows=10>` |
| | | Default is `6`. | |
| `cols` | No | The number of columns | `<window |
| | | in the window grid. | cols=5>` |
| | | Default is `6`. | |
| `title` | Yes | The window or dialog | `<window |
| | | title. This attribute | title="User |
| | | is mandatory. | Config">` |
You cannot set a window or dialog without a proper title. Doing so
would impair accessibility for screen readers. If these tools can
read anything at all on your window, it's the title bar, so be sure
it's not empty.
> `title` is a translatable attribute. If internationalization is
set, it should contain the `ytranslate` path to the title and will
be translated in the proper language as needed.
The `rows` and `cols` attributes are used to set the window grid. You
can think of them as the height (in rows) and width (in columns) of the
grid. Changing this value won't make the window any bigger, but
it will give you more control on how to place the widget in the window
itself. On the other hand, having a large grid can make designing not
so easy. It all depends on your needs.
> Note: you don't have to set the same number of rows and columns.
This is just the default value. You can set different values with no
trap:
```
<window cols=1 rows=8>
```
This will set a window with only one column, but 8 rows. If you place
a widget in `x=0 y=0`, it will take all the window's width. Again,
this doesn't change the window size in any way, just the way widgets
are placed on it. You can picture the window to always be a
square but sliced in different portions (squares or rectangles, more
or less big depending on the height and width you set in the window
tag).
## Data
A window is a specific graphical element since it only contains other
elements and has no meaning by itself. Therefore, you cannot send
it data, it wouldn't make much sense. Instead, you should
send data to the window's graphical elements themselves.
However, some window attributes can be changed on the fly.
| Attribute | Meaning and type | Example |
| -------------- | ---------------- | --------------------------- |
| `title` | The title (str) | `self.title = "New title"` |
These attributes can be accessed and set using the standard Python
syntax for attributes. Behind the scenes, these attributes are cached,
handled by an extended `property()`, but you don't really need to
worry about how it works. Suffice it to say that:
class Example(Windows):
def on_press_a(self):
self.title = "You pressed A."
... will update the window title when the user presses the 'a' key
on her keyboard.
## Controls
The window tag is tied to the [Window](../../widget/Window.md) or
[Dialog](../../widget/Dialog.md) class. Therefore, when you write
controls on either of these classes, you often want to catch controls
on indidivual graphical elements in the window. There are a few
exceptions however:
| Control | Method | Description |
| --------------------------------- | ---------- | ---------------- |
| [close](../../control/close.md) | `on_close` | The window is |
| | | about to be |
| | | closed, but |
| | | isn't closed |
| | | yet. |
| [focus](../../control/focus.md) | `on_focus` | The window is |
| | | focused or lose |
| | | focus. This |
| | | usually happens |
| | | for a top window |
| | | when the user |
| | | switches the |
| | | current app. |
| [init](../../control/init.md) | `on_init` | The window is |
| | | ready to be |
| | | displayed, but |
| | | is not displayed |
| | | just yet. |
| [press](../../control/press.md) | `on_press` | The user presses |
| | | on a key from her|
| | | keyboard. This |
| | | control can have |
| | | sub-controls. |
| [release](../../ | `on_release` | The user |
| control/release.md) | | relases a key on |
| | | her keyboard. |
| | | This control can |
| | | have sub- |
| | | controls. |
| [type](../../control/type.md) | `on_type` | The user types |
| | | a character |
| | | using her |
| | | keyboard. This |
| | | control can have |
| | | sub-controls. |
Notice that we don't specify the window identifier. It would make
no sense here. Therefore, to use these events, you should just add a
method in the window class with the control name and no identifier:
class MainWindow(Window):
def on_focus(self):
print(f"Am I focused? {"yes" if self.focused else "no"}")
"""
tag_name = "window"
attrs = (
Attr("title", help="The window title"),
Attr("width", help="The window width", type=int, default=6),
Attr("height", help="The window height", type=int, default=6),
)
def __init__(self, layout, parent, title, width=0, height=0):
super().__init__(layout, parent)
self.title = title
self.width = width
self.height = height
|
"""Window object represented in layout."""
from bui.layout.attr import Attr
from bui.layout.component import Component
class Window(Component):
"""
Window tag, to encompass widget tags.
The window tag is the only one that is truly mandatory in your
[layout](../overview.md). It is used to describe both a window and
dialog. It will contain all your widgets (graphical elements).
```
<window>
...
</window>
```
## Attributes
| Name | Required | Description | Example |
| ------------ | -------- | ------------------------ | ----------- |
| `rows` | No | The number of rows in | `<window |
| | | the window grid. | rows=10>` |
| | | Default is `6`. | |
| `cols` | No | The number of columns | `<window |
| | | in the window grid. | cols=5>` |
| | | Default is `6`. | |
| `title` | Yes | The window or dialog | `<window |
| | | title. This attribute | title="User |
| | | is mandatory. | Config">` |
You cannot set a window or dialog without a proper title. Doing so
would impair accessibility for screen readers. If these tools can
read anything at all on your window, it's the title bar, so be sure
it's not empty.
> `title` is a translatable attribute. If internationalization is
set, it should contain the `ytranslate` path to the title and will
be translated in the proper language as needed.
The `rows` and `cols` attributes are used to set the window grid. You
can think of them as the height (in rows) and width (in columns) of the
grid. Changing this value won't make the window any bigger, but
it will give you more control on how to place the widget in the window
itself. On the other hand, having a large grid can make designing not
so easy. It all depends on your needs.
> Note: you don't have to set the same number of rows and columns.
This is just the default value. You can set different values with no
trap:
```
<window cols=1 rows=8>
```
This will set a window with only one column, but 8 rows. If you place
a widget in `x=0 y=0`, it will take all the window's width. Again,
this doesn't change the window size in any way, just the way widgets
are placed on it. You can picture the window to always be a
square but sliced in different portions (squares or rectangles, more
or less big depending on the height and width you set in the window
tag).
## Data
A window is a specific graphical element since it only contains other
elements and has no meaning by itself. Therefore, you cannot send
it data, it wouldn't make much sense. Instead, you should
send data to the window's graphical elements themselves.
However, some window attributes can be changed on the fly.
| Attribute | Meaning and type | Example |
| -------------- | ---------------- | --------------------------- |
| `title` | The title (str) | `self.title = "New title"` |
These attributes can be accessed and set using the standard Python
syntax for attributes. Behind the scenes, these attributes are cached,
handled by an extended `property()`, but you don't really need to
worry about how it works. Suffice it to say that:
        class Example(Window):
def on_press_a(self):
self.title = "You pressed A."
... will update the window title when the user presses the 'a' key
on her keyboard.
## Controls
The window tag is tied to the [Window](../../widget/Window.md) or
[Dialog](../../widget/Dialog.md) class. Therefore, when you write
controls on either of these classes, you often want to catch controls
    on individual graphical elements in the window. There are a few
exceptions however:
| Control | Method | Description |
| --------------------------------- | ---------- | ---------------- |
| [close](../../control/close.md) | `on_close` | The window is |
| | | about to be |
| | | closed, but |
| | | isn't closed |
| | | yet. |
| [focus](../../control/focus.md) | `on_focus` | The window is |
    | | | focused or loses |
| | | focus. This |
| | | usually happens |
| | | for a top window |
| | | when the user |
| | | switches the |
| | | current app. |
| [init](../../control/init.md) | `on_init` | The window is |
| | | ready to be |
| | | displayed, but |
| | | is not displayed |
| | | just yet. |
| [press](../../control/press.md) | `on_press` | The user presses |
    | | | a key on her |
| | | keyboard. This |
| | | control can have |
| | | sub-controls. |
| [release](../../ | `on_release` | The user |
    | control/release.md) | | releases a key on |
| | | her keyboard. |
| | | This control can |
| | | have sub- |
| | | controls. |
| [type](../../control/type.md) | `on_type` | The user types |
| | | a character |
| | | using her |
| | | keyboard. This |
| | | control can have |
| | | sub-controls. |
Notice that we don't specify the window identifier. It would make
no sense here. Therefore, to use these events, you should just add a
method in the window class with the control name and no identifier:
class MainWindow(Window):
def on_focus(self):
print(f"Am I focused? {'yes' if self.focused else 'no'}")
"""
tag_name = "window"
attrs = (
Attr("title", help="The window title"),
Attr("width", help="The window width", type=int, default=6),
Attr("height", help="The window height", type=int, default=6),
)
def __init__(self, layout, parent, title, width=0, height=0):
super().__init__(layout, parent)
self.title = title
self.width = width
self.height = height
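# Hedged sketch (illustrative only): the layout parser is expected to build this component
# from a parsed <window> tag; constructed directly it looks like the call below. `layout`
# and `parent` stand for whatever objects the parser normally supplies and are stubbed
# with None here purely for illustration.
def _example_window_component():
    return Window(layout=None, parent=None, title="Example", width=6, height=6)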
|
import os
import re
import logging
from abc import abstractmethod
from collections import Counter
from pathlib import Path
from typing import List, Union, Dict
import gensim
import numpy as np
import torch
from bpemb import BPEmb
from deprecated import deprecated
from pytorch_pretrained_bert import (
BertTokenizer,
BertModel,
TransfoXLTokenizer,
TransfoXLModel,
OpenAIGPTModel,
OpenAIGPTTokenizer,
)
from pytorch_pretrained_bert.modeling_openai import (
PRETRAINED_MODEL_ARCHIVE_MAP as OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from pytorch_pretrained_bert.modeling_transfo_xl import (
PRETRAINED_MODEL_ARCHIVE_MAP as TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
)
import flair
from flair.data import Corpus
from .nn import LockedDropout, WordDropout
from .data import Dictionary, Token, Sentence
from .file_utils import cached_path, open_inside_zip
log = logging.getLogger("flair")
class Embeddings(torch.nn.Module):
"""Abstract base class for all embeddings. Every new type of embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
@abstractmethod
def embedding_type(self) -> str:
pass
def embed(self, sentences: Union[Sentence, List[Sentence]]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added, updates only if embeddings
are non-static."""
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
everything_embedded: bool = True
if self.embedding_type == "word-level":
for sentence in sentences:
for token in sentence.tokens:
if self.name not in token._embeddings.keys():
everything_embedded = False
else:
for sentence in sentences:
if self.name not in sentence._embeddings.keys():
everything_embedded = False
if not everything_embedded or not self.static_embeddings:
self._add_embeddings_internal(sentences)
return sentences
@abstractmethod
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Private method for adding embeddings to all words in a list of sentences."""
pass
class TokenEmbeddings(Embeddings):
"""Abstract base class for all token-level embeddings. Ever new type of word embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
def embedding_type(self) -> str:
return "word-level"
class DocumentEmbeddings(Embeddings):
"""Abstract base class for all document-level embeddings. Ever new type of document embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
def embedding_type(self) -> str:
return "sentence-level"
class StackedEmbeddings(TokenEmbeddings):
"""A stack of embeddings, used if you need to combine several different embedding types."""
def __init__(self, embeddings: List[TokenEmbeddings], detach: bool = True):
"""The constructor takes a list of embeddings to be combined."""
super().__init__()
self.embeddings = embeddings
# IMPORTANT: add embeddings as torch modules
for i, embedding in enumerate(embeddings):
self.add_module("list_embedding_{}".format(i), embedding)
self.detach: bool = detach
self.name: str = "Stack"
self.static_embeddings: bool = True
self.__embedding_type: str = embeddings[0].embedding_type
self.__embedding_length: int = 0
for embedding in embeddings:
self.__embedding_length += embedding.embedding_length
def embed(
self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True
):
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
for embedding in self.embeddings:
embedding.embed(sentences)
@property
def embedding_type(self) -> str:
return self.__embedding_type
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for embedding in self.embeddings:
embedding._add_embeddings_internal(sentences)
return sentences
def __str__(self):
        return f'StackedEmbeddings [{",".join([str(e) for e in self.embeddings])}]'
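# Hedged usage sketch (models are downloaded only when this is actually called; the
# embedding names are keys handled by the classes defined further down in this module):
# combine static GloVe vectors with contextual Flair embeddings and embed one sentence.
def _example_stacked_embeddings():
    stacked = StackedEmbeddings([
        WordEmbeddings("glove"),
        FlairEmbeddings("news-forward-fast"),
    ])
    sentence = Sentence("Berlin is the capital of Germany .")
    stacked.embed(sentence)
    return sentence.tokens[0].get_embedding()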
class WordEmbeddings(TokenEmbeddings):
"""Standard static word embeddings, such as GloVe or FastText."""
def __init__(self, embeddings: str, field: str = None):
"""
Initializes classic word embeddings. Constructor downloads required files if not there.
:param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom
If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.
"""
self.embeddings = embeddings
old_base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/"
)
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/"
)
embeddings_path_v4 = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/"
)
embeddings_path_v4_1 = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4.1/"
cache_dir = Path("embeddings")
# GLOVE embeddings
if embeddings.lower() == "glove" or embeddings.lower() == "en-glove":
cached_path(f"{old_base_path}glove.gensim.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(
f"{old_base_path}glove.gensim", cache_dir=cache_dir
)
# TURIAN embeddings
elif embeddings.lower() == "turian" or embeddings.lower() == "en-turian":
cached_path(
f"{embeddings_path_v4_1}turian.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{embeddings_path_v4_1}turian", cache_dir=cache_dir
)
# KOMNINOS embeddings
elif embeddings.lower() == "extvec" or embeddings.lower() == "en-extvec":
cached_path(
f"{old_base_path}extvec.gensim.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{old_base_path}extvec.gensim", cache_dir=cache_dir
)
# FT-CRAWL embeddings
elif embeddings.lower() == "crawl" or embeddings.lower() == "en-crawl":
cached_path(
f"{base_path}en-fasttext-crawl-300d-1M.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}en-fasttext-crawl-300d-1M", cache_dir=cache_dir
)
# FT-CRAWL embeddings
elif (
embeddings.lower() == "news"
or embeddings.lower() == "en-news"
or embeddings.lower() == "en"
):
cached_path(
f"{base_path}en-fasttext-news-300d-1M.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}en-fasttext-news-300d-1M", cache_dir=cache_dir
)
# twitter embeddings
elif embeddings.lower() == "twitter" or embeddings.lower() == "en-twitter":
cached_path(
f"{old_base_path}twitter.gensim.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{old_base_path}twitter.gensim", cache_dir=cache_dir
)
# two-letter language code wiki embeddings
elif len(embeddings.lower()) == 2:
cached_path(
f"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M",
cache_dir=cache_dir,
)
# two-letter language code wiki embeddings
elif len(embeddings.lower()) == 7 and embeddings.endswith("-wiki"):
cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M",
cache_dir=cache_dir,
)
# two-letter language code crawl embeddings
elif len(embeddings.lower()) == 8 and embeddings.endswith("-crawl"):
cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M",
cache_dir=cache_dir,
)
elif not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
self.name: str = str(embeddings)
self.static_embeddings = True
if str(embeddings).endswith(".bin"):
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
str(embeddings), binary=True
)
else:
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load(
str(embeddings)
)
self.field = field
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
if word in self.precomputed_word_embeddings:
word_embedding = self.precomputed_word_embeddings[word]
elif word.lower() in self.precomputed_word_embeddings:
word_embedding = self.precomputed_word_embeddings[word.lower()]
elif (
re.sub(r"\d", "#", word.lower()) in self.precomputed_word_embeddings
):
word_embedding = self.precomputed_word_embeddings[
re.sub(r"\d", "#", word.lower())
]
elif (
re.sub(r"\d", "0", word.lower()) in self.precomputed_word_embeddings
):
word_embedding = self.precomputed_word_embeddings[
re.sub(r"\d", "0", word.lower())
]
else:
word_embedding = np.zeros(self.embedding_length, dtype="float")
word_embedding = torch.FloatTensor(word_embedding)
token.set_embedding(self.name, word_embedding)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return f"'{self.embeddings}'"
class OneHotEmbeddings(TokenEmbeddings):
"""One-hot encoded embeddings."""
def __init__(
self,
        corpus: Union[Corpus, List[Sentence]],
field: str = "text",
embedding_length: int = 300,
min_freq: int = 3,
):
super().__init__()
self.name = "one-hot"
self.static_embeddings = False
self.min_freq = min_freq
tokens = list(map((lambda s: s.tokens), corpus.train))
tokens = [token for sublist in tokens for token in sublist]
if field == "text":
most_common = Counter(list(map((lambda t: t.text), tokens))).most_common()
else:
most_common = Counter(
list(map((lambda t: t.get_tag(field)), tokens))
).most_common()
tokens = []
for token, freq in most_common:
if freq < min_freq:
break
tokens.append(token)
self.vocab_dictionary: Dictionary = Dictionary()
for token in tokens:
self.vocab_dictionary.add_item(token)
# max_tokens = 500
self.__embedding_length = embedding_length
print(self.vocab_dictionary.idx2item)
print(f"vocabulary size of {len(self.vocab_dictionary)}")
# model architecture
self.embedding_layer = torch.nn.Embedding(
len(self.vocab_dictionary), self.__embedding_length
)
torch.nn.init.xavier_uniform_(self.embedding_layer.weight)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
one_hot_sentences = []
for i, sentence in enumerate(sentences):
context_idxs = [
self.vocab_dictionary.get_idx_for_item(t.text) for t in sentence.tokens
]
one_hot_sentences.extend(context_idxs)
one_hot_sentences = torch.tensor(one_hot_sentences, dtype=torch.long).to(
flair.device
)
embedded = self.embedding_layer.forward(one_hot_sentences)
index = 0
for sentence in sentences:
for token in sentence:
embedding = embedded[index]
token.set_embedding(self.name, embedding)
index += 1
return sentences
def __str__(self):
return self.name
@property
def embedding_length(self) -> int:
return self.__embedding_length
def extra_repr(self):
return "min_freq={}".format(self.min_freq)
class BPEmbSerializable(BPEmb):
def __getstate__(self):
state = self.__dict__.copy()
# save the sentence piece model as binary file (not as path which may change)
state["spm_model_binary"] = open(self.model_file, mode="rb").read()
state["spm"] = None
return state
def __setstate__(self, state):
from bpemb.util import sentencepiece_load
model_file = self.model_tpl.format(lang=state["lang"], vs=state["vs"])
self.__dict__ = state
# write out the binary sentence piece model into the expected directory
self.cache_dir: Path = Path(flair.cache_root) / "embeddings"
if "spm_model_binary" in self.__dict__:
# if the model was saved as binary and it is not found on disk, write to appropriate path
if not os.path.exists(self.cache_dir / state["lang"]):
os.makedirs(self.cache_dir / state["lang"])
self.model_file = self.cache_dir / model_file
with open(self.model_file, "wb") as out:
out.write(self.__dict__["spm_model_binary"])
else:
# otherwise, use normal process and potentially trigger another download
self.model_file = self._load_file(model_file)
        # once the model is there, load it with sentence piece
state["spm"] = sentencepiece_load(self.model_file)
class BytePairEmbeddings(TokenEmbeddings):
def __init__(
self,
language: str,
dim: int = 50,
syllables: int = 100000,
cache_dir=Path(flair.cache_root) / "embeddings",
):
"""
Initializes BP embeddings. Constructor downloads required files if not there.
"""
self.name: str = f"bpe-{language}-{syllables}-{dim}"
self.static_embeddings = True
self.embedder = BPEmbSerializable(
lang=language, vs=syllables, dim=dim, cache_dir=cache_dir
)
self.__embedding_length: int = self.embedder.emb.vector_size * 2
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
if word.strip() == "":
# empty words get no embedding
token.set_embedding(
self.name, torch.zeros(self.embedding_length, dtype=torch.float)
)
else:
# all other words get embedded
embeddings = self.embedder.embed(word.lower())
embedding = np.concatenate(
(embeddings[0], embeddings[len(embeddings) - 1])
)
token.set_embedding(
self.name, torch.tensor(embedding, dtype=torch.float)
)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return "model={}".format(self.name)
class ELMoEmbeddings(TokenEmbeddings):
"""Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018."""
def __init__(
self, model: str = "original", options_file: str = None, weight_file: str = None
):
super().__init__()
try:
import allennlp.commands.elmo
except:
log.warning("-" * 100)
log.warning('ATTENTION! The library "allennlp" is not installed!')
log.warning(
'To use ELMoEmbeddings, please first install with "pip install allennlp"'
)
log.warning("-" * 100)
pass
self.name = "elmo-" + model
self.static_embeddings = True
if not options_file or not weight_file:
# the default model for ELMo is the 'original' model, which is very large
options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE
weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE
# alternatively, a small, medium or portuguese model can be selected by passing the appropriate mode name
if model == "small":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"
if model == "medium":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
if model == "pt" or model == "portuguese":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5"
if model == "pubmed":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5"
# put on Cuda if available
from flair import device
if re.fullmatch(r'cuda:[0-9]+', str(device)):
cuda_device = int(str(device).split(':')[-1])
elif str(device) == "cpu":
cuda_device = -1
else:
cuda_device = 0
self.ee = allennlp.commands.elmo.ElmoEmbedder(
options_file=options_file, weight_file=weight_file, cuda_device=cuda_device
)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
sentence_words: List[List[str]] = []
for sentence in sentences:
sentence_words.append([token.text for token in sentence])
embeddings = self.ee.embed_batch(sentence_words)
for i, sentence in enumerate(sentences):
sentence_embeddings = embeddings[i]
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
word_embedding = torch.cat(
[
torch.FloatTensor(sentence_embeddings[0, token_idx, :]),
torch.FloatTensor(sentence_embeddings[1, token_idx, :]),
torch.FloatTensor(sentence_embeddings[2, token_idx, :]),
],
0,
)
token.set_embedding(self.name, word_embedding)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
class ELMoTransformerEmbeddings(TokenEmbeddings):
"""Contextual word embeddings using word-level Transformer-based LM, as proposed in Peters et al., 2018."""
def __init__(self, model_file: str):
super().__init__()
try:
from allennlp.modules.token_embedders.bidirectional_language_model_token_embedder import (
BidirectionalLanguageModelTokenEmbedder,
)
from allennlp.data.token_indexers.elmo_indexer import (
ELMoTokenCharactersIndexer,
)
except:
log.warning("-" * 100)
log.warning('ATTENTION! The library "allennlp" is not installed!')
log.warning(
"To use ELMoTransformerEmbeddings, please first install a recent version from https://github.com/allenai/allennlp"
)
log.warning("-" * 100)
pass
self.name = "elmo-transformer"
self.static_embeddings = True
self.lm_embedder = BidirectionalLanguageModelTokenEmbedder(
archive_file=model_file,
dropout=0.2,
bos_eos_tokens=("<S>", "</S>"),
remove_bos_eos=True,
requires_grad=False,
)
self.lm_embedder = self.lm_embedder.to(device=flair.device)
self.vocab = self.lm_embedder._lm.vocab
self.indexer = ELMoTokenCharactersIndexer()
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# Avoid conflicts with flair's Token class
import allennlp.data.tokenizers.token as allen_nlp_token
indexer = self.indexer
vocab = self.vocab
for sentence in sentences:
character_indices = indexer.tokens_to_indices(
[allen_nlp_token.Token(token.text) for token in sentence], vocab, "elmo"
)["elmo"]
indices_tensor = torch.LongTensor([character_indices])
indices_tensor = indices_tensor.to(device=flair.device)
embeddings = self.lm_embedder(indices_tensor)[0].detach().cpu().numpy()
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
embedding = embeddings[token_idx]
word_embedding = torch.FloatTensor(embedding)
token.set_embedding(self.name, word_embedding)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
class TransformerXLEmbeddings(TokenEmbeddings):
def __init__(self, model: str = "transfo-xl-wt103"):
"""Transformer-XL embeddings, as proposed in Dai et al., 2019.
:param model: name of Transformer-XL model
"""
super().__init__()
if model not in TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys():
raise ValueError("Provided Transformer-XL model is not available.")
self.tokenizer = TransfoXLTokenizer.from_pretrained(model)
self.model = TransfoXLModel.from_pretrained(model)
self.name = model
self.static_embeddings = True
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.model.to(flair.device)
self.model.eval()
with torch.no_grad():
for sentence in sentences:
token_strings = [token.text for token in sentence.tokens]
indexed_tokens = self.tokenizer.convert_tokens_to_ids(token_strings)
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to(flair.device)
hidden_states, _ = self.model(tokens_tensor)
for token, token_idx in zip(
sentence.tokens, range(len(sentence.tokens))
):
token.set_embedding(self.name, hidden_states[0][token_idx])
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
class OpenAIGPTEmbeddings(TokenEmbeddings):
def __init__(
self, model: str = "openai-gpt", pooling_operation: str = "first_last"
):
"""OpenAI GPT embeddings, as proposed in Radford et al. 2018.
:param model: name of OpenAI GPT model
:param pooling_operation: defines pooling operation for subwords
"""
super().__init__()
if model not in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys():
raise ValueError("Provided OpenAI GPT model is not available.")
self.tokenizer = OpenAIGPTTokenizer.from_pretrained(model)
self.model = OpenAIGPTModel.from_pretrained(model)
self.name = model
self.static_embeddings = True
self.pooling_operation = pooling_operation
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.model.to(flair.device)
self.model.eval()
with torch.no_grad():
for sentence in sentences:
for token in sentence.tokens:
token_text = token.text
subwords = self.tokenizer.tokenize(token_text)
indexed_tokens = self.tokenizer.convert_tokens_to_ids(subwords)
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to(flair.device)
hidden_states = self.model(tokens_tensor)
if self.pooling_operation == "first":
# Use embedding of first subword
token.set_embedding(self.name, hidden_states[0][0])
elif self.pooling_operation == "last":
last_embedding = hidden_states[0][len(hidden_states[0]) - 1]
token.set_embedding(self.name, last_embedding)
elif self.pooling_operation == "first_last":
# Use embedding of first and last subword
first_embedding = hidden_states[0][0]
last_embedding = hidden_states[0][len(hidden_states[0]) - 1]
final_embedding = torch.cat([first_embedding, last_embedding])
token.set_embedding(self.name, final_embedding)
else:
# Otherwise, use mean over all subwords in token
all_embeddings = [
embedding.unsqueeze(0) for embedding in hidden_states[0]
]
mean = torch.mean(torch.cat(all_embeddings, dim=0), dim=0)
token.set_embedding(self.name, mean)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
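# Hedged sketch (illustrative): OpenAIGPTEmbeddings pools sub-word pieces back into one
# vector per token; "first_last" concatenates the first and last piece embeddings, "mean"
# averages all pieces (see _add_embeddings_internal above for the exact behaviour).
def _example_openai_gpt_embeddings(pooling_operation="first_last"):
    embedding = OpenAIGPTEmbeddings("openai-gpt", pooling_operation=pooling_operation)
    sentence = Sentence("tokenization splits words into pieces .")
    embedding.embed(sentence)
    return sentence.tokens[0].get_embedding()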
class CharacterEmbeddings(TokenEmbeddings):
"""Character embeddings of words, as proposed in Lample et al., 2016."""
def __init__(self, path_to_char_dict: str = None, char_embedding_dim: int = 25, hidden_size_char: int = 25):
"""Uses the default character dictionary if none provided."""
super().__init__()
self.name = "Char"
self.static_embeddings = False
# use list of common characters if none provided
if path_to_char_dict is None:
self.char_dictionary: Dictionary = Dictionary.load("common-chars")
else:
self.char_dictionary: Dictionary = Dictionary.load_from_file(
path_to_char_dict
)
self.char_embedding_dim: int = char_embedding_dim
self.hidden_size_char: int = hidden_size_char
self.char_embedding = torch.nn.Embedding(
len(self.char_dictionary.item2idx), self.char_embedding_dim
)
self.char_rnn = torch.nn.LSTM(
self.char_embedding_dim,
self.hidden_size_char,
num_layers=1,
bidirectional=True,
)
self.__embedding_length = self.char_embedding_dim * 2
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]):
for sentence in sentences:
tokens_char_indices = []
# translate words in sentence into ints using dictionary
for token in sentence.tokens:
char_indices = [
self.char_dictionary.get_idx_for_item(char) for char in token.text
]
tokens_char_indices.append(char_indices)
# sort words by length, for batching and masking
tokens_sorted_by_length = sorted(
tokens_char_indices, key=lambda p: len(p), reverse=True
)
d = {}
for i, ci in enumerate(tokens_char_indices):
for j, cj in enumerate(tokens_sorted_by_length):
if ci == cj:
d[j] = i
continue
chars2_length = [len(c) for c in tokens_sorted_by_length]
longest_token_in_sentence = max(chars2_length)
tokens_mask = torch.zeros(
(len(tokens_sorted_by_length), longest_token_in_sentence),
dtype=torch.long,
device=flair.device,
)
for i, c in enumerate(tokens_sorted_by_length):
tokens_mask[i, : chars2_length[i]] = torch.tensor(
c, dtype=torch.long, device=flair.device
)
# chars for rnn processing
chars = tokens_mask
character_embeddings = self.char_embedding(chars).transpose(0, 1)
packed = torch.nn.utils.rnn.pack_padded_sequence(
character_embeddings, chars2_length
)
lstm_out, self.hidden = self.char_rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
outputs = outputs.transpose(0, 1)
chars_embeds_temp = torch.zeros(
(outputs.size(0), outputs.size(2)),
dtype=torch.float,
device=flair.device,
)
for i, index in enumerate(output_lengths):
chars_embeds_temp[i] = outputs[i, index - 1]
character_embeddings = chars_embeds_temp.clone()
for i in range(character_embeddings.size(0)):
character_embeddings[d[i]] = chars_embeds_temp[i]
for token_number, token in enumerate(sentence.tokens):
token.set_embedding(self.name, character_embeddings[token_number])
def __str__(self):
return self.name
class FlairEmbeddings(TokenEmbeddings):
"""Contextual string embeddings of words, as proposed in Akbik et al., 2018."""
def __init__(
self,
model: str,
use_cache: bool = False,
cache_directory: Path = None,
chars_per_chunk: int = 512,
):
"""
initializes contextual string embeddings using a character-level language model.
:param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'
depending on which character language model is desired.
:param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will
not allow re-use of once computed embeddings that do not fit into memory
:param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache
is written to the provided directory.
:param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster but requires
more memory. Lower means slower but less memory.
"""
super().__init__()
cache_dir = Path("embeddings")
aws_path: str = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources"
self.PRETRAINED_MODEL_ARCHIVE_MAP = {
# multilingual models
"multi-forward": f"{aws_path}/embeddings-v0.4/lm-multi-forward-v0.1.pt",
"multi-backward": f"{aws_path}/embeddings-v0.4/lm-multi-backward-v0.1.pt",
"multi-forward-fast": f"{aws_path}/embeddings-v0.4/lm-multi-forward-fast-v0.1.pt",
"multi-backward-fast": f"{aws_path}/embeddings-v0.4/lm-multi-backward-fast-v0.1.pt",
# English models
"news-forward": f"{aws_path}/embeddings-v0.4.1/big-news-forward--h2048-l1-d0.05-lr30-0.25-20/news-forward-0.4.1.pt",
"news-backward": f"{aws_path}/embeddings-v0.4.1/big-news-backward--h2048-l1-d0.05-lr30-0.25-20/news-backward-0.4.1.pt",
"news-forward-fast": f"{aws_path}/embeddings/lm-news-english-forward-1024-v0.2rc.pt",
"news-backward-fast": f"{aws_path}/embeddings/lm-news-english-backward-1024-v0.2rc.pt",
"mix-forward": f"{aws_path}/embeddings/lm-mix-english-forward-v0.2rc.pt",
"mix-backward": f"{aws_path}/embeddings/lm-mix-english-backward-v0.2rc.pt",
# Arabic
"ar-forward": f"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-forward-v0.1.pt",
"ar-backward": f"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-backward-v0.1.pt",
# Bulgarian
"bg-forward-fast": f"{aws_path}/embeddings-v0.3/lm-bg-small-forward-v0.1.pt",
"bg-backward-fast": f"{aws_path}/embeddings-v0.3/lm-bg-small-backward-v0.1.pt",
"bg-forward": f"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-forward-v0.1.pt",
"bg-backward": f"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-backward-v0.1.pt",
# Czech
"cs-forward": f"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-forward-v0.1.pt",
"cs-backward": f"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-backward-v0.1.pt",
"cs-v0-forward": f"{aws_path}/embeddings-v0.4/lm-cs-large-forward-v0.1.pt",
"cs-v0-backward": f"{aws_path}/embeddings-v0.4/lm-cs-large-backward-v0.1.pt",
# Danish
"da-forward": f"{aws_path}/embeddings-stefan-it/lm-da-opus-large-forward-v0.1.pt",
"da-backward": f"{aws_path}/embeddings-stefan-it/lm-da-opus-large-backward-v0.1.pt",
# German
"de-forward": f"{aws_path}/embeddings/lm-mix-german-forward-v0.2rc.pt",
"de-backward": f"{aws_path}/embeddings/lm-mix-german-backward-v0.2rc.pt",
"de-historic-ha-forward": f"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-forward-v0.1.pt",
"de-historic-ha-backward": f"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-backward-v0.1.pt",
"de-historic-wz-forward": f"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-forward-v0.1.pt",
"de-historic-wz-backward": f"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-backward-v0.1.pt",
# Spanish
"es-forward": f"{aws_path}/embeddings-v0.4/language_model_es_forward_long/lm-es-forward.pt",
"es-backward": f"{aws_path}/embeddings-v0.4/language_model_es_backward_long/lm-es-backward.pt",
"es-forward-fast": f"{aws_path}/embeddings-v0.4/language_model_es_forward/lm-es-forward-fast.pt",
"es-backward-fast": f"{aws_path}/embeddings-v0.4/language_model_es_backward/lm-es-backward-fast.pt",
# Basque
"eu-forward": f"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-forward-v0.1.pt",
"eu-backward": f"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-backward-v0.1.pt",
"eu-v0-forward": f"{aws_path}/embeddings-v0.4/lm-eu-large-forward-v0.1.pt",
"eu-v0-backward": f"{aws_path}/embeddings-v0.4/lm-eu-large-backward-v0.1.pt",
# Persian
"fa-forward": f"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-forward-v0.1.pt",
"fa-backward": f"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-backward-v0.1.pt",
# Finnish
"fi-forward": f"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-forward-v0.1.pt",
"fi-backward": f"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-backward-v0.1.pt",
# French
"fr-forward": f"{aws_path}/embeddings/lm-fr-charlm-forward.pt",
"fr-backward": f"{aws_path}/embeddings/lm-fr-charlm-backward.pt",
# Hebrew
"he-forward": f"{aws_path}/embeddings-stefan-it/lm-he-opus-large-forward-v0.1.pt",
"he-backward": f"{aws_path}/embeddings-stefan-it/lm-he-opus-large-backward-v0.1.pt",
# Hindi
"hi-forward": f"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-forward-v0.1.pt",
"hi-backward": f"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-backward-v0.1.pt",
# Croatian
"hr-forward": f"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-forward-v0.1.pt",
"hr-backward": f"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-backward-v0.1.pt",
# Indonesian
"id-forward": f"{aws_path}/embeddings-stefan-it/lm-id-opus-large-forward-v0.1.pt",
"id-backward": f"{aws_path}/embeddings-stefan-it/lm-id-opus-large-backward-v0.1.pt",
# Italian
"it-forward": f"{aws_path}/embeddings-stefan-it/lm-it-opus-large-forward-v0.1.pt",
"it-backward": f"{aws_path}/embeddings-stefan-it/lm-it-opus-large-backward-v0.1.pt",
# Japanese
"ja-forward": f"{aws_path}/embeddings-v0.4.1/lm__char-forward__ja-wikipedia-3GB/japanese-forward.pt",
"ja-backward": f"{aws_path}/embeddings-v0.4.1/lm__char-backward__ja-wikipedia-3GB/japanese-backward.pt",
# Dutch
"nl-forward": f"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-forward-v0.1.pt",
"nl-backward": f"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-backward-v0.1.pt",
"nl-v0-forward": f"{aws_path}/embeddings-v0.4/lm-nl-large-forward-v0.1.pt",
"nl-v0-backward": f"{aws_path}/embeddings-v0.4/lm-nl-large-backward-v0.1.pt",
# Norwegian
"no-forward": f"{aws_path}/embeddings-stefan-it/lm-no-opus-large-forward-v0.1.pt",
"no-backward": f"{aws_path}/embeddings-stefan-it/lm-no-opus-large-backward-v0.1.pt",
# Polish
"pl-forward": f"{aws_path}/embeddings/lm-polish-forward-v0.2.pt",
"pl-backward": f"{aws_path}/embeddings/lm-polish-backward-v0.2.pt",
"pl-opus-forward": f"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-forward-v0.1.pt",
"pl-opus-backward": f"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-backward-v0.1.pt",
# Portuguese
"pt-forward": f"{aws_path}/embeddings-v0.4/lm-pt-forward.pt",
"pt-backward": f"{aws_path}/embeddings-v0.4/lm-pt-backward.pt",
# Pubmed
"pubmed-forward": f"{aws_path}/embeddings-v0.4.1/pubmed-2015-fw-lm.pt",
"pubmed-backward": f"{aws_path}/embeddings-v0.4.1/pubmed-2015-bw-lm.pt",
# Slovenian
"sl-forward": f"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-forward-v0.1.pt",
"sl-backward": f"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-backward-v0.1.pt",
"sl-v0-forward": f"{aws_path}/embeddings-v0.3/lm-sl-large-forward-v0.1.pt",
"sl-v0-backward": f"{aws_path}/embeddings-v0.3/lm-sl-large-backward-v0.1.pt",
# Swedish
"sv-forward": f"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-forward-v0.1.pt",
"sv-backward": f"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-backward-v0.1.pt",
"sv-v0-forward": f"{aws_path}/embeddings-v0.4/lm-sv-large-forward-v0.1.pt",
"sv-v0-backward": f"{aws_path}/embeddings-v0.4/lm-sv-large-backward-v0.1.pt",
}
# load model if in pretrained model map
if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:
base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]
model = cached_path(base_path, cache_dir=cache_dir)
elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:
base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[
replace_with_language_code(model)
]
model = cached_path(base_path, cache_dir=cache_dir)
elif not Path(model).exists():
raise ValueError(
f'The given model "{model}" is not available or is not a valid path.'
)
self.name = str(model)
self.static_embeddings = True
from flair.models import LanguageModel
self.lm = LanguageModel.load_language_model(model)
self.is_forward_lm: bool = self.lm.is_forward_lm
self.chars_per_chunk: int = chars_per_chunk
# initialize cache if use_cache set
self.cache = None
if use_cache:
cache_path = (
Path(f"{self.name}-tmp-cache.sqllite")
if not cache_directory
else cache_directory / f"{self.name}-tmp-cache.sqllite"
)
from sqlitedict import SqliteDict
self.cache = SqliteDict(str(cache_path), autocommit=True)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
# set to eval mode
self.eval()
def train(self, mode=True):
pass
def __getstate__(self):
# Copy the object's state from self.__dict__ which contains
# all our instance attributes. Always use the dict.copy()
# method to avoid modifying the original state.
state = self.__dict__.copy()
# Remove the unpicklable entries.
state["cache"] = None
return state
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# make compatible with serialized models
if "chars_per_chunk" not in self.__dict__:
self.chars_per_chunk = 512
# if cache is used, try setting embeddings from cache first
if "cache" in self.__dict__ and self.cache is not None:
# try populating embeddings from cache
all_embeddings_retrieved_from_cache: bool = True
for sentence in sentences:
key = sentence.to_tokenized_string()
embeddings = self.cache.get(key)
if not embeddings:
all_embeddings_retrieved_from_cache = False
break
else:
for token, embedding in zip(sentence, embeddings):
token.set_embedding(self.name, torch.FloatTensor(embedding))
if all_embeddings_retrieved_from_cache:
return sentences
with torch.no_grad():
# if this is not possible, use LM to generate embedding. First, get text sentences
text_sentences = [sentence.to_tokenized_string() for sentence in sentences]
longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))
# pad strings with whitespaces to longest sentence
sentences_padded: List[str] = []
append_padded_sentence = sentences_padded.append
start_marker = "\n"
end_marker = " "
extra_offset = len(start_marker)
for sentence_text in text_sentences:
pad_by = longest_character_sequence_in_batch - len(sentence_text)
if self.is_forward_lm:
padded = "{}{}{}{}".format(
start_marker, sentence_text, end_marker, pad_by * " "
)
append_padded_sentence(padded)
else:
padded = "{}{}{}{}".format(
start_marker, sentence_text[::-1], end_marker, pad_by * " "
)
append_padded_sentence(padded)
# get hidden states from language model
all_hidden_states_in_lm = self.lm.get_representation(
sentences_padded, self.chars_per_chunk
)
# take first or last hidden states from language model as word representation
for i, sentence in enumerate(sentences):
sentence_text = sentence.to_tokenized_string()
offset_forward: int = extra_offset
offset_backward: int = len(sentence_text) + extra_offset
for token in sentence.tokens:
offset_forward += len(token.text)
if self.is_forward_lm:
offset = offset_forward
else:
offset = offset_backward
embedding = all_hidden_states_in_lm[offset, i, :]
# if self.tokenized_lm or token.whitespace_after:
offset_forward += 1
offset_backward -= 1
offset_backward -= len(token.text)
token.set_embedding(self.name, embedding.clone().detach())
all_hidden_states_in_lm = None
if "cache" in self.__dict__ and self.cache is not None:
for sentence in sentences:
self.cache[sentence.to_tokenized_string()] = [
token._embeddings[self.name].tolist() for token in sentence
]
return sentences
def __str__(self):
return self.name
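# Hedged sketch (illustrative): instantiate the contextual string embeddings defined above
# with one of the keys from PRETRAINED_MODEL_ARCHIVE_MAP; the checkpoint is downloaded and
# cached on first use, and a smaller chars_per_chunk trades speed for lower peak memory.
def _example_flair_embeddings():
    embedding = FlairEmbeddings("news-forward-fast", chars_per_chunk=256)
    sentence = Sentence("contextual string embeddings .")
    embedding.embed(sentence)
    return sentence.tokens[0].get_embedding()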
class PooledFlairEmbeddings(TokenEmbeddings):
def __init__(
self,
contextual_embeddings: Union[str, FlairEmbeddings],
pooling: str = "min",
only_capitalized: bool = False,
**kwargs,
):
super().__init__()
# use the character language model embeddings as basis
if type(contextual_embeddings) is str:
self.context_embeddings: FlairEmbeddings = FlairEmbeddings(
contextual_embeddings, **kwargs
)
else:
self.context_embeddings: FlairEmbeddings = contextual_embeddings
# length is twice the original character LM embedding length
self.embedding_length = self.context_embeddings.embedding_length * 2
self.name = self.context_embeddings.name + "-context"
# these fields are for the embedding memory
self.word_embeddings = {}
self.word_count = {}
# whether to add only capitalized words to memory (faster runtime and lower memory consumption)
self.only_capitalized = only_capitalized
# we re-compute embeddings dynamically at each epoch
self.static_embeddings = False
# set the memory method
self.pooling = pooling
if pooling == "mean":
self.aggregate_op = torch.add
elif pooling == "fade":
self.aggregate_op = torch.add
elif pooling == "max":
self.aggregate_op = torch.max
elif pooling == "min":
self.aggregate_op = torch.min
def train(self, mode=True):
super().train(mode=mode)
if mode:
# memory is wiped each time we do a training run
print("train mode resetting embeddings")
self.word_embeddings = {}
self.word_count = {}
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name]
local_embedding = local_embedding.to(flair.device)
if token.text[0].isupper() or not self.only_capitalized:
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
aggregated_embedding = self.aggregate_op(
self.word_embeddings[token.text], local_embedding
)
if self.pooling == "fade":
aggregated_embedding /= 2
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
@property
def embedding_length(self) -> int:
return self.__embedding_length
class BertEmbeddings(TokenEmbeddings):
def __init__(
self,
bert_model_or_path: str = "bert-base-uncased",
layers: str = "-1,-2,-3,-4",
pooling_operation: str = "first",
):
"""
Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018.
:param bert_model_or_path: name of a pretrained BERT model or path to a directory containing a custom model,
configuration file and vocab file (expected file names: bert_config.json, pytorch_model.bin/model.chkpt, vocab.txt)
:param layers: string indicating which layers to take for embedding
:param pooling_operation: how to get from token piece embeddings to token embedding. Either pool them and take
the average ('mean') or use the first word piece embedding as token embedding ('first')
"""
super().__init__()
self.tokenizer = BertTokenizer.from_pretrained(bert_model_or_path)
self.model = BertModel.from_pretrained(bert_model_or_path)
self.layer_indexes = [int(x) for x in layers.split(",")]
self.pooling_operation = pooling_operation
self.name = str(bert_model_or_path)
self.static_embeddings = True
class BertInputFeatures(object):
"""Private helper class for holding BERT-formatted features"""
def __init__(
self,
unique_id,
tokens,
input_ids,
input_mask,
input_type_ids,
token_subtoken_count,
):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
self.token_subtoken_count = token_subtoken_count
def _convert_sentences_to_features(
self, sentences, max_sequence_length: int
) -> List["BertEmbeddings.BertInputFeatures"]:
max_sequence_length = max_sequence_length + 2
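# the two extra positions account for the [CLS] and [SEP] special tokens added below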
features: List[BertEmbeddings.BertInputFeatures] = []
for (sentence_index, sentence) in enumerate(sentences):
bert_tokenization: List[str] = []
token_subtoken_count: Dict[int, int] = {}
for token in sentence:
subtokens = self.tokenizer.tokenize(token.text)
bert_tokenization.extend(subtokens)
token_subtoken_count[token.idx] = len(subtokens)
if len(bert_tokenization) > max_sequence_length - 2:
bert_tokenization = bert_tokenization[0 : (max_sequence_length - 2)]
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in bert_tokenization:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_sequence_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
features.append(
BertEmbeddings.BertInputFeatures(
unique_id=sentence_index,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
token_subtoken_count=token_subtoken_count,
)
)
return features
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added,
updates only if embeddings are non-static."""
# first, find longest sentence in batch
longest_sentence_in_batch: int = len(
max(
[
self.tokenizer.tokenize(sentence.to_tokenized_string())
for sentence in sentences
],
key=len,
)
)
# prepare id maps for BERT model
features = self._convert_sentences_to_features(
sentences, longest_sentence_in_batch
)
all_input_ids = torch.LongTensor([f.input_ids for f in features]).to(
flair.device
)
all_input_masks = torch.LongTensor([f.input_mask for f in features]).to(
flair.device
)
# put encoded batch through BERT model to get all hidden states of all encoder layers
self.model.to(flair.device)
self.model.eval()
all_encoder_layers, _ = self.model(
all_input_ids, token_type_ids=None, attention_mask=all_input_masks
)
with torch.no_grad():
for sentence_index, sentence in enumerate(sentences):
feature = features[sentence_index]
# get aggregated embeddings for each BERT-subtoken in sentence
subtoken_embeddings = []
for token_index, _ in enumerate(feature.tokens):
all_layers = []
for layer_index in self.layer_indexes:
layer_output = (
all_encoder_layers[int(layer_index)]
.detach()
.cpu()[sentence_index]
)
all_layers.append(layer_output[token_index])
subtoken_embeddings.append(torch.cat(all_layers))
# get the current sentence object
token_idx = 0
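# subtoken index 0 holds the [CLS] embedding, so token_idx is advanced before the first lookup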
for token in sentence:
# add concatenated embedding to sentence
token_idx += 1
if self.pooling_operation == "first":
# use first subword embedding if pooling operation is 'first'
token.set_embedding(self.name, subtoken_embeddings[token_idx])
else:
# otherwise, do a mean over all subwords in token
embeddings = subtoken_embeddings[
token_idx : token_idx
+ feature.token_subtoken_count[token.idx]
]
embeddings = [
embedding.unsqueeze(0) for embedding in embeddings
]
mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)
token.set_embedding(self.name, mean)
token_idx += feature.token_subtoken_count[token.idx] - 1
return sentences
@property
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
return len(self.layer_indexes) * self.model.config.hidden_size
class CharLMEmbeddings(TokenEmbeddings):
"""Contextual string embeddings of words, as proposed in Akbik et al., 2018. """
@deprecated(version="0.4", reason="Use 'FlairEmbeddings' instead.")
def __init__(
self,
model: str,
detach: bool = True,
use_cache: bool = False,
cache_directory: Path = None,
):
"""
initializes contextual string embeddings using a character-level language model.
:param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'
depending on which character language model is desired.
:param detach: if set to False, the gradient will propagate into the language model. this dramatically slows down
training and often leads to worse results, so not recommended.
:param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will
not allow re-use of once computed embeddings that do not fit into memory
:param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache
is written to the provided directory.
"""
super().__init__()
cache_dir = Path("embeddings")
# multilingual forward (English, German, French, Italian, Dutch, Polish)
if model.lower() == "multi-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# multilingual backward (English, German, French, Italian, Dutch, Polish)
elif model.lower() == "multi-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-forward
elif model.lower() == "news-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-backward
elif model.lower() == "news-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-forward-fast
elif model.lower() == "news-forward-fast":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-1024-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-backward-fast
elif model.lower() == "news-backward-fast":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-1024-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-english-forward
elif model.lower() == "mix-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-english-backward
elif model.lower() == "mix-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-german-forward
elif model.lower() == "german-forward" or model.lower() == "de-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-german-backward
elif model.lower() == "german-backward" or model.lower() == "de-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# common crawl Polish forward
elif model.lower() == "polish-forward" or model.lower() == "pl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-forward-v0.2.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# common crawl Polish backward
elif model.lower() == "polish-backward" or model.lower() == "pl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-backward-v0.2.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Slovenian forward
elif model.lower() == "slovenian-forward" or model.lower() == "sl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Slovenian backward
elif model.lower() == "slovenian-backward" or model.lower() == "sl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Bulgarian forward
elif model.lower() == "bulgarian-forward" or model.lower() == "bg-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Bulgarian backward
elif model.lower() == "bulgarian-backward" or model.lower() == "bg-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Dutch forward
elif model.lower() == "dutch-forward" or model.lower() == "nl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Dutch backward
elif model.lower() == "dutch-backward" or model.lower() == "nl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Swedish forward
elif model.lower() == "swedish-forward" or model.lower() == "sv-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Swedish backward
elif model.lower() == "swedish-backward" or model.lower() == "sv-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# French forward
elif model.lower() == "french-forward" or model.lower() == "fr-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-forward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# French backward
elif model.lower() == "french-backward" or model.lower() == "fr-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-backward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Czech forward
elif model.lower() == "czech-forward" or model.lower() == "cs-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Czech backward
elif model.lower() == "czech-backward" or model.lower() == "cs-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Portuguese forward
elif model.lower() == "portuguese-forward" or model.lower() == "pt-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-forward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Portuguese backward
elif model.lower() == "portuguese-backward" or model.lower() == "pt-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-backward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
elif not Path(model).exists():
raise ValueError(
f'The given model "{model}" is not available or is not a valid path.'
)
self.name = str(model)
self.static_embeddings = detach
from flair.models import LanguageModel
self.lm = LanguageModel.load_language_model(model)
self.detach = detach
self.is_forward_lm: bool = self.lm.is_forward_lm
# initialize cache if use_cache set
self.cache = None
if use_cache:
cache_path = (
Path(f"{self.name}-tmp-cache.sqllite")
if not cache_directory
else cache_directory / f"{self.name}-tmp-cache.sqllite"
)
from sqlitedict import SqliteDict
self.cache = SqliteDict(str(cache_path), autocommit=True)
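# the cache maps a sentence's tokenized string to a list of per-token embedding vectors and is
# persisted in an SQLite file so embeddings can be re-used across runs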
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
# set to eval mode
self.eval()
def train(self, mode=True):
pass
def __getstate__(self):
# Copy the object's state from self.__dict__ which contains
# all our instance attributes. Always use the dict.copy()
# method to avoid modifying the original state.
state = self.__dict__.copy()
# Remove the unpicklable entries.
state["cache"] = None
return state
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# if cache is used, try setting embeddings from cache first
if "cache" in self.__dict__ and self.cache is not None:
# try populating embeddings from cache
all_embeddings_retrieved_from_cache: bool = True
for sentence in sentences:
key = sentence.to_tokenized_string()
embeddings = self.cache.get(key)
if not embeddings:
all_embeddings_retrieved_from_cache = False
break
else:
for token, embedding in zip(sentence, embeddings):
token.set_embedding(self.name, torch.FloatTensor(embedding))
if all_embeddings_retrieved_from_cache:
return sentences
# if this is not possible, use LM to generate embedding. First, get text sentences
text_sentences = [sentence.to_tokenized_string() for sentence in sentences]
longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))
# pad strings with whitespaces to longest sentence
sentences_padded: List[str] = []
append_padded_sentence = sentences_padded.append
end_marker = " "
extra_offset = 1
for sentence_text in text_sentences:
pad_by = longest_character_sequence_in_batch - len(sentence_text)
if self.is_forward_lm:
padded = "\n{}{}{}".format(sentence_text, end_marker, pad_by * " ")
append_padded_sentence(padded)
else:
padded = "\n{}{}{}".format(
sentence_text[::-1], end_marker, pad_by * " "
)
append_padded_sentence(padded)
# get hidden states from language model
all_hidden_states_in_lm = self.lm.get_representation(sentences_padded)
# take first or last hidden states from language model as word representation
for i, sentence in enumerate(sentences):
sentence_text = sentence.to_tokenized_string()
offset_forward: int = extra_offset
offset_backward: int = len(sentence_text) + extra_offset
for token in sentence.tokens:
offset_forward += len(token.text)
if self.is_forward_lm:
offset = offset_forward
else:
offset = offset_backward
embedding = all_hidden_states_in_lm[offset, i, :]
# if self.tokenized_lm or token.whitespace_after:
offset_forward += 1
offset_backward -= 1
offset_backward -= len(token.text)
token.set_embedding(self.name, embedding)
if "cache" in self.__dict__ and self.cache is not None:
for sentence in sentences:
self.cache[sentence.to_tokenized_string()] = [
token._embeddings[self.name].tolist() for token in sentence
]
return sentences
def __str__(self):
return self.name
class DocumentMeanEmbeddings(DocumentEmbeddings):
@deprecated(
version="0.3.1",
reason="The functionality of this class is moved to 'DocumentPoolEmbeddings'",
)
def __init__(self, token_embeddings: List[TokenEmbeddings]):
"""The constructor takes a list of embeddings to be combined."""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(
embeddings=token_embeddings
)
self.name: str = "document_mean"
self.__embedding_length: int = self.embeddings.embedding_length
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates
only if embeddings are non-static."""
everything_embedded: bool = True
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
for sentence in sentences:
if self.name not in sentence._embeddings.keys():
everything_embedded = False
if not everything_embedded:
self.embeddings.embed(sentences)
for sentence in sentences:
word_embeddings = []
for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)
mean_embedding = torch.mean(word_embeddings, 0)
sentence.set_embedding(self.name, mean_embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
class DocumentPoolEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
fine_tune_mode="linear",
pooling: str = "mean",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param pooling: a string which can be any value from ['mean', 'max', 'min']
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.__embedding_length = self.embeddings.embedding_length
# optional fine-tuning on top of embedding layer
self.fine_tune_mode = fine_tune_mode
if self.fine_tune_mode in ["nonlinear", "linear"]:
self.embedding_flex = torch.nn.Linear(
self.embedding_length, self.embedding_length, bias=False
)
self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))
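# the reprojection weights start as the identity matrix, so fine-tuning begins as a no-op on the stacked embeddings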
if self.fine_tune_mode in ["nonlinear"]:
self.embedding_flex_nonlinear = torch.nn.ReLU(self.embedding_length)
self.embedding_flex_nonlinear_map = torch.nn.Linear(
self.embedding_length, self.embedding_length
)
self.to(flair.device)
self.pooling = pooling
if self.pooling == "mean":
self.pool_op = torch.mean
elif pooling == "max":
self.pool_op = torch.max
elif pooling == "min":
self.pool_op = torch.min
else:
raise ValueError(f"Pooling operation for {self.mode!r} is not defined")
self.name: str = f"document_{self.pooling}"
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates
only if embeddings are non-static."""
# if only one sentence is passed, convert to list of sentence
if isinstance(sentences, Sentence):
sentences = [sentences]
self.embeddings.embed(sentences)
for sentence in sentences:
word_embeddings = []
for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)
if self.fine_tune_mode in ["nonlinear", "linear"]:
word_embeddings = self.embedding_flex(word_embeddings)
if self.fine_tune_mode in ["nonlinear"]:
word_embeddings = self.embedding_flex_nonlinear(word_embeddings)
word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)
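# torch.mean along a dimension returns a tensor, while torch.max and torch.min return a
# (values, indices) namedtuple, hence the unpacking in the non-mean branch below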
if self.pooling == "mean":
pooled_embedding = self.pool_op(word_embeddings, 0)
else:
pooled_embedding, _ = self.pool_op(word_embeddings, 0)
sentence.set_embedding(self.name, pooled_embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
def extra_repr(self):
return f"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}"
class DocumentRNNEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
rnn_type="GRU",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the rnn
:param rnn_layers: the number of layers for the rnn
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the rnn or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
:param rnn_type: 'GRU' or 'LSTM'
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.rnn_type = rnn_type
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.static_embeddings = False
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
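# a bidirectional RNN doubles the output size, and the final document embedding concatenates
# the outputs at the first and last time step, doubling it again (hence factor 4)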
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
# bidirectional RNN on top of embedding layer
if rnn_type == "LSTM":
self.rnn = torch.nn.LSTM(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
else:
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
self.name = "document_" + self.rnn._get_name()
# dropouts
if locked_dropout > 0.0:
self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
else:
self.dropout = torch.nn.Dropout(dropout)
self.use_word_dropout: bool = word_dropout > 0.0
if self.use_word_dropout:
self.word_dropout = WordDropout(word_dropout)
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
sentences.sort(key=lambda x: len(x), reverse=True)
self.embeddings.embed(sentences)
# first, sort sentences by number of tokens
longest_token_sequence_in_batch: int = len(sentences[0])
all_sentence_tensors = []
lengths: List[int] = []
# go through each sentence in batch
for i, sentence in enumerate(sentences):
lengths.append(len(sentence.tokens))
word_embeddings = []
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
word_embeddings.append(token.get_embedding().unsqueeze(0))
# PADDING: pad shorter sentences out
for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):
word_embeddings.append(
torch.zeros(
self.length_of_all_token_embeddings, dtype=torch.float
).unsqueeze(0)
)
word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)
sentence_states = word_embeddings_tensor
# ADD TO SENTENCE LIST: add the representation
all_sentence_tensors.append(sentence_states.unsqueeze(1))
# --------------------------------------------------------------------
# GET REPRESENTATION FOR ENTIRE BATCH
# --------------------------------------------------------------------
sentence_tensor = torch.cat(all_sentence_tensors, 1)
# --------------------------------------------------------------------
# FF PART
# --------------------------------------------------------------------
# use word dropout if set
if self.use_word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
sentence_tensor = self.dropout(sentence_tensor)
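# pack_padded_sequence expects sequences sorted by decreasing length, which is why the batch was sorted above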
packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)
self.rnn.flatten_parameters()
rnn_out, hidden = self.rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(rnn_out)
outputs = self.dropout(outputs)
# --------------------------------------------------------------------
# EXTRACT EMBEDDINGS FROM RNN
# --------------------------------------------------------------------
for sentence_no, length in enumerate(lengths):
last_rep = outputs[length - 1, sentence_no]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[0, sentence_no]
embedding = torch.cat([first_rep, last_rep], 0)
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
@deprecated(
version="0.4",
reason="The functionality of this class is moved to 'DocumentRNNEmbeddings'",
)
class DocumentLSTMEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the lstm
:param rnn_layers: the number of layers for the lstm
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the lstm or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional lstm or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.name = "document_lstm"
self.static_embeddings = False
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
# bidirectional RNN (GRU) on top of embedding layer
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
# dropouts
if locked_dropout > 0.0:
self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
else:
self.dropout = torch.nn.Dropout(dropout)
self.use_word_dropout: bool = word_dropout > 0.0
if self.use_word_dropout:
self.word_dropout = WordDropout(word_dropout)
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
sentences.sort(key=lambda x: len(x), reverse=True)
self.embeddings.embed(sentences)
# first, sort sentences by number of tokens
longest_token_sequence_in_batch: int = len(sentences[0])
all_sentence_tensors = []
lengths: List[int] = []
# go through each sentence in batch
for i, sentence in enumerate(sentences):
lengths.append(len(sentence.tokens))
word_embeddings = []
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
word_embeddings.append(token.get_embedding().unsqueeze(0))
# PADDING: pad shorter sentences out
for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):
word_embeddings.append(
torch.zeros(
self.length_of_all_token_embeddings, dtype=torch.float
).unsqueeze(0)
)
word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)
sentence_states = word_embeddings_tensor
# ADD TO SENTENCE LIST: add the representation
all_sentence_tensors.append(sentence_states.unsqueeze(1))
# --------------------------------------------------------------------
# GET REPRESENTATION FOR ENTIRE BATCH
# --------------------------------------------------------------------
sentence_tensor = torch.cat(all_sentence_tensors, 1)
# --------------------------------------------------------------------
# FF PART
# --------------------------------------------------------------------
# use word dropout if set
if self.use_word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
sentence_tensor = self.dropout(sentence_tensor)
packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)
self.rnn.flatten_parameters()
lstm_out, hidden = self.rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
outputs = self.dropout(outputs)
# --------------------------------------------------------------------
# EXTRACT EMBEDDINGS FROM LSTM
# --------------------------------------------------------------------
for sentence_no, length in enumerate(lengths):
last_rep = outputs[length - 1, sentence_no]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[0, sentence_no]
embedding = torch.cat([first_rep, last_rep], 0)
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
class DocumentLMEmbeddings(DocumentEmbeddings):
def __init__(self, flair_embeddings: List[FlairEmbeddings], detach: bool = True):
super().__init__()
self.embeddings = flair_embeddings
self.name = "document_lm"
self.static_embeddings = detach
self.detach = detach
self._embedding_length: int = sum(
embedding.embedding_length for embedding in flair_embeddings
)
@property
def embedding_length(self) -> int:
return self._embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]):
if type(sentences) is Sentence:
sentences = [sentences]
for embedding in self.embeddings:
embedding.embed(sentences)
# iterate over sentences
for sentence in sentences:
# if its a forward LM, take last state
if embedding.is_forward_lm:
sentence.set_embedding(
embedding.name,
sentence[len(sentence) - 1]._embeddings[embedding.name],
)
else:
sentence.set_embedding(
embedding.name, sentence[0]._embeddings[embedding.name]
)
return sentences
class NILCEmbeddings(WordEmbeddings):
def __init__(self, embeddings: str, model: str = "skip", size: int = 100):
"""
Initializes Portuguese classic word embeddings trained by NILC Lab (http://www.nilc.icmc.usp.br/embeddings).
Constructor downloads required files if not there.
:param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'
:param model: one of: 'skip' or 'cbow'. This is not applicable to glove.
:param size: one of: 50, 100, 300, 600 or 1000.
"""
base_path = "http://143.107.183.175:22980/download.php?file=embeddings/"
cache_dir = Path("embeddings") / embeddings.lower()
# GLOVE embeddings
if embeddings.lower() == "glove":
cached_path(
f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir
)
elif embeddings.lower() in ["fasttext", "wang2vec", "word2vec"]:
cached_path(
f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir
)
elif not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
self.name: str = str(embeddings)
self.static_embeddings = True
log.info("Reading embeddings from %s" % embeddings)
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
open_inside_zip(str(embeddings), cache_dir=cache_dir)
)
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
super(TokenEmbeddings, self).__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def __str__(self):
return self.name
def replace_with_language_code(string: str):
string = string.replace("arabic-", "ar-")
string = string.replace("basque-", "eu-")
string = string.replace("bulgarian-", "bg-")
string = string.replace("croatian-", "hr-")
string = string.replace("czech-", "cs-")
string = string.replace("danish-", "da-")
string = string.replace("dutch-", "nl-")
string = string.replace("farsi-", "fa-")
string = string.replace("persian-", "fa-")
string = string.replace("finnish-", "fi-")
string = string.replace("french-", "fr-")
string = string.replace("german-", "de-")
string = string.replace("hebrew-", "he-")
string = string.replace("hindi-", "hi-")
string = string.replace("indonesian-", "id-")
string = string.replace("italian-", "it-")
string = string.replace("japanese-", "ja-")
string = string.replace("norwegian-", "no")
string = string.replace("polish-", "pl-")
string = string.replace("portuguese-", "pt-")
string = string.replace("slovenian-", "sl-")
string = string.replace("spanish-", "es-")
string = string.replace("swedish-", "sv-")
return string
import os
import re
import logging
from abc import abstractmethod
from collections import Counter
from pathlib import Path
from typing import List, Union, Dict
import gensim
import numpy as np
import torch
from bpemb import BPEmb
from deprecated import deprecated
from pytorch_pretrained_bert import (
BertTokenizer,
BertModel,
TransfoXLTokenizer,
TransfoXLModel,
OpenAIGPTModel,
OpenAIGPTTokenizer,
)
from pytorch_pretrained_bert.modeling_openai import (
PRETRAINED_MODEL_ARCHIVE_MAP as OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from pytorch_pretrained_bert.modeling_transfo_xl import (
PRETRAINED_MODEL_ARCHIVE_MAP as TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
)
import flair
from flair.data import Corpus
from .nn import LockedDropout, WordDropout
from .data import Dictionary, Token, Sentence
from .file_utils import cached_path, open_inside_zip
log = logging.getLogger("flair")
class Embeddings(torch.nn.Module):
"""Abstract base class for all embeddings. Every new type of embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
@abstractmethod
def embedding_type(self) -> str:
pass
def embed(self, sentences: Union[Sentence, List[Sentence]]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added, updates only if embeddings
are non-static."""
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
everything_embedded: bool = True
if self.embedding_type == "word-level":
for sentence in sentences:
for token in sentence.tokens:
if self.name not in token._embeddings.keys():
everything_embedded = False
else:
for sentence in sentences:
if self.name not in sentence._embeddings.keys():
everything_embedded = False
if not everything_embedded or not self.static_embeddings:
self._add_embeddings_internal(sentences)
return sentences
@abstractmethod
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Private method for adding embeddings to all words in a list of sentences."""
pass
class TokenEmbeddings(Embeddings):
"""Abstract base class for all token-level embeddings. Ever new type of word embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
def embedding_type(self) -> str:
return "word-level"
class DocumentEmbeddings(Embeddings):
"""Abstract base class for all document-level embeddings. Ever new type of document embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
def embedding_type(self) -> str:
return "sentence-level"
class StackedEmbeddings(TokenEmbeddings):
"""A stack of embeddings, used if you need to combine several different embedding types."""
def __init__(self, embeddings: List[TokenEmbeddings], detach: bool = True):
"""The constructor takes a list of embeddings to be combined."""
super().__init__()
self.embeddings = embeddings
# IMPORTANT: add embeddings as torch modules
for i, embedding in enumerate(embeddings):
self.add_module("list_embedding_{}".format(i), embedding)
self.detach: bool = detach
self.name: str = "Stack"
self.static_embeddings: bool = True
self.__embedding_type: str = embeddings[0].embedding_type
self.__embedding_length: int = 0
for embedding in embeddings:
self.__embedding_length += embedding.embedding_length
def embed(
self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True
):
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
for embedding in self.embeddings:
embedding.embed(sentences)
@property
def embedding_type(self) -> str:
return self.__embedding_type
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for embedding in self.embeddings:
embedding._add_embeddings_internal(sentences)
return sentences
def __str__(self):
return f'StackedEmbeddings [{",".join([str(e) for e in self.embeddings])}]'
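# Example usage (a minimal sketch; "glove" and "extvec" are embedding names handled below and may trigger a download):
#   stacked = StackedEmbeddings([WordEmbeddings("glove"), WordEmbeddings("extvec")])
#   sentence = Sentence()
#   sentence.add_token(Token("hello"))
#   stacked.embed(sentence)
#   # each token now carries the concatenation of both embeddings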
class WordEmbeddings(TokenEmbeddings):
"""Standard static word embeddings, such as GloVe or FastText."""
def __init__(self, embeddings: str, field: str = None):
"""
Initializes classic word embeddings. Constructor downloads required files if not there.
:param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom
If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.
"""
self.embeddings = embeddings
old_base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/"
)
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/"
)
embeddings_path_v4 = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/"
)
embeddings_path_v4_1 = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4.1/"
cache_dir = Path("embeddings")
# GLOVE embeddings
if embeddings.lower() == "glove" or embeddings.lower() == "en-glove":
cached_path(f"{old_base_path}glove.gensim.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(
f"{old_base_path}glove.gensim", cache_dir=cache_dir
)
# TURIAN embeddings
elif embeddings.lower() == "turian" or embeddings.lower() == "en-turian":
cached_path(
f"{embeddings_path_v4_1}turian.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{embeddings_path_v4_1}turian", cache_dir=cache_dir
)
# KOMNINOS embeddings
elif embeddings.lower() == "extvec" or embeddings.lower() == "en-extvec":
cached_path(
f"{old_base_path}extvec.gensim.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{old_base_path}extvec.gensim", cache_dir=cache_dir
)
# FT-CRAWL embeddings
elif embeddings.lower() == "crawl" or embeddings.lower() == "en-crawl":
cached_path(
f"{base_path}en-fasttext-crawl-300d-1M.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}en-fasttext-crawl-300d-1M", cache_dir=cache_dir
)
# FT-NEWS embeddings
elif (
embeddings.lower() == "news"
or embeddings.lower() == "en-news"
or embeddings.lower() == "en"
):
cached_path(
f"{base_path}en-fasttext-news-300d-1M.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}en-fasttext-news-300d-1M", cache_dir=cache_dir
)
# twitter embeddings
elif embeddings.lower() == "twitter" or embeddings.lower() == "en-twitter":
cached_path(
f"{old_base_path}twitter.gensim.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{old_base_path}twitter.gensim", cache_dir=cache_dir
)
# two-letter language code wiki embeddings
elif len(embeddings.lower()) == 2:
cached_path(
f"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M",
cache_dir=cache_dir,
)
# two-letter language code wiki embeddings
elif len(embeddings.lower()) == 7 and embeddings.endswith("-wiki"):
cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M",
cache_dir=cache_dir,
)
# two-letter language code crawl embeddings
elif len(embeddings.lower()) == 8 and embeddings.endswith("-crawl"):
cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M",
cache_dir=cache_dir,
)
elif not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
self.name: str = str(embeddings)
self.static_embeddings = True
if str(embeddings).endswith(".bin"):
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
str(embeddings), binary=True
)
else:
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load(
str(embeddings)
)
self.field = field
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
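# fall back through progressively weaker lookups: exact form, lowercased form, lowercased with
# digits replaced by '#', then by '0'; anything still unknown gets a zero vector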
if word in self.precomputed_word_embeddings:
word_embedding = self.precomputed_word_embeddings[word]
elif word.lower() in self.precomputed_word_embeddings:
word_embedding = self.precomputed_word_embeddings[word.lower()]
elif (
re.sub(r"\d", "#", word.lower()) in self.precomputed_word_embeddings
):
word_embedding = self.precomputed_word_embeddings[
re.sub(r"\d", "#", word.lower())
]
elif (
re.sub(r"\d", "0", word.lower()) in self.precomputed_word_embeddings
):
word_embedding = self.precomputed_word_embeddings[
re.sub(r"\d", "0", word.lower())
]
else:
word_embedding = np.zeros(self.embedding_length, dtype="float")
word_embedding = torch.FloatTensor(word_embedding)
token.set_embedding(self.name, word_embedding)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return f"'{self.embeddings}'"
class OneHotEmbeddings(TokenEmbeddings):
"""One-hot encoded embeddings."""
def __init__(
self,
corpus: Union[Corpus, List[Sentence]],
field: str = "text",
embedding_length: int = 300,
min_freq: int = 3,
):
super().__init__()
self.name = "one-hot"
self.static_embeddings = False
self.min_freq = min_freq
tokens = list(map((lambda s: s.tokens), corpus.train))
tokens = [token for sublist in tokens for token in sublist]
if field == "text":
most_common = Counter(list(map((lambda t: t.text), tokens))).most_common()
else:
most_common = Counter(
list(map((lambda t: t.get_tag(field)), tokens))
).most_common()
tokens = []
for token, freq in most_common:
if freq < min_freq:
break
tokens.append(token)
self.vocab_dictionary: Dictionary = Dictionary()
for token in tokens:
self.vocab_dictionary.add_item(token)
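# note: despite the name, indices are looked up in a trainable torch.nn.Embedding layer below
# rather than expanded into literal one-hot vectors; only tokens with frequency >= min_freq
# made it into the vocabulary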
# max_tokens = 500
self.__embedding_length = embedding_length
print(self.vocab_dictionary.idx2item)
print(f"vocabulary size of {len(self.vocab_dictionary)}")
# model architecture
self.embedding_layer = torch.nn.Embedding(
len(self.vocab_dictionary), self.__embedding_length
)
torch.nn.init.xavier_uniform_(self.embedding_layer.weight)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
one_hot_sentences = []
for i, sentence in enumerate(sentences):
context_idxs = [
self.vocab_dictionary.get_idx_for_item(t.text) for t in sentence.tokens
]
one_hot_sentences.extend(context_idxs)
one_hot_sentences = torch.tensor(one_hot_sentences, dtype=torch.long).to(
flair.device
)
embedded = self.embedding_layer.forward(one_hot_sentences)
index = 0
for sentence in sentences:
for token in sentence:
embedding = embedded[index]
token.set_embedding(self.name, embedding)
index += 1
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return "min_freq={}".format(self.min_freq)
class BPEmbSerializable(BPEmb):
def __getstate__(self):
state = self.__dict__.copy()
# save the sentence piece model as binary file (not as path which may change)
state["spm_model_binary"] = open(self.model_file, mode="rb").read()
state["spm"] = None
return state
def __setstate__(self, state):
from bpemb.util import sentencepiece_load
model_file = self.model_tpl.format(lang=state["lang"], vs=state["vs"])
self.__dict__ = state
# write out the binary sentence piece model into the expected directory
self.cache_dir: Path = Path(flair.cache_root) / "embeddings"
if "spm_model_binary" in self.__dict__:
# if the model was saved as binary and it is not found on disk, write to appropriate path
if not os.path.exists(self.cache_dir / state["lang"]):
os.makedirs(self.cache_dir / state["lang"])
self.model_file = self.cache_dir / model_file
with open(self.model_file, "wb") as out:
out.write(self.__dict__["spm_model_binary"])
else:
# otherwise, use normal process and potentially trigger another download
self.model_file = self._load_file(model_file)
# once the model is there, load it with sentence piece
state["spm"] = sentencepiece_load(self.model_file)
class BytePairEmbeddings(TokenEmbeddings):
def __init__(
self,
language: str,
dim: int = 50,
syllables: int = 100000,
cache_dir=Path(flair.cache_root) / "embeddings",
):
"""
Initializes BP embeddings. Constructor downloads required files if not there.
"""
self.name: str = f"bpe-{language}-{syllables}-{dim}"
self.static_embeddings = True
self.embedder = BPEmbSerializable(
lang=language, vs=syllables, dim=dim, cache_dir=cache_dir
)
self.__embedding_length: int = self.embedder.emb.vector_size * 2
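# each word is later embedded as the concatenation of its first and last subword vectors,
# hence twice the base vector size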
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
if word.strip() == "":
# empty words get no embedding
token.set_embedding(
self.name, torch.zeros(self.embedding_length, dtype=torch.float)
)
else:
# all other words get embedded
embeddings = self.embedder.embed(word.lower())
embedding = np.concatenate(
(embeddings[0], embeddings[len(embeddings) - 1])
)
token.set_embedding(
self.name, torch.tensor(embedding, dtype=torch.float)
)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return "model={}".format(self.name)
class ELMoEmbeddings(TokenEmbeddings):
"""Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018."""
def __init__(
self, model: str = "original", options_file: str = None, weight_file: str = None
):
super().__init__()
try:
import allennlp.commands.elmo
except ImportError:
log.warning("-" * 100)
log.warning('ATTENTION! The library "allennlp" is not installed!')
log.warning(
'To use ELMoEmbeddings, please first install with "pip install allennlp"'
)
log.warning("-" * 100)
pass
self.name = "elmo-" + model
self.static_embeddings = True
if not options_file or not weight_file:
# the default model for ELMo is the 'original' model, which is very large
options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE
weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE
# alternatively, a small, medium or portuguese model can be selected by passing the appropriate mode name
if model == "small":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"
if model == "medium":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
if model == "pt" or model == "portuguese":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5"
if model == "pubmed":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5"
# put on Cuda if available
from flair import device
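# translate flair's torch device into allennlp's integer convention: 'cuda:N' -> N, 'cpu' -> -1,
# any other device string defaults to GPU 0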
if re.fullmatch(r'cuda:[0-9]+', str(device)):
cuda_device = int(str(device).split(':')[-1])
elif str(device) == "cpu":
cuda_device = -1
else:
cuda_device = 0
self.ee = allennlp.commands.elmo.ElmoEmbedder(
options_file=options_file, weight_file=weight_file, cuda_device=cuda_device
)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
sentence_words: List[List[str]] = []
for sentence in sentences:
sentence_words.append([token.text for token in sentence])
embeddings = self.ee.embed_batch(sentence_words)
for i, sentence in enumerate(sentences):
sentence_embeddings = embeddings[i]
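# sentence_embeddings has shape (3, num_tokens, dim): the character-CNN layer and the two
# biLSTM layers of ELMo; the loop below concatenates them per token into one vector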
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
word_embedding = torch.cat(
[
torch.FloatTensor(sentence_embeddings[0, token_idx, :]),
torch.FloatTensor(sentence_embeddings[1, token_idx, :]),
torch.FloatTensor(sentence_embeddings[2, token_idx, :]),
],
0,
)
token.set_embedding(self.name, word_embedding)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
class ELMoTransformerEmbeddings(TokenEmbeddings):
"""Contextual word embeddings using word-level Transformer-based LM, as proposed in Peters et al., 2018."""
def __init__(self, model_file: str):
super().__init__()
try:
from allennlp.modules.token_embedders.bidirectional_language_model_token_embedder import (
BidirectionalLanguageModelTokenEmbedder,
)
from allennlp.data.token_indexers.elmo_indexer import (
ELMoTokenCharactersIndexer,
)
except ImportError:
log.warning("-" * 100)
log.warning('ATTENTION! The library "allennlp" is not installed!')
log.warning(
"To use ELMoTransformerEmbeddings, please first install a recent version from https://github.com/allenai/allennlp"
)
log.warning("-" * 100)
pass
self.name = "elmo-transformer"
self.static_embeddings = True
self.lm_embedder = BidirectionalLanguageModelTokenEmbedder(
archive_file=model_file,
dropout=0.2,
bos_eos_tokens=("<S>", "</S>"),
remove_bos_eos=True,
requires_grad=False,
)
self.lm_embedder = self.lm_embedder.to(device=flair.device)
self.vocab = self.lm_embedder._lm.vocab
self.indexer = ELMoTokenCharactersIndexer()
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# Avoid conflicts with flair's Token class
import allennlp.data.tokenizers.token as allen_nlp_token
indexer = self.indexer
vocab = self.vocab
for sentence in sentences:
character_indices = indexer.tokens_to_indices(
[allen_nlp_token.Token(token.text) for token in sentence], vocab, "elmo"
)["elmo"]
indices_tensor = torch.LongTensor([character_indices])
indices_tensor = indices_tensor.to(device=flair.device)
embeddings = self.lm_embedder(indices_tensor)[0].detach().cpu().numpy()
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
embedding = embeddings[token_idx]
word_embedding = torch.FloatTensor(embedding)
token.set_embedding(self.name, word_embedding)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
class TransformerXLEmbeddings(TokenEmbeddings):
def __init__(self, model: str = "transfo-xl-wt103"):
"""Transformer-XL embeddings, as proposed in Dai et al., 2019.
:param model: name of Transformer-XL model
"""
super().__init__()
if model not in TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys():
raise ValueError("Provided Transformer-XL model is not available.")
self.tokenizer = TransfoXLTokenizer.from_pretrained(model)
self.model = TransfoXLModel.from_pretrained(model)
self.name = model
self.static_embeddings = True
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.model.to(flair.device)
self.model.eval()
with torch.no_grad():
for sentence in sentences:
token_strings = [token.text for token in sentence.tokens]
indexed_tokens = self.tokenizer.convert_tokens_to_ids(token_strings)
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to(flair.device)
hidden_states, _ = self.model(tokens_tensor)
                for token_idx, token in enumerate(sentence.tokens):
token.set_embedding(self.name, hidden_states[0][token_idx])
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
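# Added usage sketch (not part of the original module): TransformerXLEmbeddings with its
# default "transfo-xl-wt103" model. Assumes the pretrained weights can be downloaded by the
# tokenizer/model classes used above; defined as a function so nothing runs at import time.
def _example_transformer_xl_usage():
    embedder = TransformerXLEmbeddings("transfo-xl-wt103")
    sentence = Sentence("The grass is green .")
    embedder.embed(sentence)
    # one hidden state per word-level token, taken from the model output
    return [token.get_embedding() for token in sentence]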
class OpenAIGPTEmbeddings(TokenEmbeddings):
def __init__(
self, model: str = "openai-gpt", pooling_operation: str = "first_last"
):
"""OpenAI GPT embeddings, as proposed in Radford et al. 2018.
:param model: name of OpenAI GPT model
:param pooling_operation: defines pooling operation for subwords
"""
super().__init__()
if model not in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys():
raise ValueError("Provided OpenAI GPT model is not available.")
self.tokenizer = OpenAIGPTTokenizer.from_pretrained(model)
self.model = OpenAIGPTModel.from_pretrained(model)
self.name = model
self.static_embeddings = True
self.pooling_operation = pooling_operation
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.model.to(flair.device)
self.model.eval()
with torch.no_grad():
for sentence in sentences:
for token in sentence.tokens:
token_text = token.text
subwords = self.tokenizer.tokenize(token_text)
indexed_tokens = self.tokenizer.convert_tokens_to_ids(subwords)
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to(flair.device)
hidden_states = self.model(tokens_tensor)
if self.pooling_operation == "first":
# Use embedding of first subword
token.set_embedding(self.name, hidden_states[0][0])
elif self.pooling_operation == "last":
last_embedding = hidden_states[0][len(hidden_states[0]) - 1]
token.set_embedding(self.name, last_embedding)
elif self.pooling_operation == "first_last":
# Use embedding of first and last subword
first_embedding = hidden_states[0][0]
last_embedding = hidden_states[0][len(hidden_states[0]) - 1]
final_embedding = torch.cat([first_embedding, last_embedding])
token.set_embedding(self.name, final_embedding)
else:
# Otherwise, use mean over all subwords in token
all_embeddings = [
embedding.unsqueeze(0) for embedding in hidden_states[0]
]
mean = torch.mean(torch.cat(all_embeddings, dim=0), dim=0)
token.set_embedding(self.name, mean)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
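# Added usage sketch (not part of the original module): OpenAIGPTEmbeddings pools subword
# embeddings back to word level; the pooling_operation values shown here mirror the branches
# implemented in _add_embeddings_internal above. Assumes the "openai-gpt" weights can be
# downloaded on first use.
def _example_openai_gpt_usage(pooling_operation: str = "first_last"):
    embedder = OpenAIGPTEmbeddings("openai-gpt", pooling_operation=pooling_operation)
    sentence = Sentence("The grass is green .")
    embedder.embed(sentence)
    return [token.get_embedding() for token in sentence]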
class CharacterEmbeddings(TokenEmbeddings):
"""Character embeddings of words, as proposed in Lample et al., 2016."""
def __init__(self, path_to_char_dict: str = None, char_embedding_dim: int = 25, hidden_size_char: int = 25):
"""Uses the default character dictionary if none provided."""
super().__init__()
self.name = "Char"
self.static_embeddings = False
# use list of common characters if none provided
if path_to_char_dict is None:
self.char_dictionary: Dictionary = Dictionary.load("common-chars")
else:
self.char_dictionary: Dictionary = Dictionary.load_from_file(
path_to_char_dict
)
self.char_embedding_dim: int = char_embedding_dim
self.hidden_size_char: int = hidden_size_char
self.char_embedding = torch.nn.Embedding(
len(self.char_dictionary.item2idx), self.char_embedding_dim
)
self.char_rnn = torch.nn.LSTM(
self.char_embedding_dim,
self.hidden_size_char,
num_layers=1,
bidirectional=True,
)
self.__embedding_length = self.char_embedding_dim * 2
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]):
for sentence in sentences:
tokens_char_indices = []
# translate words in sentence into ints using dictionary
for token in sentence.tokens:
char_indices = [
self.char_dictionary.get_idx_for_item(char) for char in token.text
]
tokens_char_indices.append(char_indices)
# sort words by length, for batching and masking
tokens_sorted_by_length = sorted(
tokens_char_indices, key=lambda p: len(p), reverse=True
)
d = {}
for i, ci in enumerate(tokens_char_indices):
for j, cj in enumerate(tokens_sorted_by_length):
if ci == cj:
d[j] = i
continue
chars2_length = [len(c) for c in tokens_sorted_by_length]
longest_token_in_sentence = max(chars2_length)
tokens_mask = torch.zeros(
(len(tokens_sorted_by_length), longest_token_in_sentence),
dtype=torch.long,
device=flair.device,
)
for i, c in enumerate(tokens_sorted_by_length):
tokens_mask[i, : chars2_length[i]] = torch.tensor(
c, dtype=torch.long, device=flair.device
)
# chars for rnn processing
chars = tokens_mask
character_embeddings = self.char_embedding(chars).transpose(0, 1)
packed = torch.nn.utils.rnn.pack_padded_sequence(
character_embeddings, chars2_length
)
lstm_out, self.hidden = self.char_rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
outputs = outputs.transpose(0, 1)
chars_embeds_temp = torch.zeros(
(outputs.size(0), outputs.size(2)),
dtype=torch.float,
device=flair.device,
)
for i, index in enumerate(output_lengths):
chars_embeds_temp[i] = outputs[i, index - 1]
character_embeddings = chars_embeds_temp.clone()
for i in range(character_embeddings.size(0)):
character_embeddings[d[i]] = chars_embeds_temp[i]
for token_number, token in enumerate(sentence.tokens):
token.set_embedding(self.name, character_embeddings[token_number])
def __str__(self):
return self.name
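# Added usage sketch (not part of the original module): CharacterEmbeddings are not
# pre-trained (static_embeddings is False above), so they are normally stacked with word
# embeddings and trained jointly inside a downstream model. WordEmbeddings("glove") is
# assumed to be defined earlier in this module and downloadable.
def _example_character_embeddings_usage():
    stacked = StackedEmbeddings(
        embeddings=[WordEmbeddings("glove"), CharacterEmbeddings()]
    )
    sentence = Sentence("The grass is green .")
    stacked.embed(sentence)
    return [token.get_embedding() for token in sentence]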
class FlairEmbeddings(TokenEmbeddings):
"""Contextual string embeddings of words, as proposed in Akbik et al., 2018."""
def __init__(
self,
model: str,
use_cache: bool = False,
cache_directory: Path = None,
chars_per_chunk: int = 512,
):
"""
initializes contextual string embeddings using a character-level language model.
:param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'
depending on which character language model is desired.
:param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will
not allow re-use of once computed embeddings that do not fit into memory
:param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache
is written to the provided directory.
:param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster but requires
more memory. Lower means slower but less memory.
"""
super().__init__()
cache_dir = Path("embeddings")
aws_path: str = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources"
self.PRETRAINED_MODEL_ARCHIVE_MAP = {
# multilingual models
"multi-forward": f"{aws_path}/embeddings-v0.4/lm-multi-forward-v0.1.pt",
"multi-backward": f"{aws_path}/embeddings-v0.4/lm-multi-backward-v0.1.pt",
"multi-forward-fast": f"{aws_path}/embeddings-v0.4/lm-multi-forward-fast-v0.1.pt",
"multi-backward-fast": f"{aws_path}/embeddings-v0.4/lm-multi-backward-fast-v0.1.pt",
# English models
"news-forward": f"{aws_path}/embeddings-v0.4.1/big-news-forward--h2048-l1-d0.05-lr30-0.25-20/news-forward-0.4.1.pt",
"news-backward": f"{aws_path}/embeddings-v0.4.1/big-news-backward--h2048-l1-d0.05-lr30-0.25-20/news-backward-0.4.1.pt",
"news-forward-fast": f"{aws_path}/embeddings/lm-news-english-forward-1024-v0.2rc.pt",
"news-backward-fast": f"{aws_path}/embeddings/lm-news-english-backward-1024-v0.2rc.pt",
"mix-forward": f"{aws_path}/embeddings/lm-mix-english-forward-v0.2rc.pt",
"mix-backward": f"{aws_path}/embeddings/lm-mix-english-backward-v0.2rc.pt",
# Arabic
"ar-forward": f"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-forward-v0.1.pt",
"ar-backward": f"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-backward-v0.1.pt",
# Bulgarian
"bg-forward-fast": f"{aws_path}/embeddings-v0.3/lm-bg-small-forward-v0.1.pt",
"bg-backward-fast": f"{aws_path}/embeddings-v0.3/lm-bg-small-backward-v0.1.pt",
"bg-forward": f"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-forward-v0.1.pt",
"bg-backward": f"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-backward-v0.1.pt",
# Czech
"cs-forward": f"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-forward-v0.1.pt",
"cs-backward": f"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-backward-v0.1.pt",
"cs-v0-forward": f"{aws_path}/embeddings-v0.4/lm-cs-large-forward-v0.1.pt",
"cs-v0-backward": f"{aws_path}/embeddings-v0.4/lm-cs-large-backward-v0.1.pt",
# Danish
"da-forward": f"{aws_path}/embeddings-stefan-it/lm-da-opus-large-forward-v0.1.pt",
"da-backward": f"{aws_path}/embeddings-stefan-it/lm-da-opus-large-backward-v0.1.pt",
# German
"de-forward": f"{aws_path}/embeddings/lm-mix-german-forward-v0.2rc.pt",
"de-backward": f"{aws_path}/embeddings/lm-mix-german-backward-v0.2rc.pt",
"de-historic-ha-forward": f"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-forward-v0.1.pt",
"de-historic-ha-backward": f"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-backward-v0.1.pt",
"de-historic-wz-forward": f"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-forward-v0.1.pt",
"de-historic-wz-backward": f"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-backward-v0.1.pt",
# Spanish
"es-forward": f"{aws_path}/embeddings-v0.4/language_model_es_forward_long/lm-es-forward.pt",
"es-backward": f"{aws_path}/embeddings-v0.4/language_model_es_backward_long/lm-es-backward.pt",
"es-forward-fast": f"{aws_path}/embeddings-v0.4/language_model_es_forward/lm-es-forward-fast.pt",
"es-backward-fast": f"{aws_path}/embeddings-v0.4/language_model_es_backward/lm-es-backward-fast.pt",
# Basque
"eu-forward": f"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-forward-v0.1.pt",
"eu-backward": f"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-backward-v0.1.pt",
"eu-v0-forward": f"{aws_path}/embeddings-v0.4/lm-eu-large-forward-v0.1.pt",
"eu-v0-backward": f"{aws_path}/embeddings-v0.4/lm-eu-large-backward-v0.1.pt",
# Persian
"fa-forward": f"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-forward-v0.1.pt",
"fa-backward": f"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-backward-v0.1.pt",
# Finnish
"fi-forward": f"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-forward-v0.1.pt",
"fi-backward": f"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-backward-v0.1.pt",
# French
"fr-forward": f"{aws_path}/embeddings/lm-fr-charlm-forward.pt",
"fr-backward": f"{aws_path}/embeddings/lm-fr-charlm-backward.pt",
# Hebrew
"he-forward": f"{aws_path}/embeddings-stefan-it/lm-he-opus-large-forward-v0.1.pt",
"he-backward": f"{aws_path}/embeddings-stefan-it/lm-he-opus-large-backward-v0.1.pt",
# Hindi
"hi-forward": f"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-forward-v0.1.pt",
"hi-backward": f"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-backward-v0.1.pt",
# Croatian
"hr-forward": f"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-forward-v0.1.pt",
"hr-backward": f"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-backward-v0.1.pt",
# Indonesian
"id-forward": f"{aws_path}/embeddings-stefan-it/lm-id-opus-large-forward-v0.1.pt",
"id-backward": f"{aws_path}/embeddings-stefan-it/lm-id-opus-large-backward-v0.1.pt",
# Italian
"it-forward": f"{aws_path}/embeddings-stefan-it/lm-it-opus-large-forward-v0.1.pt",
"it-backward": f"{aws_path}/embeddings-stefan-it/lm-it-opus-large-backward-v0.1.pt",
# Japanese
"ja-forward": f"{aws_path}/embeddings-v0.4.1/lm__char-forward__ja-wikipedia-3GB/japanese-forward.pt",
"ja-backward": f"{aws_path}/embeddings-v0.4.1/lm__char-backward__ja-wikipedia-3GB/japanese-backward.pt",
# Dutch
"nl-forward": f"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-forward-v0.1.pt",
"nl-backward": f"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-backward-v0.1.pt",
"nl-v0-forward": f"{aws_path}/embeddings-v0.4/lm-nl-large-forward-v0.1.pt",
"nl-v0-backward": f"{aws_path}/embeddings-v0.4/lm-nl-large-backward-v0.1.pt",
# Norwegian
"no-forward": f"{aws_path}/embeddings-stefan-it/lm-no-opus-large-forward-v0.1.pt",
"no-backward": f"{aws_path}/embeddings-stefan-it/lm-no-opus-large-backward-v0.1.pt",
# Polish
"pl-forward": f"{aws_path}/embeddings/lm-polish-forward-v0.2.pt",
"pl-backward": f"{aws_path}/embeddings/lm-polish-backward-v0.2.pt",
"pl-opus-forward": f"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-forward-v0.1.pt",
"pl-opus-backward": f"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-backward-v0.1.pt",
# Portuguese
"pt-forward": f"{aws_path}/embeddings-v0.4/lm-pt-forward.pt",
"pt-backward": f"{aws_path}/embeddings-v0.4/lm-pt-backward.pt",
# Pubmed
"pubmed-forward": f"{aws_path}/embeddings-v0.4.1/pubmed-2015-fw-lm.pt",
"pubmed-backward": f"{aws_path}/embeddings-v0.4.1/pubmed-2015-bw-lm.pt",
# Slovenian
"sl-forward": f"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-forward-v0.1.pt",
"sl-backward": f"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-backward-v0.1.pt",
"sl-v0-forward": f"{aws_path}/embeddings-v0.3/lm-sl-large-forward-v0.1.pt",
"sl-v0-backward": f"{aws_path}/embeddings-v0.3/lm-sl-large-backward-v0.1.pt",
# Swedish
"sv-forward": f"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-forward-v0.1.pt",
"sv-backward": f"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-backward-v0.1.pt",
"sv-v0-forward": f"{aws_path}/embeddings-v0.4/lm-sv-large-forward-v0.1.pt",
"sv-v0-backward": f"{aws_path}/embeddings-v0.4/lm-sv-large-backward-v0.1.pt",
}
# load model if in pretrained model map
if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:
base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]
model = cached_path(base_path, cache_dir=cache_dir)
elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:
base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[
replace_with_language_code(model)
]
model = cached_path(base_path, cache_dir=cache_dir)
elif not Path(model).exists():
raise ValueError(
f'The given model "{model}" is not available or is not a valid path.'
)
self.name = str(model)
self.static_embeddings = True
from flair.models import LanguageModel
self.lm = LanguageModel.load_language_model(model)
self.is_forward_lm: bool = self.lm.is_forward_lm
self.chars_per_chunk: int = chars_per_chunk
# initialize cache if use_cache set
self.cache = None
if use_cache:
cache_path = (
Path(f"{self.name}-tmp-cache.sqllite")
if not cache_directory
else cache_directory / f"{self.name}-tmp-cache.sqllite"
)
from sqlitedict import SqliteDict
self.cache = SqliteDict(str(cache_path), autocommit=True)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
# set to eval mode
self.eval()
def train(self, mode=True):
pass
def __getstate__(self):
# Copy the object's state from self.__dict__ which contains
# all our instance attributes. Always use the dict.copy()
# method to avoid modifying the original state.
state = self.__dict__.copy()
# Remove the unpicklable entries.
state["cache"] = None
return state
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# make compatible with serialized models
if "chars_per_chunk" not in self.__dict__:
self.chars_per_chunk = 512
# if cache is used, try setting embeddings from cache first
if "cache" in self.__dict__ and self.cache is not None:
# try populating embeddings from cache
all_embeddings_retrieved_from_cache: bool = True
for sentence in sentences:
key = sentence.to_tokenized_string()
embeddings = self.cache.get(key)
if not embeddings:
all_embeddings_retrieved_from_cache = False
break
else:
for token, embedding in zip(sentence, embeddings):
token.set_embedding(self.name, torch.FloatTensor(embedding))
if all_embeddings_retrieved_from_cache:
return sentences
with torch.no_grad():
# if this is not possible, use LM to generate embedding. First, get text sentences
text_sentences = [sentence.to_tokenized_string() for sentence in sentences]
longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))
# pad strings with whitespaces to longest sentence
sentences_padded: List[str] = []
append_padded_sentence = sentences_padded.append
start_marker = "\n"
end_marker = " "
extra_offset = len(start_marker)
for sentence_text in text_sentences:
pad_by = longest_character_sequence_in_batch - len(sentence_text)
if self.is_forward_lm:
padded = "{}{}{}{}".format(
start_marker, sentence_text, end_marker, pad_by * " "
)
append_padded_sentence(padded)
else:
padded = "{}{}{}{}".format(
start_marker, sentence_text[::-1], end_marker, pad_by * " "
)
append_padded_sentence(padded)
# get hidden states from language model
all_hidden_states_in_lm = self.lm.get_representation(
sentences_padded, self.chars_per_chunk
)
# take first or last hidden states from language model as word representation
for i, sentence in enumerate(sentences):
sentence_text = sentence.to_tokenized_string()
offset_forward: int = extra_offset
offset_backward: int = len(sentence_text) + extra_offset
for token in sentence.tokens:
offset_forward += len(token.text)
if self.is_forward_lm:
offset = offset_forward
else:
offset = offset_backward
embedding = all_hidden_states_in_lm[offset, i, :]
# if self.tokenized_lm or token.whitespace_after:
offset_forward += 1
offset_backward -= 1
offset_backward -= len(token.text)
token.set_embedding(self.name, embedding.clone().detach())
all_hidden_states_in_lm = None
if "cache" in self.__dict__ and self.cache is not None:
for sentence in sentences:
self.cache[sentence.to_tokenized_string()] = [
token._embeddings[self.name].tolist() for token in sentence
]
return sentences
def __str__(self):
return self.name
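# Added usage sketch (not part of the original module): contextual string embeddings using
# model ids from PRETRAINED_MODEL_ARCHIVE_MAP above; forward and backward models are
# typically combined via StackedEmbeddings. Assumes the model files can be downloaded.
def _example_flair_embeddings_usage():
    stacked = StackedEmbeddings(
        embeddings=[FlairEmbeddings("news-forward"), FlairEmbeddings("news-backward")]
    )
    sentence = Sentence("The grass is green .")
    stacked.embed(sentence)
    return [token.get_embedding() for token in sentence]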
class PooledFlairEmbeddings(TokenEmbeddings):
def __init__(
self,
contextual_embeddings: Union[str, FlairEmbeddings],
pooling: str = "min",
only_capitalized: bool = False,
**kwargs,
):
super().__init__()
# use the character language model embeddings as basis
if type(contextual_embeddings) is str:
self.context_embeddings: FlairEmbeddings = FlairEmbeddings(
contextual_embeddings, **kwargs
)
else:
self.context_embeddings: FlairEmbeddings = contextual_embeddings
        # length is twice the original character LM embedding length
        self.__embedding_length = self.context_embeddings.embedding_length * 2
self.name = self.context_embeddings.name + "-context"
# these fields are for the embedding memory
self.word_embeddings = {}
self.word_count = {}
# whether to add only capitalized words to memory (faster runtime and lower memory consumption)
self.only_capitalized = only_capitalized
# we re-compute embeddings dynamically at each epoch
self.static_embeddings = False
# set the memory method
self.pooling = pooling
if pooling == "mean":
self.aggregate_op = torch.add
elif pooling == "fade":
self.aggregate_op = torch.add
elif pooling == "max":
self.aggregate_op = torch.max
elif pooling == "min":
self.aggregate_op = torch.min
def train(self, mode=True):
super().train(mode=mode)
if mode:
# memory is wiped each time we do a training run
print("train mode resetting embeddings")
self.word_embeddings = {}
self.word_count = {}
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name]
local_embedding = local_embedding.to(flair.device)
if token.text[0].isupper() or not self.only_capitalized:
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
aggregated_embedding = self.aggregate_op(
self.word_embeddings[token.text], local_embedding
)
if self.pooling == "fade":
aggregated_embedding /= 2
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
    @property
    def embedding_length(self) -> int:
        return self.__embedding_length
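# Added usage sketch (not part of the original module): PooledFlairEmbeddings keeps a memory
# of every context a word has appeared in and attaches the pooled embedding in addition to
# the local contextual one, which is why embedding_length is twice the base length. The
# "min" pooling shown here is the constructor default.
def _example_pooled_flair_embeddings_usage():
    embedder = PooledFlairEmbeddings("news-forward", pooling="min")
    sentence = Sentence("The grass is green .")
    embedder.embed(sentence)
    return [token.get_embedding() for token in sentence]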
class BertEmbeddings(TokenEmbeddings):
def __init__(
self,
bert_model_or_path: str = "bert-base-uncased",
layers: str = "-1,-2,-3,-4",
pooling_operation: str = "first",
):
"""
Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018.
        :param bert_model_or_path: name of BERT model (e.g. 'bert-base-uncased') or directory path containing a custom model,
        configuration file and vocab file (names of the three files should be bert_config.json, pytorch_model.bin/model.chkpt, vocab.txt)
:param layers: string indicating which layers to take for embedding
        :param pooling_operation: how to get from token piece embeddings to token embedding. Either pool them and take
        the average ('mean') or use the first word piece embedding as token embedding ('first')
"""
super().__init__()
self.tokenizer = BertTokenizer.from_pretrained(bert_model_or_path)
self.model = BertModel.from_pretrained(bert_model_or_path)
self.layer_indexes = [int(x) for x in layers.split(",")]
self.pooling_operation = pooling_operation
self.name = str(bert_model_or_path)
self.static_embeddings = True
class BertInputFeatures(object):
"""Private helper class for holding BERT-formatted features"""
def __init__(
self,
unique_id,
tokens,
input_ids,
input_mask,
input_type_ids,
token_subtoken_count,
):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
self.token_subtoken_count = token_subtoken_count
    def _convert_sentences_to_features(
        self, sentences, max_sequence_length: int
    ) -> List["BertEmbeddings.BertInputFeatures"]:
max_sequence_length = max_sequence_length + 2
features: List[BertEmbeddings.BertInputFeatures] = []
for (sentence_index, sentence) in enumerate(sentences):
bert_tokenization: List[str] = []
token_subtoken_count: Dict[int, int] = {}
for token in sentence:
subtokens = self.tokenizer.tokenize(token.text)
bert_tokenization.extend(subtokens)
token_subtoken_count[token.idx] = len(subtokens)
if len(bert_tokenization) > max_sequence_length - 2:
bert_tokenization = bert_tokenization[0 : (max_sequence_length - 2)]
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in bert_tokenization:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_sequence_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
features.append(
BertEmbeddings.BertInputFeatures(
unique_id=sentence_index,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
token_subtoken_count=token_subtoken_count,
)
)
return features
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added,
updates only if embeddings are non-static."""
# first, find longest sentence in batch
longest_sentence_in_batch: int = len(
max(
[
self.tokenizer.tokenize(sentence.to_tokenized_string())
for sentence in sentences
],
key=len,
)
)
# prepare id maps for BERT model
features = self._convert_sentences_to_features(
sentences, longest_sentence_in_batch
)
all_input_ids = torch.LongTensor([f.input_ids for f in features]).to(
flair.device
)
all_input_masks = torch.LongTensor([f.input_mask for f in features]).to(
flair.device
)
        # put encoded batch through BERT model to get all hidden states of all encoder layers
        self.model.to(flair.device)
        self.model.eval()
        with torch.no_grad():
            # run the forward pass inside no_grad: these embeddings are static, so there is
            # no need to build an autograd graph for them
            all_encoder_layers, _ = self.model(
                all_input_ids, token_type_ids=None, attention_mask=all_input_masks
            )
            for sentence_index, sentence in enumerate(sentences):
feature = features[sentence_index]
# get aggregated embeddings for each BERT-subtoken in sentence
subtoken_embeddings = []
for token_index, _ in enumerate(feature.tokens):
all_layers = []
for layer_index in self.layer_indexes:
layer_output = (
all_encoder_layers[int(layer_index)]
.detach()
.cpu()[sentence_index]
)
all_layers.append(layer_output[token_index])
subtoken_embeddings.append(torch.cat(all_layers))
# get the current sentence object
token_idx = 0
for token in sentence:
# add concatenated embedding to sentence
token_idx += 1
if self.pooling_operation == "first":
# use first subword embedding if pooling operation is 'first'
token.set_embedding(self.name, subtoken_embeddings[token_idx])
else:
# otherwise, do a mean over all subwords in token
embeddings = subtoken_embeddings[
token_idx : token_idx
+ feature.token_subtoken_count[token.idx]
]
embeddings = [
embedding.unsqueeze(0) for embedding in embeddings
]
mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)
token.set_embedding(self.name, mean)
token_idx += feature.token_subtoken_count[token.idx] - 1
return sentences
    @property
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
return len(self.layer_indexes) * self.model.config.hidden_size
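# Added usage sketch (not part of the original module): BertEmbeddings concatenates the
# selected encoder layers ("-1,-2,-3,-4" by default) for the first word piece of each token
# when pooling_operation is "first". Assumes the "bert-base-uncased" weights can be
# downloaded on first use.
def _example_bert_embeddings_usage():
    embedder = BertEmbeddings("bert-base-uncased")
    sentence = Sentence("The grass is green .")
    embedder.embed(sentence)
    return [token.get_embedding() for token in sentence]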
class CharLMEmbeddings(TokenEmbeddings):
"""Contextual string embeddings of words, as proposed in Akbik et al., 2018. """
@deprecated(version="0.4", reason="Use 'FlairEmbeddings' instead.")
def __init__(
self,
model: str,
detach: bool = True,
use_cache: bool = False,
cache_directory: Path = None,
):
"""
initializes contextual string embeddings using a character-level language model.
:param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'
depending on which character language model is desired.
:param detach: if set to False, the gradient will propagate into the language model. this dramatically slows down
training and often leads to worse results, so not recommended.
:param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will
not allow re-use of once computed embeddings that do not fit into memory
:param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache
is written to the provided directory.
"""
super().__init__()
cache_dir = Path("embeddings")
# multilingual forward (English, German, French, Italian, Dutch, Polish)
if model.lower() == "multi-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# multilingual backward (English, German, French, Italian, Dutch, Polish)
elif model.lower() == "multi-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-forward
elif model.lower() == "news-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-backward
elif model.lower() == "news-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-forward
elif model.lower() == "news-forward-fast":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-1024-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-backward
elif model.lower() == "news-backward-fast":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-1024-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-english-forward
elif model.lower() == "mix-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-english-backward
elif model.lower() == "mix-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-german-forward
elif model.lower() == "german-forward" or model.lower() == "de-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-german-backward
elif model.lower() == "german-backward" or model.lower() == "de-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# common crawl Polish forward
elif model.lower() == "polish-forward" or model.lower() == "pl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-forward-v0.2.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# common crawl Polish backward
elif model.lower() == "polish-backward" or model.lower() == "pl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-backward-v0.2.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Slovenian forward
elif model.lower() == "slovenian-forward" or model.lower() == "sl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Slovenian backward
elif model.lower() == "slovenian-backward" or model.lower() == "sl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Bulgarian forward
elif model.lower() == "bulgarian-forward" or model.lower() == "bg-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Bulgarian backward
elif model.lower() == "bulgarian-backward" or model.lower() == "bg-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Dutch forward
elif model.lower() == "dutch-forward" or model.lower() == "nl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Dutch backward
elif model.lower() == "dutch-backward" or model.lower() == "nl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Swedish forward
elif model.lower() == "swedish-forward" or model.lower() == "sv-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Swedish backward
elif model.lower() == "swedish-backward" or model.lower() == "sv-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# French forward
elif model.lower() == "french-forward" or model.lower() == "fr-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-forward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# French backward
elif model.lower() == "french-backward" or model.lower() == "fr-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-backward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Czech forward
elif model.lower() == "czech-forward" or model.lower() == "cs-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Czech backward
elif model.lower() == "czech-backward" or model.lower() == "cs-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Portuguese forward
elif model.lower() == "portuguese-forward" or model.lower() == "pt-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-forward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Portuguese backward
elif model.lower() == "portuguese-backward" or model.lower() == "pt-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-backward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
elif not Path(model).exists():
raise ValueError(
f'The given model "{model}" is not available or is not a valid path.'
)
self.name = str(model)
self.static_embeddings = detach
from flair.models import LanguageModel
self.lm = LanguageModel.load_language_model(model)
self.detach = detach
self.is_forward_lm: bool = self.lm.is_forward_lm
# initialize cache if use_cache set
self.cache = None
if use_cache:
cache_path = (
Path(f"{self.name}-tmp-cache.sqllite")
if not cache_directory
else cache_directory / f"{self.name}-tmp-cache.sqllite"
)
from sqlitedict import SqliteDict
self.cache = SqliteDict(str(cache_path), autocommit=True)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
# set to eval mode
self.eval()
def train(self, mode=True):
pass
def __getstate__(self):
# Copy the object's state from self.__dict__ which contains
# all our instance attributes. Always use the dict.copy()
# method to avoid modifying the original state.
state = self.__dict__.copy()
# Remove the unpicklable entries.
state["cache"] = None
return state
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# if cache is used, try setting embeddings from cache first
if "cache" in self.__dict__ and self.cache is not None:
# try populating embeddings from cache
all_embeddings_retrieved_from_cache: bool = True
for sentence in sentences:
key = sentence.to_tokenized_string()
embeddings = self.cache.get(key)
if not embeddings:
all_embeddings_retrieved_from_cache = False
break
else:
for token, embedding in zip(sentence, embeddings):
token.set_embedding(self.name, torch.FloatTensor(embedding))
if all_embeddings_retrieved_from_cache:
return sentences
# if this is not possible, use LM to generate embedding. First, get text sentences
text_sentences = [sentence.to_tokenized_string() for sentence in sentences]
longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))
# pad strings with whitespaces to longest sentence
sentences_padded: List[str] = []
append_padded_sentence = sentences_padded.append
end_marker = " "
extra_offset = 1
for sentence_text in text_sentences:
pad_by = longest_character_sequence_in_batch - len(sentence_text)
if self.is_forward_lm:
padded = "\n{}{}{}".format(sentence_text, end_marker, pad_by * " ")
append_padded_sentence(padded)
else:
padded = "\n{}{}{}".format(
sentence_text[::-1], end_marker, pad_by * " "
)
append_padded_sentence(padded)
# get hidden states from language model
all_hidden_states_in_lm = self.lm.get_representation(sentences_padded)
# take first or last hidden states from language model as word representation
for i, sentence in enumerate(sentences):
sentence_text = sentence.to_tokenized_string()
offset_forward: int = extra_offset
offset_backward: int = len(sentence_text) + extra_offset
for token in sentence.tokens:
offset_forward += len(token.text)
if self.is_forward_lm:
offset = offset_forward
else:
offset = offset_backward
embedding = all_hidden_states_in_lm[offset, i, :]
# if self.tokenized_lm or token.whitespace_after:
offset_forward += 1
offset_backward -= 1
offset_backward -= len(token.text)
token.set_embedding(self.name, embedding)
if "cache" in self.__dict__ and self.cache is not None:
for sentence in sentences:
self.cache[sentence.to_tokenized_string()] = [
token._embeddings[self.name].tolist() for token in sentence
]
return sentences
def __str__(self):
return self.name
class DocumentMeanEmbeddings(DocumentEmbeddings):
@deprecated(
version="0.3.1",
reason="The functionality of this class is moved to 'DocumentPoolEmbeddings'",
)
def __init__(self, token_embeddings: List[TokenEmbeddings]):
"""The constructor takes a list of embeddings to be combined."""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(
embeddings=token_embeddings
)
self.name: str = "document_mean"
self.__embedding_length: int = self.embeddings.embedding_length
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates
only if embeddings are non-static."""
everything_embedded: bool = True
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
for sentence in sentences:
if self.name not in sentence._embeddings.keys():
everything_embedded = False
if not everything_embedded:
self.embeddings.embed(sentences)
for sentence in sentences:
word_embeddings = []
for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)
mean_embedding = torch.mean(word_embeddings, 0)
sentence.set_embedding(self.name, mean_embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
class DocumentPoolEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
fine_tune_mode="linear",
pooling: str = "mean",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param pooling: a string which can any value from ['mean', 'max', 'min']
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.__embedding_length = self.embeddings.embedding_length
# optional fine-tuning on top of embedding layer
self.fine_tune_mode = fine_tune_mode
if self.fine_tune_mode in ["nonlinear", "linear"]:
self.embedding_flex = torch.nn.Linear(
self.embedding_length, self.embedding_length, bias=False
)
self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))
if self.fine_tune_mode in ["nonlinear"]:
self.embedding_flex_nonlinear = torch.nn.ReLU(self.embedding_length)
self.embedding_flex_nonlinear_map = torch.nn.Linear(
self.embedding_length, self.embedding_length
)
self.to(flair.device)
self.pooling = pooling
if self.pooling == "mean":
self.pool_op = torch.mean
elif pooling == "max":
self.pool_op = torch.max
elif pooling == "min":
self.pool_op = torch.min
else:
raise ValueError(f"Pooling operation for {self.mode!r} is not defined")
self.name: str = f"document_{self.pooling}"
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates
only if embeddings are non-static."""
# if only one sentence is passed, convert to list of sentence
if isinstance(sentences, Sentence):
sentences = [sentences]
self.embeddings.embed(sentences)
for sentence in sentences:
word_embeddings = []
for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)
if self.fine_tune_mode in ["nonlinear", "linear"]:
word_embeddings = self.embedding_flex(word_embeddings)
if self.fine_tune_mode in ["nonlinear"]:
word_embeddings = self.embedding_flex_nonlinear(word_embeddings)
word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)
if self.pooling == "mean":
pooled_embedding = self.pool_op(word_embeddings, 0)
else:
pooled_embedding, _ = self.pool_op(word_embeddings, 0)
sentence.set_embedding(self.name, pooled_embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
def extra_repr(self):
return f"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}"
class DocumentRNNEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
rnn_type="GRU",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the rnn
:param rnn_layers: the number of layers for the rnn
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the rnn or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
:param rnn_type: 'GRU' or 'LSTM'
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.rnn_type = rnn_type
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.static_embeddings = False
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
# bidirectional RNN on top of embedding layer
if rnn_type == "LSTM":
self.rnn = torch.nn.LSTM(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
else:
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
self.name = "document_" + self.rnn._get_name()
# dropouts
if locked_dropout > 0.0:
self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
else:
self.dropout = torch.nn.Dropout(dropout)
self.use_word_dropout: bool = word_dropout > 0.0
if self.use_word_dropout:
self.word_dropout = WordDropout(word_dropout)
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
sentences.sort(key=lambda x: len(x), reverse=True)
self.embeddings.embed(sentences)
# first, sort sentences by number of tokens
longest_token_sequence_in_batch: int = len(sentences[0])
all_sentence_tensors = []
lengths: List[int] = []
# go through each sentence in batch
for i, sentence in enumerate(sentences):
lengths.append(len(sentence.tokens))
word_embeddings = []
            for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
# PADDING: pad shorter sentences out
for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):
word_embeddings.append(
torch.zeros(
self.length_of_all_token_embeddings, dtype=torch.float
).unsqueeze(0)
)
word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)
sentence_states = word_embeddings_tensor
# ADD TO SENTENCE LIST: add the representation
all_sentence_tensors.append(sentence_states.unsqueeze(1))
# --------------------------------------------------------------------
# GET REPRESENTATION FOR ENTIRE BATCH
# --------------------------------------------------------------------
sentence_tensor = torch.cat(all_sentence_tensors, 1)
# --------------------------------------------------------------------
# FF PART
# --------------------------------------------------------------------
# use word dropout if set
if self.use_word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
sentence_tensor = self.dropout(sentence_tensor)
packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)
self.rnn.flatten_parameters()
rnn_out, hidden = self.rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(rnn_out)
outputs = self.dropout(outputs)
# --------------------------------------------------------------------
# EXTRACT EMBEDDINGS FROM RNN
# --------------------------------------------------------------------
for sentence_no, length in enumerate(lengths):
last_rep = outputs[length - 1, sentence_no]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[0, sentence_no]
embedding = torch.cat([first_rep, last_rep], 0)
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
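# Added usage sketch (not part of the original module): DocumentRNNEmbeddings runs a GRU
# (or LSTM) over the token embeddings and uses the final hidden state, plus the first one
# when bidirectional, as the sentence vector; unlike the pooling variant it is non-static
# and trained with the downstream task. WordEmbeddings("glove") is an assumed base embedder.
def _example_document_rnn_usage():
    document_embedder = DocumentRNNEmbeddings(
        [WordEmbeddings("glove")], hidden_size=128, bidirectional=True, rnn_type="GRU"
    )
    sentence = Sentence("The grass is green .")
    document_embedder.embed(sentence)
    return sentence.get_embedding()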
@deprecated(
version="0.4",
reason="The functionality of this class is moved to 'DocumentRNNEmbeddings'",
)
class DocumentLSTMEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the lstm
:param rnn_layers: the number of layers for the lstm
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the lstm or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional lstm or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.name = "document_lstm"
self.static_embeddings = False
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
        # word reprojection map and (optionally bidirectional) GRU on top of embedding layer
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
# dropouts
if locked_dropout > 0.0:
self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
else:
self.dropout = torch.nn.Dropout(dropout)
self.use_word_dropout: bool = word_dropout > 0.0
if self.use_word_dropout:
self.word_dropout = WordDropout(word_dropout)
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
sentences.sort(key=lambda x: len(x), reverse=True)
self.embeddings.embed(sentences)
# first, sort sentences by number of tokens
longest_token_sequence_in_batch: int = len(sentences[0])
all_sentence_tensors = []
lengths: List[int] = []
# go through each sentence in batch
for i, sentence in enumerate(sentences):
lengths.append(len(sentence.tokens))
word_embeddings = []
            for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
# PADDING: pad shorter sentences out
for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):
word_embeddings.append(
torch.zeros(
self.length_of_all_token_embeddings, dtype=torch.float
).unsqueeze(0)
)
word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)
sentence_states = word_embeddings_tensor
# ADD TO SENTENCE LIST: add the representation
all_sentence_tensors.append(sentence_states.unsqueeze(1))
# --------------------------------------------------------------------
# GET REPRESENTATION FOR ENTIRE BATCH
# --------------------------------------------------------------------
sentence_tensor = torch.cat(all_sentence_tensors, 1)
# --------------------------------------------------------------------
# FF PART
# --------------------------------------------------------------------
# use word dropout if set
if self.use_word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
sentence_tensor = self.dropout(sentence_tensor)
packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)
self.rnn.flatten_parameters()
lstm_out, hidden = self.rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
outputs = self.dropout(outputs)
# --------------------------------------------------------------------
# EXTRACT EMBEDDINGS FROM LSTM
# --------------------------------------------------------------------
for sentence_no, length in enumerate(lengths):
last_rep = outputs[length - 1, sentence_no]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[0, sentence_no]
embedding = torch.cat([first_rep, last_rep], 0)
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
class DocumentLMEmbeddings(DocumentEmbeddings):
def __init__(self, flair_embeddings: List[FlairEmbeddings], detach: bool = True):
super().__init__()
self.embeddings = flair_embeddings
self.name = "document_lm"
self.static_embeddings = detach
self.detach = detach
self._embedding_length: int = sum(
embedding.embedding_length for embedding in flair_embeddings
)
@property
def embedding_length(self) -> int:
return self._embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]):
if type(sentences) is Sentence:
sentences = [sentences]
for embedding in self.embeddings:
embedding.embed(sentences)
# iterate over sentences
for sentence in sentences:
# if its a forward LM, take last state
if embedding.is_forward_lm:
sentence.set_embedding(
embedding.name,
sentence[len(sentence) - 1]._embeddings[embedding.name],
)
else:
sentence.set_embedding(
embedding.name, sentence[0]._embeddings[embedding.name]
)
return sentences
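# Added usage sketch (not part of the original module): DocumentLMEmbeddings takes the last
# token's embedding from a forward character LM and the first token's embedding from a
# backward one as the sentence representation, as implemented above. Both model ids appear
# in FlairEmbeddings.PRETRAINED_MODEL_ARCHIVE_MAP.
def _example_document_lm_usage():
    document_embedder = DocumentLMEmbeddings(
        [FlairEmbeddings("news-forward"), FlairEmbeddings("news-backward")]
    )
    sentence = Sentence("The grass is green .")
    document_embedder.embed(sentence)
    return sentence.get_embedding()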
class NILCEmbeddings(WordEmbeddings):
def __init__(self, embeddings: str, model: str = "skip", size: int = 100):
"""
Initializes portuguese classic word embeddings trained by NILC Lab (http://www.nilc.icmc.usp.br/embeddings).
Constructor downloads required files if not there.
:param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'
:param model: one of: 'skip' or 'cbow'. This is not applicable to glove.
:param size: one of: 50, 100, 300, 600 or 1000.
"""
base_path = "http://143.107.183.175:22980/download.php?file=embeddings/"
cache_dir = Path("embeddings") / embeddings.lower()
# GLOVE embeddings
if embeddings.lower() == "glove":
cached_path(
f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir
)
elif embeddings.lower() in ["fasttext", "wang2vec", "word2vec"]:
cached_path(
f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir
)
elif not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
self.name: str = str(embeddings)
self.static_embeddings = True
log.info("Reading embeddings from %s" % embeddings)
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
open_inside_zip(str(embeddings), cache_dir=cache_dir)
)
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
super(TokenEmbeddings, self).__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def __str__(self):
return self.name
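# Added usage sketch (not part of the original module): NILCEmbeddings wraps the Portuguese
# word embeddings from the NILC lab; the combination below ('word2vec', skip-gram, size 100)
# is one of the valid options listed in the docstring and triggers a download on first use.
def _example_nilc_embeddings_usage():
    embedder = NILCEmbeddings("word2vec", model="skip", size=100)
    sentence = Sentence("O gato está no telhado .")
    embedder.embed(sentence)
    return [token.get_embedding() for token in sentence]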
def replace_with_language_code(string: str):
string = string.replace("arabic-", "ar-")
string = string.replace("basque-", "eu-")
string = string.replace("bulgarian-", "bg-")
string = string.replace("croatian-", "hr-")
string = string.replace("czech-", "cs-")
string = string.replace("danish-", "da-")
string = string.replace("dutch-", "nl-")
string = string.replace("farsi-", "fa-")
string = string.replace("persian-", "fa-")
string = string.replace("finnish-", "fi-")
string = string.replace("french-", "fr-")
string = string.replace("german-", "de-")
string = string.replace("hebrew-", "he-")
string = string.replace("hindi-", "hi-")
string = string.replace("indonesian-", "id-")
string = string.replace("italian-", "it-")
string = string.replace("japanese-", "ja-")
string = string.replace("norwegian-", "no")
string = string.replace("polish-", "pl-")
string = string.replace("portuguese-", "pt-")
string = string.replace("slovenian-", "sl-")
string = string.replace("spanish-", "es-")
string = string.replace("swedish-", "sv-")
return string
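# Added illustration (not part of the original module): replace_with_language_code maps the
# long language prefixes accepted for backwards compatibility onto the two-letter codes used
# as keys in FlairEmbeddings.PRETRAINED_MODEL_ARCHIVE_MAP, e.g. "german-forward" -> "de-forward".
def _example_replace_with_language_code():
    assert replace_with_language_code("german-forward") == "de-forward"
    assert replace_with_language_code("portuguese-backward") == "pt-backward"
    return True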
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""nn.Module with additional great features."""
import collections
import copy
import inspect
import os
import re
import tempfile
from abc import ABC
from argparse import Namespace
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import torch
from torch import ScriptModule, Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from pytorch_lightning import _logger as log
from pytorch_lightning.core.grads import GradInformation
from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, ModelIO, PRIMITIVE_TYPES
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.utilities import rank_zero_warn, TPU_AVAILABLE
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args
if TPU_AVAILABLE:
import torch_xla.core.xla_model as xm
class LightningModule(
ABC,
DeviceDtypeModuleMixin,
GradInformation,
ModelIO,
ModelHooks,
DataHooks,
CheckpointHooks,
Module,
):
# Below is for property support of JIT in PyTorch 1.7
# since none of them is important when using JIT, we are going to ignore them.
__jit_unused_properties__ = [
"datamodule",
"example_input_array",
"hparams",
"hparams_initial",
"on_gpu",
"current_epoch",
"global_step",
] + DeviceDtypeModuleMixin.__jit_unused_properties__
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/
# torch/nn/modules/module.py#L227)
torch._C._log_api_usage_once(f"lightning.module.{self.__class__.__name__}")
self.exp_save_path = None
self.loaded_optimizer_states_dict = {}
#: Pointer to the trainer object
self.trainer = None
#: Pointer to the logger object
self.logger = None
#: True if using dp
self.use_dp = False
#: True if using ddp
self.use_ddp = False
#: True if using ddp2
self.use_ddp2 = False
#: True if using tpu
self.use_tpu = False
#: True if using amp
self.use_amp = False
#: The precision used
self.precision = 32
# optionally can be set by user
self._example_input_array = None
self._datamodule = None
self._results: Optional[Result] = None
self._current_fx_name = ''
self._running_manual_backward = False
self._current_hook_fx_name = None
self._current_dataloader_idx = None
self._automatic_optimization: bool = True
def optimizers(self, use_pl_optimizer: bool = True) -> Union[Optimizer, List[Optimizer], List[LightningOptimizer]]:
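"""
Convenience accessor for the optimizer(s) set up in ``configure_optimizers``.
Example (an illustrative sketch, not part of the original source)::
    # a single configured optimizer is returned directly
    opt = self.optimizers()
    # with multiple optimizers a list is returned instead
    opt_gen, opt_disc = self.optimizers()
"""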
if use_pl_optimizer:
opts = list(self.trainer.lightning_optimizers.values())
else:
opts = self.trainer.optimizers
# single optimizer
if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer):
return opts[0]
# multiple opts
return opts
@property
def example_input_array(self) -> Any:
return self._example_input_array
@property
def current_epoch(self) -> int:
"""The current epoch"""
return self.trainer.current_epoch if self.trainer else 0
@property
def global_step(self) -> int:
"""Total training batches seen across all epochs"""
return self.trainer.global_step if self.trainer else 0
@example_input_array.setter
def example_input_array(self, example: Any) -> None:
self._example_input_array = example
@property
def datamodule(self) -> Any:
return self._datamodule
@datamodule.setter
def datamodule(self, datamodule: Any) -> None:
self._datamodule = datamodule
@property
def on_gpu(self):
"""
True if your model is currently running on GPUs.
Useful to set flags around the LightningModule for different CPU vs GPU behavior.
"""
return self.device.type == "cuda"
@property
def automatic_optimization(self) -> bool:
"""
If set to ``False`` you are responsible for calling ``.backward()``, ``.step()`` and ``.zero_grad()`` yourself.
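Example (an illustrative sketch of manual optimization, assuming a single optimizer and a
hypothetical ``compute_loss`` helper)::
    def __init__(self):
        super().__init__()
        self.automatic_optimization = False
    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        loss = self.compute_loss(batch)  # hypothetical helper
        self.manual_backward(loss, opt)
        opt.step()
        opt.zero_grad()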
"""
return self._automatic_optimization
@automatic_optimization.setter
def automatic_optimization(self, automatic_optimization: bool) -> None:
self._automatic_optimization = automatic_optimization
def print(self, *args, **kwargs) -> None:
r"""
Prints only from process 0. Use this in any distributed mode to log only once.
Args:
*args: The thing to print. Will be passed to Python's built-in print function.
**kwargs: Will be passed to Python's built-in print function.
Example:
.. code-block:: python
def forward(self, x):
self.print(x, 'in forward')
"""
if self.trainer.is_global_zero:
print(*args, **kwargs)
def log(
self,
name: str,
value: Any,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a key, value
Example::
self.log('train_loss', loss)
The default behavior per hook is as follows
.. csv-table:: ``*`` also applies to the test loop
:header: "LightningModule Hook", "on_step", "on_epoch", "prog_bar", "logger"
:widths: 20, 10, 10, 10, 10
"training_step", "T", "F", "F", "T"
"training_step_end", "T", "F", "F", "T"
"training_epoch_end", "F", "T", "F", "T"
"validation_step*", "F", "T", "F", "T"
"validation_step_end*", "F", "T", "F", "T"
"validation_epoch_end*", "F", "T", "F", "T"
Args:
name: key name
value: the value to log
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group
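Example of overriding the per-hook defaults shown above (illustrative)::
    # log at every step and as an epoch aggregate, and show it in the progress bar
    self.log('train_acc', acc, on_step=True, on_epoch=True, prog_bar=True)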
"""
if self._results is not None:
# in any epoch end can't log step metrics (only epoch metric)
if 'epoch_end' in self._current_fx_name and on_step:
m = f'on_step=True cannot be used on {self._current_fx_name} method'
raise MisconfigurationException(m)
if 'epoch_end' in self._current_fx_name and on_epoch is False:
m = f'on_epoch cannot be False when called from the {self._current_fx_name} method'
raise MisconfigurationException(m)
# add log_dict
# TODO: if logged twice fail with crash
# set the default depending on the fx_name
on_step = self.__auto_choose_log_on_step(on_step)
on_epoch = self.__auto_choose_log_on_epoch(on_epoch)
if self._current_hook_fx_name is not None:
self.trainer.logger_connector.check_logging_in_callbacks(
self._current_hook_fx_name,
on_step=on_step,
on_epoch=on_epoch
)
# make sure user doesn't introduce logic for multi-dataloaders
if "/dataloader_idx_" in name:
raise MisconfigurationException(
f"Logged key: {name} should not contain information about dataloader_idx.")
accelerator = self.trainer.accelerator_backend
self._results.log(
name,
value,
prog_bar,
logger,
on_step,
on_epoch,
reduce_fx,
tbptt_reduce_fx,
tbptt_pad_token,
enable_graph,
sync_dist,
sync_dist_op,
sync_dist_group,
accelerator.sync_tensor,
self._current_dataloader_idx,
self.device,
)
def log_dict(
self,
dictionary: dict,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a dictionary of values at once
Example::
values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
self.log_dict(values)
Args:
dictionary: key value pairs (str, tensors)
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group
"""
for k, v in dictionary.items():
self.log(
name=k,
value=v,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
sync_dist=sync_dist,
sync_dist_group=sync_dist_group,
sync_dist_op=sync_dist_op,
tbptt_pad_token=tbptt_pad_token,
tbptt_reduce_fx=tbptt_reduce_fx,
)
def write_prediction(self, name, value, filename='predictions.pt'):
self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename)
def write_prediction_dict(self, predictions_dict, filename='predictions.pt'):
for k, v in predictions_dict.items():
self.write_prediction(k, v, filename)
def __auto_choose_log_on_step(self, on_step):
if on_step is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_step = True
elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',
'evaluation_epoch_end', 'training_epoch_end'}:
on_step = False
else:
on_step = False
return on_step
def __auto_choose_log_on_epoch(self, on_epoch):
if on_epoch is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_epoch = False
elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',
'evaluation_epoch_end', 'training_epoch_end'}:
on_epoch = True
else:
on_epoch = True
return on_epoch
def all_gather(self, tensor: Union[torch.Tensor], group: Optional[Any] = None, sync_grads: bool = False):
r"""
Allows users to call ``self.all_gather()`` from the LightningModule, thus making
the ``all_gather`` operation accelerator agnostic.
``all_gather`` is a function provided by accelerators to gather a tensor from several
distributed processes.
Args:
tensor: tensor of shape (batch, ...)
group: the process group to gather results from. Defaults to all processes (world)
sync_grads: flag that allows users to synchronize gradients for all_gather op
Return:
A tensor of shape (world_size, batch, ...)
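Example (an illustrative sketch; assumes the module runs under a distributed Trainer)::
    def validation_step(self, batch, batch_idx):
        preds = self(batch)                # shape: (batch, ...)
        gathered = self.all_gather(preds)  # shape: (world_size, batch, ...)
        return gathered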
"""
return self.trainer.accelerator_backend.all_gather(tensor, group=group, sync_grads=sync_grads)
def forward(self, *args, **kwargs):
r"""
Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define
the operations you want to use for prediction (i.e.: on a server or as a feature extractor).
Normally you'd call ``self()`` from your :meth:`training_step` method.
This makes it easy to write a complex system for training with the outputs
you'd want in a prediction setting.
You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful
when using the module outside Lightning in a production setting.
Args:
*args: Whatever you decide to pass into the forward method.
**kwargs: Keyword arguments are also possible.
Return:
Predicted output
Examples:
.. code-block:: python
# example if we were using this model as a feature extractor
def forward(self, x):
feature_maps = self.convnet(x)
return feature_maps
def training_step(self, batch, batch_idx):
x, y = batch
feature_maps = self(x)
logits = self.classifier(feature_maps)
# ...
return loss
# splitting it this way allows the model to be used as a feature extractor
model = MyModelAbove()
inputs = server.get_request()
results = model(inputs)
server.write_results(results)
# -------------
# This is in stark contrast to torch.nn.Module where normally you would have this:
def forward(self, batch):
x, y = batch
feature_maps = self.convnet(x)
logits = self.classifier(feature_maps)
return logits
"""
return super().forward(*args, **kwargs)
def training_step(self, *args, **kwargs):
r"""
Here you compute and return the training loss and some additional metrics for e.g.
the progress bar or logger.
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): Integer displaying index of this batch
optimizer_idx (int): When using multiple optimizers, this argument will also be present.
hiddens(:class:`~torch.Tensor`): Passed in if
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.
Return:
Any of.
- :class:`~torch.Tensor` - The loss tensor
- `dict` - A dictionary. Can include any keys, but must include the key 'loss'
- `None` - Training will skip to the next batch
In this step you'd normally do the forward pass and calculate the loss for a batch.
You can also do fancier things like multiple forward passes or something model specific.
Example::
def training_step(self, batch, batch_idx):
x, y, z = batch
out = self.encoder(x)
loss = self.loss(out, x)
return loss
If you define multiple optimizers, this step will be called with an additional
``optimizer_idx`` parameter.
.. code-block:: python
# Multiple optimizers (e.g.: GANs)
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
# do training_step with encoder
if optimizer_idx == 1:
# do training_step with decoder
If you add truncated back propagation through time you will also get an additional
argument with the hidden states of the previous step.
.. code-block:: python
# Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
# hiddens are the hidden states from the previous truncated backprop step
...
out, hiddens = self.lstm(data, hiddens)
...
return {'loss': loss, 'hiddens': hiddens}
Note:
The loss value shown in the progress bar is smoothed (averaged) over the last values,
so it differs from the actual loss returned in train/validation step.
"""
rank_zero_warn(
"`training_step` must be implemented to be used with the Lightning Trainer"
)
def training_step_end(self, *args, **kwargs):
"""
Use this when training with dp or ddp2 because :meth:`training_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]
training_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in `training_step` for each batch part.
Return:
Anything
When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
# softmax uses only a portion of the batch in the denominator
loss = self.softmax(out)
loss = nce_loss(loss)
return loss
If you wish to do something with all the parts of the batch, then use this method to do it:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return {'pred': out}
def training_step_end(self, training_step_outputs):
gpu_0_pred = training_step_outputs[0]['pred']
gpu_1_pred = training_step_outputs[1]['pred']
gpu_n_pred = training_step_outputs[n]['pred']
# this softmax now uses the full batch
loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])
return loss
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def training_epoch_end(self, outputs: List[Any]) -> None:
"""
Called at the end of the training epoch with the outputs of all training steps.
Use this in case you need to do something with all the outputs for every training_step.
.. code-block:: python
# the pseudocode for these calls
train_outs = []
for train_batch in train_data:
out = training_step(train_batch)
train_outs.append(out)
training_epoch_end(train_outs)
Args:
outputs: List of outputs you defined in :meth:`training_step`, or if there are
multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If this method is not overridden, this won't be called.
Example::
def training_epoch_end(self, training_step_outputs):
# do something with all training_step outputs
return result
With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each training step for that dataloader.
.. code-block:: python
def training_epoch_end(self, training_step_outputs):
for out in training_step_outputs:
# do something here
"""
def validation_step(self, *args, **kwargs):
r"""
Operates on a single batch of data from the validation set.
In this step you might generate examples or calculate anything of interest like accuracy.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple val dataloaders used)
Return:
Any of.
- Any object or value
- `None` - Validation will skip to the next batch
.. code-block:: python
# pseudocode of order
out = validation_step()
if defined('validation_step_end'):
out = validation_step_end(out)
out = validation_epoch_end(out)
.. code-block:: python
# if you have one val dataloader:
def validation_step(self, batch, batch_idx)
# if you have multiple val dataloaders:
def validation_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single validation dataset
def validation_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'val_loss': loss, 'val_acc': val_acc})
If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument.
.. code-block:: python
# CASE 2: multiple validation dataloaders
def validation_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to validate you don't need to implement this method.
Note:
When the :meth:`validation_step` is called, the model has been put in eval mode
and PyTorch gradients have been disabled. At the end of validation,
the model goes back to training mode and gradients are enabled.
"""
def validation_step_end(self, *args, **kwargs):
"""
Use this when validating with dp or ddp2 because :meth:`validation_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]
validation_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`validation_step`
for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT validation_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
loss = self.softmax(out)
loss = nce_loss(loss)
self.log('val_loss', loss)
# --------------
# with validation_step_end to do softmax over the full batch
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
return out
def validation_step_end(self, val_step_outputs):
for out in val_step_outputs:
# do something with these
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def validation_epoch_end(self, outputs: List[Any]) -> None:
"""
Called at the end of the validation epoch with the outputs of all validation steps.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
outputs: List of outputs you defined in :meth:`validation_step`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If you didn't define a :meth:`validation_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def validation_epoch_end(self, val_step_outputs):
for out in val_step_outputs:
# do something
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each validation step for that dataloader.
.. code-block:: python
def validation_epoch_end(self, outputs):
for dataloader_output_result in outputs:
dataloader_outs = dataloader_output_result.dataloader_i_outputs
self.log('final_metric', final_value)
"""
def test_step(self, *args, **kwargs):
r"""
Operates on a single batch of data from the test set.
In this step you'd normally generate examples or calculate anything of interest
such as accuracy.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch.
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple test dataloaders used).
Return:
Any of.
- Any object or value
- `None` - Testing will skip to the next batch
.. code-block:: python
# if you have one test dataloader:
def test_step(self, batch, batch_idx)
# if you have multiple test dataloaders:
def test_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single test dataset
def test_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'test_loss': loss, 'test_acc': test_acc})
If you pass in multiple test dataloaders, :meth:`test_step` will have an additional
argument.
.. code-block:: python
# CASE 2: multiple test dataloaders
def test_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to test you don't need to implement this method.
Note:
When the :meth:`test_step` is called, the model has been put in eval mode and
PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
to training mode and gradients are enabled.
"""
def test_step_end(self, *args, **kwargs):
"""
Use this when testing with dp or ddp2 because :meth:`test_step` will operate
on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]
test_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`test_step` for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT test_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
loss = self.softmax(out)
self.log('test_loss', loss)
# --------------
# with test_step_end to do softmax over the full batch
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return out
def test_step_end(self, output_results):
# this out is now the full size of the batch
all_test_step_outs = output_results.out
loss = nce_loss(all_test_step_outs)
self.log('test_loss', loss)
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def test_epoch_end(
self, outputs: List[Any]
) -> None:
"""
Called at the end of a test epoch with the output of all test steps.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
outputs: List of outputs you defined in :meth:`test_step_end`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader
Return:
None
Note:
If you didn't define a :meth:`test_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def test_epoch_end(self, outputs):
# do something with the outputs of all test batches
all_test_preds = test_step_outputs.predictions
some_result = calc_all_results(all_test_preds)
self.log(some_result)
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each test step for that dataloader.
.. code-block:: python
def test_epoch_end(self, outputs):
final_value = 0
for dataloader_outputs in outputs:
for test_step_out in dataloader_outputs:
# do something
final_value += test_step_out
self.log('final_metric', final_value)
"""
def configure_optimizers(
self,
):
r"""
Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Return:
Any of these 6 options.
- Single optimizer.
- List or Tuple - List of optimizers.
- Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).
- Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler'
key whose value is a single LR scheduler or lr_dict.
- Tuple of dictionaries as described, with an optional 'frequency' key.
- None - Fit will run without any optimizer.
Note:
The 'frequency' value is an int corresponding to the number of sequential batches
optimized with the specific optimizer. It should be given to none or to all of the optimizers.
There is a difference between passing multiple optimizers in a list,
and passing multiple optimizers in dictionaries with a frequency of 1:
In the former case, all optimizers will operate on the given batch in each optimization step.
In the latter, only one optimizer will operate on the given batch at every step.
The lr_dict is a dictionary which contains the scheduler and its associated configuration.
The default configuration is shown below.
.. code-block:: python
{
'scheduler': lr_scheduler, # The LR scheduler instance (required)
'interval': 'epoch', # The unit of the scheduler's step size
'frequency': 1, # The frequency of the scheduler
'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler
'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor
'strict': True, # Whether to crash the training if `monitor` is not found
'name': None, # Custom name for LearningRateMonitor to use
}
Only the ``scheduler`` key is required, the rest will be set to the defaults above.
Examples:
.. code-block:: python
# most cases
def configure_optimizers(self):
opt = Adam(self.parameters(), lr=1e-3)
return opt
# multiple optimizer case (e.g.: GAN)
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
return generator_opt, discriminator_opt
# example with learning rate schedulers
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)
return [generator_opt, discriminator_opt], [discriminator_sched]
# example with step-based learning rate schedulers
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),
'interval': 'step'} # called after each training step
dis_sched = CosineAnnealing(dis_opt, T_max=10) # called every epoch
return [gen_opt, dis_opt], [gen_sched, dis_sched]
# example with optimizer frequencies
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
# https://arxiv.org/abs/1704.00028
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
n_critic = 5
return (
{'optimizer': dis_opt, 'frequency': n_critic},
{'optimizer': gen_opt, 'frequency': 1}
)
Note:
Some things to know:
- Lightning calls ``.backward()`` and ``.step()`` on each optimizer
and learning rate scheduler as needed.
- If you use 16-bit precision (``precision=16``), Lightning will automatically
handle the optimizers for you.
- If you use multiple optimizers, :meth:`training_step` will have an additional
``optimizer_idx`` parameter.
- If you use LBFGS, Lightning handles the closure function automatically for you.
- If you use multiple optimizers, gradients will be calculated only
for the parameters of current optimizer at each training step.
- If you need to control how often those optimizers step or override the
default ``.step()`` schedule, override the :meth:`optimizer_step` hook.
- If you only want to call a learning rate scheduler every ``x`` step or epoch,
or want to monitor a custom metric, you can specify these in a lr_dict:
.. code-block:: python
{
'scheduler': lr_scheduler,
'interval': 'step', # or 'epoch'
'monitor': 'val_f1',
'frequency': x,
}
"""
rank_zero_warn(
"`configure_optimizers` must be implemented to be used with the Lightning Trainer"
)
def manual_backward(self, loss: Tensor, optimizer: Optimizer, *args, **kwargs) -> None:
"""
Call this directly from your training_step when doing optimizations manually.
By using this, Lightning can ensure that all the proper scaling (e.g. when using 16-bit precision) has been done for you.
This function forwards all args to the .backward() call as well.
.. tip:: In manual mode we still automatically clip grads if Trainer(gradient_clip_val=x) is set
.. tip:: In manual mode we still automatically accumulate grad over batches if
Trainer(accumulate_grad_batches=x) is set and you use `optimizer.step()`
Example::
def training_step(...):
(opt_a, opt_b) = self.optimizers()
loss = ...
# automatically applies scaling, etc...
self.manual_backward(loss, opt_a)
opt_a.step()
"""
# make sure we're using manual opt
self._verify_is_manual_optimization('manual_backward')
# backward
self._running_manual_backward = True
self.trainer.train_loop.backward(loss, optimizer, -1, *args, **kwargs)
self._running_manual_backward = False
def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
"""
Override backward with your own implementation if you need to.
Args:
loss: Loss is already scaled by accumulated grads
optimizer: Current optimizer being used
optimizer_idx: Index of the current optimizer being used
Called to perform backward step.
Feel free to override as needed.
The loss passed in has already been scaled for accumulated gradients if requested.
Example::
def backward(self, loss, optimizer, optimizer_idx):
loss.backward()
"""
if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
loss.backward(*args, **kwargs)
def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
"""
Makes sure only the gradients of the current optimizer's parameters are calculated
in the training step to prevent dangling gradients in multiple-optimizer setup.
.. note:: Only called when using multiple optimizers
Override for your own behavior
Args:
optimizer:
optimizer_idx:
"""
for param in self.parameters():
param.requires_grad = False
for group in optimizer.param_groups:
for param in group['params']:
param.requires_grad = True
def optimizer_step(
self,
epoch: int = None,
batch_idx: int = None,
optimizer: Optimizer = None,
optimizer_idx: int = None,
optimizer_closure: Optional[Callable] = None,
on_tpu: bool = None,
using_native_amp: bool = None,
using_lbfgs: bool = None,
) -> None:
r"""
Override this method to adjust the default way the
:class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.
By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example
once per optimizer.
.. tip:: With ``Trainer(enable_pl_optimizer=True)``, you can use ``optimizer.step()`` directly and it will handle zero_grad, accumulated gradients, AMP, TPU and more automatically for you.
Warning:
If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter
to ``optimizer.step()`` function as shown in the examples. This ensures that
``train_step_and_backward_closure`` is called within
:meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers this indexes into that list.
optimizer_closure: closure for all optimizers
on_tpu: true if TPU backward is required
using_native_amp: True if using native amp
using_lbfgs: True if the matching optimizer is lbfgs
Examples:
.. code-block:: python
# DEFAULT
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
optimizer.step(closure=optimizer_closure)
# Alternating schedule for optimizer steps (i.e.: GANs)
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# update generator opt every 2 steps
if optimizer_idx == 0:
if batch_idx % 2 == 0 :
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
# update discriminator opt every 4 steps
if optimizer_idx == 1:
if batch_idx % 4 == 0 :
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
# ...
# add as many optimizers as you want
Here's another example showing how to use this for more advanced things such as
learning rate warm-up:
.. code-block:: python
# learning rate warm-up
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# warm up lr
if self.trainer.global_step < 500:
lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)
for pg in optimizer.param_groups:
pg['lr'] = lr_scale * self.learning_rate
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
"""
optimizer.step(closure=optimizer_closure)
def optimizer_zero_grad(
self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int
):
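"""
Called by the training loop to zero the gradients of ``optimizer``.
Override to customize; an illustrative sketch (``set_to_none`` requires a sufficiently recent PyTorch)::
    def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
        optimizer.zero_grad(set_to_none=True)
"""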
optimizer.zero_grad()
def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:
r"""
When using truncated backpropagation through time, each batch must be split along the
time dimension. Lightning handles this by default, but for custom behavior override
this function.
Args:
batch: Current batch
split_size: The size of the split
Return:
List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated
back propagation through time. The default implementation splits root level Tensors and
Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.
Examples:
.. code-block:: python
def tbptt_split_batch(self, batch, split_size):
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t:t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t:t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
Note:
Called in the training loop after
:meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`
if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.
Each returned batch split is passed separately to :meth:`training_step`.
"""
time_dims = [
len(x[0])
for x in batch
if isinstance(x, (torch.Tensor, collections.Sequence))
]
assert len(time_dims) >= 1, "Unable to determine batch time dimension"
assert all(
x == time_dims[0] for x in time_dims
), "Batch time dimension length is ambiguous"
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t: t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t: t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
def summarize(self, mode: Optional[str] = ModelSummary.MODE_DEFAULT) -> Optional[ModelSummary]:
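"""
Prints and returns a :class:`~pytorch_lightning.core.memory.ModelSummary` of this module.
Example (illustrative; the available modes come from ``ModelSummary.MODES``)::
    model.summarize(mode='top')   # summarize only the top-level children (default)
    model.summarize(mode='full')  # summarize every layer
"""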
model_summary = None
if mode in ModelSummary.MODES:
model_summary = ModelSummary(self, mode=mode)
log.info("\n" + str(model_summary))
elif mode is not None:
raise MisconfigurationException(
f"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}"
)
return model_summary
def freeze(self) -> None:
r"""
Freeze all params for inference.
Example:
.. code-block:: python
model = MyLightningModule(...)
model.freeze()
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""
Unfreeze all parameters for training.
.. code-block:: python
model = MyLightningModule(...)
model.unfreeze()
"""
for param in self.parameters():
param.requires_grad = True
self.train()
def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
r"""
Implement this to override the default items displayed in the progress bar.
By default it includes the average loss value, split index of BPTT (if used)
and the version of the experiment when using a logger.
.. code-block::
Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]
Here is an example how to override the defaults:
.. code-block:: python
def get_progress_bar_dict(self):
# don't show the version number
items = super().get_progress_bar_dict()
items.pop("v_num", None)
return items
Return:
Dictionary with the items to be displayed in the progress bar.
"""
# call .item() only once but store elements without graphs
running_train_loss = self.trainer.train_loop.running_loss.mean()
avg_training_loss = None
if running_train_loss is not None:
avg_training_loss = running_train_loss.cpu().item()
elif self.trainer.train_loop.automatic_optimization:
avg_training_loss = float('NaN')
tqdm_dict = {}
if avg_training_loss is not None:
tqdm_dict["loss"] = f"{avg_training_loss:.3g}"
if self.trainer.truncated_bptt_steps is not None:
tqdm_dict["split_idx"] = self.trainer.split_idx
if self.trainer.logger is not None and self.trainer.logger.version is not None:
version = self.trainer.logger.version
# show last 4 places of long version strings
version = version[-4:] if isinstance(version, str) else version
tqdm_dict["v_num"] = version
return tqdm_dict
def _verify_is_manual_optimization(self, fn_name):
if self.trainer.train_loop.automatic_optimization:
raise MisconfigurationException(
f'to use {fn_name}, please disable automatic optimization:'
' set the model property `automatic_optimization` to False'
)
@classmethod
def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
"""
Collect all module arguments in the current constructor and all child constructors.
The child constructors are all the ``__init__`` methods that reach the current class through
(chained) ``super().__init__()`` calls.
Args:
frame: instance frame
Returns:
self_arguments: arguments dictionary of the first instance
parents_arguments: arguments dictionary of the parent's instances
"""
if not frame:
frame = inspect.currentframe()
frame_args = collect_init_args(frame.f_back, [])
self_arguments = frame_args[-1]
# set hyper_parameters in child
self_arguments = self_arguments
parents_arguments = {}
# add all arguments from parents
for args in frame_args[:-1]:
parents_arguments.update(args)
return self_arguments, parents_arguments
def save_hyperparameters(self, *args, frame=None) -> None:
"""Save all model arguments.
Args:
args: a single object of `dict`, `Namespace` or `OmegaConf` type,
or string names of arguments from the class `__init__`
>>> from collections import OrderedDict
>>> class ManuallyArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # manually assign arguments
... self.save_hyperparameters('arg1', 'arg3')
... def forward(self, *args, **kwargs):
... ...
>>> model = ManuallyArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg3": 3.14
>>> class AutomaticArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # equivalent automatic
... self.save_hyperparameters()
... def forward(self, *args, **kwargs):
... ...
>>> model = AutomaticArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg2": abc
"arg3": 3.14
>>> class SingleArgModel(LightningModule):
... def __init__(self, params):
... super().__init__()
... # manually assign single argument
... self.save_hyperparameters(params)
... def forward(self, *args, **kwargs):
... ...
>>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))
>>> model.hparams
"p1": 1
"p2": abc
"p3": 3.14
"""
if not frame:
frame = inspect.currentframe().f_back
init_args = get_init_args(frame)
assert init_args, "failed to inspect the self init"
if not args:
# take all arguments
hp = init_args
self._hparams_name = "kwargs" if hp else None
else:
# take only listed arguments in `save_hparams`
isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]
if len(isx_non_str) == 1:
hp = args[isx_non_str[0]]
cand_names = [k for k, v in init_args.items() if v == hp]
self._hparams_name = cand_names[0] if cand_names else None
else:
hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}
self._hparams_name = "kwargs"
# `hparams` are expected here
if hp:
self._set_hparams(hp)
# make a deep copy so that later runtime changes are not reflected
self._hparams_initial = copy.deepcopy(self._hparams)
def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:
if isinstance(hp, Namespace):
hp = vars(hp)
if isinstance(hp, dict):
hp = AttributeDict(hp)
elif isinstance(hp, PRIMITIVE_TYPES):
raise ValueError(f"Primitives {PRIMITIVE_TYPES} are not allowed.")
elif not isinstance(hp, ALLOWED_CONFIG_TYPES):
raise ValueError(f"Unsupported config type of {type(hp)}.")
if isinstance(hp, dict) and isinstance(self.hparams, dict):
self.hparams.update(hp)
else:
self._hparams = hp
@torch.no_grad()
def to_onnx(
self,
file_path: Union[str, Path],
input_sample: Optional[Any] = None,
**kwargs,
):
"""
Saves the model in ONNX format
Args:
file_path: The path of the file the onnx model should be saved to.
input_sample: An input for tracing. Default: None (Use self.example_input_array)
**kwargs: Will be passed to torch.onnx.export function.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
>>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:
... model = SimpleModel()
... input_sample = torch.randn((1, 64))
... model.to_onnx(tmpfile.name, input_sample, export_params=True)
... os.path.isfile(tmpfile.name)
True
"""
mode = self.training
if input_sample is None:
if self.example_input_array is None:
raise ValueError(
"Could not export to ONNX since neither `input_sample` nor"
" `model.example_input_array` attribute is set."
)
input_sample = self.example_input_array
input_sample = self.transfer_batch_to_device(input_sample)
if "example_outputs" not in kwargs:
self.eval()
kwargs["example_outputs"] = self(input_sample)
torch.onnx.export(self, input_sample, file_path, **kwargs)
self.train(mode)
@torch.no_grad()
def to_torchscript(
self,
file_path: Optional[Union[str, Path]] = None,
method: Optional[str] = 'script',
example_inputs: Optional[Any] = None,
**kwargs,
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
"""
By default compiles the whole model to a :class:`~torch.jit.ScriptModule`.
If you want to use tracing, please provide the argument `method='trace'` and make sure that either the
example_inputs argument is provided, or the model has self.example_input_array set.
If you would like to customize the modules that are scripted you should override this method.
In case you want to return multiple modules, we recommend using a dictionary.
Args:
file_path: Path where to save the torchscript. Default: None (no file saved).
method: Whether to use TorchScript's script or trace method. Default: 'script'
example_inputs: An input to be used to do tracing when method is set to 'trace'.
Default: None (Use self.example_input_array)
**kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or
:func:`torch.jit.trace` function.
Note:
- Requires the implementation of the
:meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.
- The exported script will be set to evaluation mode.
- It is recommended that you install the latest supported version of PyTorch
to use this feature without limitations. See also the :mod:`torch.jit`
documentation for supported features.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
...
>>> model = SimpleModel()
>>> torch.jit.save(model.to_torchscript(), "model.pt") # doctest: +SKIP
>>> os.path.isfile("model.pt") # doctest: +SKIP
>>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP
... example_inputs=torch.randn(1, 64))) # doctest: +SKIP
>>> os.path.isfile("model_trace.pt") # doctest: +SKIP
True
Return:
This LightningModule as a torchscript, regardless of whether file_path is
defined or not.
"""
mode = self.training
if method == 'script':
torchscript_module = torch.jit.script(self.eval(), **kwargs)
elif method == 'trace':
# if no example inputs are provided, try to see if model has example_input_array set
if example_inputs is None:
if self.example_input_array is None:
raise ValueError(
'Choosing method=`trace` requires either `example_inputs`'
' or `model.example_input_array` to be defined'
)
example_inputs = self.example_input_array
# automatically send example inputs to the right device and use trace
example_inputs = self.transfer_batch_to_device(example_inputs)
torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
else:
raise ValueError("The 'method' parameter only supports 'script' or 'trace',"
f" but value given was: {method}")
self.train(mode)
if file_path is not None:
torch.jit.save(torchscript_module, file_path)
return torchscript_module
@property
def hparams(self) -> Union[AttributeDict, dict, Namespace]:
if not hasattr(self, "_hparams"):
self._hparams = AttributeDict()
return self._hparams
@property
def hparams_initial(self) -> AttributeDict:
if not hasattr(self, "_hparams_initial"):
return AttributeDict()
# prevent any change
return copy.deepcopy(self._hparams_initial)
@hparams.setter
def hparams(self, hp: Union[dict, Namespace, Any]):
# TODO: remove this method in v1.3.0.
rank_zero_warn(
"The setter for self.hparams in LightningModule is deprecated since v1.1.0 and will be"
" removed in v1.3.0. Replace the assignment `self.hparams = hparams` with "
" `self.save_hyperparameters()`.",
DeprecationWarning
)
hparams_assignment_name = self.__get_hparams_assignment_variable()
self._hparams_name = hparams_assignment_name
self._set_hparams(hp)
# this resolves the case when the user does not use `save_hyperparameters` and does a hard assignment in init
if not hasattr(self, "_hparams_initial"):
self._hparams_initial = copy.deepcopy(self._hparams)
def __get_hparams_assignment_variable(self):
"""
looks at the code of the class to figure out what the user named self.hparams
this only happens when the user explicitly sets self.hparams
"""
try:
class_code = inspect.getsource(self.__class__)
lines = class_code.split("\n")
for line in lines:
line = re.sub(r"\s+", "", line, flags=re.UNICODE)
if ".hparams=" in line:
return line.split("=")[1]
except Exception:
return "hparams"
return None
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""nn.Module with additional great features."""
import collections
import copy
import inspect
import os
import re
import tempfile
from abc import ABC
from argparse import Namespace
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import torch
from torch import ScriptModule, Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from pytorch_lightning import _logger as log
from pytorch_lightning.core.grads import GradInformation
from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, ModelIO, PRIMITIVE_TYPES
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.utilities import rank_zero_warn, TPU_AVAILABLE
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args
if TPU_AVAILABLE:
import torch_xla.core.xla_model as xm
class LightningModule(
ABC,
DeviceDtypeModuleMixin,
GradInformation,
ModelIO,
ModelHooks,
DataHooks,
CheckpointHooks,
Module,
):
# Below is for property support of JIT in PyTorch 1.7
# since none of them is important when using JIT, we are going to ignore them.
__jit_unused_properties__ = [
"datamodule",
"example_input_array",
"hparams",
"hparams_initial",
"on_gpu",
"current_epoch",
"global_step",
] + DeviceDtypeModuleMixin.__jit_unused_properties__
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/
# torch/nn/modules/module.py#L227)
torch._C._log_api_usage_once(f"lightning.module.{self.__class__.__name__}")
self.exp_save_path = None
self.loaded_optimizer_states_dict = {}
#: Pointer to the trainer object
self.trainer = None
#: Pointer to the logger object
self.logger = None
#: True if using dp
self.use_dp = False
#: True if using ddp
self.use_ddp = False
#: True if using ddp2
self.use_ddp2 = False
# True if on tpu
self.use_tpu = False
#: True if using amp
self.use_amp = False
#: The precision used
self.precision = 32
# optionally can be set by user
self._example_input_array = None
self._datamodule = None
self._results: Optional[Result] = None
self._current_fx_name = ''
self._running_manual_backward = False
self._current_hook_fx_name = None
self._current_dataloader_idx = None
self._automatic_optimization: bool = True
def optimizers(self, use_pl_optimizer: bool = True) -> Union[Optimizer, List[Optimizer], List[LightningOptimizer]]:
if use_pl_optimizer:
opts = list(self.trainer.lightning_optimizers.values())
else:
opts = self.trainer.optimizers
# single optimizer
if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer):
return opts[0]
# multiple opts
return opts
@property
def example_input_array(self) -> Any:
return self._example_input_array
@property
def current_epoch(self) -> int:
"""The current epoch"""
return self.trainer.current_epoch if self.trainer else 0
@property
def global_step(self) -> int:
"""Total training batches seen across all epochs"""
return self.trainer.global_step if self.trainer else 0
@example_input_array.setter
def example_input_array(self, example: Any) -> None:
self._example_input_array = example
@property
def datamodule(self) -> Any:
return self._datamodule
@datamodule.setter
def datamodule(self, datamodule: Any) -> None:
self._datamodule = datamodule
@property
def on_gpu(self):
"""
True if your model is currently running on GPUs.
Useful to set flags around the LightningModule for different CPU vs GPU behavior.
"""
return self.device.type == "cuda"
@property
def automatic_optimization(self) -> bool:
"""
If False you are responsible for calling .backward, .step, zero_grad.
"""
return self._automatic_optimization
@automatic_optimization.setter
def automatic_optimization(self, automatic_optimization: bool) -> None:
self._automatic_optimization = automatic_optimization
def print(self, *args, **kwargs) -> None:
r"""
Prints only from process 0. Use this in any distributed mode to log only once.
Args:
*args: The thing to print. Will be passed to Python's built-in print function.
**kwargs: Will be passed to Python's built-in print function.
Example:
.. code-block:: python
def forward(self, x):
self.print(x, 'in forward')
"""
if self.trainer.is_global_zero:
print(*args, **kwargs)
def log(
self,
name: str,
value: Any,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a key, value
Example::
self.log('train_loss', loss)
The default behavior per hook is as follows
.. csv-table:: ``*`` also applies to the test loop
:header: "LightningMoule Hook", "on_step", "on_epoch", "prog_bar", "logger"
:widths: 20, 10, 10, 10, 10
"training_step", "T", "F", "F", "T"
"training_step_end", "T", "F", "F", "T"
"training_epoch_end", "F", "T", "F", "T"
"validation_step*", "F", "T", "F", "T"
"validation_step_end*", "F", "T", "F", "T"
"validation_epoch_end*", "F", "T", "F", "T"
Args:
name: key name
value: value name
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group
"""
if self._results is not None:
# in any epoch end can't log step metrics (only epoch metric)
if 'epoch_end' in self._current_fx_name and on_step:
m = f'on_step=True cannot be used on {self._current_fx_name} method'
raise MisconfigurationException(m)
if 'epoch_end' in self._current_fx_name and on_epoch is False:
m = f'on_epoch cannot be False when called from the {self._current_fx_name} method'
raise MisconfigurationException(m)
# add log_dict
# TODO: if logged twice fail with crash
# set the default depending on the fx_name
on_step = self.__auto_choose_log_on_step(on_step)
on_epoch = self.__auto_choose_log_on_epoch(on_epoch)
if self._current_hook_fx_name is not None:
self.trainer.logger_connector.check_logging_in_callbacks(
self._current_hook_fx_name,
on_step=on_step,
on_epoch=on_epoch
)
# make sure user doesn't introduce logic for multi-dataloaders
if "/dataloader_idx_" in name:
raise MisconfigurationException(
f"Logged key: {name} should not contain information about dataloader_idx.")
accelerator = self.trainer.accelerator_backend
self._results.log(
name,
value,
prog_bar,
logger,
on_step,
on_epoch,
reduce_fx,
tbptt_reduce_fx,
tbptt_pad_token,
enable_graph,
sync_dist,
sync_dist_op,
sync_dist_group,
accelerator.sync_tensor,
self._current_dataloader_idx,
self.device,
)
def log_dict(
self,
dictionary: dict,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a dictionary of values at once
Example::
values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
self.log_dict(values)
Args:
dictionary: key value pairs (str, tensors)
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. ``torch.mean`` by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group
"""
for k, v in dictionary.items():
self.log(
name=k,
value=v,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
sync_dist=sync_dist,
sync_dist_group=sync_dist_group,
sync_dist_op=sync_dist_op,
tbptt_pad_token=tbptt_pad_token,
tbptt_reduce_fx=tbptt_reduce_fx,
)
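# --- Hedged usage sketch (not part of the original source): how the `log` and
# `log_dict` hooks above are typically called from a user LightningModule in this
# version of the API. The class name, layer sizes and metric names are illustrative.
import torch
import pytorch_lightning as pl

class LitClassifier(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.cross_entropy(self.layer(x), y)
        # logged per step by default in training_step; also aggregate per epoch
        self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self.layer(x)
        loss = torch.nn.functional.cross_entropy(logits, y)
        acc = (logits.argmax(dim=1) == y).float().mean()
        # logged per epoch by default in validation_step
        self.log_dict({"val_loss": loss, "val_acc": acc})

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)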
def write_prediction(self, name, value, filename='predictions.pt'):
self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename)
def write_prediction_dict(self, predictions_dict, filename='predictions.pt'):
for k, v in predictions_dict.items():
self.write_prediction(k, v, filename)
def __auto_choose_log_on_step(self, on_step):
if on_step is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_step = True
elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',
'evaluation_epoch_end', 'training_epoch_end'}:
on_step = False
else:
on_step = False
return on_step
def __auto_choose_log_on_epoch(self, on_epoch):
if on_epoch is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_epoch = False
elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',
'evaluation_epoch_end', 'training_epoch_end'}:
on_epoch = True
else:
on_epoch = True
return on_epoch
def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False):
r"""
Allows users to call ``self.all_gather()`` from the LightningModule, thus making
the ``all_gather`` operation accelerator agnostic.
``all_gather`` is a function provided by accelerators to gather a tensor from several
distributed processes.
Args:
tensor: tensor of shape (batch, ...)
group: the process group to gather results from. Defaults to all processes (world)
sync_grads: flag that allows users to synchronize gradients for all_gather op
Return:
A tensor of shape (world_size, batch, ...)
"""
return self.trainer.accelerator_backend.all_gather(tensor, group=group, sync_grads=sync_grads)
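# --- Hedged sketch (not in the original source): reducing a per-process metric across
# ranks with `all_gather`. Assumes a distributed run and that `validation_step`
# returned dicts containing a "val_loss" tensor; names are illustrative.
import torch
import pytorch_lightning as pl

class GatherExample(pl.LightningModule):
    def validation_epoch_end(self, outputs):
        # mean validation loss computed on this rank only
        local_mean = torch.stack([o["val_loss"] for o in outputs]).mean()
        # all_gather adds a leading world_size dimension to the gathered tensor
        gathered = self.all_gather(local_mean)
        self.log("val_loss_all_ranks", gathered.mean())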
def forward(self, *args, **kwargs):
r"""
Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define
the operations you want to use for prediction (i.e.: on a server or as a feature extractor).
Normally you'd call ``self()`` from your :meth:`training_step` method.
This makes it easy to write a complex system for training with the outputs
you'd want in a prediction setting.
You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful
when using the module outside Lightning in a production setting.
Args:
*args: Whatever you decide to pass into the forward method.
**kwargs: Keyword arguments are also possible.
Return:
Predicted output
Examples:
.. code-block:: python
# example if we were using this model as a feature extractor
def forward(self, x):
feature_maps = self.convnet(x)
return feature_maps
def training_step(self, batch, batch_idx):
x, y = batch
feature_maps = self(x)
logits = self.classifier(feature_maps)
# ...
return loss
# splitting it this way allows the model to be used as a feature extractor
model = MyModelAbove()
inputs = server.get_request()
results = model(inputs)
server.write_results(results)
# -------------
# This is in stark contrast to torch.nn.Module where normally you would have this:
def forward(self, batch):
x, y = batch
feature_maps = self.convnet(x)
logits = self.classifier(feature_maps)
return logits
"""
return super().forward(*args, **kwargs)
def training_step(self, *args, **kwargs):
r"""
Here you compute and return the training loss and some additional metrics, e.g. for
the progress bar or logger.
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): Integer displaying index of this batch
optimizer_idx (int): When using multiple optimizers, this argument will also be present.
hiddens(:class:`~torch.Tensor`): Passed in if
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.
Return:
Any of:
- :class:`~torch.Tensor` - The loss tensor
- `dict` - A dictionary. Can include any keys, but must include the key 'loss'
- `None` - Training will skip to the next batch
In this step you'd normally do the forward pass and calculate the loss for a batch.
You can also do fancier things like multiple forward passes or something model specific.
Example::
def training_step(self, batch, batch_idx):
x, y, z = batch
out = self.encoder(x)
loss = self.loss(out, x)
return loss
If you define multiple optimizers, this step will be called with an additional
``optimizer_idx`` parameter.
.. code-block:: python
# Multiple optimizers (e.g.: GANs)
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
# do training_step with encoder
if optimizer_idx == 1:
# do training_step with decoder
If you add truncated back propagation through time you will also get an additional
argument with the hidden states of the previous step.
.. code-block:: python
# Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
# hiddens are the hidden states from the previous truncated backprop step
...
out, hiddens = self.lstm(data, hiddens)
...
return {'loss': loss, 'hiddens': hiddens}
Note:
The loss value shown in the progress bar is smoothed (averaged) over the last values,
so it differs from the actual loss returned in train/validation step.
"""
rank_zero_warn(
"`training_step` must be implemented to be used with the Lightning Trainer"
)
def training_step_end(self, *args, **kwargs):
"""
Use this when training with dp or ddp2 because :meth:`training_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]
training_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in `training_step` for each batch part.
Return:
Anything
When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
# softmax uses only a portion of the batch in the denominator
loss = self.softmax(out)
loss = nce_loss(loss)
return loss
If you wish to do something with all the parts of the batch, then use this method to do it:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return {'pred': out}
def training_step_end(self, training_step_outputs):
gpu_0_pred = training_step_outputs[0]['pred']
gpu_1_pred = training_step_outputs[1]['pred']
gpu_n_pred = training_step_outputs[n]['pred']
# this softmax now uses the full batch
loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])
return loss
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def training_epoch_end(self, outputs: List[Any]) -> None:
"""
Called at the end of the training epoch with the outputs of all training steps.
Use this in case you need to do something with all the outputs for every training_step.
.. code-block:: python
# the pseudocode for these calls
train_outs = []
for train_batch in train_data:
out = training_step(train_batch)
train_outs.append(out)
training_epoch_end(train_outs)
Args:
outputs: List of outputs you defined in :meth:`training_step`, or if there are
multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If this method is not overridden, this won't be called.
Example::
def training_epoch_end(self, training_step_outputs):
# do something with all training_step outputs
return result
With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each training step for that dataloader.
.. code-block:: python
def training_epoch_end(self, training_step_outputs):
for out in training_step_outputs:
# do something here
"""
def validation_step(self, *args, **kwargs):
r"""
Operates on a single batch of data from the validation set.
In this step you might generate examples or calculate anything of interest like accuracy.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple val dataloaders used)
Return:
Any of:
- Any object or value
- `None` - Validation will skip to the next batch
.. code-block:: python
# pseudocode of order
out = validation_step()
if defined('validation_step_end'):
out = validation_step_end(out)
out = validation_epoch_end(out)
.. code-block:: python
# if you have one val dataloader:
def validation_step(self, batch, batch_idx)
# if you have multiple val dataloaders:
def validation_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single validation dataset
def validation_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'val_loss': loss, 'val_acc': val_acc})
If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument.
.. code-block:: python
# CASE 2: multiple validation dataloaders
def validation_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to validate you don't need to implement this method.
Note:
When the :meth:`validation_step` is called, the model has been put in eval mode
and PyTorch gradients have been disabled. At the end of validation,
the model goes back to training mode and gradients are enabled.
"""
def validation_step_end(self, *args, **kwargs):
"""
Use this when validating with dp or ddp2 because :meth:`validation_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]
validation_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`validation_step`
for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT validation_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
loss = self.softmax(out)
loss = nce_loss(loss)
self.log('val_loss', loss)
# --------------
# with validation_step_end to do softmax over the full batch
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
return out
def validation_step_end(self, val_step_outputs):
for out in val_step_outputs:
# do something with these
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def validation_epoch_end(self, outputs: List[Any]) -> None:
"""
Called at the end of the validation epoch with the outputs of all validation steps.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
outputs: List of outputs you defined in :meth:`validation_step`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If you didn't define a :meth:`validation_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def validation_epoch_end(self, val_step_outputs):
for out in val_step_outputs:
# do something
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each validation step for that dataloader.
.. code-block:: python
def validation_epoch_end(self, outputs):
for dataloader_output_result in outputs:
dataloader_outs = dataloader_output_result.dataloader_i_outputs
self.log('final_metric', final_value)
"""
def test_step(self, *args, **kwargs):
r"""
Operates on a single batch of data from the test set.
In this step you'd normally generate examples or calculate anything of interest
such as accuracy.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch.
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple test dataloaders used).
Return:
Any of:
- Any object or value
- `None` - Testing will skip to the next batch
.. code-block:: python
# if you have one test dataloader:
def test_step(self, batch, batch_idx)
# if you have multiple test dataloaders:
def test_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single test dataset
def test_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'test_loss': loss, 'test_acc': test_acc})
If you pass in multiple test dataloaders, :meth:`test_step` will have an additional
argument.
.. code-block:: python
# CASE 2: multiple test dataloaders
def test_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to test you don't need to implement this method.
Note:
When the :meth:`test_step` is called, the model has been put in eval mode and
PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
to training mode and gradients are enabled.
"""
def test_step_end(self, *args, **kwargs):
"""
Use this when testing with dp or ddp2 because :meth:`test_step` will operate
on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]
test_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`test_step` for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT test_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
loss = self.softmax(out)
self.log('test_loss', loss)
# --------------
# with test_step_end to do softmax over the full batch
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return out
def test_step_end(self, output_results):
# this out is now the full size of the batch
all_test_step_outs = output_results.out
loss = nce_loss(all_test_step_outs)
self.log('test_loss', loss)
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def test_epoch_end(
self, outputs: List[Any]
) -> None:
"""
Called at the end of a test epoch with the output of all test steps.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
outputs: List of outputs you defined in :meth:`test_step_end`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader
Return:
None
Note:
If you didn't define a :meth:`test_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def test_epoch_end(self, outputs):
# do something with the outputs of all test batches
all_test_preds = outputs.predictions
some_result = calc_all_results(all_test_preds)
self.log('final_metric', some_result)
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each test step for that dataloader.
.. code-block:: python
def test_epoch_end(self, outputs):
final_value = 0
for dataloader_outputs in outputs:
for test_step_out in dataloader_outputs:
# do something
final_value += test_step_out
self.log('final_metric', final_value)
"""
def configure_optimizers(
self,
):
r"""
Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Return:
Any of these 6 options.
- Single optimizer.
- List or Tuple - List of optimizers.
- Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).
- Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler'
key whose value is a single LR scheduler or lr_dict.
- Tuple of dictionaries as described, with an optional 'frequency' key.
- None - Fit will run without any optimizer.
Note:
The 'frequency' value is an int corresponding to the number of sequential batches
optimized with the specific optimizer. It should be given to none or to all of the optimizers.
There is a difference between passing multiple optimizers in a list,
and passing multiple optimizers in dictionaries with a frequency of 1:
In the former case, all optimizers will operate on the given batch in each optimization step.
In the latter, only one optimizer will operate on the given batch at every step.
The lr_dict is a dictionary which contains the scheduler and its associated configuration.
The default configuration is shown below.
.. code-block:: python
{
'scheduler': lr_scheduler, # The LR scheduler instance (required)
'interval': 'epoch', # The unit of the scheduler's step size
'frequency': 1, # The frequency of the scheduler
'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler
'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor
'strict': True, # Whether to crash the training if `monitor` is not found
'name': None, # Custom name for LearningRateMonitor to use
}
Only the ``scheduler`` key is required, the rest will be set to the defaults above.
Examples:
.. code-block:: python
# most cases
def configure_optimizers(self):
opt = Adam(self.parameters(), lr=1e-3)
return opt
# multiple optimizer case (e.g.: GAN)
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
return generator_opt, discriminator_opt
# example with learning rate schedulers
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)
return [generator_opt, discriminator_opt], [discriminator_sched]
# example with step-based learning rate schedulers
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),
'interval': 'step'} # called after each training step
dis_sched = CosineAnnealing(dis_opt, T_max=10) # called every epoch
return [gen_opt, dis_opt], [gen_sched, dis_sched]
# example with optimizer frequencies
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
# https://arxiv.org/abs/1704.00028
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
n_critic = 5
return (
{'optimizer': dis_opt, 'frequency': n_critic},
{'optimizer': gen_opt, 'frequency': 1}
)
Note:
Some things to know:
- Lightning calls ``.backward()`` and ``.step()`` on each optimizer
and learning rate scheduler as needed.
- If you use 16-bit precision (``precision=16``), Lightning will automatically
handle the optimizers for you.
- If you use multiple optimizers, :meth:`training_step` will have an additional
``optimizer_idx`` parameter.
- If you use LBFGS, Lightning handles the closure function automatically for you.
- If you use multiple optimizers, gradients will be calculated only
for the parameters of the current optimizer at each training step.
- If you need to control how often those optimizers step or override the
default ``.step()`` schedule, override the :meth:`optimizer_step` hook.
- If you only want to call a learning rate scheduler every ``x`` step or epoch,
or want to monitor a custom metric, you can specify these in a lr_dict:
.. code-block:: python
{
'scheduler': lr_scheduler,
'interval': 'step', # or 'epoch'
'monitor': 'val_f1',
'frequency': x,
}
"""
rank_zero_warn(
"`configure_optimizers` must be implemented to be used with the Lightning Trainer"
)
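# --- Hedged sketch (not in the original source): the dictionary return form of
# `configure_optimizers` described above, pairing one optimizer with an lr_dict
# that monitors a logged validation metric. Class and metric names are illustrative.
import torch
import pytorch_lightning as pl

class DictOptimizerExample(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(8, 1)

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min")
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,          # required key
                "interval": "epoch",             # step the scheduler once per epoch
                "monitor": "val_loss",           # metric previously logged via self.log
                "reduce_on_plateau": True,
            },
        }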
def manual_backward(self, loss: Tensor, optimizer: Optimizer, *args, **kwargs) -> None:
"""
Call this directly from your training_step when doing optimizations manually.
By using this we can ensure that all the proper scaling (e.g. with 16-bit precision) has been done for you.
This function also forwards all args to the ``.backward()`` call.
.. tip:: In manual mode we still automatically clip grads if ``Trainer(gradient_clip_val=x)`` is set.
.. tip:: In manual mode we still automatically accumulate grad over batches if
``Trainer(accumulate_grad_batches=x)`` is set and you use ``optimizer.step()``.
Example::
def training_step(...):
(opt_a, opt_b) = self.optimizers()
loss = ...
# automatically applies scaling, etc...
self.manual_backward(loss, opt_a)
opt_a.step()
"""
# make sure we're using manual opt
self._verify_is_manual_optimization('manual_backward')
# backward
self._running_manual_backward = True
self.trainer.train_loop.backward(loss, optimizer, -1, *args, **kwargs)
self._running_manual_backward = False
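# --- Hedged sketch (not in the original source): a manual-optimization training_step
# built around `manual_backward` above, alternating two optimizers (a GAN-like setup).
# Assumes `automatic_optimization` can be disabled via the property defined earlier
# and that `self.optimizers()` returns the configured optimizers, as in the docstring
# example; the modules and placeholder losses are illustrative.
import torch
import pytorch_lightning as pl

class ManualOptExample(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.gen = torch.nn.Linear(16, 16)
        self.disc = torch.nn.Linear(16, 1)
        self.automatic_optimization = False  # we call backward/step/zero_grad ourselves

    def training_step(self, batch, batch_idx):
        opt_g, opt_d = self.optimizers()
        g_loss = self.gen(batch).pow(2).mean()   # placeholder generator loss
        self.manual_backward(g_loss, opt_g)
        opt_g.step()
        opt_g.zero_grad()
        d_loss = self.disc(batch).pow(2).mean()  # placeholder discriminator loss
        self.manual_backward(d_loss, opt_d)
        opt_d.step()
        opt_d.zero_grad()

    def configure_optimizers(self):
        return torch.optim.Adam(self.gen.parameters()), torch.optim.Adam(self.disc.parameters())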
def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
"""
Override backward with your own implementation if you need to.
Args:
loss: Loss is already scaled by accumulated grads
optimizer: Current optimizer being used
optimizer_idx: Index of the current optimizer being used
Called to perform backward step.
Feel free to override as needed.
The loss passed in has already been scaled for accumulated gradients if requested.
Example::
def backward(self, loss, optimizer, optimizer_idx):
loss.backward()
"""
if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
loss.backward(*args, **kwargs)
def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
"""
Makes sure only the gradients of the current optimizer's parameters are calculated
in the training step to prevent dangling gradients in multiple-optimizer setup.
.. note:: Only called when using multiple optimizers
Override for your own behavior
Args:
optimizer: The optimizer whose parameters should receive gradients
optimizer_idx: The index of that optimizer in the list returned by :meth:`configure_optimizers`
"""
for param in self.parameters():
param.requires_grad = False
for group in optimizer.param_groups:
for param in group['params']:
param.requires_grad = True
def optimizer_step(
self,
epoch: int = None,
batch_idx: int = None,
optimizer: Optimizer = None,
optimizer_idx: int = None,
optimizer_closure: Optional[Callable] = None,
on_tpu: bool = None,
using_native_amp: bool = None,
using_lbfgs: bool = None,
) -> None:
r"""
Override this method to adjust the default way the
:class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.
By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example
once per optimizer.
.. tip:: With ``Trainer(enable_pl_optimizer=True)``, you can use ``optimizer.step()`` directly and it will handle zero_grad, accumulated gradients, AMP, TPU and more automatically for you.
Warning:
If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter
to ``optimizer.step()`` function as shown in the examples. This ensures that
``train_step_and_backward_closure`` is called within
:meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers this indexes into that list.
optimizer_closure: closure for all optimizers
on_tpu: True if TPU backward is required
using_native_amp: True if using native amp
using_lbfgs: True if the matching optimizer is lbfgs
Examples:
.. code-block:: python
# DEFAULT
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
optimizer.step(closure=optimizer_closure)
# Alternating schedule for optimizer steps (i.e.: GANs)
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# update generator opt every 2 steps
if optimizer_idx == 0:
if batch_idx % 2 == 0 :
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
# update discriminator opt every 4 steps
if optimizer_idx == 1:
if batch_idx % 4 == 0 :
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
# ...
# add as many optimizers as you want
Here's another example showing how to use this for more advanced things such as
learning rate warm-up:
.. code-block:: python
# learning rate warm-up
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# warm up lr
if self.trainer.global_step < 500:
lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)
for pg in optimizer.param_groups:
pg['lr'] = lr_scale * self.learning_rate
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
"""
optimizer.step(closure=optimizer_closure)
def optimizer_zero_grad(
self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int
):
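# Hook used by the training loop to clear the gradients of the given optimizer;
# override it if you need to customize when or how gradients are zeroed.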
optimizer.zero_grad()
def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:
r"""
When using truncated backpropagation through time, each batch must be split along the
time dimension. Lightning handles this by default, but for custom behavior override
this function.
Args:
batch: Current batch
split_size: The size of the split
Return:
List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated
back propagation through time. The default implementation splits root level Tensors and
Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.
Examples:
.. code-block:: python
def tbptt_split_batch(self, batch, split_size):
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t:t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t:t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
Note:
Called in the training loop after
:meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`
if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.
Each returned batch split is passed separately to :meth:`training_step`.
"""
time_dims = [
len(x[0])
for x in batch
if isinstance(x, (torch.Tensor, collections.Sequence))
]
assert len(time_dims) >= 1, "Unable to determine batch time dimension"
assert all(
x == time_dims[0] for x in time_dims
), "Batch time dimension length is ambiguous"
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t: t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t: t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
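# --- Hedged sketch (not in the original source): what the default time-dimension split
# above produces for a single (batch, time, features) tensor; shapes are illustrative.
import torch

x = torch.randn(4, 10, 3)  # batch=4, time=10, features=3
split_size = 4
splits = [x[:, t:t + split_size] for t in range(0, x.size(1), split_size)]
# three chunks along dim=1 with shapes (4, 4, 3), (4, 4, 3) and (4, 2, 3)
print([tuple(s.shape) for s in splits])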
def summarize(self, mode: Optional[str] = ModelSummary.MODE_DEFAULT) -> Optional[ModelSummary]:
model_summary = None
if mode in ModelSummary.MODES:
model_summary = ModelSummary(self, mode=mode)
log.info("\n" + str(model_summary))
elif mode is not None:
raise MisconfigurationException(
f"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}"
)
return model_summary
def freeze(self) -> None:
r"""
Freeze all params for inference.
Example:
.. code-block:: python
model = MyLightningModule(...)
model.freeze()
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""
Unfreeze all parameters for training.
.. code-block:: python
model = MyLightningModule(...)
model.unfreeze()
"""
for param in self.parameters():
param.requires_grad = True
self.train()
def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
r"""
Implement this to override the default items displayed in the progress bar.
By default it includes the average loss value, split index of BPTT (if used)
and the version of the experiment when using a logger.
.. code-block::
Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]
Here is an example how to override the defaults:
.. code-block:: python
def get_progress_bar_dict(self):
# don't show the version number
items = super().get_progress_bar_dict()
items.pop("v_num", None)
return items
Return:
Dictionary with the items to be displayed in the progress bar.
"""
# call .item() only once but store elements without graphs
running_train_loss = self.trainer.train_loop.running_loss.mean()
avg_training_loss = None
if running_train_loss is not None:
avg_training_loss = running_train_loss.cpu().item()
elif self.trainer.train_loop.automatic_optimization:
avg_training_loss = float('NaN')
tqdm_dict = {}
if avg_training_loss is not None:
tqdm_dict["loss"] = f"{avg_training_loss:.3g}"
if self.trainer.truncated_bptt_steps is not None:
tqdm_dict["split_idx"] = self.trainer.split_idx
if self.trainer.logger is not None and self.trainer.logger.version is not None:
version = self.trainer.logger.version
# show last 4 places of long version strings
version = version[-4:] if isinstance(version, str) else version
tqdm_dict["v_num"] = version
return tqdm_dict
def _verify_is_manual_optimization(self, fn_name):
if self.trainer.train_loop.automatic_optimization:
raise MisconfigurationException(
f'to use {fn_name}, please disable automatic optimization:'
' set the model property `automatic_optimization` to False'
)
@classmethod
def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
"""
Collect all module arguments in the current constructor and all child constructors.
The child constructors are all the ``__init__`` methods that reach the current class through
(chained) ``super().__init__()`` calls.
Args:
frame: instance frame
Returns:
self_arguments: arguments dictionary of the first instance
parents_arguments: arguments dictionary of the parent's instances
"""
if not frame:
frame = inspect.currentframe()
frame_args = collect_init_args(frame.f_back, [])
self_arguments = frame_args[-1]
# set hyper_parameters in child
self_arguments = self_arguments
parents_arguments = {}
# add all arguments from parents
for args in frame_args[:-1]:
parents_arguments.update(args)
return self_arguments, parents_arguments
def save_hyperparameters(self, *args, frame=None) -> None:
"""Save all model arguments.
Args:
args: single object of `dict`, `Namespace` or `OmegaConf`
or string names or arguments from class `__init__`
>>> from collections import OrderedDict
>>> class ManuallyArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # manually assign arguments
... self.save_hyperparameters('arg1', 'arg3')
... def forward(self, *args, **kwargs):
... ...
>>> model = ManuallyArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg3": 3.14
>>> class AutomaticArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # equivalent automatic
... self.save_hyperparameters()
... def forward(self, *args, **kwargs):
... ...
>>> model = AutomaticArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg2": abc
"arg3": 3.14
>>> class SingleArgModel(LightningModule):
... def __init__(self, params):
... super().__init__()
... # manually assign single argument
... self.save_hyperparameters(params)
... def forward(self, *args, **kwargs):
... ...
>>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))
>>> model.hparams
"p1": 1
"p2": abc
"p3": 3.14
"""
if not frame:
frame = inspect.currentframe().f_back
init_args = get_init_args(frame)
assert init_args, "failed to inspect the self init"
if not args:
# take all arguments
hp = init_args
self._hparams_name = "kwargs" if hp else None
else:
# take only listed arguments in `save_hparams`
isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]
if len(isx_non_str) == 1:
hp = args[isx_non_str[0]]
cand_names = [k for k, v in init_args.items() if v == hp]
self._hparams_name = cand_names[0] if cand_names else None
else:
hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}
self._hparams_name = "kwargs"
# `hparams` are expected here
if hp:
self._set_hparams(hp)
# make a deep copy so that later runtime changes are not reflected
self._hparams_initial = copy.deepcopy(self._hparams)
def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:
if isinstance(hp, Namespace):
hp = vars(hp)
if isinstance(hp, dict):
hp = AttributeDict(hp)
elif isinstance(hp, PRIMITIVE_TYPES):
raise ValueError(f"Primitives {PRIMITIVE_TYPES} are not allowed.")
elif not isinstance(hp, ALLOWED_CONFIG_TYPES):
raise ValueError(f"Unsupported config type of {type(hp)}.")
if isinstance(hp, dict) and isinstance(self.hparams, dict):
self.hparams.update(hp)
else:
self._hparams = hp
@torch.no_grad()
def to_onnx(
self,
file_path: Union[str, Path],
input_sample: Optional[Any] = None,
**kwargs,
):
"""
Saves the model in ONNX format
Args:
file_path: The path of the file the onnx model should be saved to.
input_sample: An input for tracing. Default: None (Use self.example_input_array)
**kwargs: Will be passed to torch.onnx.export function.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
>>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:
... model = SimpleModel()
... input_sample = torch.randn((1, 64))
... model.to_onnx(tmpfile.name, input_sample, export_params=True)
... os.path.isfile(tmpfile.name)
True
"""
mode = self.training
if input_sample is None:
if self.example_input_array is None:
raise ValueError(
"Could not export to ONNX since neither `input_sample` nor"
" `model.example_input_array` attribute is set."
)
input_sample = self.example_input_array
input_sample = self.transfer_batch_to_device(input_sample)
if "example_outputs" not in kwargs:
self.eval()
kwargs["example_outputs"] = self(input_sample)
torch.onnx.export(self, input_sample, file_path, **kwargs)
self.train(mode)
@torch.no_grad()
def to_torchscript(
self,
file_path: Optional[Union[str, Path]] = None,
method: Optional[str] = 'script',
example_inputs: Optional[Any] = None,
**kwargs,
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
"""
By default compiles the whole model to a :class:`~torch.jit.ScriptModule`.
If you want to use tracing, please provide the argument `method='trace'` and make sure that either the
example_inputs argument is provided, or the model has self.example_input_array set.
If you would like to customize the modules that are scripted you should override this method.
In case you want to return multiple modules, we recommend using a dictionary.
Args:
file_path: Path where to save the torchscript. Default: None (no file saved).
method: Whether to use TorchScript's script or trace method. Default: 'script'
example_inputs: An input to be used to do tracing when method is set to 'trace'.
Default: None (Use self.example_input_array)
**kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or
:func:`torch.jit.trace` function.
Note:
- Requires the implementation of the
:meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.
- The exported script will be set to evaluation mode.
- It is recommended that you install the latest supported version of PyTorch
to use this feature without limitations. See also the :mod:`torch.jit`
documentation for supported features.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
...
>>> model = SimpleModel()
>>> torch.jit.save(model.to_torchscript(), "model.pt") # doctest: +SKIP
>>> os.path.isfile("model.pt") # doctest: +SKIP
>>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP
... example_inputs=torch.randn(1, 64))) # doctest: +SKIP
>>> os.path.isfile("model_trace.pt") # doctest: +SKIP
True
Return:
This LightningModule as a torchscript, regardless of whether file_path is
defined or not.
"""
mode = self.training
if method == 'script':
torchscript_module = torch.jit.script(self.eval(), **kwargs)
elif method == 'trace':
# if no example inputs are provided, try to see if model has example_input_array set
if example_inputs is None:
if self.example_input_array is None:
raise ValueError(
'Choosing method=`trace` requires either `example_inputs`'
' or `model.example_input_array` to be defined'
)
example_inputs = self.example_input_array
# automatically send example inputs to the right device and use trace
example_inputs = self.transfer_batch_to_device(example_inputs)
torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
else:
raise ValueError("The 'method' parameter only supports 'script' or 'trace',"
f" but value given was: {method}")
self.train(mode)
if file_path is not None:
torch.jit.save(torchscript_module, file_path)
return torchscript_module
@property
def hparams(self) -> Union[AttributeDict, dict, Namespace]:
if not hasattr(self, "_hparams"):
self._hparams = AttributeDict()
return self._hparams
@property
def hparams_initial(self) -> AttributeDict:
if not hasattr(self, "_hparams_initial"):
return AttributeDict()
# prevent any change
return copy.deepcopy(self._hparams_initial)
@hparams.setter
def hparams(self, hp: Union[dict, Namespace, Any]):
# TODO: remove this method in v1.3.0.
rank_zero_warn(
"The setter for self.hparams in LightningModule is deprecated since v1.1.0 and will be"
" removed in v1.3.0. Replace the assignment `self.hparams = hparams` with "
" `self.save_hyperparameters()`.",
DeprecationWarning
)
hparams_assignment_name = self.__get_hparams_assignment_variable()
self._hparams_name = hparams_assignment_name
self._set_hparams(hp)
# this resolves the case when the user does not use `save_hyperparameters` and does a hard assignment in __init__
if not hasattr(self, "_hparams_initial"):
self._hparams_initial = copy.deepcopy(self._hparams)
def __get_hparams_assignment_variable(self):
"""
looks at the code of the class to figure out what the user named self.hparams
this only happens when the user explicitly sets self.hparams
"""
try:
class_code = inspect.getsource(self.__class__)
lines = class_code.split("\n")
for line in lines:
line = re.sub(r"\s+", "", line, flags=re.UNICODE)
if ".hparams=" in line:
return line.split("=")[1]
except Exception:
return "hparams"
return None
|
# -*- coding: utf-8 -*-
"""Click commands."""
import os
from glob import glob
from subprocess import call
import click
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, "tests")
@click.command()
def test():
"""Run the tests."""
import pytest
rv = pytest.main([TEST_PATH, "--verbose"])
exit(rv)
@click.command()
@click.option(
"-f",
"--fix-imports",
default=True,
is_flag=True,
help="Fix imports using isort, before linting",
)
@click.option(
"-c",
"--check",
default=False,
is_flag=True,
help="Don't make any changes to files, just confirm they are formatted correctly",
)
def lint(fix_imports, check):
"""Lint and check code style with black, flake8 and isort."""
skip = ["node_modules", "requirements", "migrations"]
root_files = glob("*.py")
root_directories = [
name for name in next(os.walk("."))[1] if not name.startswith(".")
]
files_and_directories = [
arg for arg in root_files + root_directories if arg not in skip
]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo(f"{description}: {' '.join(command_line)}")
rv = call(command_line)
if rv != 0:
exit(rv)
isort_args = ["-rc"]
black_args = []
if check:
isort_args.append("-c")
black_args.append("--check")
if fix_imports:
execute_tool("Fixing import order", "isort", *isort_args)
execute_tool("Formatting style", "black", *black_args)
execute_tool("Checking code style", "flake8")
|
# -*- coding: utf-8 -*-
"""Click commands."""
import os
from glob import glob
from subprocess import call
import click
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, "tests")
@click.command()
def test():
"""Run the tests."""
import pytest
rv = pytest.main([TEST_PATH, "--verbose"])
exit(rv)
@click.command()
@click.option(
"-f",
"--fix-imports",
default=True,
is_flag=True,
help="Fix imports using isort, before linting",
)
@click.option(
"-c",
"--check",
default=False,
is_flag=True,
help="Don't make any changes to files, just confirm they are formatted correctly",
)
def lint(fix_imports, check):
"""Lint and check code style with black, flake8 and isort."""
skip = ["node_modules", "requirements", "migrations"]
root_files = glob("*.py")
root_directories = [
name for name in next(os.walk("."))[1] if not name.startswith(".")
]
files_and_directories = [
arg for arg in root_files + root_directories if arg not in skip
]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo(f"{description}: {' '.join(command_line)}")
rv = call(command_line)
if rv != 0:
exit(rv)
isort_args = ["-rc"]
black_args = []
if check:
isort_args.append("-c")
black_args.append("--check")
if fix_imports:
execute_tool("Fixing import order", "isort", *isort_args)
execute_tool("Formatting style", "black", *black_args)
execute_tool("Checking code style", "flake8")
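# --- Hedged sketch (not part of the original file): one way the `test` and `lint`
# commands above could be grouped into a single CLI entry point with click. The group
# name and the idea of a dedicated entry point are assumptions, not project conventions.
@click.group()
def cli():
    """Project management commands."""

cli.add_command(test)
cli.add_command(lint)

if __name__ == "__main__":
    cli()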
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extract a list of passes form the LLVM source tree.
Usage:
$ extract_passes_from_llvm_source_tree /path/to/llvm/source/root
Optionally accepts a list of specific files to examine:
$ extract_passes_from_llvm_source_tree /path/to/llvm/source/root /path/to/llvm/source/file
Implementation notes
--------------------
This implements a not-very-good parser for the INITIALIZE_PASS() family of
macros, which are used in the LLVM sources to declare a pass using its name,
flag, and docstring. Parsing known macros like this is fragile and likely to
break as the LLVM sources evolve. Currently only tested on LLVM 10.0.
A more robust solution would be to parse the C++ sources and extract all classes
which inherit from ModulePass etc.
"""
import codecs
import csv
import logging
import os
import re
import subprocess
import sys
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple
from compiler_gym.envs.llvm.service.passes.common import Pass
from compiler_gym.envs.llvm.service.passes.config import CREATE_PASS_NAME_MAP
logger = logging.getLogger(__name__)
# A regular expression to match the start of an invocation of one of the
# InitializePass helper macros.
INITIALIZE_PASS_RE = r"(INITIALIZE_PASS|INITIALIZE_PASS_BEGIN|INITIALIZE_PASS_WITH_OPTIONS|INITIALIZE_PASS_WITH_OPTIONS_BEGIN)\("
# A regular expression to match static const string definitions.
CONST_CHAR_RE = r'^\s*static\s+const\s+char(\s+(?P<name>[a-zA-Z_]+)\s*\[\s*\]|\s*\*\s*(?P<ptr_name>[a-zA-Z_]+))\s*=\s*(?P<value>".+")\s*;'
class ParseError(ValueError):
def __init__(self, message: str, source: str, components: List[str]):
self.message = message
self.source = source
self.components = components
def parse_initialize_pass(
source_path: Path, header: Optional[str], input_source: str, defines: Dict[str, str]
) -> Iterable[Pass]:
"""A shitty parser for INITIALIZE_PASS() macro invocations.."""
# Squish down to a single line.
source = re.sub(r"\n\s*", " ", input_source, flags=re.MULTILINE)
# Contract multi-spaces to single space.
source = re.sub(r",", ", ", source)
source = re.sub(r"\s+", " ", source)
source = re.sub(r"\(\s+", "(", source)
source = re.sub(r"\)\s+", ")", source)
# Strip the INITIALIZE_PASS(...) macro.
match = re.match(rf"^\s*{INITIALIZE_PASS_RE}(?P<args>.+)\)", source)
if not match:
raise ParseError("Failed to match INITIALIZE_PASS regex", source, [])
source = match.group("args")
components = []
start = 0
in_quotes = False
in_comment = False
for i in range(len(source)):
if (
not in_comment
and source[i] == "/"
and i < len(source) - 1
and source[i + 1] == "*"
):
in_comment = True
if (
in_comment
and source[i] == "*"
and i < len(source) - 1
and source[i + 1] == "/"
):
in_comment = False
start = i + 2
if source[i] == '"':
in_quotes = not in_quotes
if not in_quotes and source[i] == ",":
components.append(source[start:i].strip())
start = i + 2
components.append(source[start:].strip())
if len(components) != 5:
raise ParseError(
f"Expected 5 components, found {len(components)}", source, components
)
pass_name, arg, name, cfg, analysis = components
# Strip quotation marks in arg and name.
if not arg:
raise ParseError(f"Empty arg: `{arg}`", source, components)
if not name:
raise ParseError(f"Empty name: `{name}`", source, components)
while arg in defines:
arg = defines[arg]
while name in defines:
name = defines[name]
if not (arg[0] == '"' and arg[-1] == '"'):
raise ParseError(f"Could not interpret arg `{arg}`", source, components)
arg = arg[1:-1]
if not (name[0] == '"' and name[-1] == '"'):
raise ParseError(f"Could not interpret name `{name}`", source, components)
name = name[1:-1]
# Convert cfg and analysis to bool.
if cfg not in {"true", "false"}:
raise ParseError(
f"Could not interpret bool cfg argument `{cfg}`", source, components
)
if analysis not in {"true", "false"}:
raise ParseError(
f"Could not interpret bool analysis argument `{analysis}`",
source,
components,
)
cfg = cfg == "true"
analysis = analysis == "true"
opts = {
"source": source_path,
"header": header,
"name": pass_name,
"flag": f"-{arg}",
"description": name,
"cfg": cfg,
"is_analysis": analysis,
}
pass_name_or_list = CREATE_PASS_NAME_MAP.get(pass_name, pass_name)
if isinstance(pass_name_or_list, str):
opts["name"] = pass_name_or_list
yield Pass(**opts)
else:
for name in pass_name_or_list:
opts["name"] = name
yield Pass(**opts)
def build_defines(source: str) -> Dict[str, str]:
"""A quick-and-dirty technique to build a translation table from #defines
and string literals to their values."""
defines = {}
lines = source.split("\n")
for i in range(len(lines)):
line = lines[i].strip()
if line.startswith("#define"):
# Match #define strings.
components = line[len("#define ") :].split()
name = components[0]
value = " ".join(components[1:]).strip()
if value == "\\":
value = lines[i + 1].strip()
defines[name] = value
else:
# Match string literals.
match = re.match(CONST_CHAR_RE, line)
if match:
defines[match.group("name") or match.group("ptr_name")] = match.group(
"value"
)
return defines
def handle_file(source_path: Path) -> List[Pass]:
"""Parse the passes declared in a file."""
assert str(source_path).endswith(".cpp"), f"Unexpected file type: {source_path}"
header = Path("include/llvm/" + str(source_path)[len("lib") : -len("cpp")] + "h")
if not header.is_file():
header = ""
with codecs.open(source_path, "r", "utf-8") as f:
source = f.read()
defines = build_defines(source)
passes: List[Pass] = []
for match in re.finditer(INITIALIZE_PASS_RE, source):
start = match.start()
first_bracket = source.find("(", start)
bracket_depth = 1
end = first_bracket
for end in range(first_bracket + 1, len(source)):
if source[end] == "(":
bracket_depth += 1
elif source[end] == ")":
bracket_depth -= 1
if not bracket_depth:
break
try:
passes += list(
parse_initialize_pass(
source_path, header, source[start : end + 1], defines
)
)
except ParseError as e:
print(f"Parsing error: {e.message}", file=sys.stderr)
print(f"Parsed components: {e.components}", file=sys.stderr)
print(f"In line: {e.source}", file=sys.stderr)
print(f"In file: {source_path}", file=sys.stderr)
print("Fatal error. Aborting now.", file=sys.stderr)
sys.exit(1)
if passes:
logger.debug(
f"Extracted {len(passes)} {"passes" if len(passes) - 1 else "pass"} from {source_path}",
)
else:
logger.debug(f"Found no passes in {source_path}")
return passes
def main(argv):
root = Path(argv[1])
assert root.is_dir(), f"Not a directory: {root}"
os.chdir(root)
if len(argv) > 2:
paths = [Path(path) for path in argv[2:]]
else:
# Get the names of all files which contain a pass definition.
matching_paths = []
grep = subprocess.check_output(
["grep", "-l", "-E", rf"^\s*{INITIALIZE_PASS_RE}", "-R", "lib/"],
universal_newlines=True,
)
matching_paths += grep.strip().split("\n")
logger.debug("Processing %s files ...", len(matching_paths))
paths = [Path(path) for path in matching_paths]
# Build a list of pass entries.
rows = []
for path in sorted(paths):
passes = handle_file(path)
if passes:
rows += passes
writer = csv.writer(sys.stdout, delimiter=",", quotechar='"')
writer.writerow(Pass._fields)
writer.writerows(sorted(rows, key=lambda r: r.name))
if __name__ == "__main__":
main(sys.argv)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extract a list of passes form the LLVM source tree.
Usage:
$ extract_passes_from_llvm_source_tree /path/to/llvm/source/root
Optionally accepts a list of specific files to examine:
$ extract_passes_from_llvm_source_tree /path/to/llvm/source/root /path/to/llvm/source/file
Implementation notes
--------------------
This implements a not-very-good parser for the INITIALIZE_PASS() family of
macros, which are used in the LLVM sources to declare a pass using its name,
flag, and docstring. Parsing known macros like this is fragile and likely to
break as the LLVM sources evolve. Currently only tested on LLVM 10.0.
A more robust solution would be to parse the C++ sources and extract all classes
which inherit from ModulePass etc.
"""
import codecs
import csv
import logging
import os
import re
import subprocess
import sys
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple
from compiler_gym.envs.llvm.service.passes.common import Pass
from compiler_gym.envs.llvm.service.passes.config import CREATE_PASS_NAME_MAP
logger = logging.getLogger(__name__)
# A regular expression to match the start of an invocation of one of the
# InitializePass helper macros.
INITIALIZE_PASS_RE = r"(INITIALIZE_PASS|INITIALIZE_PASS_BEGIN|INITIALIZE_PASS_WITH_OPTIONS|INITIALIZE_PASS_WITH_OPTIONS_BEGIN)\("
# A regular expression to match static const string definitions.
CONST_CHAR_RE = r'^\s*static\s+const\s+char(\s+(?P<name>[a-zA-Z_]+)\s*\[\s*\]|\s*\*\s*(?P<ptr_name>[a-zA-Z_]+))\s*=\s*(?P<value>".+")\s*;'
class ParseError(ValueError):
def __init__(self, message: str, source: str, components: List[str]):
self.message = message
self.source = source
self.components = components
def parse_initialize_pass(
source_path: Path, header: Optional[str], input_source: str, defines: Dict[str, str]
) -> Iterable[Pass]:
"""A shitty parser for INITIALIZE_PASS() macro invocations.."""
# Squish down to a single line.
source = re.sub(r"\n\s*", " ", input_source, flags=re.MULTILINE)
# Contract multi-spaces to single space.
source = re.sub(r",", ", ", source)
source = re.sub(r"\s+", " ", source)
source = re.sub(r"\(\s+", "(", source)
source = re.sub(r"\)\s+", ")", source)
# Strip the INITIALIZE_PASS(...) macro.
match = re.match(rf"^\s*{INITIALIZE_PASS_RE}(?P<args>.+)\)", source)
if not match:
raise ParseError("Failed to match INITIALIZE_PASS regex", source, [])
source = match.group("args")
components = []
start = 0
in_quotes = False
in_comment = False
for i in range(len(source)):
if (
not in_comment
and source[i] == "/"
and i < len(source) - 1
and source[i + 1] == "*"
):
in_comment = True
if (
in_comment
and source[i] == "*"
and i < len(source) - 1
and source[i + 1] == "/"
):
in_comment = False
start = i + 2
if source[i] == '"':
in_quotes = not in_quotes
if not in_quotes and source[i] == ",":
components.append(source[start:i].strip())
start = i + 2
components.append(source[start:].strip())
if len(components) != 5:
raise ParseError(
f"Expected 5 components, found {len(components)}", source, components
)
pass_name, arg, name, cfg, analysis = components
# Strip quotation marks in arg and name.
if not arg:
raise ParseError(f"Empty arg: `{arg}`", source, components)
if not name:
raise ParseError(f"Empty name: `{name}`", source, components)
while arg in defines:
arg = defines[arg]
while name in defines:
name = defines[name]
if not (arg[0] == '"' and arg[-1] == '"'):
raise ParseError(f"Could not interpret arg `{arg}`", source, components)
arg = arg[1:-1]
if not (name[0] == '"' and name[-1] == '"'):
raise ParseError(f"Could not interpret name `{name}`", source, components)
name = name[1:-1]
# Convert cfg and analysis to bool.
if cfg not in {"true", "false"}:
raise ParseError(
f"Could not interpret bool cfg argument `{cfg}`", source, components
)
if analysis not in {"true", "false"}:
raise ParseError(
f"Could not interpret bool analysis argument `{analysis}`",
source,
components,
)
cfg = cfg == "true"
analysis = analysis == "true"
opts = {
"source": source_path,
"header": header,
"name": pass_name,
"flag": f"-{arg}",
"description": name,
"cfg": cfg,
"is_analysis": analysis,
}
pass_name_or_list = CREATE_PASS_NAME_MAP.get(pass_name, pass_name)
if isinstance(pass_name_or_list, str):
opts["name"] = pass_name_or_list
yield Pass(**opts)
else:
for name in pass_name_or_list:
opts["name"] = name
yield Pass(**opts)
def build_defines(source: str) -> Dict[str, str]:
"""A quick-and-dirty technique to build a translation table from #defines
and string literals to their values."""
defines = {}
lines = source.split("\n")
for i in range(len(lines)):
line = lines[i].strip()
if line.startswith("#define"):
# Match #define strings.
components = line[len("#define ") :].split()
name = components[0]
value = " ".join(components[1:]).strip()
if value == "\\":
value = lines[i + 1].strip()
defines[name] = value
else:
# Match string literals.
match = re.match(CONST_CHAR_RE, line)
if match:
defines[match.group("name") or match.group("ptr_name")] = match.group(
"value"
)
return defines
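# Illustrative behaviour of build_defines() on two hypothetical source lines:
#
#   #define PASS_NAME "Example pass"
#   static const char description[] = "An example description";
#
# would yield {"PASS_NAME": '"Example pass"', "description": '"An example description"'}
# (values keep their surrounding quotes; parse_initialize_pass() strips them).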
def handle_file(source_path: Path) -> List[Pass]:
"""Parse the passes declared in a file."""
assert str(source_path).endswith(".cpp"), f"Unexpected file type: {source_path}"
header = Path("include/llvm/" + str(source_path)[len("lib") : -len("cpp")] + "h")
if not header.is_file():
header = ""
with codecs.open(source_path, "r", "utf-8") as f:
source = f.read()
defines = build_defines(source)
passes: List[Pass] = []
for match in re.finditer(INITIALIZE_PASS_RE, source):
start = match.start()
first_bracket = source.find("(", start)
bracket_depth = 1
end = first_bracket
for end in range(first_bracket + 1, len(source)):
if source[end] == "(":
bracket_depth += 1
elif source[end] == ")":
bracket_depth -= 1
if not bracket_depth:
break
try:
passes += list(
parse_initialize_pass(
source_path, header, source[start : end + 1], defines
)
)
except ParseError as e:
print(f"Parsing error: {e.message}", file=sys.stderr)
print(f"Parsed components: {e.components}", file=sys.stderr)
print(f"In line: {e.source}", file=sys.stderr)
print(f"In file: {source_path}", file=sys.stderr)
print("Fatal error. Aborting now.", file=sys.stderr)
sys.exit(1)
if passes:
logger.debug(
f"Extracted {len(passes)} {'passes' if len(passes) - 1 else 'pass'} from {source_path}",
)
else:
logger.debug(f"Found no passes in {source_path}")
return passes
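# Illustrative call (hypothetical path): handle_file(Path("lib/Transforms/Scalar/DCE.cpp"))
# returns a list of Pass entries, one per INITIALIZE_PASS* invocation found in the file.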
def main(argv):
root = Path(argv[1])
assert root.is_dir(), f"Not a directory: {root}"
os.chdir(root)
if len(argv) > 2:
paths = [Path(path) for path in argv[2:]]
else:
# Get the names of all files which contain a pass definition.
matching_paths = []
grep = subprocess.check_output(
["grep", "-l", "-E", rf"^\s*{INITIALIZE_PASS_RE}", "-R", "lib/"],
universal_newlines=True,
)
matching_paths += grep.strip().split("\n")
logger.debug("Processing %s files ...", len(matching_paths))
paths = [Path(path) for path in matching_paths]
# Build a list of pass entries.
rows = []
for path in sorted(paths):
passes = handle_file(path)
if passes:
rows += passes
writer = csv.writer(sys.stdout, delimiter=",", quotechar='"')
writer.writerow(Pass._fields)
writer.writerows(sorted(rows, key=lambda r: r.name))
if __name__ == "__main__":
main(sys.argv)
|
import datetime as dt
from abc import abstractmethod
from django.db import models
from tacticalrmm.middleware import get_debug_info, get_username
ACTION_TYPE_CHOICES = [
("schedreboot", "Scheduled Reboot"),
("taskaction", "Scheduled Task Action"),
("agentupdate", "Agent Update"),
("chocoinstall", "Chocolatey Software Install"),
]
AUDIT_ACTION_TYPE_CHOICES = [
("login", "User Login"),
("failed_login", "Failed User Login"),
("delete", "Delete Object"),
("modify", "Modify Object"),
("add", "Add Object"),
("view", "View Object"),
("check_run", "Check Run"),
("task_run", "Task Run"),
("agent_install", "Agent Install"),
("remote_session", "Remote Session"),
("execute_script", "Execute Script"),
("execute_command", "Execute Command"),
("bulk_action", "Bulk Action"),
]
AUDIT_OBJECT_TYPE_CHOICES = [
("user", "User"),
("script", "Script"),
("agent", "Agent"),
("policy", "Policy"),
("winupdatepolicy", "Patch Policy"),
("client", "Client"),
("site", "Site"),
("check", "Check"),
("automatedtask", "Automated Task"),
("coresettings", "Core Settings"),
("bulk", "Bulk"),
]
# taskaction details format
# {
#     "action": "taskcreate" | "taskdelete" | "tasktoggle",
#     "value": True | False,  # only needed for tasktoggle; treated as a bool below
#     "task_id": 1
# }
STATUS_CHOICES = [
("pending", "Pending"),
("completed", "Completed"),
]
class AuditLog(models.Model):
username = models.CharField(max_length=100)
agent = models.CharField(max_length=255, null=True, blank=True)
entry_time = models.DateTimeField(auto_now_add=True)
action = models.CharField(max_length=100, choices=AUDIT_ACTION_TYPE_CHOICES)
object_type = models.CharField(max_length=100, choices=AUDIT_OBJECT_TYPE_CHOICES)
before_value = models.JSONField(null=True, blank=True)
after_value = models.JSONField(null=True, blank=True)
message = models.CharField(max_length=255, null=True, blank=True)
debug_info = models.JSONField(null=True, blank=True)
def __str__(self):
return f"{self.username} {self.action} {self.object_type}"
def save(self, *args, **kwargs):
if not self.pk and self.message:
# truncate message field if longer than 255 characters
self.message = (
(self.message[:253] + "..") if len(self.message) > 255 else self.message
)
return super(AuditLog, self).save(*args, **kwargs)
@staticmethod
def audit_mesh_session(username, hostname, debug_info={}):
AuditLog.objects.create(
username=username,
agent=hostname,
object_type="agent",
action="remote_session",
message=f"{username} used Mesh Central to initiate a remote session to {hostname}.",
debug_info=debug_info,
)
@staticmethod
def audit_raw_command(username, hostname, cmd, shell, debug_info={}):
AuditLog.objects.create(
username=username,
agent=hostname,
object_type="agent",
action="execute_command",
message=f"{username} issued {shell} command on {hostname}.",
after_value=cmd,
debug_info=debug_info,
)
@staticmethod
def audit_object_changed(
username, object_type, before, after, name="", debug_info={}
):
AuditLog.objects.create(
username=username,
object_type=object_type,
action="modify",
message=f"{username} modified {object_type} {name}",
before_value=before,
after_value=after,
debug_info=debug_info,
)
@staticmethod
def audit_object_add(username, object_type, after, name="", debug_info={}):
AuditLog.objects.create(
username=username,
object_type=object_type,
action="add",
message=f"{username} added {object_type} {name}",
after_value=after,
debug_info=debug_info,
)
@staticmethod
def audit_object_delete(username, object_type, before, name="", debug_info={}):
AuditLog.objects.create(
username=username,
object_type=object_type,
action="delete",
message=f"{username} deleted {object_type} {name}",
before_value=before,
debug_info=debug_info,
)
@staticmethod
def audit_script_run(username, hostname, script, debug_info={}):
AuditLog.objects.create(
agent=hostname,
username=username,
object_type="agent",
action="execute_script",
message=f'{username} ran script: "{script}" on {hostname}',
debug_info=debug_info,
)
@staticmethod
def audit_user_failed_login(username, debug_info={}):
AuditLog.objects.create(
username=username,
object_type="user",
action="failed_login",
message=f"{username} failed to login: Credentials were rejected",
debug_info=debug_info,
)
@staticmethod
def audit_user_failed_twofactor(username, debug_info={}):
AuditLog.objects.create(
username=username,
object_type="user",
action="failed_login",
message=f"{username} failed to login: Two Factor token rejected",
debug_info=debug_info,
)
@staticmethod
def audit_user_login_successful(username, debug_info={}):
AuditLog.objects.create(
username=username,
object_type="user",
action="login",
message=f"{username} logged in successfully",
debug_info=debug_info,
)
@staticmethod
def audit_bulk_action(username, action, affected, debug_info={}):
from agents.models import Agent
from clients.models import Client, Site
from scripts.models import Script
target = ""
agents = None
if affected["target"] == "all":
target = "on all agents"
elif affected["target"] == "client":
client = Client.objects.get(pk=affected["client"])
target = f"on all agents within client: {client.name}"
elif affected["target"] == "site":
site = Site.objects.get(pk=affected["site"])
target = f"on all agents within site: {site.client.name}\\{site.name}"
elif affected["target"] == "agents":
agents = Agent.objects.filter(pk__in=affected["agentPKs"]).values_list(
"hostname", flat=True
)
target = "on multiple agents"
if action == "script":
script = Script.objects.get(pk=affected["scriptPK"])
action = f"script: {script.name}"
if agents:
affected["agent_hostnames"] = list(agents)
AuditLog.objects.create(
username=username,
object_type="bulk",
action="bulk_action",
message=f"{username} executed bulk {action} {target}",
debug_info=debug_info,
after_value=affected,
)
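# Illustrative use of the AuditLog helpers (hypothetical values; in practice
# these are called from the API views with data taken from the request):
#
#   AuditLog.audit_raw_command(
#       username="tactical",
#       hostname="DESKTOP-ABC123",
#       cmd="ipconfig /all",
#       shell="cmd",
#       debug_info={"ip": "10.0.0.5"},
#   )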
class DebugLog(models.Model):
pass
class PendingAction(models.Model):
agent = models.ForeignKey(
"agents.Agent",
related_name="pendingactions",
on_delete=models.CASCADE,
)
entry_time = models.DateTimeField(auto_now_add=True)
action_type = models.CharField(
max_length=255, choices=ACTION_TYPE_CHOICES, null=True, blank=True
)
status = models.CharField(
max_length=255,
choices=STATUS_CHOICES,
default="pending",
)
celery_id = models.CharField(null=True, blank=True, max_length=255)
details = models.JSONField(null=True, blank=True)
def __str__(self):
return f"{self.agent.hostname} - {self.action_type}"
@property
def due(self):
if self.action_type == "schedreboot":
obj = dt.datetime.strptime(self.details["time"], "%Y-%m-%d %H:%M:%S")
return dt.datetime.strftime(obj, "%B %d, %Y at %I:%M %p")
elif self.action_type == "taskaction":
return "Next agent check-in"
elif self.action_type == "agentupdate":
return "Next update cycle"
elif self.action_type == "chocoinstall":
return "ASAP"
@property
def description(self):
if self.action_type == "schedreboot":
return "Device pending reboot"
elif self.action_type == "agentupdate":
return f"Agent update to {self.details["version"]}"
elif self.action_type == "chocoinstall":
return f"{self.details["name"]} software install"
elif self.action_type == "taskaction":
if self.details["action"] == "taskdelete":
return "Device pending task deletion"
elif self.details["action"] == "taskcreate":
return "Device pending task creation"
elif self.details["action"] == "tasktoggle":
# value is bool
if self.details["value"]:
action = "enable"
else:
action = "disable"
return f"Device pending task {action}"
class BaseAuditModel(models.Model):
# abstract base class for auditing models
class Meta:
abstract = True
# create audit fields
created_by = models.CharField(max_length=100, null=True, blank=True)
created_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
modified_by = models.CharField(max_length=100, null=True, blank=True)
modified_time = models.DateTimeField(auto_now=True, null=True, blank=True)
@abstractmethod
def serialize():
pass
def save(self, *args, **kwargs):
if get_username():
before_value = {}
object_class = type(self)
object_name = object_class.__name__.lower()
username = get_username()
# populate created_by and modified_by fields on instance
if not getattr(self, "created_by", None):
self.created_by = username
if hasattr(self, "modified_by"):
self.modified_by = username
# capture object properties before edit
if self.pk:
before_value = object_class.objects.get(pk=self.id)
            # don't create an entry for agent add since that is done in the view
if not self.pk:
AuditLog.audit_object_add(
username,
object_name,
object_class.serialize(self),
self.__str__(),
debug_info=get_debug_info(),
)
else:
AuditLog.audit_object_changed(
username,
object_class.__name__.lower(),
object_class.serialize(before_value),
object_class.serialize(self),
self.__str__(),
debug_info=get_debug_info(),
)
return super(BaseAuditModel, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
if get_username():
object_class = type(self)
AuditLog.audit_object_delete(
get_username(),
object_class.__name__.lower(),
object_class.serialize(self),
self.__str__(),
debug_info=get_debug_info(),
)
return super(BaseAuditModel, self).delete(*args, **kwargs)
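# Minimal sketch of how a model is expected to plug into BaseAuditModel (the
# class below is hypothetical): serialize() receives the instance and must
# return a JSON-serializable dict used for the before/after audit values.
#
#   class Note(BaseAuditModel):
#       text = models.TextField()
#
#       @staticmethod
#       def serialize(note):
#           return {"text": note.text}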
|
#! /usr/bin/env python3
"""
Sherlock: Find Usernames Across Social Networks Module
This module contains the main logic to search for usernames at social
networks.
"""
import csv
import json
import os
import platform
import re
import sys
import random
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from concurrent.futures import ThreadPoolExecutor
from time import time
import requests
from colorama import Fore, Style, init
from requests_futures.sessions import FuturesSession
from torrequest import TorRequest
from load_proxies import load_proxies_from_csv, check_proxy_list
module_name = "Sherlock: Find Usernames Across Social Networks"
__version__ = "0.8.3"
amount = 0
global proxy_list
proxy_list = []
class ElapsedFuturesSession(FuturesSession):
"""
Extends FutureSession to add a response time metric to each request.
This is taken (almost) directly from here: https://github.com/ross/requests-futures#working-in-the-background
"""
def request(self, method, url, hooks={}, *args, **kwargs):
start = time()
def timing(r, *args, **kwargs):
elapsed_sec = time() - start
r.elapsed = round(elapsed_sec * 1000)
try:
if isinstance(hooks['response'], (list, tuple)):
# needs to be first so we don't time other hooks execution
hooks['response'].insert(0, timing)
else:
hooks['response'] = [timing, hooks['response']]
except KeyError:
hooks['response'] = timing
return super(ElapsedFuturesSession, self).request(method, url, hooks=hooks, *args, **kwargs)
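# Illustrative use (hypothetical URL): the response of a completed future
# carries the measured time in whole milliseconds instead of requests' usual
# timedelta:
#
#   resp = ElapsedFuturesSession().get("https://example.com").result()
#   resp.elapsed  # e.g. 87 (int, milliseconds)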
def print_info(title, info):
print(Style.BRIGHT + Fore.GREEN + "[" +
Fore.YELLOW + "*" +
Fore.GREEN + f"] {title}" +
Fore.WHITE + f" {info}" +
Fore.GREEN + " on:")
def print_error(err, errstr, var, verbose=False):
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.RED + f" {errstr}" +
Fore.YELLOW + f" {err if verbose else var}")
def format_response_time(response_time, verbose):
return " [{} ms]".format(response_time) if verbose else ""
def print_found(social_network, url, response_time, verbose=False):
print((Style.BRIGHT + Fore.WHITE + "[" +
Fore.GREEN + "+" +
Fore.WHITE + "]" +
format_response_time(response_time, verbose) +
Fore.GREEN + f" {social_network}:"), url)
def print_not_found(social_network, response_time, verbose=False):
print((Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
format_response_time(response_time, verbose) +
Fore.GREEN + f" {social_network}:" +
Fore.YELLOW + " Not Found!"))
def print_invalid(social_network, msg):
"""Print invalid search result."""
print((Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.GREEN + f" {social_network}:" +
Fore.YELLOW + f" {msg}"))
def get_response(request_future, error_type, social_network, verbose=False, retry_no=None):
global proxy_list
try:
rsp = request_future.result()
if rsp.status_code:
return rsp, error_type, rsp.elapsed
except requests.exceptions.HTTPError as errh:
print_error(errh, "HTTP Error:", social_network, verbose)
# In case our proxy fails, we retry with another proxy.
except requests.exceptions.ProxyError as errp:
        if retry_no is not None and retry_no > 0 and len(proxy_list) > 0:
            # Select a new proxy and retry, propagating the retried result.
            new_proxy = random.choice(proxy_list)
            new_proxy = f'{new_proxy.protocol}://{new_proxy.ip}:{new_proxy.port}'
            print(f'Retrying with {new_proxy}')
            request_future.proxy = {'http': new_proxy, 'https': new_proxy}
            return get_response(request_future, error_type, social_network,
                                verbose, retry_no=retry_no - 1)
else:
print_error(errp, "Proxy error:", social_network, verbose)
except requests.exceptions.ConnectionError as errc:
print_error(errc, "Error Connecting:", social_network, verbose)
except requests.exceptions.Timeout as errt:
print_error(errt, "Timeout Error:", social_network, verbose)
except requests.exceptions.RequestException as err:
print_error(err, "Unknown error:", social_network, verbose)
return None, "", -1
def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False, proxy=None, print_found_only=False):
"""Run Sherlock Analysis.
Checks for existence of username on various social media sites.
Keyword Arguments:
username -- String indicating username that report
should be created against.
site_data -- Dictionary containing all of the site data.
verbose -- Boolean indicating whether to give verbose output.
tor -- Boolean indicating whether to use a tor circuit for the requests.
unique_tor -- Boolean indicating whether to use a new tor circuit for each request.
proxy -- String indicating the proxy URL
Return Value:
Dictionary containing results from report. Key of dictionary is the name
of the social network site, and the value is another dictionary with
the following keys:
url_main: URL of main site.
url_user: URL of user on site (if account exists).
exists: String indicating results of test for account existence.
http_status: HTTP status code of query which checked for existence on
site.
response_text: Text that came back from request. May be None if
there was an HTTP error when checking for existence.
"""
global amount
print_info("Checking username", username)
    # Send a browser User-Agent: some sites return different (or incomplete)
    # pages to clients they identify as bots.
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0'
}
# Allow 1 thread for each external service, so `len(site_data)` threads total
executor = ThreadPoolExecutor(max_workers=len(site_data))
# Create session based on request methodology
underlying_session = requests.session()
underlying_request = requests.Request()
if tor or unique_tor:
underlying_request = TorRequest()
underlying_session = underlying_request.session
# Create multi-threaded session for all requests. Use our custom FuturesSession that exposes response time
session = ElapsedFuturesSession(
executor=executor, session=underlying_session)
# Results from analysis of all sites
results_total = {}
# First create futures for all requests. This allows for the requests to run in parallel
for social_network, net_info in site_data.items():
# Results from analysis of this specific site
results_site = {}
# Record URL of main site
results_site['url_main'] = net_info.get("urlMain")
# Don't make request if username is invalid for the site
regex_check = net_info.get("regexCheck")
if regex_check and re.search(regex_check, username) is None:
# No need to do the check at the site: this user name is not allowed.
print_invalid(social_network, "Illegal Username Format For This Site!")
results_site["exists"] = "illegal"
results_site["url_user"] = ""
results_site['http_status'] = ""
results_site['response_text'] = ""
results_site['response_time_ms'] = ""
else:
# URL of user on site (if it exists)
url = net_info["url"].format(username)
results_site["url_user"] = url
url_probe = net_info.get("urlProbe")
if url_probe is None:
#Probe URL is normal one seen by people out on the web.
url_probe = url
else:
#There is a special URL for probing existence separate
#from where the user profile normally can be found.
url_probe = url_probe.format(username)
request_method = session.get
if social_network != "GitHub":
# If only the status_code is needed don't download the body
if net_info["errorType"] == 'status_code':
request_method = session.head
if net_info["errorType"] == "response_url":
# Site forwards request to a different URL if username not
# found. Disallow the redirect so we can capture the
# http status from the original URL request.
allow_redirects = False
else:
# Allow whatever redirect that the site wants to do.
# The final result of the request will be what is available.
allow_redirects = True
# This future starts running the request in a new thread, doesn't block the main thread
if proxy != None:
proxies = {"http": proxy, "https": proxy}
future = request_method(url=url_probe, headers=headers,
proxies=proxies,
allow_redirects=allow_redirects
)
else:
future = request_method(url=url_probe, headers=headers,
allow_redirects=allow_redirects
)
# Store future in data for access later
net_info["request_future"] = future
            # Reset identity for Tor (if needed)
if unique_tor:
underlying_request.reset_identity()
# Add this site's results into final dictionary with all of the other results.
results_total[social_network] = results_site
# Open the file containing account links
# Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
for social_network, net_info in site_data.items():
# Retrieve results again
results_site = results_total.get(social_network)
# Retrieve other site information again
url = results_site.get("url_user")
exists = results_site.get("exists")
if exists is not None:
# We have already determined the user doesn't exist here
continue
# Get the expected error type
error_type = net_info["errorType"]
# Default data in case there are any failures in doing a request.
http_status = "?"
response_text = ""
# Retrieve future and ensure it has finished
future = net_info["request_future"]
r, error_type, response_time = get_response(request_future=future,
error_type=error_type,
social_network=social_network,
verbose=verbose,
retry_no=3)
# Attempt to get request information
try:
http_status = r.status_code
except:
pass
try:
response_text = r.text.encode(r.encoding)
except:
pass
if error_type == "message":
error = net_info.get("errorMsg")
# Checks if the error message is in the HTML
            if error not in r.text:
print_found(social_network, url, response_time, verbose)
exists = "yes"
amount = amount+1
else:
if not print_found_only:
print_not_found(social_network, response_time, verbose)
exists = "no"
elif error_type == "status_code":
# Checks if the status code of the response is 2XX
            if 200 <= r.status_code < 300:
print_found(social_network, url, response_time, verbose)
exists = "yes"
amount = amount+1
else:
if not print_found_only:
print_not_found(social_network, response_time, verbose)
exists = "no"
elif error_type == "response_url":
# For this detection method, we have turned off the redirect.
# So, there is no need to check the response URL: it will always
# match the request. Instead, we will ensure that the response
# code indicates that the request was successful (i.e. no 404, or
# forward to some odd redirect).
if 200 <= r.status_code < 300:
#
print_found(social_network, url, response_time, verbose)
exists = "yes"
amount = amount+1
else:
if not print_found_only:
print_not_found(social_network, response_time, verbose)
exists = "no"
elif error_type == "":
if not print_found_only:
print_invalid(social_network, "Error!")
exists = "error"
# Save exists flag
results_site['exists'] = exists
# Save results from request
results_site['http_status'] = http_status
results_site['response_text'] = response_text
results_site['response_time_ms'] = response_time
# Add this site's results into final dictionary with all of the other results.
results_total[social_network] = results_site
return results_total
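# Illustrative programmatic use (hypothetical username and site_data):
#
#   results = sherlock("user123", site_data, verbose=True)
#   results["GitHub"]["exists"]  # one of "yes", "no", "illegal" or "error"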
def main():
# Colorama module's initialization.
init(autoreset=True)
version_string = f"%(prog)s {__version__}\n" + \
f"{requests.__description__}: {requests.__version__}\n" + \
f"Python: {platform.python_version()}"
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
description=f"{module_name} (Version {__version__})"
)
parser.add_argument("--version",
action="version", version=version_string,
help="Display version information and dependencies."
)
parser.add_argument("--verbose", "-v", "-d", "--debug",
action="store_true", dest="verbose", default=False,
help="Display extra debugging information and metrics."
)
parser.add_argument("--rank", "-r",
action="store_true", dest="rank", default=False,
help="Present websites ordered by their Alexa.com global rank in popularity.")
parser.add_argument("--folderoutput", "-fo", dest="folderoutput",
help="If using multiple usernames, the output of the results will be saved at this folder."
)
parser.add_argument("--output", "-o", dest="output",
help="If using single username, the output of the result will be saved at this file."
)
parser.add_argument("--tor", "-t",
action="store_true", dest="tor", default=False,
help="Make requests over Tor; increases runtime; requires Tor to be installed and in system path.")
parser.add_argument("--unique-tor", "-u",
action="store_true", dest="unique_tor", default=False,
help="Make requests over Tor with new Tor circuit after each request; increases runtime; requires Tor to be installed and in system path.")
parser.add_argument("--csv",
action="store_true", dest="csv", default=False,
help="Create Comma-Separated Values (CSV) File."
)
parser.add_argument("--site",
action="append", metavar='SITE_NAME',
dest="site_list", default=None,
help="Limit analysis to just the listed sites. Add multiple options to specify more than one site."
)
parser.add_argument("--proxy", "-p", metavar='PROXY_URL',
action="store", dest="proxy", default=None,
help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080"
)
parser.add_argument("--json", "-j", metavar="JSON_FILE",
dest="json_file", default="data.json",
help="Load data from a JSON file or an online, valid, JSON file.")
parser.add_argument("--proxy_list", "-pl", metavar='PROXY_LIST',
action="store", dest="proxy_list", default=None,
help="Make requests over a proxy randomly chosen from a list generated from a .csv file."
)
parser.add_argument("--check_proxies", "-cp", metavar='CHECK_PROXY',
action="store", dest="check_prox", default=None,
help="To be used with the '--proxy_list' parameter. "
"The script will check if the proxies supplied in the .csv file are working and anonymous."
"Put 0 for no limit on successfully checked proxies, or another number to institute a limit."
)
parser.add_argument("--print-found",
action="store_true", dest="print_found_only", default=False,
help="Do not output sites where the username was not found."
)
parser.add_argument("username",
nargs='+', metavar='USERNAMES',
action="store",
help="One or more usernames to check with social networks."
)
args = parser.parse_args()
# Argument check
# TODO regex check on args.proxy
if args.tor and (args.proxy != None or args.proxy_list != None):
raise Exception("Tor and Proxy cannot be set in the meantime.")
# Proxy argument check.
# Does not necessarily need to throw an error,
# since we could join the single proxy with the ones generated from the .csv,
# but it seems unnecessarily complex at this time.
if args.proxy != None and args.proxy_list != None:
raise Exception("A single proxy cannot be used along with proxy list.")
# Make prompts
if args.proxy != None:
print("Using the proxy: " + args.proxy)
global proxy_list
if args.proxy_list != None:
print_info("Loading proxies from", args.proxy_list)
proxy_list = load_proxies_from_csv(args.proxy_list)
# Checking if proxies should be checked for anonymity.
if args.check_prox != None and args.proxy_list != None:
try:
limit = int(args.check_prox)
if limit == 0:
proxy_list = check_proxy_list(proxy_list)
elif limit > 0:
proxy_list = check_proxy_list(proxy_list, limit)
else:
raise ValueError
except ValueError:
raise Exception("Prameter --check_proxies/-cp must be a positive intiger.")
if args.tor or args.unique_tor:
print("Using Tor to make requests")
print("Warning: some websites might refuse connecting over Tor, so note that using this option might increase connection errors.")
# Check if both output methods are entered as input.
if args.output is not None and args.folderoutput is not None:
print("You can only use one of the output methods.")
sys.exit(1)
# Check validity for single username output.
if args.output is not None and len(args.username) != 1:
print("You can only use --output with a single username")
sys.exit(1)
response_json_online = None
site_data_all = None
# Try to load json from website.
try:
response_json_online = requests.get(url=args.json_file)
    except requests.exceptions.MissingSchema:  # A missing schema usually means the argument is a local file rather than a URL.
pass
# Check if the response is appropriate.
if response_json_online is not None and response_json_online.status_code == 200:
# Since we got data from a website, try to load json and exit if parsing fails.
try:
site_data_all = response_json_online.json()
except ValueError:
print("Invalid JSON from website!")
sys.exit(1)
pass
data_file_path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), args.json_file)
# This will be none if the request had a missing schema
if site_data_all is None:
# Check if the file exists otherwise exit.
if not os.path.exists(data_file_path):
print("JSON file at doesn't exist.")
print(
"If this is not a file but a website, make sure you have appended http:// or https://.")
sys.exit(1)
        else:
            with open(data_file_path, "r", encoding="utf-8") as raw:
                try:
                    site_data_all = json.load(raw)
                except ValueError:
                    print("Invalid JSON loaded from file.")
                    sys.exit(1)
if args.site_list is None:
# Not desired to look at a sub-set of sites
site_data = site_data_all
else:
# User desires to selectively run queries on a sub-set of the site list.
# Make sure that the sites are supported & build up pruned site database.
site_data = {}
site_missing = []
for site in args.site_list:
for existing_site in site_data_all:
if site.lower() == existing_site.lower():
site_data[existing_site] = site_data_all[existing_site]
if not site_data:
# Build up list of sites not supported for future error message.
site_missing.append(f"'{site}'")
if site_missing:
print(
f"Error: Desired sites not found: {", ".join(site_missing)}.")
sys.exit(1)
if args.rank:
# Sort data by rank
site_dataCpy = dict(site_data)
ranked_sites = sorted(site_data, key=lambda k: ("rank" not in k, site_data[k].get("rank", sys.maxsize)))
site_data = {}
for site in ranked_sites:
site_data[site] = site_dataCpy.get(site)
# Run report on all specified users.
for username in args.username:
print()
if args.output:
file = open(args.output, "w", encoding="utf-8")
        elif args.folderoutput:  # In case we handle multiple usernames in a targeted folder.
            # If the folder doesn't exist, create it first.
if not os.path.isdir(args.folderoutput):
os.mkdir(args.folderoutput)
file = open(os.path.join(args.folderoutput,
username + ".txt"), "w", encoding="utf-8")
else:
file = open(username + ".txt", "w", encoding="utf-8")
        # Try to use a random member of 'proxy_list' as the proxy for the request.
        # If the list is unavailable or empty, fall back to args.proxy.
try:
random_proxy = random.choice(proxy_list)
proxy = f'{random_proxy.protocol}://{random_proxy.ip}:{random_proxy.port}'
except (NameError, IndexError):
proxy = args.proxy
results = {}
        results = sherlock(username, site_data, verbose=args.verbose,
                           tor=args.tor, unique_tor=args.unique_tor, proxy=proxy, print_found_only=args.print_found_only)
exists_counter = 0
for website_name in results:
dictionary = results[website_name]
if dictionary.get("exists") == "yes":
exists_counter += 1
file.write(dictionary["url_user"] + "\n")
file.write("Total Websites : {}".format(exists_counter))
file.close()
if args.csv == True:
with open(username + ".csv", "w", newline='', encoding="utf-8") as csv_report:
writer = csv.writer(csv_report)
writer.writerow(['username',
'name',
'url_main',
'url_user',
'exists',
'http_status',
'response_time_ms'
]
)
for site in results:
writer.writerow([username,
site,
results[site]['url_main'],
results[site]['url_user'],
results[site]['exists'],
results[site]['http_status'],
results[site]['response_time_ms']
]
)
if __name__ == "__main__":
main()
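# Illustrative command lines (assuming this script is saved as sherlock.py;
# the username is hypothetical, the flags are the ones defined above):
#   python3 sherlock.py user123 --csv
#   python3 sherlock.py user123 --print-found --proxy socks5://127.0.0.1:1080
#   python3 sherlock.py user123 --tor --verbose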
|
#! /usr/bin/env python3
"""
Sherlock: Find Usernames Across Social Networks Module
This module contains the main logic to search for usernames at social
networks.
"""
import csv
import json
import os
import platform
import re
import sys
import random
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from concurrent.futures import ThreadPoolExecutor
from time import time
import requests
from colorama import Fore, Style, init
from requests_futures.sessions import FuturesSession
from torrequest import TorRequest
from load_proxies import load_proxies_from_csv, check_proxy_list
module_name = "Sherlock: Find Usernames Across Social Networks"
__version__ = "0.8.3"
amount = 0
global proxy_list
proxy_list = []
class ElapsedFuturesSession(FuturesSession):
"""
Extends FutureSession to add a response time metric to each request.
This is taken (almost) directly from here: https://github.com/ross/requests-futures#working-in-the-background
"""
def request(self, method, url, hooks={}, *args, **kwargs):
start = time()
def timing(r, *args, **kwargs):
elapsed_sec = time() - start
r.elapsed = round(elapsed_sec * 1000)
try:
if isinstance(hooks['response'], (list, tuple)):
# needs to be first so we don't time other hooks execution
hooks['response'].insert(0, timing)
else:
hooks['response'] = [timing, hooks['response']]
except KeyError:
hooks['response'] = timing
return super(ElapsedFuturesSession, self).request(method, url, hooks=hooks, *args, **kwargs)
def print_info(title, info):
print(Style.BRIGHT + Fore.GREEN + "[" +
Fore.YELLOW + "*" +
Fore.GREEN + f"] {title}" +
Fore.WHITE + f" {info}" +
Fore.GREEN + " on:")
def print_error(err, errstr, var, verbose=False):
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.RED + f" {errstr}" +
Fore.YELLOW + f" {err if verbose else var}")
def format_response_time(response_time, verbose):
return " [{} ms]".format(response_time) if verbose else ""
def print_found(social_network, url, response_time, verbose=False):
print((Style.BRIGHT + Fore.WHITE + "[" +
Fore.GREEN + "+" +
Fore.WHITE + "]" +
format_response_time(response_time, verbose) +
Fore.GREEN + f" {social_network}:"), url)
def print_not_found(social_network, response_time, verbose=False):
print((Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
format_response_time(response_time, verbose) +
Fore.GREEN + f" {social_network}:" +
Fore.YELLOW + " Not Found!"))
def print_invalid(social_network, msg):
"""Print invalid search result."""
print((Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.GREEN + f" {social_network}:" +
Fore.YELLOW + f" {msg}"))
def get_response(request_future, error_type, social_network, verbose=False, retry_no=None):
global proxy_list
try:
rsp = request_future.result()
if rsp.status_code:
return rsp, error_type, rsp.elapsed
except requests.exceptions.HTTPError as errh:
print_error(errh, "HTTP Error:", social_network, verbose)
# In case our proxy fails, we retry with another proxy.
except requests.exceptions.ProxyError as errp:
if retry_no>0 and len(proxy_list)>0:
#Selecting the new proxy.
new_proxy = random.choice(proxy_list)
new_proxy = f'{new_proxy.protocol}://{new_proxy.ip}:{new_proxy.port}'
print(f'Retrying with {new_proxy}')
request_future.proxy = {'http':new_proxy,'https':new_proxy}
get_response(request_future,error_type, social_network, verbose,retry_no=retry_no-1)
else:
print_error(errp, "Proxy error:", social_network, verbose)
except requests.exceptions.ConnectionError as errc:
print_error(errc, "Error Connecting:", social_network, verbose)
except requests.exceptions.Timeout as errt:
print_error(errt, "Timeout Error:", social_network, verbose)
except requests.exceptions.RequestException as err:
print_error(err, "Unknown error:", social_network, verbose)
return None, "", -1
def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False, proxy=None, print_found_only=False):
"""Run Sherlock Analysis.
Checks for existence of username on various social media sites.
Keyword Arguments:
username -- String indicating username that report
should be created against.
site_data -- Dictionary containing all of the site data.
verbose -- Boolean indicating whether to give verbose output.
tor -- Boolean indicating whether to use a tor circuit for the requests.
unique_tor -- Boolean indicating whether to use a new tor circuit for each request.
proxy -- String indicating the proxy URL
Return Value:
Dictionary containing results from report. Key of dictionary is the name
of the social network site, and the value is another dictionary with
the following keys:
url_main: URL of main site.
url_user: URL of user on site (if account exists).
exists: String indicating results of test for account existence.
http_status: HTTP status code of query which checked for existence on
site.
response_text: Text that came back from request. May be None if
there was an HTTP error when checking for existence.
"""
global amount
print_info("Checking username", username)
# A user agent is needed because some sites don't
# return the correct information since they think that
# we are bots
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0'
}
# Allow 1 thread for each external service, so `len(site_data)` threads total
executor = ThreadPoolExecutor(max_workers=len(site_data))
# Create session based on request methodology
underlying_session = requests.session()
underlying_request = requests.Request()
if tor or unique_tor:
underlying_request = TorRequest()
underlying_session = underlying_request.session
# Create multi-threaded session for all requests. Use our custom FuturesSession that exposes response time
session = ElapsedFuturesSession(
executor=executor, session=underlying_session)
# Results from analysis of all sites
results_total = {}
# First create futures for all requests. This allows for the requests to run in parallel
for social_network, net_info in site_data.items():
# Results from analysis of this specific site
results_site = {}
# Record URL of main site
results_site['url_main'] = net_info.get("urlMain")
# Don't make request if username is invalid for the site
regex_check = net_info.get("regexCheck")
if regex_check and re.search(regex_check, username) is None:
# No need to do the check at the site: this user name is not allowed.
print_invalid(social_network, "Illegal Username Format For This Site!")
results_site["exists"] = "illegal"
results_site["url_user"] = ""
results_site['http_status'] = ""
results_site['response_text'] = ""
results_site['response_time_ms'] = ""
else:
# URL of user on site (if it exists)
url = net_info["url"].format(username)
results_site["url_user"] = url
url_probe = net_info.get("urlProbe")
if url_probe is None:
#Probe URL is normal one seen by people out on the web.
url_probe = url
else:
#There is a special URL for probing existence separate
#from where the user profile normally can be found.
url_probe = url_probe.format(username)
request_method = session.get
if social_network != "GitHub":
# If only the status_code is needed don't download the body
if net_info["errorType"] == 'status_code':
request_method = session.head
if net_info["errorType"] == "response_url":
# Site forwards request to a different URL if username not
# found. Disallow the redirect so we can capture the
# http status from the original URL request.
allow_redirects = False
else:
# Allow whatever redirect that the site wants to do.
# The final result of the request will be what is available.
allow_redirects = True
# This future starts running the request in a new thread, doesn't block the main thread
if proxy != None:
proxies = {"http": proxy, "https": proxy}
future = request_method(url=url_probe, headers=headers,
proxies=proxies,
allow_redirects=allow_redirects
)
else:
future = request_method(url=url_probe, headers=headers,
allow_redirects=allow_redirects
)
# Store future in data for access later
net_info["request_future"] = future
# Reset identify for tor (if needed)
if unique_tor:
underlying_request.reset_identity()
# Add this site's results into final dictionary with all of the other results.
results_total[social_network] = results_site
# Open the file containing account links
# Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
for social_network, net_info in site_data.items():
# Retrieve results again
results_site = results_total.get(social_network)
# Retrieve other site information again
url = results_site.get("url_user")
exists = results_site.get("exists")
if exists is not None:
# We have already determined the user doesn't exist here
continue
# Get the expected error type
error_type = net_info["errorType"]
# Default data in case there are any failures in doing a request.
http_status = "?"
response_text = ""
# Retrieve future and ensure it has finished
future = net_info["request_future"]
r, error_type, response_time = get_response(request_future=future,
error_type=error_type,
social_network=social_network,
verbose=verbose,
retry_no=3)
# Attempt to get request information
try:
http_status = r.status_code
except:
pass
try:
response_text = r.text.encode(r.encoding)
except:
pass
if error_type == "message":
error = net_info.get("errorMsg")
# Checks if the error message is in the HTML
if not error in r.text:
print_found(social_network, url, response_time, verbose)
exists = "yes"
amount = amount+1
else:
if not print_found_only:
print_not_found(social_network, response_time, verbose)
exists = "no"
elif error_type == "status_code":
# Checks if the status code of the response is 2XX
if not r.status_code >= 300 or r.status_code < 200:
print_found(social_network, url, response_time, verbose)
exists = "yes"
amount = amount+1
else:
if not print_found_only:
print_not_found(social_network, response_time, verbose)
exists = "no"
elif error_type == "response_url":
# For this detection method, we have turned off the redirect.
# So, there is no need to check the response URL: it will always
# match the request. Instead, we will ensure that the response
# code indicates that the request was successful (i.e. no 404, or
# forward to some odd redirect).
if 200 <= r.status_code < 300:
#
print_found(social_network, url, response_time, verbose)
exists = "yes"
amount = amount+1
else:
if not print_found_only:
print_not_found(social_network, response_time, verbose)
exists = "no"
elif error_type == "":
if not print_found_only:
print_invalid(social_network, "Error!")
exists = "error"
# Save exists flag
results_site['exists'] = exists
# Save results from request
results_site['http_status'] = http_status
results_site['response_text'] = response_text
results_site['response_time_ms'] = response_time
# Add this site's results into final dictionary with all of the other results.
results_total[social_network] = results_site
return results_total
def main():
# Colorama module's initialization.
init(autoreset=True)
version_string = f"%(prog)s {__version__}\n" + \
f"{requests.__description__}: {requests.__version__}\n" + \
f"Python: {platform.python_version()}"
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
description=f"{module_name} (Version {__version__})"
)
parser.add_argument("--version",
action="version", version=version_string,
help="Display version information and dependencies."
)
parser.add_argument("--verbose", "-v", "-d", "--debug",
action="store_true", dest="verbose", default=False,
help="Display extra debugging information and metrics."
)
parser.add_argument("--rank", "-r",
action="store_true", dest="rank", default=False,
help="Present websites ordered by their Alexa.com global rank in popularity.")
parser.add_argument("--folderoutput", "-fo", dest="folderoutput",
help="If using multiple usernames, the output of the results will be saved at this folder."
)
parser.add_argument("--output", "-o", dest="output",
help="If using single username, the output of the result will be saved at this file."
)
parser.add_argument("--tor", "-t",
action="store_true", dest="tor", default=False,
help="Make requests over Tor; increases runtime; requires Tor to be installed and in system path.")
parser.add_argument("--unique-tor", "-u",
action="store_true", dest="unique_tor", default=False,
help="Make requests over Tor with new Tor circuit after each request; increases runtime; requires Tor to be installed and in system path.")
parser.add_argument("--csv",
action="store_true", dest="csv", default=False,
help="Create Comma-Separated Values (CSV) File."
)
parser.add_argument("--site",
action="append", metavar='SITE_NAME',
dest="site_list", default=None,
help="Limit analysis to just the listed sites. Add multiple options to specify more than one site."
)
parser.add_argument("--proxy", "-p", metavar='PROXY_URL',
action="store", dest="proxy", default=None,
help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080"
)
parser.add_argument("--json", "-j", metavar="JSON_FILE",
dest="json_file", default="data.json",
help="Load data from a JSON file or an online, valid, JSON file.")
parser.add_argument("--proxy_list", "-pl", metavar='PROXY_LIST',
action="store", dest="proxy_list", default=None,
help="Make requests over a proxy randomly chosen from a list generated from a .csv file."
)
parser.add_argument("--check_proxies", "-cp", metavar='CHECK_PROXY',
action="store", dest="check_prox", default=None,
help="To be used with the '--proxy_list' parameter. "
"The script will check if the proxies supplied in the .csv file are working and anonymous."
"Put 0 for no limit on successfully checked proxies, or another number to institute a limit."
)
parser.add_argument("--print-found",
action="store_true", dest="print_found_only", default=False,
help="Do not output sites where the username was not found."
)
parser.add_argument("username",
nargs='+', metavar='USERNAMES',
action="store",
help="One or more usernames to check with social networks."
)
args = parser.parse_args()
# Argument check
# TODO regex check on args.proxy
if args.tor and (args.proxy != None or args.proxy_list != None):
raise Exception("Tor and Proxy cannot be set in the meantime.")
# Proxy argument check.
# Does not necessarily need to throw an error,
# since we could join the single proxy with the ones generated from the .csv,
# but it seems unnecessarily complex at this time.
if args.proxy != None and args.proxy_list != None:
raise Exception("A single proxy cannot be used along with proxy list.")
# Make prompts
if args.proxy != None:
print("Using the proxy: " + args.proxy)
global proxy_list
if args.proxy_list != None:
print_info("Loading proxies from", args.proxy_list)
proxy_list = load_proxies_from_csv(args.proxy_list)
# Checking if proxies should be checked for anonymity.
if args.check_prox != None and args.proxy_list != None:
try:
limit = int(args.check_prox)
if limit == 0:
proxy_list = check_proxy_list(proxy_list)
elif limit > 0:
proxy_list = check_proxy_list(proxy_list, limit)
else:
raise ValueError
except ValueError:
raise Exception("Prameter --check_proxies/-cp must be a positive intiger.")
if args.tor or args.unique_tor:
print("Using Tor to make requests")
print("Warning: some websites might refuse connecting over Tor, so note that using this option might increase connection errors.")
# Check if both output methods are entered as input.
if args.output is not None and args.folderoutput is not None:
print("You can only use one of the output methods.")
sys.exit(1)
# Check validity for single username output.
if args.output is not None and len(args.username) != 1:
print("You can only use --output with a single username")
sys.exit(1)
response_json_online = None
site_data_all = None
# Try to load json from website.
try:
response_json_online = requests.get(url=args.json_file)
except requests.exceptions.MissingSchema: # In case the schema is wrong it's because it may not be a website
pass
# Check if the response is appropriate.
if response_json_online is not None and response_json_online.status_code == 200:
# Since we got data from a website, try to load json and exit if parsing fails.
try:
site_data_all = response_json_online.json()
except ValueError:
print("Invalid JSON from website!")
sys.exit(1)
pass
data_file_path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), args.json_file)
    # site_data_all will still be None if the request had a missing schema (i.e. a local file was given).
if site_data_all is None:
# Check if the file exists otherwise exit.
if not os.path.exists(data_file_path):
print("JSON file at doesn't exist.")
print(
"If this is not a file but a website, make sure you have appended http:// or https://.")
sys.exit(1)
else:
            with open(data_file_path, "r", encoding="utf-8") as raw:
                try:
                    site_data_all = json.load(raw)
                except ValueError:
                    print("Invalid JSON loaded from file.")
                    sys.exit(1)
if args.site_list is None:
# Not desired to look at a sub-set of sites
site_data = site_data_all
else:
# User desires to selectively run queries on a sub-set of the site list.
# Make sure that the sites are supported & build up pruned site database.
site_data = {}
site_missing = []
        for site in args.site_list:
            site_found = False
            for existing_site in site_data_all:
                if site.lower() == existing_site.lower():
                    site_data[existing_site] = site_data_all[existing_site]
                    site_found = True
            if not site_found:
                # Build up list of sites not supported for future error message.
                site_missing.append(f"'{site}'")
if site_missing:
print(
f"Error: Desired sites not found: {', '.join(site_missing)}.")
sys.exit(1)
if args.rank:
# Sort data by rank
site_dataCpy = dict(site_data)
        ranked_sites = sorted(site_data, key=lambda k: ("rank" not in site_data[k], site_data[k].get("rank", sys.maxsize)))
site_data = {}
for site in ranked_sites:
site_data[site] = site_dataCpy.get(site)
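    # Illustrative ordering after the sort above: sites that define a "rank" come first,
    # in ascending rank, e.g. {"A": {"rank": 20}, "B": {}, "C": {"rank": 5}} -> C, A, B.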
# Run report on all specified users.
for username in args.username:
print()
if args.output:
file = open(args.output, "w", encoding="utf-8")
        elif args.folderoutput:  # In case we handle multiple usernames in a targeted folder.
            # If the folder doesn't exist, create it first.
if not os.path.isdir(args.folderoutput):
os.mkdir(args.folderoutput)
file = open(os.path.join(args.folderoutput,
username + ".txt"), "w", encoding="utf-8")
else:
file = open(username + ".txt", "w", encoding="utf-8")
        # We try to add a random member of the 'proxy_list' var as the proxy of the request.
# If we can't access the list or it is empty, we proceed with args.proxy as the proxy.
try:
random_proxy = random.choice(proxy_list)
proxy = f'{random_proxy.protocol}://{random_proxy.ip}:{random_proxy.port}'
except (NameError, IndexError):
proxy = args.proxy
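        # Illustrative result (assuming load_proxies_from_csv yields objects with 'protocol',
        # 'ip' and 'port' attributes): protocol='socks5', ip='127.0.0.1', port=1080
        # gives proxy == 'socks5://127.0.0.1:1080'.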
results = {}
        results = sherlock(username, site_data, verbose=args.verbose,
                           tor=args.tor, unique_tor=args.unique_tor, proxy=proxy, print_found_only=args.print_found_only)
exists_counter = 0
for website_name in results:
dictionary = results[website_name]
if dictionary.get("exists") == "yes":
exists_counter += 1
file.write(dictionary["url_user"] + "\n")
file.write("Total Websites : {}".format(exists_counter))
file.close()
if args.csv == True:
with open(username + ".csv", "w", newline='', encoding="utf-8") as csv_report:
writer = csv.writer(csv_report)
writer.writerow(['username',
'name',
'url_main',
'url_user',
'exists',
'http_status',
'response_time_ms'
]
)
for site in results:
writer.writerow([username,
site,
results[site]['url_main'],
results[site]['url_user'],
results[site]['exists'],
results[site]['http_status'],
results[site]['response_time_ms']
]
)
if __name__ == "__main__":
main()
|
from . import util
from engine import metroverse as mv
def render_boosts(blocks=None, highlight=False, render_stacked=False):
active_boosts = mv.active_boosts(blocks)
names = set()
if blocks is not None:
for block in blocks:
names.update(block['buildings']['all'].keys())
large_hood = len(blocks) > 10 # TODO: move elsewhere
s = """
<h2 style="margin-bottom: 2">Hood Boosts</h2>
<small>(reference <a href='https://docs.metroverse.com/overview/neighborhood-boost#list-of-neighborhood-boosts'>docs</a>)</small>
<p>
<table>
"""
s += """<tr>
<th>Name</th>
<th>Boost % (base)</th>
<th>Building 1</th>
<th>Building 2</th>
<th>Building 3</th>
"""
if render_stacked:
s += """
<th>Stacked boosts</th>
"""
if large_hood:
s += """
<th>"Adjusted" stacked boosts<br/><small style="font-weight: normal">(for large hoods)</small></th>
"""
s += """
<th>Stacked boost multiplier<br/><small style="font-weight: normal">(after diminishing returns)</small></th>
<th>Earned Boost %<br><small style="font-weight: normal">(stacked multiplier * base boost)</small></th>
"""
s += "</tr>"
for boost in mv.BOOSTS:
s += f"<tr><td>{boost["name"]}</td>"
s += f"<td>{boost["pct"]}%</td>"
for building in boost['buildings']:
building_name = building["name"]
# which blocks have the building?
blocks_with_b = [block for block in blocks
if building_name in block['buildings']['all']
or building_name == block["pathway"]]
count = 0
if building_name in mv.BUILDINGS:
count = mv.BUILDINGS[building_name]['count']
elif building_name in mv.PUBLIC:
count = mv.PUBLIC[building_name]['count']
pct = 100.0*count/10000
s += f"""<td {util.highlight_if(len(blocks_with_b) > 0)}>
{building_name} {"(" + str(pct) + "%)" if count > 0 else ""}
"""
if len(blocks_with_b) > 0:
s += "<br />"
for block in blocks_with_b:
if highlight:
s += f""" <a target="_blank" href="/b/{block["num"]}">#{block["num"]}</a>"""
s += "</td>"
if render_stacked:
stacked = active_boosts[boost['name']]["full"]
s += f"<td {util.highlight_if(stacked>0)}>{stacked}</td>"
if large_hood:
# Adjusted stacked
adjusted_stacked = stacked * mv.large_hood_multiplier(len(blocks))
s += f"<td {util.highlight_if(adjusted_stacked>0)}>{adjusted_stacked/1000}</td>"
stacked_boost_multiplier = mv.boost_formula(len(blocks), stacked)
s += f"<td {util.highlight_if(stacked_boost_multiplier>0)}>{stacked_boost_multiplier/1000}</td>"
total_boost = (stacked_boost_multiplier * boost['bps'])//1000/100
s += f"<td {util.highlight_if(total_boost>0)}><b>{total_boost}%</b></td>"
s += "</tr>"
return s + "</table>"
|
from . import util
from engine import metroverse as mv
def render_boosts(blocks=None, highlight=False, render_stacked=False):
active_boosts = mv.active_boosts(blocks)
names = set()
if blocks is not None:
for block in blocks:
names.update(block['buildings']['all'].keys())
large_hood = len(blocks) > 10 # TODO: move elsewhere
s = """
<h2 style="margin-bottom: 2">Hood Boosts</h2>
<small>(reference <a href='https://docs.metroverse.com/overview/neighborhood-boost#list-of-neighborhood-boosts'>docs</a>)</small>
<p>
<table>
"""
s += """<tr>
<th>Name</th>
<th>Boost % (base)</th>
<th>Building 1</th>
<th>Building 2</th>
<th>Building 3</th>
"""
if render_stacked:
s += """
<th>Stacked boosts</th>
"""
if large_hood:
s += """
<th>"Adjusted" stacked boosts<br/><small style="font-weight: normal">(for large hoods)</small></th>
"""
s += """
<th>Stacked boost multiplier<br/><small style="font-weight: normal">(after diminishing returns)</small></th>
<th>Earned Boost %<br><small style="font-weight: normal">(stacked multiplier * base boost)</small></th>
"""
s += "</tr>"
for boost in mv.BOOSTS:
s += f"<tr><td>{boost['name']}</td>"
s += f"<td>{boost['pct']}%</td>"
for building in boost['buildings']:
building_name = building["name"]
# which blocks have the building?
blocks_with_b = [block for block in blocks
if building_name in block['buildings']['all']
or building_name == block["pathway"]]
count = 0
if building_name in mv.BUILDINGS:
count = mv.BUILDINGS[building_name]['count']
elif building_name in mv.PUBLIC:
count = mv.PUBLIC[building_name]['count']
pct = 100.0*count/10000
s += f"""<td {util.highlight_if(len(blocks_with_b) > 0)}>
{building_name} {"(" + str(pct) + "%)" if count > 0 else ""}
"""
if len(blocks_with_b) > 0:
s += "<br />"
for block in blocks_with_b:
if highlight:
s += f""" <a target="_blank" href="/b/{block['num']}">#{block['num']}</a>"""
s += "</td>"
if render_stacked:
stacked = active_boosts[boost['name']]["full"]
s += f"<td {util.highlight_if(stacked>0)}>{stacked}</td>"
if large_hood:
# Adjusted stacked
adjusted_stacked = stacked * mv.large_hood_multiplier(len(blocks))
s += f"<td {util.highlight_if(adjusted_stacked>0)}>{adjusted_stacked/1000}</td>"
stacked_boost_multiplier = mv.boost_formula(len(blocks), stacked)
s += f"<td {util.highlight_if(stacked_boost_multiplier>0)}>{stacked_boost_multiplier/1000}</td>"
total_boost = (stacked_boost_multiplier * boost['bps'])//1000/100
s += f"<td {util.highlight_if(total_boost>0)}><b>{total_boost}%</b></td>"
s += "</tr>"
return s + "</table>"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This script computes the max mean mass concentration of several pollutants
from a CSV file containing the following columns:
- 'DateTime' : ISO 8601 date and time
- 'Timestamp': seconds elapsed since 01/01/1970
- 'PM10 (µg/m3)' (optional)
- 'PM2.5 (µg/m3)' (optional)
- 'PM1 (µg/m3)' (optional)
- 'NO2 (µg/m3)' (optional)
- 'CO (mg/m3)' (optional)
- 'O3 (µg/m3)' (optional)
USAGE:
./exposure.py [csv_file]
If no csv_file is provided, the script opens a file dialog box.
'''
__author__ = "Olivier Nocent and Quentin Martinet"
__copyright__ = "Copyright 2021, Université de Reims Champagne Ardenne"
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Olivier Nocent"
__email__ = "olivier.nocent@univ-reims.fr"
__status__ = "Experimental"
from os import path
import sys
import easygui
import glob
import pandas as pd
from aerolab_utils import *
if len(sys.argv) == 1:
filename = easygui.fileopenbox(
title='Exposure estimation', msg='Choose a CSV file', filetypes=[['*.csv', 'CSV files']])
else:
filename = sys.argv[1]
if not path.exists(filename):
print('\nERROR:', filename, 'does not exist!\n\n')
    exit(1)
df = pd.read_csv(filename)
pollutants = ['PM10 (µg/m3)', 'PM2.5 (µg/m3)', 'PM1 (µg/m3)', 'NO2 (µg/m3)',
'CO (mg/m3)', 'O3 (µg/m3)']
threshold = {
'PM10 (µg/m3)': 45,
'PM2.5 (µg/m3)': 15,
'PM1 (µg/m3)': 15,
'NO2 (µg/m3)': 25,
'CO (mg/m3)': 4,
'O3 (µg/m3)': 100
}
max_value = {}
max_index = {}
for pollutant in pollutants:
max_value[pollutant] = 0
max_index[pollutant] = 0
i, end = 0, df['Timestamp'].iloc[-1] - 24 * 3600
while df.loc[i, 'Timestamp'] < end:
start = df.loc[i, 'Timestamp']
df_24h = df[(df['Timestamp'] >= start) & (
df['Timestamp'] < start + 24 * 3600)]
for pollutant in pollutants:
if pollutant in df.columns:
mean_value = df_24h[pollutant].median()
if mean_value > max_value[pollutant]:
max_value[pollutant] = mean_value
max_index[pollutant] = i
i += 1
if 'O3 (µg/m3)' in df.columns:
i, end = 0, df['Timestamp'].iloc[-1] - 8 * 3600
while df.loc[i, 'Timestamp'] < end:
start = df.loc[i, 'Timestamp']
df_8h = df[(df['Timestamp'] >= start) & (
df['Timestamp'] < start + 8 * 3600)]
        mean_value = df_8h['O3 (µg/m3)'].median()
if mean_value > max_value['O3 (µg/m3)']:
max_value['O3 (µg/m3)'] = mean_value
max_index['O3 (µg/m3)'] = i
i += 1
print('\nMaximum mean mass concentration during 24h:\n')
if 'PM10 (µg/m3)' in df.columns:
print(f"PM10 : {max_value["PM10 (µg/m3)"]: >6.2f} µg/m3\t\t(45 µg/m3) at {df["DateTime"][max_index["PM10 (µg/m3)"]]}")
if 'PM2.5 (µg/m3)' in df.columns:
print(f"PM2.5 : {max_value["PM2.5 (µg/m3)"]: >6.2f} µg/m3\t\t(15 µg/m3) at {df["DateTime"][max_index["PM2.5 (µg/m3)"]]}")
if 'PM1 (µg/m3)' in df.columns:
print(f"PM1 :' {max_value["PM1 (µg/m3)"]: >6.2f} µg/m3\t\t( ? µg/m3) at {df["DateTime"][max_index["PM1 (µg/m3)"]]}")
if 'NO2 (µg/m3)' in df.columns:
print(f"NO2 : {max_value["NO2 (µg/m3)"]: >6.2f} µg/m3\t\t(25 µg/m3) at {df["DateTime"][max_index["NO2 (µg/m3)"]]}")
if 'CO (mg/m3)' in df.columns:
print(f"CO : {max_value["CO (mg/m3)"]: >6.2f} mg/m3\t\t( 4 mg/m3) at {df["DateTime"][max_index["CO (mg/m3)"]]}")
if 'O3 (µg/m3)' in df.columns:
print('\nMaximum mean mass concentration during 8h:\n')
print(f"O3 : {max_value["O3 (µg/m3)"]: >6.2f} µg/m3\t\t(100 µg/m3) at {df["DateTime"][max_index["O3 (µg/m3)"]]}")
period = {
'PM10 (µg/m3)': 0,
'PM2.5 (µg/m3)': 0,
'PM1 (µg/m3)': 0,
'NO2 (µg/m3)': 0,
'CO (mg/m3)': 0,
'O3 (µg/m3)': 0
}
for i in range(1,len(df.index)):
for pollutant in pollutants:
if pollutant in df.columns and df[pollutant][i] > threshold[pollutant]:
period[pollutant] += df['Timestamp'][i] - df['Timestamp'][i-1]
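# Worked example (illustrative values): with samples at t = 0, 60 and 120 s and PM10 readings
# of 40, 50 and 50 µg/m3 (threshold 45), rows 1 and 2 are above the threshold, so
# period['PM10 (µg/m3)'] = (60 - 0) + (120 - 60) = 120 s of exposure time.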
total = df['Timestamp'][len(df.index)-1] - df['Timestamp'][0]
print(f'\nTotal time above thresholds during {format_duration(total)}:\n')
for pollutant in pollutants:
if pollutant in df.columns:
print(f'{pollutant} : {format_duration(period[pollutant])}')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This script computes the max mean mass concentration of several pollutants
from a CSV file containing the following columns:
- 'DateTime' : ISO 8601 date and time
- 'Timestamp': seconds elapsed since 01/01/1970
- 'PM10 (µg/m3)' (optional)
- 'PM2.5 (µg/m3)' (optional)
- 'PM1 (µg/m3)' (optional)
- 'NO2 (µg/m3)' (optional)
- 'CO (mg/m3)' (optional)
- 'O3 (µg/m3)' (optional)
USAGE:
./exposure.py [csv_file]
If no csv_file is provided, the script opens a file dialog box.
'''
__author__ = "Olivier Nocent and Quentin Martinet"
__copyright__ = "Copyright 2021, Université de Reims Champagne Ardenne"
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Olivier Nocent"
__email__ = "olivier.nocent@univ-reims.fr"
__status__ = "Experimental"
from os import path
import sys
import easygui
import glob
import pandas as pd
from aerolab_utils import *
if len(sys.argv) == 1:
filename = easygui.fileopenbox(
title='Exposure estimation', msg='Choose a CSV file', filetypes=[['*.csv', 'CSV files']])
else:
filename = sys.argv[1]
if not path.exists(filename):
print('\nERROR:', filename, 'does not exist!\n\n')
    exit(1)
df = pd.read_csv(filename)
pollutants = ['PM10 (µg/m3)', 'PM2.5 (µg/m3)', 'PM1 (µg/m3)', 'NO2 (µg/m3)',
'CO (mg/m3)', 'O3 (µg/m3)']
threshold = {
'PM10 (µg/m3)': 45,
'PM2.5 (µg/m3)': 15,
'PM1 (µg/m3)': 15,
'NO2 (µg/m3)': 25,
'CO (mg/m3)': 4,
'O3 (µg/m3)': 100
}
max_value = {}
max_index = {}
for pollutant in pollutants:
max_value[pollutant] = 0
max_index[pollutant] = 0
i, end = 0, df['Timestamp'].iloc[-1] - 24 * 3600
while df.loc[i, 'Timestamp'] < end:
start = df.loc[i, 'Timestamp']
df_24h = df[(df['Timestamp'] >= start) & (
df['Timestamp'] < start + 24 * 3600)]
for pollutant in pollutants:
if pollutant in df.columns:
mean_value = df_24h[pollutant].median()
if mean_value > max_value[pollutant]:
max_value[pollutant] = mean_value
max_index[pollutant] = i
i += 1
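# Illustrative alternative (not exactly what the loop above does): with a monotonic
# DatetimeIndex, pandas can compute a similar trailing 24 h statistic directly, e.g.
#   s = df.set_index(pd.to_datetime(df['DateTime']))['PM10 (µg/m3)']
#   s.rolling('24H').median().max()
# The explicit loop is kept because it also records the index of the window start.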
if 'O3 (µg/m3)' in df.columns:
i, end = 0, df['Timestamp'].iloc[-1] - 8 * 3600
while df.loc[i, 'Timestamp'] < end:
start = df.loc[i, 'Timestamp']
df_8h = df[(df['Timestamp'] >= start) & (
df['Timestamp'] < start + 8 * 3600)]
        mean_value = df_8h['O3 (µg/m3)'].median()
if mean_value > max_value['O3 (µg/m3)']:
max_value['O3 (µg/m3)'] = mean_value
max_index['O3 (µg/m3)'] = i
i += 1
print('\nMaximum mean mass concentration during 24h:\n')
if 'PM10 (µg/m3)' in df.columns:
print(f"PM10 : {max_value['PM10 (µg/m3)']: >6.2f} µg/m3\t\t(45 µg/m3) at {df['DateTime'][max_index['PM10 (µg/m3)']]}")
if 'PM2.5 (µg/m3)' in df.columns:
print(f"PM2.5 : {max_value['PM2.5 (µg/m3)']: >6.2f} µg/m3\t\t(15 µg/m3) at {df['DateTime'][max_index['PM2.5 (µg/m3)']]}")
if 'PM1 (µg/m3)' in df.columns:
print(f"PM1 :' {max_value['PM1 (µg/m3)']: >6.2f} µg/m3\t\t( ? µg/m3) at {df['DateTime'][max_index['PM1 (µg/m3)']]}")
if 'NO2 (µg/m3)' in df.columns:
print(f"NO2 : {max_value['NO2 (µg/m3)']: >6.2f} µg/m3\t\t(25 µg/m3) at {df['DateTime'][max_index['NO2 (µg/m3)']]}")
if 'CO (mg/m3)' in df.columns:
print(f"CO : {max_value['CO (mg/m3)']: >6.2f} mg/m3\t\t( 4 mg/m3) at {df['DateTime'][max_index['CO (mg/m3)']]}")
if 'O3 (µg/m3)' in df.columns:
print('\nMaximum mean mass concentration during 8h:\n')
print(f"O3 : {max_value['O3 (µg/m3)']: >6.2f} µg/m3\t\t(100 µg/m3) at {df['DateTime'][max_index['O3 (µg/m3)']]}")
period = {
'PM10 (µg/m3)': 0,
'PM2.5 (µg/m3)': 0,
'PM1 (µg/m3)': 0,
'NO2 (µg/m3)': 0,
'CO (mg/m3)': 0,
'O3 (µg/m3)': 0
}
for i in range(1,len(df.index)):
for pollutant in pollutants:
if pollutant in df.columns and df[pollutant][i] > threshold[pollutant]:
period[pollutant] += df['Timestamp'][i] - df['Timestamp'][i-1]
total = df['Timestamp'][len(df.index)-1] - df['Timestamp'][0]
print(f'\nTotal time above thresholds during {format_duration(total)}:\n')
for pollutant in pollutants:
if pollutant in df.columns:
print(f'{pollutant} : {format_duration(period[pollutant])}')
|
"""
This Module interacts with Gerrit and retrieves Data from Gerrit
"""
import os
import json
import logging
import argparse
import pandas as pd
from datetime import datetime, timedelta
from json.decoder import JSONDecodeError
from urllib.parse import urlunsplit, urlencode
from typing import Tuple, Union
try:
from requests import __version__, Session, adapters, exceptions, urllib3, status_codes
logging.debug(f'Available request module of version {__version__}')
except ImportError:
logging.error('Please install requests module. Use pip install requests.')
class GerritApi:
"""
*Class name :* GerritHandler
*Description :* Class to retrieve data from Gerrit
"""
GET_ALL_REPO_URI = "/projects/?d"
GET_ALL_CHANGES_URI = "/changes/?q=repo:{repo_name}"
GET_ALL_ACTIVE_USERS_URI = "/accounts/?q=is:active"
GET_COMMITS_BY_AGE = "/changes/?q=-age:"
GET_COMMITS_USING_AFTER = "/changes/?q=after:"
def __init__(self, gerrit_server: str, username: str=None, password: str=None):
"""
*Method description :* Initializing values for Gerrit operations from OVF
:param username: Username to login to Gerrit
:type username: String
:param password: Password to login to Gerrit
:type password: String
:param url: Gerrit URL to get commit details
:type url: String
"""
self.gerrit_username = username
self.gerrit_password = password
self.gerrit_url = f"https://{gerrit_server}"
logging.debug(f"GerritDetails:: {self.gerrit_url}, {self.gerrit_username}, {self.gerrit_password}")
if username and password:
self.rest_engine = RestEngine(auth=(self.gerrit_username, self.gerrit_password))
else:
self.rest_engine = RestEngine()
def get_all_projects(self) -> dict:
"""
Method to get all repositories
:returns: :class:`repo_details`: All repo details
:rtype: :class:`repo_details`: Dict
"""
all_repo_details = {}
get_all_repo_url = f"{self.gerrit_url}{GerritApi.GET_ALL_REPO_URI}"
all_repo_resp = self.decode_response(self.rest_engine.rest_request(get_all_repo_url))
for key, value in all_repo_resp.items():
all_repo_details[key] = {"id": value.get("id"), "description": value.get("description"),
"state": value.get("state")}
logging.info(f"List of All repositories : {all_repo_details} {len(all_repo_details)}")
return all_repo_details
def get_all_active_projects(self) -> list:
"""
Method to get all active repositories
:returns: :class:`active_repo_list`: List of active repositories
:rtype: :class:`active_repo_list`: List
"""
active_repo_list = []
all_repo_details = self.get_all_projects()
for key, value in all_repo_details.items():
if value["state"] == "ACTIVE":
active_repo_list.append(key)
logging.info(f"List of active repositories : {active_repo_list} {len(active_repo_list)}")
return active_repo_list
def get_active_user_accounts(self) -> list:
"""
*Method description :* Method to get active user accounts in server
:returns: :class:`all_users_details`: List of commit changes as dict
:rtype: :class:`all_users_details`: list
"""
all_users_details = []
all_users_list, mocker_response = [], []
all_users_url = f"{self.gerrit_url}{GerritApi.GET_ALL_ACTIVE_USERS_URI}&S=0"
response = self.decode_response(self.rest_engine.rest_request(all_users_url))
all_users_list.extend(response)
mocker_response = self.no_limit_mocker(response, mocker_response,
url_to_be_used=f"{self.gerrit_url}{GerritApi.GET_ALL_ACTIVE_USERS_URI}")
if all_users_list:
all_users_list.extend(mocker_response)
logging.info(f"Number Of Active User Accounts in Gerrit: {len(all_users_list)}")
for each_user in all_users_list:
user_id = each_user.get("_account_id")
user_details_url = f"{self.gerrit_url}/accounts/{user_id}/detail"
detailed_response = self.decode_response(self.rest_engine.rest_request(user_details_url))
all_users_details.append(detailed_response)
logging.info(f"Active User Account Details in Gerrit: {all_users_details}")
return all_users_details
    def get_commit_details_in_given_period(self, start=None, duration="24Hours", stop=None):
        # A datetime.utcnow() default argument would be evaluated only once, at import time;
        # resolve the stop time per call instead.
        if stop is None:
            stop = datetime.utcnow()
        all_commits_list, mocker_response = [], []
        if not start:
            start = self.get_start_time(duration, stop)
commits_url = f"{self.gerrit_url}{GerritApi.GET_COMMITS_USING_AFTER}\"{start}\"&S=0"
print(commits_url)
response = self.decode_response(self.rest_engine.rest_request(commits_url))
all_commits_list.extend(response)
mocker_response = self.no_limit_mocker(response, mocker_response,
url_to_be_used=f"{self.gerrit_url}{GerritApi.GET_COMMITS_USING_AFTER}\"{start}\"")
if mocker_response:
all_commits_list.extend(mocker_response)
for each_commit in all_commits_list:
owner_account_url = f"{self.gerrit_url}/accounts/{each_commit.get("owner").get("_account_id")}/detail"
each_commit["owner"] = self.decode_response(self.rest_engine.rest_request(owner_account_url)).get("name")
if each_commit.get("submitter"):
submitter_id = each_commit.get('submitter').get('_account_id')
submit_account_url = f"{self.gerrit_url}/accounts/{submitter_id}/detail"
each_commit["submitter"] = self.decode_response(self.rest_engine.rest_request(
submit_account_url)).get("name")
print(f"Total commits from {start} is: {len(all_commits_list)}")
return all_commits_list
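    # Illustrative first request (values are examples): with duration="24Hours" the URL is
    #   https://<gerrit_server>/changes/?q=after:"2021-01-01 12:00:00.000000"&S=0
    # i.e. Gerrit's 'after:' search operator with the computed start time; further pages are
    # fetched through no_limit_mocker().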
@staticmethod
def get_start_time(duration, stop):
if "minutes" in str(duration).lower():
min_delta = int(str(duration).lower().strip("minutes"))
start = stop - timedelta(minutes=min_delta)
if "hours" in str(duration).lower():
hour_delta = int(str(duration).lower().strip("hours"))
start = stop - timedelta(hours=hour_delta)
elif "days" in str(duration).lower():
day_delta = int(str(duration).lower().strip("days"))
start = stop - timedelta(days=day_delta)
elif "months" in str(duration).lower():
month_delta = int(str(duration).lower().strip("months"))
start = stop - timedelta(months=month_delta)
return start
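    # Example (illustrative): GerritApi.get_start_time("48Hours", datetime(2021, 1, 3, 12, 0))
    # returns datetime(2021, 1, 1, 12, 0), i.e. the stop time minus 48 hours.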
@staticmethod
def decode_response(response: str) -> dict:
"""
*Method description :* Method to decode rest response with Gerrit Magic Prefix
:param response: Raw REST Response Content
:type response: String
        :raises: :class:`ValueError`: Invalid Response Json Content
:returns: :class:`resp_dict`: Dictionary of the given Response content
:rtype: :class:`resp_dict`: Dictionary
"""
output = response[1]
# prefix that comes with the json responses.
gerrit_magic_json_prefix = ")]}'\n"
if str(response[0]) == '200' and isinstance(response[1], str):
if response[1].startswith(gerrit_magic_json_prefix):
output = response[1][len(gerrit_magic_json_prefix):]
try:
output = json.loads(output)
except ValueError:
logging.error(f"Invalid Json in response {output}")
else:
logging.error(f'Rest Call Failed with the status code {response[0]} and response {response[1]}')
return output
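    # Example (illustrative): for a (200, body, headers) response whose body is the magic
    # prefix ")]}'" followed by '\n{"state": "ACTIVE"}', this method returns {'state': 'ACTIVE'}.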
def no_limit_mocker(self, response: str, mocker_response: list, url_to_be_used: str,
def_limit: int =0) -> list:
"""
*Method description :* Method to mock no_limit option in Gerrit Server
:param response: Previous GET Call Response
:type response: String
:param mocker_response: Mocker response list on which no_limit responses are accumulated
:type mocker_response: list
:param url_to_be_used: URL to be used for REST Call in no_limits mocker block
:type url_to_be_used: String
:param def_limit: Number Of Commits Limit for GET call
:type def_limit: Integer
:returns: :class:`mocker_response`: Get REST Response in List
:rtype: :class:`mocker_response`: List
"""
if "_more_" in str(response):
def_limit = def_limit + 500
start_limit = def_limit - 500 + 1
logging.info(f"Fetching {start_limit} - {def_limit} Records. Please Wait...")
new_url = f"{url_to_be_used}&S={str(def_limit)}&n=500"
int_response = self.decode_response(self.rest_engine.rest_request(new_url))
mocker_response.extend(int_response)
self.no_limit_mocker(int_response, mocker_response, url_to_be_used, def_limit)
else:
def_limit = def_limit + 500
new_url = f"{url_to_be_used}&S={str(def_limit)}&n=500"
int_response = self.decode_response(self.rest_engine.rest_request(new_url))
mocker_response.extend(int_response)
return mocker_response
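        # Illustrative paging sequence: the caller first fetches '<url>&S=0'; while the previous
        # page still contains '_more_', this method recurses over '<url>&S=500&n=500',
        # '<url>&S=1000&n=500', ... accumulating every page into mocker_response before returning.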
class RestEngine:
"""
Class to perform rest operations like PUT, PATCH, POST, GET
DELETE, HEAD, OPTIONS.
"""
def __init__(self, **session_args: str):
"""
*Method description :* Initialization method.
1. Initialize a http session with the session parameters passed by user
        2. Session-level authentication, headers and cookies are applied when they are
           provided in the session arguments.
3. These session level parameters are overwritten when the same are provided
at the method level.
:param session_args: Rest arguments that can be set at the session level.
Supported: 'headers', 'cookies', 'auth', 'proxies', 'hooks',
'params', 'verify', 'cert', 'stream', 'trust_env', 'max_redirects'
:type session_args: dict
"""
self.http_session = Session()
self.http_session.auth = session_args.get('auth')
self.http_session.headers.update(session_args.get('headers', {}))
        # As verify defaults to False here, requests made in this session will accept any TLS
        # certificate, i.e. SSL certificate verification is skipped.
self.http_session.verify = session_args.get('verify', False)
#Retries to establish a http secure connection.
https_adapter = adapters.HTTPAdapter(max_retries=3)
self.http_session.mount('https://', https_adapter)
#To set other session parameters supported by requests
self.http_session.params = session_args.get('params')
self.http_session.proxies = session_args.get('proxies')
self.http_session.cert = session_args.get('cert')
self.http_session.hooks = session_args.get('hooks')
self.http_session.stream = session_args.get('stream')
self.http_session.max_redirects = session_args.get('max_redirects')
self.http_session.cookies.update(session_args.get('cookies', {}))
self.http_session.trust_env = session_args.get('trust_env')
@staticmethod
def build_api_url(netloc: str, scheme: str ="https", path: str ="", query: Union[str, dict]="",
fragments: str ="") -> str:
"""Generates complete url from the inputs provided by the user.
URL format : scheme://netloc/path?query#fragments
#query str: page=12
eg : https://docs.python.com/tutorial/index.html?page=12#datatypes
        #query dict: {page: 12, type: tuple}
eg : https://docs.python.com/tutorial/index.html?page=12&type=tuple#datatypes
:param netloc: Network location part. Domain name should be given as input.
(eg): example.com, 168.0.0.1:8080, jenkins.com:8443
:type netloc: str
:param scheme: URL scheme specifier. Can be either http or https, defaults to "https"
:type scheme: str, optional
:param path: Hierarchical path. Additional path to be added to the netloc, defaults to ""
:type path: str, optional
:param query: query string needed to be added. It will be added after the "?" symbol.
Can be given directly as string or dict with multiple key value pairs. if multiple key
value pairs are given then query string will be concatenated with "&" symbol, defaults to ""
:type query: str or dict, optional
:param fragments: Additional piece of information to be added to the url. This will be added
after the "#" symbol, defaults to ""
:type fragments: str, optional
:return: complete api url
:rtype: str
"""
query_str = urlencode(query) if isinstance(query, dict) else query
api_url = urlunsplit((scheme, netloc, path, query_str, fragments))
logging.debug(f"Api url formed --> {api_url}")
return api_url
def rest_request(self, uri: str, operation: str ='GET', **func_args: str) -> Tuple[int, str, dict]:
"""
        *Method description :* Common rest request method to be called for performing the rest operations.
:param uri: rest uri
:type uri: str
:param operation: rest operation, could be GET, POST, PATCH, DELETE, PUT, HEAD, OPTIONS.
:type operation: str
:param func_args: Rest arguments such as 'auth', 'cookies', 'data', 'files',
'headers', 'hooks', 'json', 'params', 'timeout', 'allow_redirects', 'proxies',
'hooks', 'stream', 'verify', 'cert' that can be set at the method request level.
Overrides the session arguments.
:type func_args: dict
:returns: :class:`response_code`: Response code of the rest request call performed
:class:`response`: Response received from the rest request call
                  :class:`response_headers`: Headers in response
:rtype: :class:`response_code`: int
:class:`response`: dict/str
:class:`response_headers`: dict
"""
response_code, response, response_headers = None, None, None
#suppress Insecure certificate warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
rest_response = self.http_session.request(operation.upper(), uri, **func_args)
logging.debug(f'Request uri : {rest_response.request.url}')
logging.debug(f'Request method : {rest_response.request.method}')
logging.debug(f'Request headers : {rest_response.request.headers}')
logging.debug(f'Request data : {rest_response.request.body}')
response_code, response, response_headers = rest_response.status_code, rest_response.content, rest_response.headers
#Uncomment the below line if status code has to raise an exception/error
#rest_response.raise_for_status()
if response:
try:
response = rest_response.json()
except JSONDecodeError:
#default utf-8 encoding is done.
response = rest_response.text
except exceptions.InvalidURL:
logging.error(f'The uri {uri} passed for this {operation.upper()} method is invalid')
except exceptions.HTTPError:
logging.error(f'The {operation.upper()} method failed with the status code {response_code}' \
f' and status message would be any of {status_codes._codes[response_code]}.')
except exceptions.SSLError:
logging.error('SSL Certificate verification failed.')
except exceptions.ConnectionError:
logging.error(f'Failed to establish a connection with {uri}')
except exceptions.InvalidHeader:
logging.error(f'Invalid header exception. Request headers added : {rest_response.request.headers}')
except exceptions.TooManyRedirects:
logging.error('The URL redirects has crossed the maximum limit of 30.')
except exceptions.Timeout:
logging.error(f'{operation.upper()} request timed out. Can be either Connection or Read timeout.')
except exceptions.RequestException:
logging.error('Exception occurred while handling request. Please check if the input passed are correct.')
except TypeError:
logging.error('Please re-check if the input arguments passed are valid.')
logging.debug(f'Rest Response : {response}')
logging.debug(f'Rest Response status code : {response_code}')
logging.debug(f'Rest Response headers : {response_headers}')
if response_code:
logging.debug(f'Possible status message for {response_code} : {status_codes._codes[response_code]}')
return response_code, response, response_headers
class Common:
"""
Class to perform rest operations like PUT, PATCH, POST, GET
DELETE, HEAD, OPTIONS.
"""
@staticmethod
def convert_json_to_dict(json_file: str) -> Union[dict, None]:
"""Converts the input json file into dictionary
:param json_file: Name of the json file to be converted
:type json_file: str
:return: Converted dictionary
:rtype: dict or None
"""
try:
assert os.path.exists(json_file)
with open(json_file, 'r') as file_obj:
data_dict = json.load(file_obj)
return data_dict
except AssertionError:
            logging.error(f'Json file {json_file} does not exist')
except json.decoder.JSONDecodeError as decode_err:
            logging.error(f'Unable to parse {json_file}. Kindly validate the json file. Error occurred: {decode_err}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--servername", type=str, help="Gerrit Server Name/IP")
parser.add_argument("-u", "--user", type=str, help="Gerrit Login Username", default=None)
parser.add_argument("-p", "--password", type=str, help="Gerrit Login Password", default=None)
parser.add_argument("-d", "--duration", type=str, help="Duration for which gerrit changes to be fetched\n\
Supported are Minutes, Hours, Days, Months. Examples: 120Minutes, 48Hours, 2Days, 1Month \n\
Default : 24Hours", default="24Hours")
args = parser.parse_args()
if args.servername and args.duration:
obj = GerritApi(f"{args.servername}")
commits_list = obj.get_commit_details_in_given_period(duration=args.duration)
print(f"Gerrit commits for given {args.duration} is: {len(commits_list)}\n")
print("Gerrit Commits Details are saved in new_commits.csv file")
cl_df = pd.DataFrame(commits_list)
cl_df.to_csv('new_commits.csv')
else:
print("Please pass Gerrit server name with -s and duration with -d argument !!!")
|
"""
This Module interacts with Gerrit and retrieves Data from Gerrit
"""
import os
import json
import logging
import argparse
import pandas as pd
from datetime import datetime, timedelta
from json.decoder import JSONDecodeError
from urllib.parse import urlunsplit, urlencode
from typing import Tuple, Union
try:
from requests import __version__, Session, adapters, exceptions, urllib3, status_codes
logging.debug(f'Available request module of version {__version__}')
except ImportError:
logging.error('Please install requests module. Use pip install requests.')
class GerritApi:
"""
*Class name :* GerritHandler
*Description :* Class to retrieve data from Gerrit
"""
GET_ALL_REPO_URI = "/projects/?d"
GET_ALL_CHANGES_URI = "/changes/?q=repo:{repo_name}"
GET_ALL_ACTIVE_USERS_URI = "/accounts/?q=is:active"
GET_COMMITS_BY_AGE = "/changes/?q=-age:"
GET_COMMITS_USING_AFTER = "/changes/?q=after:"
def __init__(self, gerrit_server: str, username: str=None, password: str=None):
"""
*Method description :* Initializing values for Gerrit operations from OVF
:param username: Username to login to Gerrit
:type username: String
:param password: Password to login to Gerrit
:type password: String
:param url: Gerrit URL to get commit details
:type url: String
"""
self.gerrit_username = username
self.gerrit_password = password
self.gerrit_url = f"https://{gerrit_server}"
logging.debug(f"GerritDetails:: {self.gerrit_url}, {self.gerrit_username}, {self.gerrit_password}")
if username and password:
self.rest_engine = RestEngine(auth=(self.gerrit_username, self.gerrit_password))
else:
self.rest_engine = RestEngine()
def get_all_projects(self) -> dict:
"""
Method to get all repositories
:returns: :class:`repo_details`: All repo details
:rtype: :class:`repo_details`: Dict
"""
all_repo_details = {}
get_all_repo_url = f"{self.gerrit_url}{GerritApi.GET_ALL_REPO_URI}"
all_repo_resp = self.decode_response(self.rest_engine.rest_request(get_all_repo_url))
for key, value in all_repo_resp.items():
all_repo_details[key] = {"id": value.get("id"), "description": value.get("description"),
"state": value.get("state")}
logging.info(f"List of All repositories : {all_repo_details} {len(all_repo_details)}")
return all_repo_details
def get_all_active_projects(self) -> list:
"""
Method to get all active repositories
:returns: :class:`active_repo_list`: List of active repositories
:rtype: :class:`active_repo_list`: List
"""
active_repo_list = []
all_repo_details = self.get_all_projects()
for key, value in all_repo_details.items():
if value["state"] == "ACTIVE":
active_repo_list.append(key)
logging.info(f"List of active repositories : {active_repo_list} {len(active_repo_list)}")
return active_repo_list
def get_active_user_accounts(self) -> list:
"""
*Method description :* Method to get active user accounts in server
:returns: :class:`all_users_details`: List of commit changes as dict
:rtype: :class:`all_users_details`: list
"""
all_users_details = []
all_users_list, mocker_response = [], []
all_users_url = f"{self.gerrit_url}{GerritApi.GET_ALL_ACTIVE_USERS_URI}&S=0"
response = self.decode_response(self.rest_engine.rest_request(all_users_url))
all_users_list.extend(response)
mocker_response = self.no_limit_mocker(response, mocker_response,
url_to_be_used=f"{self.gerrit_url}{GerritApi.GET_ALL_ACTIVE_USERS_URI}")
if all_users_list:
all_users_list.extend(mocker_response)
logging.info(f"Number Of Active User Accounts in Gerrit: {len(all_users_list)}")
for each_user in all_users_list:
user_id = each_user.get("_account_id")
user_details_url = f"{self.gerrit_url}/accounts/{user_id}/detail"
detailed_response = self.decode_response(self.rest_engine.rest_request(user_details_url))
all_users_details.append(detailed_response)
logging.info(f"Active User Account Details in Gerrit: {all_users_details}")
return all_users_details
    def get_commit_details_in_given_period(self, start=None, duration="24Hours", stop=None):
        # A datetime.utcnow() default argument would be evaluated only once, at import time;
        # resolve the stop time per call instead.
        if stop is None:
            stop = datetime.utcnow()
        all_commits_list, mocker_response = [], []
        if not start:
            start = self.get_start_time(duration, stop)
commits_url = f"{self.gerrit_url}{GerritApi.GET_COMMITS_USING_AFTER}\"{start}\"&S=0"
print(commits_url)
response = self.decode_response(self.rest_engine.rest_request(commits_url))
all_commits_list.extend(response)
mocker_response = self.no_limit_mocker(response, mocker_response,
url_to_be_used=f"{self.gerrit_url}{GerritApi.GET_COMMITS_USING_AFTER}\"{start}\"")
if mocker_response:
all_commits_list.extend(mocker_response)
for each_commit in all_commits_list:
owner_account_url = f"{self.gerrit_url}/accounts/{each_commit.get('owner').get('_account_id')}/detail"
each_commit["owner"] = self.decode_response(self.rest_engine.rest_request(owner_account_url)).get("name")
if each_commit.get("submitter"):
submitter_id = each_commit.get('submitter').get('_account_id')
submit_account_url = f"{self.gerrit_url}/accounts/{submitter_id}/detail"
each_commit["submitter"] = self.decode_response(self.rest_engine.rest_request(
submit_account_url)).get("name")
print(f"Total commits from {start} is: {len(all_commits_list)}")
return all_commits_list
@staticmethod
def get_start_time(duration, stop):
if "minutes" in str(duration).lower():
min_delta = int(str(duration).lower().strip("minutes"))
start = stop - timedelta(minutes=min_delta)
if "hours" in str(duration).lower():
hour_delta = int(str(duration).lower().strip("hours"))
start = stop - timedelta(hours=hour_delta)
elif "days" in str(duration).lower():
day_delta = int(str(duration).lower().strip("days"))
start = stop - timedelta(days=day_delta)
elif "months" in str(duration).lower():
month_delta = int(str(duration).lower().strip("months"))
start = stop - timedelta(months=month_delta)
return start
@staticmethod
def decode_response(response: str) -> dict:
"""
*Method description :* Method to decode rest response with Gerrit Magic Prefix
:param response: Raw REST Response Content
:type response: String
        :raises: :class:`ValueError`: Invalid Response Json Content
:returns: :class:`resp_dict`: Dictionary of the given Response content
:rtype: :class:`resp_dict`: Dictionary
"""
output = response[1]
# prefix that comes with the json responses.
gerrit_magic_json_prefix = ")]}'\n"
if str(response[0]) == '200' and isinstance(response[1], str):
if response[1].startswith(gerrit_magic_json_prefix):
output = response[1][len(gerrit_magic_json_prefix):]
try:
output = json.loads(output)
except ValueError:
logging.error(f"Invalid Json in response {output}")
else:
logging.error(f'Rest Call Failed with the status code {response[0]} and response {response[1]}')
return output
def no_limit_mocker(self, response: str, mocker_response: list, url_to_be_used: str,
def_limit: int =0) -> list:
"""
*Method description :* Method to mock no_limit option in Gerrit Server
:param response: Previous GET Call Response
:type response: String
:param mocker_response: Mocker response list on which no_limit responses are accumulated
:type mocker_response: list
:param url_to_be_used: URL to be used for REST Call in no_limits mocker block
:type url_to_be_used: String
:param def_limit: Number Of Commits Limit for GET call
:type def_limit: Integer
:returns: :class:`mocker_response`: Get REST Response in List
:rtype: :class:`mocker_response`: List
"""
if "_more_" in str(response):
def_limit = def_limit + 500
start_limit = def_limit - 500 + 1
logging.info(f"Fetching {start_limit} - {def_limit} Records. Please Wait...")
new_url = f"{url_to_be_used}&S={str(def_limit)}&n=500"
int_response = self.decode_response(self.rest_engine.rest_request(new_url))
mocker_response.extend(int_response)
self.no_limit_mocker(int_response, mocker_response, url_to_be_used, def_limit)
else:
def_limit = def_limit + 500
new_url = f"{url_to_be_used}&S={str(def_limit)}&n=500"
int_response = self.decode_response(self.rest_engine.rest_request(new_url))
mocker_response.extend(int_response)
return mocker_response
class RestEngine:
"""
Class to perform rest operations like PUT, PATCH, POST, GET
DELETE, HEAD, OPTIONS.
"""
def __init__(self, **session_args: str):
"""
*Method description :* Initialization method.
1. Initialize a http session with the session parameters passed by user
        2. Session-level authentication, headers and cookies are applied when they are
           provided in the session arguments.
3. These session level parameters are overwritten when the same are provided
at the method level.
:param session_args: Rest arguments that can be set at the session level.
Supported: 'headers', 'cookies', 'auth', 'proxies', 'hooks',
'params', 'verify', 'cert', 'stream', 'trust_env', 'max_redirects'
:type session_args: dict
"""
self.http_session = Session()
self.http_session.auth = session_args.get('auth')
self.http_session.headers.update(session_args.get('headers', {}))
        # As verify defaults to False here, requests made in this session will accept any TLS
        # certificate, i.e. SSL certificate verification is skipped.
self.http_session.verify = session_args.get('verify', False)
#Retries to establish a http secure connection.
https_adapter = adapters.HTTPAdapter(max_retries=3)
self.http_session.mount('https://', https_adapter)
#To set other session parameters supported by requests
self.http_session.params = session_args.get('params')
self.http_session.proxies = session_args.get('proxies')
self.http_session.cert = session_args.get('cert')
self.http_session.hooks = session_args.get('hooks')
self.http_session.stream = session_args.get('stream')
self.http_session.max_redirects = session_args.get('max_redirects')
self.http_session.cookies.update(session_args.get('cookies', {}))
self.http_session.trust_env = session_args.get('trust_env')
@staticmethod
def build_api_url(netloc: str, scheme: str ="https", path: str ="", query: Union[str, dict]="",
fragments: str ="") -> str:
"""Generates complete url from the inputs provided by the user.
URL format : scheme://netloc/path?query#fragments
#query str: page=12
eg : https://docs.python.com/tutorial/index.html?page=12#datatypes
        #query dict: {page: 12, type: tuple}
eg : https://docs.python.com/tutorial/index.html?page=12&type=tuple#datatypes
:param netloc: Network location part. Domain name should be given as input.
(eg): example.com, 168.0.0.1:8080, jenkins.com:8443
:type netloc: str
:param scheme: URL scheme specifier. Can be either http or https, defaults to "https"
:type scheme: str, optional
:param path: Hierarchical path. Additional path to be added to the netloc, defaults to ""
:type path: str, optional
:param query: query string needed to be added. It will be added after the "?" symbol.
Can be given directly as string or dict with multiple key value pairs. if multiple key
value pairs are given then query string will be concatenated with "&" symbol, defaults to ""
:type query: str or dict, optional
:param fragments: Additional piece of information to be added to the url. This will be added
after the "#" symbol, defaults to ""
:type fragments: str, optional
:return: complete api url
:rtype: str
"""
query_str = urlencode(query) if isinstance(query, dict) else query
api_url = urlunsplit((scheme, netloc, path, query_str, fragments))
logging.debug(f"Api url formed --> {api_url}")
return api_url
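    # Example (illustrative):
    #   RestEngine.build_api_url("gerrit.example.com", path="/changes/",
    #                            query={"q": "status:open", "n": 25})
    #   -> 'https://gerrit.example.com/changes/?q=status%3Aopen&n=25'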
def rest_request(self, uri: str, operation: str ='GET', **func_args: str) -> Tuple[int, str, dict]:
"""
        *Method description :* Common rest request method to be called for performing the rest operations.
:param uri: rest uri
:type uri: str
:param operation: rest operation, could be GET, POST, PATCH, DELETE, PUT, HEAD, OPTIONS.
:type operation: str
:param func_args: Rest arguments such as 'auth', 'cookies', 'data', 'files',
'headers', 'hooks', 'json', 'params', 'timeout', 'allow_redirects', 'proxies',
'hooks', 'stream', 'verify', 'cert' that can be set at the method request level.
Overrides the session arguments.
:type func_args: dict
:returns: :class:`response_code`: Response code of the rest request call performed
:class:`response`: Response received from the rest request call
                  :class:`response_headers`: Headers in response
:rtype: :class:`response_code`: int
:class:`response`: dict/str
:class:`response_headers`: dict
"""
response_code, response, response_headers = None, None, None
#suppress Insecure certificate warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
rest_response = self.http_session.request(operation.upper(), uri, **func_args)
logging.debug(f'Request uri : {rest_response.request.url}')
logging.debug(f'Request method : {rest_response.request.method}')
logging.debug(f'Request headers : {rest_response.request.headers}')
logging.debug(f'Request data : {rest_response.request.body}')
response_code, response, response_headers = rest_response.status_code, rest_response.content, rest_response.headers
#Uncomment the below line if status code has to raise an exception/error
#rest_response.raise_for_status()
if response:
try:
response = rest_response.json()
except JSONDecodeError:
#default utf-8 encoding is done.
response = rest_response.text
except exceptions.InvalidURL:
logging.error(f'The uri {uri} passed for this {operation.upper()} method is invalid')
except exceptions.HTTPError:
logging.error(f'The {operation.upper()} method failed with the status code {response_code}' \
f' and status message would be any of {status_codes._codes[response_code]}.')
except exceptions.SSLError:
logging.error('SSL Certificate verification failed.')
except exceptions.ConnectionError:
logging.error(f'Failed to establish a connection with {uri}')
except exceptions.InvalidHeader:
logging.error(f'Invalid header exception. Request headers added : {rest_response.request.headers}')
except exceptions.TooManyRedirects:
logging.error('The URL redirects has crossed the maximum limit of 30.')
except exceptions.Timeout:
logging.error(f'{operation.upper()} request timed out. Can be either Connection or Read timeout.')
except exceptions.RequestException:
logging.error('Exception occurred while handling request. Please check if the input passed are correct.')
except TypeError:
logging.error('Please re-check if the input arguments passed are valid.')
logging.debug(f'Rest Response : {response}')
logging.debug(f'Rest Response status code : {response_code}')
logging.debug(f'Rest Response headers : {response_headers}')
if response_code:
logging.debug(f'Possible status message for {response_code} : {status_codes._codes[response_code]}')
return response_code, response, response_headers
class Common:
"""
Class to perform rest operations like PUT, PATCH, POST, GET
DELETE, HEAD, OPTIONS.
"""
@staticmethod
def convert_json_to_dict(json_file: str) -> Union[dict, None]:
"""Converts the input json file into dictionary
:param json_file: Name of the json file to be converted
:type json_file: str
:return: Converted dictionary
:rtype: dict or None
"""
try:
assert os.path.exists(json_file)
with open(json_file, 'r') as file_obj:
data_dict = json.load(file_obj)
return data_dict
except AssertionError:
            logging.error(f'Json file {json_file} does not exist')
except json.decoder.JSONDecodeError as decode_err:
            logging.error(f'Unable to parse {json_file}. Kindly validate the json file. Error occurred: {decode_err}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--servername", type=str, help="Gerrit Server Name/IP")
parser.add_argument("-u", "--user", type=str, help="Gerrit Login Username", default=None)
parser.add_argument("-p", "--password", type=str, help="Gerrit Login Password", default=None)
parser.add_argument("-d", "--duration", type=str, help="Duration for which gerrit changes to be fetched\n\
Supported are Minutes, Hours, Days, Months. Examples: 120Minutes, 48Hours, 2Days, 1Month \n\
Default : 24Hours", default="24Hours")
args = parser.parse_args()
if args.servername and args.duration:
obj = GerritApi(f"{args.servername}")
commits_list = obj.get_commit_details_in_given_period(duration=args.duration)
print(f"Gerrit commits for given {args.duration} is: {len(commits_list)}\n")
print("Gerrit Commits Details are saved in new_commits.csv file")
cl_df = pd.DataFrame(commits_list)
cl_df.to_csv('new_commits.csv')
else:
print("Please pass Gerrit server name with -s and duration with -d argument !!!")
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PROMPTBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_promptbert import PromptBertConfig
from ..bert.modeling_bert import BertEmbeddings as PromptBertEmbeddings
from ..bert.modeling_bert import BertEncoder as PromptBertEncoder
from ..bert.modeling_bert import BertPooler as PromptBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "PromptBertConfig"
_TOKENIZER_FOR_DOC = "PromptBertTokenizer"
PROMPTBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_promptbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {"/".join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {"/".join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
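# Illustrative name mapping performed by load_tf_weights_in_promptbert (example variable only):
#   TF variable 'bert/encoder/layer_0/attention/self/query/kernel'
#     -> attribute walk bert -> encoder -> layer[0] -> attention -> self -> query -> weight,
#        with the 'kernel' array transposed before it is copied;
#   '.../output_bias' and '.../beta' map to the corresponding 'bias' parameters.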
class PromptBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = PromptBertConfig
load_tf_weights = load_tf_weights_in_promptbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
PROMPTBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
PROMPTBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare PromptBert Model transformer outputting raw hidden-states without any specific head on top.",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertModel(PromptBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = PromptBertEmbeddings(config)
self.encoder = PromptBertEncoder(config)
self.pooler = PromptBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
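
# --- Illustrative usage sketch (editor's addition, not part of the original model code) ---
# A minimal example of running PromptBertModel on dummy token ids. It assumes
# PromptBertConfig accepts the usual BertConfig keyword arguments (hidden_size,
# num_hidden_layers, num_attention_heads, intermediate_size, vocab_size) and that
# `torch` and PromptBertConfig are already imported at module level; the sizes and ids
# below are made up purely for demonstration.
def _example_promptbert_model_usage():
    config = PromptBertConfig(
        hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256
    )
    model = PromptBertModel(config)
    model.eval()
    input_ids = torch.randint(0, config.vocab_size, (2, 8))  # batch of 2, sequence length 8
    attention_mask = torch.ones_like(input_ids)
    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask, return_dict=True)
    # last_hidden_state: (2, 8, 128); pooler_output: (2, 128)
    return outputs.last_hidden_state, outputs.pooler_output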
@add_start_docstrings(
"""
PromptBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForSequenceClassification(PromptBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = PromptBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss).
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
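
# --- Illustrative usage sketch (editor's addition, not part of the original model code) ---
# A minimal example of computing a classification loss with
# PromptBertForSequenceClassification. It assumes PromptBertConfig mirrors BertConfig
# (including `num_labels` and `classifier_dropout`); the batch below is random dummy data.
def _example_promptbert_classification():
    config = PromptBertConfig(
        hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
        intermediate_size=256, num_labels=3
    )
    model = PromptBertForSequenceClassification(config)
    input_ids = torch.randint(0, config.vocab_size, (4, 16))
    labels = torch.tensor([0, 2, 1, 0])  # integer class ids -> single-label classification
    outputs = model(input_ids=input_ids, labels=labels, return_dict=True)
    return outputs.loss, outputs.logits  # logits shape: (4, 3)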
@add_start_docstrings(
"""
    PromptBert Model with a dual encoder head on top for passage retrieval tasks (a linear layer on top of the pooled output
for computing source-target similarity).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForDualPassageEncoder(PromptBertPreTrainedModel):
def __init__(self, config, cls_loss_wgt=None):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_loss_wgt = cls_loss_wgt
self.bert = PromptBertModel(config)
self.pooler = PromptBertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if self.cls_loss_wgt is not None and cls_loss_wgt > 0.0:
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, 2, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=DualPassageEncoderModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None or len(input_ids.size()) < 3:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.pooler(outputs[0])
pooled_output = self.dropout(pooled_output)
if not return_dict:
return (pooled_output,) + outputs[2:]
return DualPassageEncoderModelOutput(
pooled_output=pooled_output,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, _, l = input_ids.size()
flatten_input_ids = input_ids.reshape(-1, l)
flatten_attention_mask = attention_mask.reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.pooler(flatten_outputs[0])
src_pooled_output, trg_pooled_output = flatten_pooled_output.reshape(b, 2, self.config.hidden_size).chunk(2, dim=1)
src_pooled_output, trg_pooled_output = src_pooled_output.squeeze(dim=1).contiguous(), trg_pooled_output.squeeze(dim=1).contiguous()
mask = (labels.unsqueeze(-1).expand(-1, b) == labels.unsqueeze(0).expand(b, -1)) & (1 - torch.eye(b)).to(labels.device).bool()
cl_logits = torch.einsum('ik,jk->ij', src_pooled_output, trg_pooled_output).masked_fill(mask, float('-inf'))
cl_labels = torch.arange(b).to(labels.device)
loss_fct = CrossEntropyLoss()
cl_loss = loss_fct(cl_logits.view(-1, labels.size(0)), cl_labels.view(-1))
if self.cls_loss_wgt is not None and self.cls_loss_wgt > 0.0:
flatten_logits = self.classifier(self.dropout(flatten_outputs[1]))
src_logits, trg_logits = flatten_logits.reshape(b, 2, self.num_labels).chunk(2, dim=1)
src_logits, trg_logits = src_logits.squeeze(dim=1).contiguous(), trg_logits.squeeze(dim=1).contiguous()
src_loss = loss_fct(src_logits.view(-1, self.num_labels), labels.view(-1))
trg_loss = loss_fct(trg_logits.view(-1, self.num_labels), labels.view(-1))
cls_loss = src_loss + trg_loss
cls_logits = src_logits + trg_logits
loss = cl_loss + cls_loss * self.cls_loss_wgt
logits = cls_logits
else:
loss = cl_loss
logits = cl_logits
if not return_dict:
return (loss, logits,)
return DualPassageEncoderModelOutput(
loss=loss,
logits=logits,
)
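
# --- Illustrative usage sketch (editor's addition, not part of the original model code) ---
# PromptBertForDualPassageEncoder expects paired passages: `input_ids` of shape
# (batch_size, 2, seq_len) with the source passage at index 0 and the target passage at
# index 1, plus one integer label per pair. When labels are given it returns the in-batch
# contrastive loss computed in `forward` above (pairs sharing a label are masked out of the
# negatives). PromptBertConfig arguments are assumed to mirror BertConfig.
def _example_dual_passage_encoder():
    config = PromptBertConfig(
        hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
        intermediate_size=256, num_labels=2
    )
    model = PromptBertForDualPassageEncoder(config)
    batch_size, seq_len = 4, 12
    input_ids = torch.randint(0, config.vocab_size, (batch_size, 2, seq_len))
    labels = torch.tensor([0, 1, 0, 1])
    loss, logits = model(input_ids=input_ids, labels=labels, return_dict=False)
    return loss, logits  # logits: (batch_size, batch_size) source-target similarity matrix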
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PROMPTBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_promptbert import PromptBertConfig
from ..bert.modeling_bert import BertEmbeddings as PromptBertEmbeddings
from ..bert.modeling_bert import BertEncoder as PromptBertEncoder
from ..bert.modeling_bert import BertPooler as PromptBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "PromptBertConfig"
_TOKENIZER_FOR_DOC = "PromptBertTokenizer"
PROMPTBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_promptbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
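
# --- Illustrative usage sketch (editor's addition, not part of the original model code) ---
# How the TF-checkpoint converter above might be called. The checkpoint path is a
# placeholder; a real BERT TensorFlow checkpoint (e.g. bert_model.ckpt) is required, and
# PromptBertConfig is assumed to describe the same architecture as that checkpoint.
# PromptBertModel is defined further below in this module, so the name resolves at call time.
def _example_load_tf_checkpoint(tf_checkpoint_path="/path/to/bert_model.ckpt"):
    config = PromptBertConfig()
    model = PromptBertModel(config)
    return load_tf_weights_in_promptbert(model, config, tf_checkpoint_path)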
class PromptBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = PromptBertConfig
load_tf_weights = load_tf_weights_in_promptbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
PROMPTBERT_START_DOCSTRING = r"""
    This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
    pruning heads, etc.)
    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to
    general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
PROMPTBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare PromptBert Model transformer outputting raw hidden-states without any specific head on top.",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertModel(PromptBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = PromptBertEmbeddings(config)
self.encoder = PromptBertEncoder(config)
self.pooler = PromptBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
PromptBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForSequenceClassification(PromptBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = PromptBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss).
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
    PromptBert Model with a dual encoder head on top for passage retrieval tasks (a linear layer on top of the pooled output
for computing source-target similarity).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForDualPassageEncoder(PromptBertPreTrainedModel):
def __init__(self, config, cls_loss_wgt=None):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_loss_wgt = cls_loss_wgt
self.bert = PromptBertModel(config)
self.pooler = PromptBertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if self.cls_loss_wgt is not None and cls_loss_wgt > 0.0:
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, 2, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=DualPassageEncoderModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None or len(input_ids.size()) < 3:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.pooler(outputs[0])
pooled_output = self.dropout(pooled_output)
if not return_dict:
return (pooled_output,) + outputs[2:]
return DualPassageEncoderModelOutput(
pooled_output=pooled_output,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, _, l = input_ids.size()
flatten_input_ids = input_ids.reshape(-1, l)
flatten_attention_mask = attention_mask.reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.pooler(flatten_outputs[0])
src_pooled_output, trg_pooled_output = flatten_pooled_output.reshape(b, 2, self.config.hidden_size).chunk(2, dim=1)
src_pooled_output, trg_pooled_output = src_pooled_output.squeeze(dim=1).contiguous(), trg_pooled_output.squeeze(dim=1).contiguous()
mask = (labels.unsqueeze(-1).expand(-1, b) == labels.unsqueeze(0).expand(b, -1)) & (1 - torch.eye(b)).to(labels.device).bool()
cl_logits = torch.einsum('ik,jk->ij', src_pooled_output, trg_pooled_output).masked_fill(mask, float('-inf'))
cl_labels = torch.arange(b).to(labels.device)
loss_fct = CrossEntropyLoss()
cl_loss = loss_fct(cl_logits.view(-1, labels.size(0)), cl_labels.view(-1))
if self.cls_loss_wgt is not None and self.cls_loss_wgt > 0.0:
flatten_logits = self.classifier(self.dropout(flatten_outputs[1]))
src_logits, trg_logits = flatten_logits.reshape(b, 2, self.num_labels).chunk(2, dim=1)
src_logits, trg_logits = src_logits.squeeze(dim=1).contiguous(), trg_logits.squeeze(dim=1).contiguous()
src_loss = loss_fct(src_logits.view(-1, self.num_labels), labels.view(-1))
trg_loss = loss_fct(trg_logits.view(-1, self.num_labels), labels.view(-1))
cls_loss = src_loss + trg_loss
cls_logits = src_logits + trg_logits
loss = cl_loss + cls_loss * self.cls_loss_wgt
logits = cls_logits
else:
loss = cl_loss
logits = cl_logits
if not return_dict:
return (loss, logits,)
return DualPassageEncoderModelOutput(
loss=loss,
logits=logits,
)
|
#!/usr/bin/env python3
# Usage: ./main.py
"""
Copyright (C) 2020-2021 John C. Allwein 'johnnyapol' (admin@johnnyapol.me)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import pickle
import requests
from random import choice
from subprocess import run
import sys
import traceback
from datetime import date, timedelta, datetime
from copy import deepcopy
from itertools import chain
from io import BytesIO
from bs4 import BeautifulSoup
from discord_webhook import DiscordEmbed, DiscordWebhook
import matplotlib.pyplot as plot
import savepagenow
# Import configuration (if available)
try:
import config
WEBHOOKS = config.webhooks
PSA = config.PSA
QUIET = config.QUIET
except:
print("No discord webhooks supplied - data will just be stored locally")
traceback.print_exc()
WEBHOOKS = None
PSA = None
QUIET = False
DASHBOARD = "https://covid19.rpi.edu/dashboard"
class CovidData:
def __init__(self):
self.rpi_array = [0] * 5
self.last_updated = date.today() - timedelta(days=1)
self.historicalData = {}
def update(self, case_data):
today = date.today()
if today != self.last_updated:
self.last_updated = today
self.historicalData[today] = case_data
self.rpi_array = case_data
def get_rolling(self):
return sum(self.get_rolling_iterator(self.last_updated))
def get_case_data(self):
return self.rpi_array
    def get_rolling_iterator(self, day=None):
        # Evaluate the default at call time; a default of date.today() would be frozen at import.
        if day is None:
            day = date.today()
        dates = [day - timedelta(days=x) for x in range(13, -1, -1)]
return [
self.historicalData[date][0] if date in self.historicalData else 0
for date in dates
]
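
# --- Illustrative sketch (editor's addition, not part of the original script) ---
# How CovidData accumulates the 14-day rolling count of daily positives (index 0 of the
# case-data array). The numbers below are made up for demonstration.
def _example_rolling_sum():
    data = CovidData()
    data.update([3, 10, 50, 400, 5000])  # pretend 3 new positives were reported today
    assert data.get_rolling() == 3  # only today falls inside the trailing 14-day window
    return data.get_rolling_iterator()  # 14 daily values, oldest day first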
def check_for_updates():
global DASHBOARD
request = requests.get(
DASHBOARD,
headers={
"User-Agent": "RPICovidScraper https://github.com/johnnyapol/RPICovidScraper"
},
)
soup = BeautifulSoup(request.text, features="lxml")
header = "field field--name-field-stats field--type-entity-reference-revisions field--label-hidden field__items"
header2 = "field field--name-field-stat field--type-string field--label-hidden field__item"
date_header = "field field--name-field-stats-caption field--type-string field--label-hidden field__item"
"""
Current data format:
case_data[0] = positive tests (last 24 hours)
case_data[1] = positive test results (last 7 days)
case_data[2] = positive test results (since august 17th)
case_data[3] = total tests (last 7 days)
case_data[4] = total tests (since august 17th)
"""
return (
[
int("".join(("".join(x.text.strip().split(" "))).split(",")))
for x in soup.find("div", {"class": header}).findAll(
"div", {"class": header2}
)
],
soup.find("div", {"class": date_header}).text,
)
def case_value_to_string(case_data, previous_case_data, index):
diff = case_data[index] - previous_case_data[index]
diff_string = f"({diff:+,})" if diff != 0 else ""
return f"{case_data[index]:,} {diff_string}"
def get_source_url():
start = "https://github.com/johnnyapol/RPICovidScraper/"
try:
return f'{start}commit/{run(['git', 'log', '--pretty=format:%H', '-n', '1'], capture_output=True).stdout.decode('ascii')}'
except:
return start
def post_discord(
rolling, old_rolling, case_data, previous_case_data, date, dashboard_url, graph
):
global WEBHOOKS
global PSA
global QUIET
if WEBHOOKS is None:
return print("Skipping posting to discord as no webhooks supplied")
positive_thumbnails = [
"https://www.continentalmessage.com/wp-content/uploads/2015/09/123rf-alert2.jpg",
"https://i.kym-cdn.com/photos/images/newsfeed/000/675/645/2c7.gif",
"https://media.discordapp.net/attachments/783375197604413445/790625854202839100/image0.png",
"https://media.tenor.com/images/6603c0a47ff16ad8d3682e481e727f76/tenor.gif",
]
neutral_thumbnails = [
"https://steamcdn-a.akamaihd.net/steamcommunity/public/images/clans/5671259/7923c9b8e0a5799d4d422208b31f5ca0f4f49067.png",
"https://static01.nyt.com/images/2020/01/28/science/28VIRUS-BATS1/28VIRUS-BATS1-videoSixteenByNineJumbo1600.jpg",
"https://ih1.redbubble.net/image.1877589148.0162/ur,mask_flatlay_front,wide_portrait,750x1000.jpg",
"https://media.giphy.com/media/KHEgvyrgYnL9RW08h6/giphy.gif",
]
negative_thumbnails = [
"https://media.giphy.com/media/WS0MDT0DITCTLwcNNx/giphy.gif",
"https://cdn.vox-cdn.com/thumbor/iuL4QWaANcy5lyeCDXxIrBq7_uQ=/0x0:3000x2000/1400x1050/filters:focal(1436x422:1916x902):no_upscale()/cdn.vox-cdn.com/uploads/chorus_image/image/68718659/AP_20331457642255.0.jpg",
]
emojis = ["❤️", "✨", "🥓", "🍺", "🧻", "🐍", "☃️", "😷"]
if QUIET and case_data[0] == 0:
return
embed = DiscordEmbed()
if case_data[0] > 4:
embed.set_color(15158332)
embed.set_thumbnail(url=choice(positive_thumbnails))
elif case_data[0] > 0:
embed.set_color(0xFFFF00)
embed.set_thumbnail(url=choice(neutral_thumbnails))
else:
embed.set_color(3066993)
embed.set_thumbnail(url=choice(negative_thumbnails))
if PSA is not None:
embed.add_embed_field(name="ANNOUNCEMENT", value=PSA, inline=False)
embed.color = 15844367
embed.add_embed_field(
name="New Positive Tests",
value=f"{case_data[0]}",
inline=False,
)
embed.add_embed_field(
name="Positive Tests (7 days)",
value=case_value_to_string(case_data, previous_case_data, 1),
inline=False,
)
embed.add_embed_field(
name="Positive Tests (14 days)",
value=case_value_to_string([rolling], [old_rolling], 0),
inline=False,
)
embed.add_embed_field(
name="Weekly Test Count",
value=case_value_to_string(case_data, previous_case_data, 3),
inline=False,
)
if case_data[1] != 0:
# Calculate weekly positivity rate
pcr = (case_data[1] / case_data[3]) * 100
embed.add_embed_field(name="Weekly Positivity Rate", value=f"{round(pcr, 4)}%")
embed.add_embed_field(
name="Total Positive Tests",
value=case_value_to_string(case_data, previous_case_data, 2),
)
# Since discord footers don't support "rich" content, hack on a footer to the last field
date = "".join(date.split("\n"))
embed.add_embed_field(
name="Total Tests",
value=f"{case_value_to_string(case_data, previous_case_data, 4)}\n{date} Made with {choice(emojis)} - [source]({get_source_url()})",
inline=False,
)
embed.set_author(
name="Click for dashboard",
url=dashboard_url,
icon_url="https://i.redd.it/14nqzc0hswy31.png",
)
hook = DiscordWebhook(
url=WEBHOOKS,
content=choice(
[
"The RPI Covid Dashboard has been updated!",
"I got yer COVID data right here!",
"Special delivery!",
"Beep beep boop",
"I found some data!",
]
),
username="RPI Covid Dashboard",
avatar_url="https://www.minnpost.com/wp-content/uploads/2020/03/coronavirusCDC640.png",
)
if graph != None:
hook.add_file(file=graph.read(), filename="graph.png")
embed.set_image(url="attachment://graph.png")
hook.add_embed(embed)
hook.execute()
def load_previous():
try:
with open(".cache", "rb") as file:
return pickle.load(file)
except:
print("Cache read failed")
return CovidData()
def save(case_data):
with open(".cache", "wb") as file:
pickle.dump(case_data, file)
def create_graph(data):
x = [int(z) for z in data.get_rolling_iterator()]
cum = [x[0]]
for i in range(1, len(x)):
cum.append(cum[-1] + x[i])
# thanks to https://www.tutorialspoint.com/matplotlib/matplotlib_bar_plot.htm for help
today = date.today()
monthday = lambda d: f"{d.month}-{d.day}"
dates = [today - timedelta(days=x) for x in range(13, -1, -1)]
plot.title(f"Previous 14 days")
plot.bar(dates, x, color="red", label="daily positive tests")
plot.plot(dates, cum, color="orange", label=f"Positives since {monthday(dates[0])}")
# Add individual day labels
for i, v in zip(dates, x):
if v == 0:
continue
plot.text(i, v, str(v), color="blue", fontweight="bold", ha="center")
plot.plot(
dates,
[sum(data.get_rolling_iterator(date)) for date in dates],
color="green",
label="Rolling 2 week sum",
)
plot.xticks(dates, [monthday(date) for date in dates], rotation=45)
plot.legend()
data = BytesIO()
plot.subplots_adjust(bottom=0.17)
plot.ylabel("Number of positive tests")
plot.xlabel("Day reported")
now = datetime.now()
plot.figtext(
0.5,
0.01,
f"Generated on {now.strftime("%m/%d/%y %H:%M")} {datetime.now().astimezone().tzinfo.tzname(None)}",
ha="center",
fontsize=8,
)
plot.savefig(data, format="png")
data.seek(0)
return data
def main():
global DASHBOARD
covid_data = load_previous()
previous_case_data = deepcopy(covid_data.get_case_data())
current_case_data, date = check_for_updates()
ci = any(x.lower() == "--ci" for x in sys.argv)
force = any(x.lower() == "--force" for x in sys.argv)
# Only post under the following conditions:
# 1. There is new data from RPI
# - AND -
# 2. there are new positive tests OR new weekly/total numbers reported
# This avoids the bs updates where all RPI does is reset the daily/weekly numbers
if (
force
or current_case_data != previous_case_data
and (
current_case_data[0] != 0
or any(
current_case_data[x] != previous_case_data[x]
for x in range(2, len(current_case_data))
)
)
):
dashboard_url = DASHBOARD
try:
# We don't want to abuse the Wayback Machine in actions
if not ci:
dashboard_url = savepagenow.capture(DASHBOARD, accept_cache=True)
else:
print("Skipping page archive as we are running in CI mode")
except:
print(f"Page archived failed")
traceback.print_exc()
old_rolling = covid_data.get_rolling()
covid_data.update(current_case_data)
post_discord(
covid_data.get_rolling(),
old_rolling,
current_case_data,
previous_case_data,
date,
dashboard_url,
create_graph(covid_data),
)
save(covid_data)
print(
f"Done. Old: {previous_case_data} New: {current_case_data}\n Rolling: {covid_data.get_rolling()}"
)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Usage: ./main.py
"""
Copyright (C) 2020-2021 John C. Allwein 'johnnyapol' (admin@johnnyapol.me)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import pickle
import requests
from random import choice
from subprocess import run
import sys
import traceback
from datetime import date, timedelta, datetime
from copy import deepcopy
from itertools import chain
from io import BytesIO
from bs4 import BeautifulSoup
from discord_webhook import DiscordEmbed, DiscordWebhook
import matplotlib.pyplot as plot
import savepagenow
# Import configuration (if available)
try:
import config
WEBHOOKS = config.webhooks
PSA = config.PSA
QUIET = config.QUIET
except:
print("No discord webhooks supplied - data will just be stored locally")
traceback.print_exc()
WEBHOOKS = None
PSA = None
QUIET = False
DASHBOARD = "https://covid19.rpi.edu/dashboard"
class CovidData:
def __init__(self):
self.rpi_array = [0] * 5
self.last_updated = date.today() - timedelta(days=1)
self.historicalData = {}
def update(self, case_data):
today = date.today()
if today != self.last_updated:
self.last_updated = today
self.historicalData[today] = case_data
self.rpi_array = case_data
def get_rolling(self):
return sum(self.get_rolling_iterator(self.last_updated))
def get_case_data(self):
return self.rpi_array
    def get_rolling_iterator(self, day=None):
        # Evaluate the default at call time; a default of date.today() would be frozen at import.
        if day is None:
            day = date.today()
        dates = [day - timedelta(days=x) for x in range(13, -1, -1)]
return [
self.historicalData[date][0] if date in self.historicalData else 0
for date in dates
]
def check_for_updates():
global DASHBOARD
request = requests.get(
DASHBOARD,
headers={
"User-Agent": "RPICovidScraper https://github.com/johnnyapol/RPICovidScraper"
},
)
soup = BeautifulSoup(request.text, features="lxml")
header = "field field--name-field-stats field--type-entity-reference-revisions field--label-hidden field__items"
header2 = "field field--name-field-stat field--type-string field--label-hidden field__item"
date_header = "field field--name-field-stats-caption field--type-string field--label-hidden field__item"
"""
Current data format:
case_data[0] = positive tests (last 24 hours)
case_data[1] = positive test results (last 7 days)
case_data[2] = positive test results (since august 17th)
case_data[3] = total tests (last 7 days)
case_data[4] = total tests (since august 17th)
"""
return (
[
int("".join(("".join(x.text.strip().split(" "))).split(",")))
for x in soup.find("div", {"class": header}).findAll(
"div", {"class": header2}
)
],
soup.find("div", {"class": date_header}).text,
)
def case_value_to_string(case_data, previous_case_data, index):
diff = case_data[index] - previous_case_data[index]
diff_string = f"({diff:+,})" if diff != 0 else ""
return f"{case_data[index]:,} {diff_string}"
def get_source_url():
start = "https://github.com/johnnyapol/RPICovidScraper/"
try:
return f'{start}commit/{run(["git", "log", "--pretty=format:%H", "-n", "1"], capture_output=True).stdout.decode("ascii")}'
except:
return start
def post_discord(
rolling, old_rolling, case_data, previous_case_data, date, dashboard_url, graph
):
global WEBHOOKS
global PSA
global QUIET
if WEBHOOKS is None:
return print("Skipping posting to discord as no webhooks supplied")
positive_thumbnails = [
"https://www.continentalmessage.com/wp-content/uploads/2015/09/123rf-alert2.jpg",
"https://i.kym-cdn.com/photos/images/newsfeed/000/675/645/2c7.gif",
"https://media.discordapp.net/attachments/783375197604413445/790625854202839100/image0.png",
"https://media.tenor.com/images/6603c0a47ff16ad8d3682e481e727f76/tenor.gif",
]
neutral_thumbnails = [
"https://steamcdn-a.akamaihd.net/steamcommunity/public/images/clans/5671259/7923c9b8e0a5799d4d422208b31f5ca0f4f49067.png",
"https://static01.nyt.com/images/2020/01/28/science/28VIRUS-BATS1/28VIRUS-BATS1-videoSixteenByNineJumbo1600.jpg",
"https://ih1.redbubble.net/image.1877589148.0162/ur,mask_flatlay_front,wide_portrait,750x1000.jpg",
"https://media.giphy.com/media/KHEgvyrgYnL9RW08h6/giphy.gif",
]
negative_thumbnails = [
"https://media.giphy.com/media/WS0MDT0DITCTLwcNNx/giphy.gif",
"https://cdn.vox-cdn.com/thumbor/iuL4QWaANcy5lyeCDXxIrBq7_uQ=/0x0:3000x2000/1400x1050/filters:focal(1436x422:1916x902):no_upscale()/cdn.vox-cdn.com/uploads/chorus_image/image/68718659/AP_20331457642255.0.jpg",
]
emojis = ["❤️", "✨", "🥓", "🍺", "🧻", "🐍", "☃️", "😷"]
if QUIET and case_data[0] == 0:
return
embed = DiscordEmbed()
if case_data[0] > 4:
embed.set_color(15158332)
embed.set_thumbnail(url=choice(positive_thumbnails))
elif case_data[0] > 0:
embed.set_color(0xFFFF00)
embed.set_thumbnail(url=choice(neutral_thumbnails))
else:
embed.set_color(3066993)
embed.set_thumbnail(url=choice(negative_thumbnails))
if PSA is not None:
embed.add_embed_field(name="ANNOUNCEMENT", value=PSA, inline=False)
embed.color = 15844367
embed.add_embed_field(
name="New Positive Tests",
value=f"{case_data[0]}",
inline=False,
)
embed.add_embed_field(
name="Positive Tests (7 days)",
value=case_value_to_string(case_data, previous_case_data, 1),
inline=False,
)
embed.add_embed_field(
name="Positive Tests (14 days)",
value=case_value_to_string([rolling], [old_rolling], 0),
inline=False,
)
embed.add_embed_field(
name="Weekly Test Count",
value=case_value_to_string(case_data, previous_case_data, 3),
inline=False,
)
if case_data[1] != 0:
# Calculate weekly positivity rate
pcr = (case_data[1] / case_data[3]) * 100
embed.add_embed_field(name="Weekly Positivity Rate", value=f"{round(pcr, 4)}%")
embed.add_embed_field(
name="Total Positive Tests",
value=case_value_to_string(case_data, previous_case_data, 2),
)
# Since discord footers don't support "rich" content, hack on a footer to the last field
date = "".join(date.split("\n"))
embed.add_embed_field(
name="Total Tests",
value=f"{case_value_to_string(case_data, previous_case_data, 4)}\n{date} Made with {choice(emojis)} - [source]({get_source_url()})",
inline=False,
)
embed.set_author(
name="Click for dashboard",
url=dashboard_url,
icon_url="https://i.redd.it/14nqzc0hswy31.png",
)
hook = DiscordWebhook(
url=WEBHOOKS,
content=choice(
[
"The RPI Covid Dashboard has been updated!",
"I got yer COVID data right here!",
"Special delivery!",
"Beep beep boop",
"I found some data!",
]
),
username="RPI Covid Dashboard",
avatar_url="https://www.minnpost.com/wp-content/uploads/2020/03/coronavirusCDC640.png",
)
if graph is not None:
hook.add_file(file=graph.read(), filename="graph.png")
embed.set_image(url="attachment://graph.png")
hook.add_embed(embed)
hook.execute()
def load_previous():
try:
with open(".cache", "rb") as file:
return pickle.load(file)
except Exception:
print("Cache read failed")
return CovidData()
def save(case_data):
with open(".cache", "wb") as file:
pickle.dump(case_data, file)
def create_graph(data):
x = [int(z) for z in data.get_rolling_iterator()]
cum = [x[0]]
for i in range(1, len(x)):
cum.append(cum[-1] + x[i])
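# cum[i] is the running total of daily positives across the 14-day window up to day i.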
# thanks to https://www.tutorialspoint.com/matplotlib/matplotlib_bar_plot.htm for help
today = date.today()
monthday = lambda d: f"{d.month}-{d.day}"
dates = [today - timedelta(days=x) for x in range(13, -1, -1)]
plot.title(f"Previous 14 days")
plot.bar(dates, x, color="red", label="daily positive tests")
plot.plot(dates, cum, color="orange", label=f"Positives since {monthday(dates[0])}")
# Add individual day labels
for i, v in zip(dates, x):
if v == 0:
continue
plot.text(i, v, str(v), color="blue", fontweight="bold", ha="center")
plot.plot(
dates,
[sum(data.get_rolling_iterator(date)) for date in dates],
color="green",
label="Rolling 2 week sum",
)
plot.xticks(dates, [monthday(date) for date in dates], rotation=45)
plot.legend()
data = BytesIO()
plot.subplots_adjust(bottom=0.17)
plot.ylabel("Number of positive tests")
plot.xlabel("Day reported")
now = datetime.now()
plot.figtext(
0.5,
0.01,
f"Generated on {now.strftime('%m/%d/%y %H:%M')} {datetime.now().astimezone().tzinfo.tzname(None)}",
ha="center",
fontsize=8,
)
plot.savefig(data, format="png")
data.seek(0)
return data
def main():
global DASHBOARD
covid_data = load_previous()
previous_case_data = deepcopy(covid_data.get_case_data())
current_case_data, date = check_for_updates()
ci = any(x.lower() == "--ci" for x in sys.argv)
force = any(x.lower() == "--force" for x in sys.argv)
# Only post under the following conditions:
# 1. There is new data from RPI
# - AND -
# 2. there are new positive tests OR new weekly/total numbers reported
# This avoids the bs updates where all RPI does is reset the daily/weekly numbers
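# Note: 'and' binds tighter than 'or', so --force alone is enough to trigger a post
# even when the scraped data is unchanged.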
if (
force
or current_case_data != previous_case_data
and (
current_case_data[0] != 0
or any(
current_case_data[x] != previous_case_data[x]
for x in range(2, len(current_case_data))
)
)
):
dashboard_url = DASHBOARD
try:
# We don't want to abuse the Wayback Machine in actions
if not ci:
dashboard_url = savepagenow.capture(DASHBOARD, accept_cache=True)
else:
print("Skipping page archive as we are running in CI mode")
except Exception:
print(f"Page archived failed")
traceback.print_exc()
old_rolling = covid_data.get_rolling()
covid_data.update(current_case_data)
post_discord(
covid_data.get_rolling(),
old_rolling,
current_case_data,
previous_case_data,
date,
dashboard_url,
create_graph(covid_data),
)
save(covid_data)
print(
f"Done. Old: {previous_case_data} New: {current_case_data}\n Rolling: {covid_data.get_rolling()}"
)
if __name__ == "__main__":
main()
|
print(f'\033[1:33m{'-'*40:^40}\033[m')
print(f'\033[1:33m{'DICIONÁRIO EM PYTHON':^40}\033[m')
print(f'\033[1:33m{'-'*40:^40}\033[m')
aluno = dict()
aluno['Nome'] = str(input('Nome: '))
aluno['Média'] = float(input(f'Média de {aluno['Nome']}: '))
if aluno['Média'] >= 7:
aluno['Situação'] = '\033[1:32mAprovado\033[m'
else:
aluno['Situação'] = '\033[1:31mReprovado\033[m'
for k, v in aluno.items():
print(f'{k} é igual a {v}')
|
print(f'\033[1:33m{"-"*40:^40}\033[m')
print(f'\033[1:33m{"DICIONÁRIO EM PYTHON":^40}\033[m')
print(f'\033[1:33m{"-"*40:^40}\033[m')
aluno = dict()
aluno['Nome'] = str(input('Nome: '))
aluno['Média'] = float(input(f'Média de {aluno["Nome"]}: '))
if aluno['Média'] >= 7:
aluno['Situação'] = '\033[1:32mAprovado\033[m'
else:
aluno['Situação'] = '\033[1:31mReprovado\033[m'
for k, v in aluno.items():
print(f'{k} é igual a {v}')
|
#!/usr/bin/env python
"""
Import MPPT CSV data and plot it.
CSV format:
Volts,volts,amps,watts,state,mode_str,panelSN,resistance,timestamp
29.646,29.646,0.0,0.0,0,CR,B41J00052893,100000,20210913_120014.79
14.267,14.267,0.354,5.05,1,CR,B41J00052893,40.0,20210913_120016.16
"""
from __future__ import print_function
import os
import sys
import argparse
from datetime import datetime as dt
import time
import pandas as pd
from numpy import *
import numpy as np
from mpl_toolkits.axes_grid1 import host_subplot
from mpl_toolkits import axisartist
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from csv import reader
import pprint
pp = pprint.PrettyPrinter(indent=4, depth=4).pprint
pp_str = pprint.PrettyPrinter(indent=4, depth=4).pformat
def plot_df(df):
'''
0 1 2 3 4 5 6 7 8
0 Volts, volts, amps, watts, state, mode_str, panelSN, resistance, timestamp
1 29.646, 29.646, 0.0, 0.0, 0, CR, B41J00052893, 100000, 20210913_120014.79 <--Voc
2 14.267, 14.267, 0.354, 5.05, 1, CR, B41J00052893, 40.0, 20210913_120016.16
'''
print(df)
title_sn = df['panelSN'][1]
volt_series = df['Volts'][1:]
std_voltage_series = np.arange(50, 0, -(50.0 / volt_series.size))
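# Build a synthetic, evenly spaced 50 V -> 0 V x-axis with (roughly) one point per sample;
# the measured 'Volts' column is kept above but is not used as the x-axis in the plots below.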
print(f"{volt_series.size=}")
print(f"std_voltage_series-> size {len(std_voltage_series)}, {std_voltage_series})")
amps_series = df['amps'][1:]
watts_series = df['watts'][1:]
ohms_series = df['resistance'][1:]
# print(volt_series)
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('Voltage')
# ax1.set_ylabel('Current', color=color)
ax1.set_ylim(1, 6)
# ax1.plot(volt_series, amps_series, color=color)
ax1.plot(std_voltage_series, amps_series, color=color, label='Current')
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
# ax2.set_ylabel('Watts', color=color) # we already handled the x-label with ax1
# ax2.plot(volt_series, watts_series, color=color)
ax2.plot(std_voltage_series, watts_series, color=color, label='Watts')
ax2.tick_params(axis='y', labelcolor=color)
plt.title(f"Panel S/N {title_sn}")
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.legend()
plt.show()
def get_and_plot_mppt(df):
# IL = array(ItemList)
IL = df.values  # NOTE (editor): assumed stand-in; the original left 'IL' undefined. The [2:]
# slices below were written for raw csv.reader rows (header row first), so with a DataFrame
# they skip one extra data row.
host = host_subplot(111, axes_class=axisartist.Axes) # (1 row, 1 column, plot number 1)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
# par2 = host.twinx()
# par2.axis["right"] = par2.new_fixed_axis(loc="right", offset=(60, 0))
par1.axis["right"].toggle(all=True)
# OUT_FOR_SINGLE par2.axis["right"].toggle() #all=True)
'''
0 1 2 3 4 5 6 7 8
0 Volts, volts, amps, watts, state, mode_str, panelSN, resistance, timestamp
1 29.646, 29.646, 0.0, 0.0, 0, CR, B41J00052893, 100000, 20210913_120014.79 <--Voc
2 14.267, 14.267, 0.354, 5.05, 1, CR, B41J00052893, 40.0, 20210913_120016.16
'''
# print
# '\n'.join(['%i: %s' % (n, l[n]) for n in xrange(len(l))])
# print(f"Current: {["%.2f".format(x[0]) for x in IL[2:]]}, \n {[x[2] for x in `IL`[2:]]}")
print("Voltage: %s"%(", ".join(["%.1f"%float(x[0]) for x in IL[2:]]))) # , \n {[x[2] for x in IL[2:]]}")
print("Current: %s"%(", ".join(["%.1f"%float(x[2]) for x in IL[2:]]))) # , \n {[x[2] for x in IL[2:]]}")
# OUT_FOR_SINGLE print(f"Watts: {[x[3] for x in IL[2:]]}, \n {[x[3] for x in IL[2:]]}")
# OUT_FOR_SINGLE print(f"Resistance: {[x[7] for x in IL[2:]]}, \n {[x[7] for x in IL[2:]]}")
p1, = host.plot([float(x[0]) for x in IL[2:]], [float(x[2]) for x in IL[2:]], label="Current")
p2, = par1.plot([float(x[0]) for x in IL[2:]], [float(x[3]) for x in IL[2:]], label="Watts")
# OUT_FOR_SINGLE p3, = host.plot([x[7] for x in IL[2:]], [x[7] for x in IL[2:]], label="Resistance")
xlim_min = 0 # min([x[0] for x in IL[2:]])
xlim_max = 50 # max([x[0] for x in IL[2:]])
print(f"X-Axis {xlim_min=}, {xlim_max=}")
ylim_min = min([x[2] for x in IL[2:]])
ylim_max = max([x[2] for x in IL[2:]])
print(f"Y-Axis {ylim_min=}, {ylim_max=}")
host.set_xlim( xlim_min, xlim_max) # X Axis (Voltage)
host.set_ylim( ylim_min, ylim_max) # # Left Y Axis (Current)
par1.set_ylim( 0, 200) # Right Y Axis 1 (Wattage)
# OUT_FOR_SINGLE par2.set_ylim( IL[2][7], IL[-1][7]) # Right Y Axis 2 (Resistance)
host.set_xlabel("Voltage")
host.set_ylabel("Current (Amps)")
par1.set_ylabel("Watts")
# OUT_FOR_SINGLE par2.set_ylabel("Load Resistance")
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
# OUT_FOR_SINGLE par2.axis["right"].label.set_color(p3.get_color())
# from MAYBE related examples axes.yaxis.set_major_locator(MaxNLocator(5))
host.yaxis.set_major_locator(MaxNLocator(10))
host.xaxis.set_major_locator(MaxNLocator(8))
# par1.yaxis.set_major_locator(MaxNLocator(8))
plt.show()
def main(arguments=None):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help="Input file") # type=argparse.FileType('r'))
# parser.add_argument('-o', '--outfile', help="Output file",
# default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
# print(pp_str(args))
# read csv file as a list of lists
# with open(args.infile, 'r') as read_obj:
# # pass the file object to reader() to get the reader object
# csv_reader = reader(read_obj)
# # Pass reader object to list() to get a list of lists
# list_of_rows = list(csv_reader)
# # print(pp_str(list_of_rows))
# for i in list_of_rows:
# print(f"{i}")
df = pd.read_csv(args.infile)
# get_and_plot_mppt(df)
plot_df(df)
if __name__ == '__main__':
main(sys.argv[1:])
# time.sleep(2.612)
sys.exit(0)
|
#!/usr/bin/env python
"""
Import MPPT CSV data and plot it.
CSV format:
Volts,volts,amps,watts,state,mode_str,panelSN,resistance,timestamp
29.646,29.646,0.0,0.0,0,CR,B41J00052893,100000,20210913_120014.79
14.267,14.267,0.354,5.05,1,CR,B41J00052893,40.0,20210913_120016.16
"""
from __future__ import print_function
import os
import sys
import argparse
from datetime import datetime as dt
import time
import pandas as pd
from numpy import *
import numpy as np
from mpl_toolkits.axes_grid1 import host_subplot
from mpl_toolkits import axisartist
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from csv import reader
import pprint
pp = pprint.PrettyPrinter(indent=4, depth=4).pprint
pp_str = pprint.PrettyPrinter(indent=4, depth=4).pformat
def plot_df(df):
'''
0 1 2 3 4 5 6 7 8
0 Volts, volts, amps, watts, state, mode_str, panelSN, resistance, timestamp
1 29.646, 29.646, 0.0, 0.0, 0, CR, B41J00052893, 100000, 20210913_120014.79 <--Voc
2 14.267, 14.267, 0.354, 5.05, 1, CR, B41J00052893, 40.0, 20210913_120016.16
'''
print(df)
title_sn = df['panelSN'][1]
volt_series = df['Volts'][1:]
std_voltage_series = np.arange(50, 0, -(50.0 / volt_series.size))
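# Build a synthetic, evenly spaced 50 V -> 0 V x-axis with (roughly) one point per sample;
# the measured 'Volts' column is kept above but is not used as the x-axis in the plots below.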
print(f"{volt_series.size=}")
print(f"std_voltage_series-> size {len(std_voltage_series)}, {std_voltage_series})")
amps_series = df['amps'][1:]
watts_series = df['watts'][1:]
ohms_series = df['resistance'][1:]
# print(volt_series)
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('Voltage')
# ax1.set_ylabel('Current', color=color)
ax1.set_ylim(1, 6)
# ax1.plot(volt_series, amps_series, color=color)
ax1.plot(std_voltage_series, amps_series, color=color, label='Current')
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
# ax2.set_ylabel('Watts', color=color) # we already handled the x-label with ax1
# ax2.plot(volt_series, watts_series, color=color)
ax2.plot(std_voltage_series, watts_series, color=color, label='Watts')
ax2.tick_params(axis='y', labelcolor=color)
plt.title(f"Panel S/N {title_sn}")
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.legend()
plt.show()
def get_and_plot_mppt(df):
# IL = array(ItemList)
IL = df.values  # NOTE (editor): assumed stand-in; the original left 'IL' undefined. The [2:]
# slices below were written for raw csv.reader rows (header row first), so with a DataFrame
# they skip one extra data row.
host = host_subplot(111, axes_class=axisartist.Axes) # (1 row, 1 column, plot number 1)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
# par2 = host.twinx()
# par2.axis["right"] = par2.new_fixed_axis(loc="right", offset=(60, 0))
par1.axis["right"].toggle(all=True)
# OUT_FOR_SINGLE par2.axis["right"].toggle() #all=True)
'''
0 1 2 3 4 5 6 7 8
0 Volts, volts, amps, watts, state, mode_str, panelSN, resistance, timestamp
1 29.646, 29.646, 0.0, 0.0, 0, CR, B41J00052893, 100000, 20210913_120014.79 <--Voc
2 14.267, 14.267, 0.354, 5.05, 1, CR, B41J00052893, 40.0, 20210913_120016.16
'''
# print
# '\n'.join(['%i: %s' % (n, l[n]) for n in xrange(len(l))])
# print(f"Current: {['%.2f'.format(x[0]) for x in IL[2:]]}, \n {[x[2] for x in `IL`[2:]]}")
print("Voltage: %s"%(", ".join(["%.1f"%float(x[0]) for x in IL[2:]]))) # , \n {[x[2] for x in IL[2:]]}")
print("Current: %s"%(", ".join(["%.1f"%float(x[2]) for x in IL[2:]]))) # , \n {[x[2] for x in IL[2:]]}")
# OUT_FOR_SINGLE print(f"Watts: {[x[3] for x in IL[2:]]}, \n {[x[3] for x in IL[2:]]}")
# OUT_FOR_SINGLE print(f"Resistance: {[x[7] for x in IL[2:]]}, \n {[x[7] for x in IL[2:]]}")
p1, = host.plot([float(x[0]) for x in IL[2:]], [float(x[2]) for x in IL[2:]], label="Current")
p2, = par1.plot([float(x[0]) for x in IL[2:]], [float(x[3]) for x in IL[2:]], label="Watts")
# OUT_FOR_SINGLE p3, = host.plot([x[7] for x in IL[2:]], [x[7] for x in IL[2:]], label="Resistance")
xlim_min = 0 # min([x[0] for x in IL[2:]])
xlim_max = 50 # max([x[0] for x in IL[2:]])
print(f"X-Axis {xlim_min=}, {xlim_max=}")
ylim_min = min([x[2] for x in IL[2:]])
ylim_max = max([x[2] for x in IL[2:]])
print(f"Y-Axis {ylim_min=}, {ylim_max=}")
host.set_xlim( xlim_min, xlim_max) # X Axis (Voltage)
host.set_ylim( ylim_min, ylim_max) # # Left Y Axis (Current)
par1.set_ylim( 0, 200) # Right Y Axis 1 (Wattage)
# OUT_FOR_SINGLE par2.set_ylim( IL[2][7], IL[-1][7]) # Right Y Axis 2 (Resistance)
host.set_xlabel("Voltage")
host.set_ylabel("Current (Amps)")
par1.set_ylabel("Watts")
# OUT_FOR_SINGLE par2.set_ylabel("Load Resistance")
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
# OUT_FOR_SINGLE par2.axis["right"].label.set_color(p3.get_color())
# from MAYBE related examples axes.yaxis.set_major_locator(MaxNLocator(5))
host.yaxis.set_major_locator(MaxNLocator(10))
host.xaxis.set_major_locator(MaxNLocator(8))
# par1.yaxis.set_major_locator(MaxNLocator(8))
plt.show()
def main(arguments=None):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help="Input file") # type=argparse.FileType('r'))
# parser.add_argument('-o', '--outfile', help="Output file",
# default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
# print(pp_str(args))
# read csv file as a list of lists
# with open(args.infile, 'r') as read_obj:
# # pass the file object to reader() to get the reader object
# csv_reader = reader(read_obj)
# # Pass reader object to list() to get a list of lists
# list_of_rows = list(csv_reader)
# # print(pp_str(list_of_rows))
# for i in list_of_rows:
# print(f"{i}")
df = pd.read_csv(args.infile)
# get_and_plot_mppt(df)
plot_df(df)
if __name__ == '__main__':
main(sys.argv[1:])
# time.sleep(2.612)
sys.exit(0)
|
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import re
import json
import reframe.utility as util
import reframe.utility.jsonext as jsonext
from reframe.core.backends import (getlauncher, getscheduler)
from reframe.core.environments import (Environment, ProgEnvironment)
from reframe.core.logging import getlogger
from reframe.core.modules import ModulesSystem
class _ReadOnlyInfo:
__slots__ = ('_info',)
_known_attrs = ()
def __init__(self, info):
self._info = info
def __deepcopy__(self, memo):
# This is a read-only object; simply return ourself
return self
def __getattr__(self, name):
if name in self._known_attrs:
return self._info.get(name, None)
else:
raise AttributeError(
f'{type(self).__qualname__!r} object has no attribute {name!r}'
)
def __setattr__(self, name, value):
if name in self._known_attrs:
raise AttributeError(f'attribute {name!r} is not writeable')
else:
super().__setattr__(name, value)
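# Example (editor's note): for a ProcessorInfo built from {'arch': 'zen2'}, .arch == 'zen2',
# .num_cpus is None (a known key absent from the config), any unknown attribute raises
# AttributeError, and assigning to a known attribute raises AttributeError as well.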
class ProcessorInfo(_ReadOnlyInfo, jsonext.JSONSerializable):
'''A representation of a processor inside ReFrame.
You can access all the keys of the `processor configuration object
<config_reference.html#processor-info>`__.
.. versionadded:: 3.5.0
.. warning::
Users may not create :class:`ProcessorInfo` objects directly.
'''
__slots__ = ()
_known_attrs = (
'arch', 'num_cpus', 'num_cpus_per_core',
'num_cpus_per_socket', 'num_sockets', 'topology'
)
@property
def info(self):
'''All the available information from the configuration.
:type: :class:`dict`
'''
return self._info
@property
def num_cores(self):
'''Total number of cores.
:type: integral or :class:`None`
'''
if self.num_cpus and self.num_cpus_per_core:
return self.num_cpus // self.num_cpus_per_core
else:
return None
@property
def num_cores_per_socket(self):
'''Number of cores per socket.
:type: integral or :class:`None`
'''
if self.num_cores and self.num_sockets:
return self.num_cores // self.num_sockets
else:
return None
@property
def num_numa_nodes(self):
'''Number of NUMA nodes.
:type: integral or :class:`None`
'''
if self.topology and 'numa_nodes' in self.topology:
return len(self.topology['numa_nodes'])
else:
return None
@property
def num_cores_per_numa_node(self):
'''Number of cores per NUMA node.
:type: integral or :class:`None`
'''
if self.num_numa_nodes and self.num_cores:
return self.num_cores // self.num_numa_nodes
else:
return None
class DeviceInfo(_ReadOnlyInfo, jsonext.JSONSerializable):
'''A representation of a device inside ReFrame.
You can access all the keys of the `device configuration object
<config_reference.html#device-info>`__.
.. versionadded:: 3.5.0
.. warning::
Users may not create :class:`DeviceInfo` objects directly.
'''
__slots__ = ()
_known_attrs = ('type', 'arch')
@property
def info(self):
'''All the available information from the configuration.
:type: :class:`dict`
'''
return self._info
@property
def num_devices(self):
'''Number of devices of this type.
It will return 1 if it wasn't set in the configuration.
:type: integral
'''
return self._info.get('num_devices', 1)
@property
def device_type(self):
'''The type of the device.
:type: :class:`str` or :class:`None`
'''
return self.type
class SystemPartition(jsonext.JSONSerializable):
'''A representation of a system partition inside ReFrame.
.. warning::
Users may not create :class:`SystemPartition` objects directly.
'''
def __init__(self, *, parent, name, sched_type, launcher_type,
descr, access, container_environs, resources,
local_env, environs, max_jobs, prepare_cmds,
processor, devices, extras, features, time_limit):
getlogger().debug(f'Initializing system partition {name!r}')
self._parent_system = parent
self._name = name
self._sched_type = sched_type
self._scheduler = None
self._launcher_type = launcher_type
self._descr = descr
self._access = access
self._container_environs = container_environs
self._local_env = local_env
self._environs = environs
self._max_jobs = max_jobs
self._prepare_cmds = prepare_cmds
self._resources = {r['name']: r['options'] for r in resources}
self._processor = ProcessorInfo(processor)
self._devices = [DeviceInfo(d) for d in devices]
self._extras = extras
self._features = features
self._time_limit = time_limit
@property
def access(self):
'''The scheduler options for accessing this system partition.
:type: :class:`List[str]`
'''
return util.SequenceView(self._access)
@property
def descr(self):
'''The description of this partition.
:type: :class:`str`
'''
return self._descr
@property
def environs(self):
'''The programming environments associated with this system partition.
:type: :class:`List[ProgEnvironment]`
'''
return util.SequenceView(self._environs)
@property
def container_environs(self):
'''Environments associated with the different container platforms.
:type: :class:`Dict[str, Environment]`
'''
return util.MappingView(self._container_environs)
@property
def fullname(self):
'''Return the fully-qualified name of this partition.
The fully-qualified name is of the form
``<parent-system-name>:<partition-name>``.
:type: :class:`str`
'''
return f'{self._parent_system}:{self._name}'
@property
def local_env(self):
'''The local environment associated with this partition.
:type: :class:`Environment`
'''
return self._local_env
@property
def max_jobs(self):
'''The maximum number of concurrent jobs allowed on this partition.
:type: integral
'''
return self._max_jobs
@property
def time_limit(self):
'''The time limit that will be used when submitting jobs to this
partition.
:type: :class:`str` or :obj:`None`
.. versionadded:: 3.11.0
'''
return self._time_limit
@property
def prepare_cmds(self):
'''Commands to be emitted before loading the modules.
:type: :class:`List[str]`
'''
return self._prepare_cmds
@property
def name(self):
'''The name of this partition.
:type: :class:`str`
'''
return self._name
@property
def resources(self):
'''The resources template strings associated with this partition.
This is a dictionary, where the key is the name of a resource and the
value is the scheduler options or directives associated with this
resource.
:type: :class:`Dict[str, List[str]]`
'''
return util.MappingView(self._resources)
@property
def scheduler(self):
'''The backend scheduler of this partition.
:type: :class:`reframe.core.schedulers.JobScheduler`.
.. note::
.. versionchanged:: 2.8
Prior versions returned a string representing the scheduler and
job launcher combination.
.. versionchanged:: 3.2
The property now stores a :class:`JobScheduler` instance.
'''
if self._scheduler is None:
self._scheduler = self._sched_type()
return self._scheduler
@property
def launcher_type(self):
'''The type of the backend launcher of this partition.
.. versionadded:: 3.2
:type: a subclass of :class:`reframe.core.launchers.JobLauncher`.
'''
return self._launcher_type
@property
def launcher(self):
'''See :attr:`launcher_type`.
.. deprecated:: 3.2
Please use :attr:`launcher_type` instead.
'''
from reframe.core.warnings import user_deprecation_warning
user_deprecation_warning("the 'launcher' attribute is deprecated; "
"please use 'launcher_type' instead")
return self.launcher_type
def get_resource(self, name, **values):
'''Instantiate managed resource ``name`` with ``value``.
:meta private:
'''
ret = []
for r in self._resources.get(name, []):
try:
ret.append(r.format(**values))
except KeyError:
pass
return ret
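# Illustration (editor's note): with self._resources == {'gpu': ['--gres=gpu:{num_gpus}']},
# get_resource('gpu', num_gpus=2) returns ['--gres=gpu:2']; options whose placeholders are
# not supplied are silently skipped by the KeyError handler above.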
def environment(self, name):
'''Return the partition environment named ``name``.'''
for e in self.environs:
if e.name == name:
return e
return None
@property
def processor(self):
'''Processor information for the current partition.
.. versionadded:: 3.5.0
:type: :class:`reframe.core.systems.ProcessorInfo`
'''
return self._processor
@property
def devices(self):
'''A list of devices in the current partition.
.. versionadded:: 3.5.0
:type: :class:`List[reframe.core.systems.DeviceInfo]`
'''
return self._devices
@property
def extras(self):
'''User defined properties associated with this partition.
These extras are defined in the configuration.
.. versionadded:: 3.5.0
:type: :class:`Dict[str, object]`
'''
return self._extras
@property
def features(self):
'''User defined features associated with this partition.
These features are defined in the configuration.
.. versionadded:: 3.11.0
:type: :class:`List[str]`
'''
return self._features
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (self._name == other.name and
self._sched_type == other._sched_type and
self._launcher_type == other._launcher_type and
self._access == other._access and
self._environs == other._environs and
self._resources == other._resources and
self._local_env == other._local_env)
def __hash__(self):
return hash(self.fullname)
def json(self):
'''Return a JSON object representing this system partition.'''
return {
'name': self._name,
'descr': self._descr,
'scheduler': self._sched_type.registered_name,
'launcher': self._launcher_type.registered_name,
'access': self._access,
'container_platforms': [
{
'type': ctype,
'modules': [m for m in cpenv.modules],
'variables': [[n, v] for n, v in cpenv.variables.items()]
}
for ctype, cpenv in self._container_environs.items()
],
'modules': [m for m in self._local_env.modules],
'variables': [[n, v]
for n, v in self._local_env.variables.items()],
'environs': [e.name for e in self._environs],
'max_jobs': self._max_jobs,
'resources': [
{
'name': name,
'options': options
}
for name, options in self._resources.items()
]
}
def __str__(self):
return json.dumps(self.json(), indent=2)
class System(jsonext.JSONSerializable):
'''A representation of a system inside ReFrame.
.. warning::
Users may not create :class:`System` objects directly.
'''
def __init__(self, name, descr, hostnames, modules_system,
preload_env, prefix, outputdir,
resourcesdir, stagedir, partitions):
getlogger().debug(f'Initializing system {name!r}')
self._name = name
self._descr = descr
self._hostnames = hostnames
self._modules_system = ModulesSystem.create(modules_system)
self._preload_env = preload_env
self._prefix = prefix
self._outputdir = outputdir
self._resourcesdir = resourcesdir
self._stagedir = stagedir
self._partitions = partitions
@classmethod
def create(cls, site_config):
# Create the whole system hierarchy from bottom up
sysname = site_config.get('systems/0/name')
partitions = []
config_save = site_config.subconfig_system
for p in site_config.get('systems/0/partitions'):
site_config.select_subconfig(f'{sysname}:{p['name']}')
partid = f"systems/0/partitions/@{p["name"]}"
part_name = site_config.get(f'{partid}/name')
part_sched = getscheduler(site_config.get(f'{partid}/scheduler'))
part_launcher = getlauncher(site_config.get(f'{partid}/launcher'))
part_container_environs = {}
for i, p in enumerate(
site_config.get(f'{partid}/container_platforms')
):
ctype = p['type']
part_container_environs[ctype] = Environment(
name=f'__rfm_env_{ctype}',
modules=site_config.get(
f'{partid}/container_platforms/{i}/modules'
),
variables=site_config.get(
f'{partid}/container_platforms/{i}/variables'
)
)
env_patt = site_config.get('general/0/valid_env_names') or [r'.*']
part_environs = [
ProgEnvironment(
name=e,
modules=site_config.get(f'environments/@{e}/modules'),
variables=site_config.get(f'environments/@{e}/variables'),
extras=site_config.get(f'environments/@{e}/extras'),
features=site_config.get(f'environments/@{e}/features'),
cc=site_config.get(f'environments/@{e}/cc'),
cxx=site_config.get(f'environments/@{e}/cxx'),
ftn=site_config.get(f'environments/@{e}/ftn'),
cppflags=site_config.get(f'environments/@{e}/cppflags'),
cflags=site_config.get(f'environments/@{e}/cflags'),
cxxflags=site_config.get(f'environments/@{e}/cxxflags'),
fflags=site_config.get(f'environments/@{e}/fflags'),
ldflags=site_config.get(f'environments/@{e}/ldflags')
) for e in site_config.get(f'{partid}/environs')
if any(re.match(pattern, e) for pattern in env_patt)
]
partitions.append(
SystemPartition(
parent=site_config.get('systems/0/name'),
name=part_name,
sched_type=part_sched,
launcher_type=part_launcher,
descr=site_config.get(f'{partid}/descr'),
access=site_config.get(f'{partid}/access'),
resources=site_config.get(f'{partid}/resources'),
environs=part_environs,
container_environs=part_container_environs,
local_env=Environment(
name=f'__rfm_env_{part_name}',
modules=site_config.get(f'{partid}/modules'),
variables=site_config.get(f'{partid}/variables')
),
max_jobs=site_config.get(f'{partid}/max_jobs'),
prepare_cmds=site_config.get(f'{partid}/prepare_cmds'),
processor=site_config.get(f'{partid}/processor'),
devices=site_config.get(f'{partid}/devices'),
extras=site_config.get(f'{partid}/extras'),
features=site_config.get(f'{partid}/features'),
time_limit=site_config.get(f'{partid}/time_limit')
)
)
# Restore configuration, but ignore unresolved sections or
# configuration parameters at the system level; if we came up to this
# point, then all is good at the partition level, which is enough.
site_config.select_subconfig(config_save, ignore_resolve_errors=True)
return System(
name=sysname,
descr=site_config.get('systems/0/descr'),
hostnames=site_config.get('systems/0/hostnames'),
modules_system=site_config.get('systems/0/modules_system'),
preload_env=Environment(
name=f'__rfm_env_{sysname}',
modules=site_config.get('systems/0/modules'),
variables=site_config.get('systems/0/variables')
),
prefix=site_config.get('systems/0/prefix'),
outputdir=site_config.get('systems/0/outputdir'),
resourcesdir=site_config.get('systems/0/resourcesdir'),
stagedir=site_config.get('systems/0/stagedir'),
partitions=partitions
)
@property
def name(self):
'''The name of this system.
:type: :class:`str`
'''
return self._name
@property
def descr(self):
'''The description of this system.
:type: :class:`str`
'''
return self._descr
@property
def hostnames(self):
'''The hostname patterns associated with this system.
:type: :class:`List[str]`
'''
return self._hostnames
@property
def modules_system(self):
'''The modules system name associated with this system.
:type: :class:`reframe.core.modules.ModulesSystem`
'''
return self._modules_system
@property
def preload_environ(self):
'''The environment to load whenever ReFrame runs on this system.
.. versionadded:: 2.19
:type: :class:`reframe.core.environments.Environment`
'''
return self._preload_env
@property
def prefix(self):
'''The ReFrame prefix associated with this system.
:type: :class:`str`
'''
return self._prefix
@property
def stagedir(self):
'''The ReFrame stage directory prefix associated with this system.
:type: :class:`str`
'''
return self._stagedir
@property
def outputdir(self):
'''The ReFrame output directory prefix associated with this system.
:type: :class:`str`
'''
return self._outputdir
@property
def resourcesdir(self):
'''Global resources directory for this system.
This directory may be used for storing large files related to
regression tests. The value of this directory is controlled by the
`resourcesdir <config_reference.html#.systems[].resourcesdir>`__
configuration parameter.
:type: :class:`str`
'''
return self._resourcesdir
@property
def partitions(self):
'''The system partitions associated with this system.
:type: :class:`List[SystemPartition]`
'''
return util.SequenceView(self._partitions)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (self._name == other._name and
self._hostnames == other._hostnames and
self._partitions == other._partitions)
def json(self):
'''Return a JSON object representing this system.'''
return {
'name': self._name,
'descr': self._descr,
'hostnames': self._hostnames,
'modules_system': self._modules_system.name,
'modules': [m for m in self._preload_env.modules],
'variables': [
[name, value]
for name, value in self._preload_env.variables.items()
],
'prefix': self._prefix,
'outputdir': self._outputdir,
'stagedir': self._stagedir,
'resourcesdir': self._resourcesdir,
'partitions': [p.json() for p in self._partitions]
}
def __str__(self):
return json.dumps(self.json(), indent=2)
def __repr__(self):
return (
f'{type(self).__name__}( '
f'name={self._name!r}, descr={self._descr!r}, '
f'hostnames={self._hostnames!r}, '
f'modules_system={self.modules_system.name!r}, '
f'preload_env={self._preload_env!r}, prefix={self._prefix!r}, '
f'outputdir={self._outputdir!r}, '
f'resourcesdir={self._resourcesdir!r}, '
f'stagedir={self._stagedir!r}, partitions={self._partitions!r})'
)
|
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import re
import json
import reframe.utility as util
import reframe.utility.jsonext as jsonext
from reframe.core.backends import (getlauncher, getscheduler)
from reframe.core.environments import (Environment, ProgEnvironment)
from reframe.core.logging import getlogger
from reframe.core.modules import ModulesSystem
class _ReadOnlyInfo:
__slots__ = ('_info',)
_known_attrs = ()
def __init__(self, info):
self._info = info
def __deepcopy__(self, memo):
# This is a read-only object; simply return ourself
return self
def __getattr__(self, name):
if name in self._known_attrs:
return self._info.get(name, None)
else:
raise AttributeError(
f'{type(self).__qualname__!r} object has no attribute {name!r}'
)
def __setattr__(self, name, value):
if name in self._known_attrs:
raise AttributeError(f'attribute {name!r} is not writeable')
else:
super().__setattr__(name, value)
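# Example (editor's note): for a ProcessorInfo built from {'arch': 'zen2'}, .arch == 'zen2',
# .num_cpus is None (a known key absent from the config), any unknown attribute raises
# AttributeError, and assigning to a known attribute raises AttributeError as well.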
class ProcessorInfo(_ReadOnlyInfo, jsonext.JSONSerializable):
'''A representation of a processor inside ReFrame.
You can access all the keys of the `processor configuration object
<config_reference.html#processor-info>`__.
.. versionadded:: 3.5.0
.. warning::
Users may not create :class:`ProcessorInfo` objects directly.
'''
__slots__ = ()
_known_attrs = (
'arch', 'num_cpus', 'num_cpus_per_core',
'num_cpus_per_socket', 'num_sockets', 'topology'
)
@property
def info(self):
'''All the available information from the configuration.
:type: :class:`dict`
'''
return self._info
@property
def num_cores(self):
'''Total number of cores.
:type: integral or :class:`None`
'''
if self.num_cpus and self.num_cpus_per_core:
return self.num_cpus // self.num_cpus_per_core
else:
return None
@property
def num_cores_per_socket(self):
'''Number of cores per socket.
:type: integral or :class:`None`
'''
if self.num_cores and self.num_sockets:
return self.num_cores // self.num_sockets
else:
return None
@property
def num_numa_nodes(self):
'''Number of NUMA nodes.
:type: integral or :class:`None`
'''
if self.topology and 'numa_nodes' in self.topology:
return len(self.topology['numa_nodes'])
else:
return None
@property
def num_cores_per_numa_node(self):
'''Number of cores per NUMA node.
:type: integral or :class:`None`
'''
if self.num_numa_nodes and self.num_cores:
return self.num_cores // self.num_numa_nodes
else:
return None
class DeviceInfo(_ReadOnlyInfo, jsonext.JSONSerializable):
'''A representation of a device inside ReFrame.
You can access all the keys of the `device configuration object
<config_reference.html#device-info>`__.
.. versionadded:: 3.5.0
.. warning::
Users may not create :class:`DeviceInfo` objects directly.
'''
__slots__ = ()
_known_attrs = ('type', 'arch')
@property
def info(self):
'''All the available information from the configuration.
:type: :class:`dict`
'''
return self._info
@property
def num_devices(self):
'''Number of devices of this type.
It will return 1 if it wasn't set in the configuration.
:type: integral
'''
return self._info.get('num_devices', 1)
@property
def device_type(self):
'''The type of the device.
:type: :class:`str` or :class:`None`
'''
return self.type
class SystemPartition(jsonext.JSONSerializable):
'''A representation of a system partition inside ReFrame.
.. warning::
Users may not create :class:`SystemPartition` objects directly.
'''
def __init__(self, *, parent, name, sched_type, launcher_type,
descr, access, container_environs, resources,
local_env, environs, max_jobs, prepare_cmds,
processor, devices, extras, features, time_limit):
getlogger().debug(f'Initializing system partition {name!r}')
self._parent_system = parent
self._name = name
self._sched_type = sched_type
self._scheduler = None
self._launcher_type = launcher_type
self._descr = descr
self._access = access
self._container_environs = container_environs
self._local_env = local_env
self._environs = environs
self._max_jobs = max_jobs
self._prepare_cmds = prepare_cmds
self._resources = {r['name']: r['options'] for r in resources}
self._processor = ProcessorInfo(processor)
self._devices = [DeviceInfo(d) for d in devices]
self._extras = extras
self._features = features
self._time_limit = time_limit
@property
def access(self):
'''The scheduler options for accessing this system partition.
:type: :class:`List[str]`
'''
return util.SequenceView(self._access)
@property
def descr(self):
'''The description of this partition.
:type: :class:`str`
'''
return self._descr
@property
def environs(self):
'''The programming environments associated with this system partition.
:type: :class:`List[ProgEnvironment]`
'''
return util.SequenceView(self._environs)
@property
def container_environs(self):
'''Environments associated with the different container platforms.
:type: :class:`Dict[str, Environment]`
'''
return util.MappingView(self._container_environs)
@property
def fullname(self):
'''Return the fully-qualified name of this partition.
The fully-qualified name is of the form
``<parent-system-name>:<partition-name>``.
:type: :class:`str`
'''
return f'{self._parent_system}:{self._name}'
@property
def local_env(self):
'''The local environment associated with this partition.
:type: :class:`Environment`
'''
return self._local_env
@property
def max_jobs(self):
'''The maximum number of concurrent jobs allowed on this partition.
:type: integral
'''
return self._max_jobs
@property
def time_limit(self):
'''The time limit that will be used when submitting jobs to this
partition.
:type: :class:`str` or :obj:`None`
.. versionadded:: 3.11.0
'''
return self._time_limit
@property
def prepare_cmds(self):
'''Commands to be emitted before loading the modules.
:type: :class:`List[str]`
'''
return self._prepare_cmds
@property
def name(self):
'''The name of this partition.
:type: :class:`str`
'''
return self._name
@property
def resources(self):
'''The resources template strings associated with this partition.
This is a dictionary, where the key is the name of a resource and the
value is the scheduler options or directives associated with this
resource.
:type: :class:`Dict[str, List[str]]`
'''
return util.MappingView(self._resources)
@property
def scheduler(self):
'''The backend scheduler of this partition.
:type: :class:`reframe.core.schedulers.JobScheduler`.
.. note::
.. versionchanged:: 2.8
Prior versions returned a string representing the scheduler and
job launcher combination.
.. versionchanged:: 3.2
The property now stores a :class:`JobScheduler` instance.
'''
if self._scheduler is None:
self._scheduler = self._sched_type()
return self._scheduler
@property
def launcher_type(self):
'''The type of the backend launcher of this partition.
.. versionadded:: 3.2
:type: a subclass of :class:`reframe.core.launchers.JobLauncher`.
'''
return self._launcher_type
@property
def launcher(self):
'''See :attr:`launcher_type`.
.. deprecated:: 3.2
Please use :attr:`launcher_type` instead.
'''
from reframe.core.warnings import user_deprecation_warning
user_deprecation_warning("the 'launcher' attribute is deprecated; "
"please use 'launcher_type' instead")
return self.launcher_type
def get_resource(self, name, **values):
'''Instantiate managed resource ``name`` with ``value``.
:meta private:
'''
ret = []
for r in self._resources.get(name, []):
try:
ret.append(r.format(**values))
except KeyError:
pass
return ret
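# Illustration (editor's note): with self._resources == {'gpu': ['--gres=gpu:{num_gpus}']},
# get_resource('gpu', num_gpus=2) returns ['--gres=gpu:2']; options whose placeholders are
# not supplied are silently skipped by the KeyError handler above.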
def environment(self, name):
'''Return the partition environment named ``name``.'''
for e in self.environs:
if e.name == name:
return e
return None
@property
def processor(self):
'''Processor information for the current partition.
.. versionadded:: 3.5.0
:type: :class:`reframe.core.systems.ProcessorInfo`
'''
return self._processor
@property
def devices(self):
'''A list of devices in the current partition.
.. versionadded:: 3.5.0
:type: :class:`List[reframe.core.systems.DeviceInfo]`
'''
return self._devices
@property
def extras(self):
'''User defined properties associated with this partition.
These extras are defined in the configuration.
.. versionadded:: 3.5.0
:type: :class:`Dict[str, object]`
'''
return self._extras
@property
def features(self):
'''User defined features associated with this partition.
These features are defined in the configuration.
.. versionadded:: 3.11.0
:type: :class:`List[str]`
'''
return self._features
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (self._name == other.name and
self._sched_type == other._sched_type and
self._launcher_type == other._launcher_type and
self._access == other._access and
self._environs == other._environs and
self._resources == other._resources and
self._local_env == other._local_env)
def __hash__(self):
return hash(self.fullname)
def json(self):
'''Return a JSON object representing this system partition.'''
return {
'name': self._name,
'descr': self._descr,
'scheduler': self._sched_type.registered_name,
'launcher': self._launcher_type.registered_name,
'access': self._access,
'container_platforms': [
{
'type': ctype,
'modules': [m for m in cpenv.modules],
'variables': [[n, v] for n, v in cpenv.variables.items()]
}
for ctype, cpenv in self._container_environs.items()
],
'modules': [m for m in self._local_env.modules],
'variables': [[n, v]
for n, v in self._local_env.variables.items()],
'environs': [e.name for e in self._environs],
'max_jobs': self._max_jobs,
'resources': [
{
'name': name,
'options': options
}
for name, options in self._resources.items()
]
}
def __str__(self):
return json.dumps(self.json(), indent=2)
class System(jsonext.JSONSerializable):
'''A representation of a system inside ReFrame.
.. warning::
Users may not create :class:`System` objects directly.
'''
def __init__(self, name, descr, hostnames, modules_system,
preload_env, prefix, outputdir,
resourcesdir, stagedir, partitions):
getlogger().debug(f'Initializing system {name!r}')
self._name = name
self._descr = descr
self._hostnames = hostnames
self._modules_system = ModulesSystem.create(modules_system)
self._preload_env = preload_env
self._prefix = prefix
self._outputdir = outputdir
self._resourcesdir = resourcesdir
self._stagedir = stagedir
self._partitions = partitions
@classmethod
def create(cls, site_config):
# Create the whole system hierarchy from bottom up
sysname = site_config.get('systems/0/name')
partitions = []
config_save = site_config.subconfig_system
for p in site_config.get('systems/0/partitions'):
site_config.select_subconfig(f'{sysname}:{p["name"]}')
partid = f"systems/0/partitions/@{p['name']}"
part_name = site_config.get(f'{partid}/name')
part_sched = getscheduler(site_config.get(f'{partid}/scheduler'))
part_launcher = getlauncher(site_config.get(f'{partid}/launcher'))
part_container_environs = {}
for i, p in enumerate(
site_config.get(f'{partid}/container_platforms')
):
ctype = p['type']
part_container_environs[ctype] = Environment(
name=f'__rfm_env_{ctype}',
modules=site_config.get(
f'{partid}/container_platforms/{i}/modules'
),
variables=site_config.get(
f'{partid}/container_platforms/{i}/variables'
)
)
env_patt = site_config.get('general/0/valid_env_names') or [r'.*']
part_environs = [
ProgEnvironment(
name=e,
modules=site_config.get(f'environments/@{e}/modules'),
variables=site_config.get(f'environments/@{e}/variables'),
extras=site_config.get(f'environments/@{e}/extras'),
features=site_config.get(f'environments/@{e}/features'),
cc=site_config.get(f'environments/@{e}/cc'),
cxx=site_config.get(f'environments/@{e}/cxx'),
ftn=site_config.get(f'environments/@{e}/ftn'),
cppflags=site_config.get(f'environments/@{e}/cppflags'),
cflags=site_config.get(f'environments/@{e}/cflags'),
cxxflags=site_config.get(f'environments/@{e}/cxxflags'),
fflags=site_config.get(f'environments/@{e}/fflags'),
ldflags=site_config.get(f'environments/@{e}/ldflags')
) for e in site_config.get(f'{partid}/environs')
if any(re.match(pattern, e) for pattern in env_patt)
]
partitions.append(
SystemPartition(
parent=site_config.get('systems/0/name'),
name=part_name,
sched_type=part_sched,
launcher_type=part_launcher,
descr=site_config.get(f'{partid}/descr'),
access=site_config.get(f'{partid}/access'),
resources=site_config.get(f'{partid}/resources'),
environs=part_environs,
container_environs=part_container_environs,
local_env=Environment(
name=f'__rfm_env_{part_name}',
modules=site_config.get(f'{partid}/modules'),
variables=site_config.get(f'{partid}/variables')
),
max_jobs=site_config.get(f'{partid}/max_jobs'),
prepare_cmds=site_config.get(f'{partid}/prepare_cmds'),
processor=site_config.get(f'{partid}/processor'),
devices=site_config.get(f'{partid}/devices'),
extras=site_config.get(f'{partid}/extras'),
features=site_config.get(f'{partid}/features'),
time_limit=site_config.get(f'{partid}/time_limit')
)
)
# Restore configuration, but ignore unresolved sections or
# configuration parameters at the system level; if we came up to this
# point, then all is good at the partition level, which is enough.
site_config.select_subconfig(config_save, ignore_resolve_errors=True)
return System(
name=sysname,
descr=site_config.get('systems/0/descr'),
hostnames=site_config.get('systems/0/hostnames'),
modules_system=site_config.get('systems/0/modules_system'),
preload_env=Environment(
name=f'__rfm_env_{sysname}',
modules=site_config.get('systems/0/modules'),
variables=site_config.get('systems/0/variables')
),
prefix=site_config.get('systems/0/prefix'),
outputdir=site_config.get('systems/0/outputdir'),
resourcesdir=site_config.get('systems/0/resourcesdir'),
stagedir=site_config.get('systems/0/stagedir'),
partitions=partitions
)
@property
def name(self):
'''The name of this system.
:type: :class:`str`
'''
return self._name
@property
def descr(self):
'''The description of this system.
:type: :class:`str`
'''
return self._descr
@property
def hostnames(self):
'''The hostname patterns associated with this system.
:type: :class:`List[str]`
'''
return self._hostnames
@property
def modules_system(self):
'''The modules system name associated with this system.
:type: :class:`reframe.core.modules.ModulesSystem`
'''
return self._modules_system
@property
def preload_environ(self):
'''The environment to load whenever ReFrame runs on this system.
.. versionadded:: 2.19
:type: :class:`reframe.core.environments.Environment`
'''
return self._preload_env
@property
def prefix(self):
'''The ReFrame prefix associated with this system.
:type: :class:`str`
'''
return self._prefix
@property
def stagedir(self):
'''The ReFrame stage directory prefix associated with this system.
:type: :class:`str`
'''
return self._stagedir
@property
def outputdir(self):
'''The ReFrame output directory prefix associated with this system.
:type: :class:`str`
'''
return self._outputdir
@property
def resourcesdir(self):
'''Global resources directory for this system.
This directory may be used for storing large files related to
regression tests. The value of this directory is controlled by the
`resourcesdir <config_reference.html#.systems[].resourcesdir>`__
configuration parameter.
:type: :class:`str`
'''
return self._resourcesdir
@property
def partitions(self):
'''The system partitions associated with this system.
:type: :class:`List[SystemPartition]`
'''
return util.SequenceView(self._partitions)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (self._name == other._name and
self._hostnames == other._hostnames and
self._partitions == other._partitions)
def json(self):
'''Return a JSON object representing this system.'''
return {
'name': self._name,
'descr': self._descr,
'hostnames': self._hostnames,
'modules_system': self._modules_system.name,
'modules': [m for m in self._preload_env.modules],
'variables': [
[name, value]
for name, value in self._preload_env.variables.items()
],
'prefix': self._prefix,
'outputdir': self._outputdir,
'stagedir': self._stagedir,
'resourcesdir': self._resourcesdir,
'partitions': [p.json() for p in self._partitions]
}
def __str__(self):
return json.dumps(self.json(), indent=2)
def __repr__(self):
return (
f'{type(self).__name__}( '
f'name={self._name!r}, descr={self._descr!r}, '
f'hostnames={self._hostnames!r}, '
f'modules_system={self.modules_system.name!r}, '
f'preload_env={self._preload_env!r}, prefix={self._prefix!r}, '
f'outputdir={self._outputdir!r}, '
f'resourcesdir={self._resourcesdir!r}, '
f'stagedir={self._stagedir!r}, partitions={self._partitions!r})'
)
|
# MIT License
# Copyright (c) 2020 Simon Schug, João Sacramento
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import json
import logging
import sys
import torch
from lib import config, data, energy, train, utils
def load_default_config(energy):
"""
Load default parameter configuration from file.
Args:
energy: String with the energy name
Returns:
Dictionary of default parameters for the given energy
"""
if energy == "restr_hopfield":
default_config = "etc/energy_restr_hopfield.json"
elif energy == "cond_gaussian":
default_config = "etc/energy_cond_gaussian.json"
else:
raise ValueError("Energy based model \"{}\" not defined.".format(energy))
with open(default_config) as config_json_file:
cfg = json.load(config_json_file)
return cfg
def parse_shell_args(args):
"""
Parse shell arguments for this script.
Args:
args: List of shell arguments
Returns:
Dictionary of shell arguments
"""
parser = argparse.ArgumentParser(
description="Train an energy-based model on MNIST using Equilibrium Propagation."
)
parser.add_argument("--batch_size", type=int, default=argparse.SUPPRESS,
help="Size of mini batches during training.")
parser.add_argument("--c_energy", choices=["cross_entropy", "squared_error"],
default=argparse.SUPPRESS, help="Supervised learning cost function.")
parser.add_argument("--dimensions", type=int, nargs="+",
default=argparse.SUPPRESS, help="Dimensions of the neural network.")
parser.add_argument("--energy", choices=["cond_gaussian", "restr_hopfield"],
default="cond_gaussian", help="Type of energy-based model.")
parser.add_argument("--epochs", type=int, default=argparse.SUPPRESS,
help="Number of epochs to train.")
parser.add_argument("--fast_ff_init", action='store_true', default=argparse.SUPPRESS,
help="Flag to enable fast feedforward initialization.")
parser.add_argument("--learning_rate", type=float, default=argparse.SUPPRESS,
help="Learning rate of the optimizer.")
parser.add_argument("--log_dir", type=str, default="",
help="Subdirectory within ./log/ to store logs.")
parser.add_argument("--nonlinearity", choices=["leaky_relu", "relu", "sigmoid", "tanh"],
default=argparse.SUPPRESS, help="Nonlinearity between network layers.")
parser.add_argument("--optimizer", choices=["adam", "adagrad", "sgd"],
default=argparse.SUPPRESS, help="Optimizer used to train the model.")
parser.add_argument("--seed", type=int, default=argparse.SUPPRESS,
help="Random seed for pytorch")
return vars(parser.parse_args(args))
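# Editor's note: options left at argparse.SUPPRESS never appear in the returned dict, so the
# later cfg.update(user_config) only overrides the defaults the user explicitly set.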
def run_energy_model_mnist(cfg):
"""
Main script.
Args:
cfg: Dictionary defining parameters of the run
"""
# Initialize seed if specified (might slow down the model)
if cfg['seed'] is not None:
torch.manual_seed(cfg['seed'])
# Create the cost function to be optimized by the model
c_energy = utils.create_cost(cfg['c_energy'], cfg['beta'])
# Create activation functions for every layer as a list
phi = utils.create_activations(cfg['nonlinearity'], len(cfg['dimensions']))
# Initialize energy based model
if cfg["energy"] == "restr_hopfield":
model = energy.RestrictedHopfield(
cfg['dimensions'], c_energy, cfg['batch_size'], phi).to(config.device)
elif cfg["energy"] == "cond_gaussian":
model = energy.ConditionalGaussian(
cfg['dimensions'], c_energy, cfg['batch_size'], phi).to(config.device)
else:
raise ValueError(f'Energy based model \"{cfg['energy']}\" not defined.')
# Define optimizer (may include l2 regularization via weight_decay)
w_optimizer = utils.create_optimizer(model, cfg['optimizer'], lr=cfg['learning_rate'])
# Create torch data loaders with the MNIST data set
mnist_train, mnist_test = data.create_mnist_loaders(cfg['batch_size'])
logging.info("Start training with parametrization:\n{}".format(
json.dumps(cfg, indent=4, sort_keys=True)))
for epoch in range(1, cfg['epochs'] + 1):
# Training
train.train(model, mnist_train, cfg['dynamics'], w_optimizer, cfg["fast_ff_init"])
# Testing
test_acc, test_energy = train.test(model, mnist_test, cfg['dynamics'], cfg["fast_ff_init"])
# Logging
logging.info(
"epoch: {} \t test_acc: {:.4f} \t mean_E: {:.4f}".format(
epoch, test_acc, test_energy)
)
if __name__ == '__main__':
# Parse shell arguments as input configuration
user_config = parse_shell_args(sys.argv[1:])
# Load default parameter configuration from file for the specified energy-based model
cfg = load_default_config(user_config["energy"])
# Overwrite default parameters with user configuration where applicable
cfg.update(user_config)
# Setup global logger and logging directory
config.setup_logging(cfg["energy"] + "_" + cfg["c_energy"] + "_" + cfg["dataset"],
dir=cfg['log_dir'])
# Run the script using the created parameter configuration
run_energy_model_mnist(cfg)
|
# MIT License
# Copyright (c) 2020 Simon Schug, João Sacramento
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import json
import logging
import sys
import torch
from lib import config, data, energy, train, utils
def load_default_config(energy):
"""
Load default parameter configuration from file.
Args:
energy: String with the energy name
Returns:
Dictionary of default parameters for the given energy
"""
if energy == "restr_hopfield":
default_config = "etc/energy_restr_hopfield.json"
elif energy == "cond_gaussian":
default_config = "etc/energy_cond_gaussian.json"
else:
raise ValueError("Energy based model \"{}\" not defined.".format(energy))
with open(default_config) as config_json_file:
cfg = json.load(config_json_file)
return cfg
def parse_shell_args(args):
"""
Parse shell arguments for this script.
Args:
args: List of shell arguments
Returns:
Dictionary of shell arguments
"""
parser = argparse.ArgumentParser(
description="Train an energy-based model on MNIST using Equilibrium Propagation."
)
parser.add_argument("--batch_size", type=int, default=argparse.SUPPRESS,
help="Size of mini batches during training.")
parser.add_argument("--c_energy", choices=["cross_entropy", "squared_error"],
default=argparse.SUPPRESS, help="Supervised learning cost function.")
parser.add_argument("--dimensions", type=int, nargs="+",
default=argparse.SUPPRESS, help="Dimensions of the neural network.")
parser.add_argument("--energy", choices=["cond_gaussian", "restr_hopfield"],
default="cond_gaussian", help="Type of energy-based model.")
parser.add_argument("--epochs", type=int, default=argparse.SUPPRESS,
help="Number of epochs to train.")
parser.add_argument("--fast_ff_init", action='store_true', default=argparse.SUPPRESS,
help="Flag to enable fast feedforward initialization.")
parser.add_argument("--learning_rate", type=float, default=argparse.SUPPRESS,
help="Learning rate of the optimizer.")
parser.add_argument("--log_dir", type=str, default="",
help="Subdirectory within ./log/ to store logs.")
parser.add_argument("--nonlinearity", choices=["leaky_relu", "relu", "sigmoid", "tanh"],
default=argparse.SUPPRESS, help="Nonlinearity between network layers.")
parser.add_argument("--optimizer", choices=["adam", "adagrad", "sgd"],
default=argparse.SUPPRESS, help="Optimizer used to train the model.")
parser.add_argument("--seed", type=int, default=argparse.SUPPRESS,
help="Random seed for pytorch")
return vars(parser.parse_args(args))
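# Example (illustrative): parse_shell_args(["--energy", "restr_hopfield", "--epochs", "5"])
# returns {"energy": "restr_hopfield", "epochs": 5, "log_dir": ""}; any option left at its
# argparse.SUPPRESS default is simply absent from the returned dictionary.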
def run_energy_model_mnist(cfg):
"""
Main script.
Args:
cfg: Dictionary defining parameters of the run
"""
# Initialize seed if specified (might slow down the model)
if cfg['seed'] is not None:
torch.manual_seed(cfg['seed'])
# Create the cost function to be optimized by the model
c_energy = utils.create_cost(cfg['c_energy'], cfg['beta'])
# Create activation functions for every layer as a list
phi = utils.create_activations(cfg['nonlinearity'], len(cfg['dimensions']))
# Initialize energy based model
if cfg["energy"] == "restr_hopfield":
model = energy.RestrictedHopfield(
cfg['dimensions'], c_energy, cfg['batch_size'], phi).to(config.device)
elif cfg["energy"] == "cond_gaussian":
model = energy.ConditionalGaussian(
cfg['dimensions'], c_energy, cfg['batch_size'], phi).to(config.device)
else:
raise ValueError(f'Energy based model \"{cfg["energy"]}\" not defined.')
# Define optimizer (may include l2 regularization via weight_decay)
w_optimizer = utils.create_optimizer(model, cfg['optimizer'], lr=cfg['learning_rate'])
# Create torch data loaders with the MNIST data set
mnist_train, mnist_test = data.create_mnist_loaders(cfg['batch_size'])
logging.info("Start training with parametrization:\n{}".format(
json.dumps(cfg, indent=4, sort_keys=True)))
for epoch in range(1, cfg['epochs'] + 1):
# Training
train.train(model, mnist_train, cfg['dynamics'], w_optimizer, cfg["fast_ff_init"])
# Testing
test_acc, test_energy = train.test(model, mnist_test, cfg['dynamics'], cfg["fast_ff_init"])
# Logging
logging.info(
"epoch: {} \t test_acc: {:.4f} \t mean_E: {:.4f}".format(
epoch, test_acc, test_energy)
)
if __name__ == '__main__':
# Parse shell arguments as input configuration
user_config = parse_shell_args(sys.argv[1:])
# Load default parameter configuration from file for the specified energy-based model
cfg = load_default_config(user_config["energy"])
# Overwrite default parameters with user configuration where applicable
cfg.update(user_config)
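    # Example (illustrative values): if the JSON defaults hold {"epochs": 50, "learning_rate": 0.01}
    # and the shell passed only "--learning_rate 0.001", the merged cfg keeps epochs=50 and uses
    # learning_rate=0.001, because suppressed argparse options never appear in user_config.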
# Setup global logger and logging directory
config.setup_logging(cfg["energy"] + "_" + cfg["c_energy"] + "_" + cfg["dataset"],
dir=cfg['log_dir'])
    # Run the script using the created parameter configuration
run_energy_model_mnist(cfg)
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to android"""
import asyncio
import json
import re
import os
import time
import math
from requests import get
from bs4 import BeautifulSoup
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
from userbot.utils import (
chrome, humanbytes, time_formatter, md5, human_to_bytes
)
GITHUB = 'https://github.com'
DEVICES_DATA = ('https://raw.githubusercontent.com/androidtrackers/'
'certified-android-devices/master/by_device.json')
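# Illustrative shape of the by_device.json payload (codename -> list of entries); the sample
# values below are assumptions, only the 'brand'/'name'/'model' keys are relied on by .device:
#   {"whyred": [{"brand": "Xiaomi", "name": "Redmi Note 5 Pro", "model": "M1803E7SG"}], ...}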
@register(outgoing=True, pattern=r"^\.magisk$")
async def magisk(request):
""" magisk latest releases """
magisk_dict = {
"Stable":
"https://raw.githubusercontent.com/topjohnwu/magisk_files/master/stable.json",
"Beta":
"https://raw.githubusercontent.com/topjohnwu/magisk_files/master/beta.json",
"Canary (Release)":
"https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/release.json",
"Canary (Debug)":
"https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/debug.json"
}
releases = 'Latest Magisk Releases:\n'
for name, release_url in magisk_dict.items():
data = get(release_url).json()
        releases += f'{name}: [ZIP v{data["magisk"]["version"]}]({data["magisk"]["link"]}) | ' \
                    f'[APK v{data["app"]["version"]}]({data["app"]["link"]}) | ' \
                    f'[Uninstaller]({data["uninstaller"]["link"]})\n'
await request.edit(releases)
@register(outgoing=True, pattern=r"^\.device(?: |$)(\S*)")
async def device_info(request):
""" get android device basic info from its codename """
textx = await request.get_reply_message()
codename = request.pattern_match.group(1)
if codename:
pass
elif textx:
codename = textx.text
else:
await request.edit("`Usage: .device <codename> / <model>`")
return
data = json.loads(
get(
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_device.json"
).text
)
results = data.get(codename)
if results:
reply = f"**Search results for {codename}**:\n\n"
for item in results:
            reply += (
                f"**Brand**: {item['brand']}\n"
                f"**Name**: {item['name']}\n"
                f"**Model**: {item['model']}\n\n"
)
else:
reply = f"`Couldn't find info about {codename}!`\n"
await request.edit(reply)
@register(outgoing=True, pattern=r"^\.codename(?: |)([\S]*)(?: |)([\s\S]*)")
async def codename_info(request):
""" search for android codename """
textx = await request.get_reply_message()
brand = request.pattern_match.group(1).lower()
device = request.pattern_match.group(2).lower()
if brand and device:
pass
elif textx:
brand = textx.text.split(' ')[0]
device = ' '.join(textx.text.split(' ')[1:])
else:
await request.edit("`Usage: .codename <brand> <device>`")
return
data = json.loads(
get(
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_brand.json"
).text
)
devices_lower = {k.lower(): v for k, v in data.items()
} # Lower brand names in JSON
devices = devices_lower.get(brand)
results = [
i
for i in devices
if i["name"].lower() == device.lower() or i["model"].lower() == device.lower()
]
if results:
reply = f"**Search results for {brand} {device}**:\n\n"
if len(results) > 8:
results = results[:8]
for item in results:
            reply += (
                f"**Device**: {item['device']}\n"
                f"**Name**: {item['name']}\n"
                f"**Model**: {item['model']}\n\n"
)
else:
reply = f"`Couldn't find {device} codename!`\n"
await request.edit(reply)
@register(outgoing=True, pattern=r"^\.pixeldl(?: |$)(.*)")
async def download_api(dl):
await dl.edit("`Collecting information...`")
URL = dl.pattern_match.group(1)
URL_MSG = await dl.get_reply_message()
if URL:
pass
elif URL_MSG:
URL = URL_MSG.text
else:
await dl.edit("`Empty information...`")
return
if not re.findall(r'\bhttps?://download.*pixelexperience.*\.org\S+', URL):
await dl.edit("`Invalid information...`")
return
driver = await chrome()
await dl.edit("`Getting information...`")
driver.get(URL)
error = driver.find_elements_by_class_name("swal2-content")
if len(error) > 0:
if error[0].text == "File Not Found.":
await dl.edit(f"`FileNotFoundError`: {URL} is not found.")
return
datas = driver.find_elements_by_class_name('download__meta')
""" - enumerate data to make sure we download the matched version - """
md5_origin = None
i = None
for index, value in enumerate(datas):
for data in value.text.split("\n"):
if data.startswith("MD5"):
md5_origin = data.split(':')[1].strip()
i = index
break
if md5_origin is not None and i is not None:
break
    if md5_origin is None and i is None:
        await dl.edit("`There is no matching version available...`")
        driver.quit()
        return
if URL.endswith('/'):
file_name = URL.split("/")[-2]
else:
file_name = URL.split("/")[-1]
file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
download = driver.find_elements_by_class_name("download__btn")[i]
download.click()
await dl.edit("`Starting download...`")
file_size = human_to_bytes(download.text.split(None, 3)[-1].strip('()'))
display_message = None
complete = False
start = time.time()
while complete is False:
if os.path.isfile(file_path + '.crdownload'):
try:
downloaded = os.stat(file_path + '.crdownload').st_size
status = "Downloading"
except OSError: # Rare case
await asyncio.sleep(1)
continue
elif os.path.isfile(file_path):
downloaded = os.stat(file_path).st_size
file_size = downloaded
status = "Checking"
else:
await asyncio.sleep(0.3)
continue
diff = time.time() - start
percentage = downloaded / file_size * 100
speed = round(downloaded / diff, 2)
eta = round((file_size - downloaded) / speed)
prog_str = "`{0}` | [{1}{2}] `{3}%`".format(
status,
"".join(["●" for i in range(
math.floor(percentage / 10))]),
"".join(["○"for i in range(
10 - math.floor(percentage / 10))]),
round(percentage, 2))
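        # Worked example (illustrative numbers): 512 MiB downloaded of a 2 GiB file gives
        # percentage = 25.0, so prog_str renders as "`Downloading` | [●●○○○○○○○○] `25.0%`".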
current_message = (
"`[DOWNLOAD]`\n\n"
f"`{file_name}`\n"
f"`Status`\n{prog_str}\n"
f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
f" @ {humanbytes(speed)}`\n"
f"`ETA` -> {time_formatter(eta)}"
)
if round(diff % 15.00) == 0 and display_message != current_message or (
downloaded == file_size):
await dl.edit(current_message)
display_message = current_message
if downloaded == file_size:
if not os.path.isfile(file_path): # Rare case
await asyncio.sleep(1)
continue
MD5 = await md5(file_path)
if md5_origin == MD5:
complete = True
else:
await dl.edit("`Download corrupt...`")
os.remove(file_path)
driver.quit()
return
await dl.respond(
f"`{file_name}`\n\n"
f"Successfully downloaded to `{file_path}`."
)
await dl.delete()
driver.quit()
return
@register(outgoing=True, pattern=r"^\.specs(?: |)([\S]*)(?: |)([\s\S]*)")
async def devices_specifications(request):
""" Mobile devices specifications """
textx = await request.get_reply_message()
brand = request.pattern_match.group(1).lower()
device = request.pattern_match.group(2).lower()
await request.edit(f"`Searching for device specification...`")
if brand and device:
pass
elif textx:
brand = textx.text.split(' ')[0]
device = ' '.join(textx.text.split(' ')[1:])
else:
return await request.edit("`Usage: .specs <brand> <device>`")
all_brands = BeautifulSoup(
get('https://www.devicespecifications.com/en/brand-more').content,
'lxml').find('div', {
'class': 'brand-listing-container-news'
}).findAll('a')
brand_page_url = None
try:
brand_page_url = [
i['href'] for i in all_brands if brand == i.text.strip().lower()
][0]
except IndexError:
await request.edit(f'`{brand} is unknown brand!`')
devices = BeautifulSoup(get(brand_page_url).content, 'lxml') \
.findAll('div', {'class': 'model-listing-container-80'})
device_page_url = None
try:
device_page_url = [
i.a['href']
for i in BeautifulSoup(str(devices), 'lxml').findAll('h3')
if device in i.text.strip().lower()
]
except IndexError:
await request.edit(f"`can't find {device}!`")
if len(device_page_url) > 2:
device_page_url = device_page_url[:2]
reply = ''
for url in device_page_url:
info = BeautifulSoup(get(url).content, 'lxml')
reply = '\n**' + info.title.text.split('-')[0].strip() + '**\n\n'
info = info.find('div', {'id': 'model-brief-specifications'})
specifications = re.findall(r'<b>.*?<br/>', str(info))
for item in specifications:
title = re.findall(r'<b>(.*?)</b>', item)[0].strip()
data = re.findall(r'</b>: (.*?)<br/>', item)[0]\
.replace('<b>', '').replace('</b>', '').strip()
reply += f'**{title}**: {data}\n'
await request.edit(reply)
@register(outgoing=True, pattern=r"^\.twrp(?: |$)(\S*)")
async def twrp(request):
""" get android device twrp """
textx = await request.get_reply_message()
device = request.pattern_match.group(1)
if device:
pass
elif textx:
device = textx.text.split(' ')[0]
else:
return await request.edit("`Usage: .twrp <codename>`")
url = get(f'https://dl.twrp.me/{device}/')
if url.status_code == 404:
reply = f"`Couldn't find twrp downloads for {device}!`\n"
return await request.edit(reply)
page = BeautifulSoup(url.content, 'lxml')
download = page.find('table').find('tr').find('a')
    dl_link = f"https://dl.twrp.me{download['href']}"
dl_file = download.text
size = page.find("span", {"class": "filesize"}).text
date = page.find("em").text.strip()
reply = f'**Latest TWRP for {device}:**\n' \
f'[{dl_file}]({dl_link}) - __{size}__\n' \
f'**Updated:** __{date}__\n'
await request.edit(reply)
CMD_HELP.update({
"android":
">`.magisk`"
"\nGet latest Magisk releases"
"\n\n>`.device <codename>`"
"\nUsage: Get info about android device codename or model."
"\n\n>`.codename <brand> <device>`"
"\nUsage: Search for android device codename."
"\n\n>`.pixeldl` **<download.pixelexperience.org>**"
"\nUsage: Download pixel experience ROM into your userbot server."
"\n\n>`.specs <brand> <device>`"
"\nUsage: Get device specifications info."
"\n\n>`.twrp <codename>`"
"\nUsage: Get latest twrp download for android device."
})
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to android"""
import asyncio
import json
import re
import os
import time
import math
from requests import get
from bs4 import BeautifulSoup
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
from userbot.utils import (
chrome, humanbytes, time_formatter, md5, human_to_bytes
)
GITHUB = 'https://github.com'
DEVICES_DATA = ('https://raw.githubusercontent.com/androidtrackers/'
'certified-android-devices/master/by_device.json')
@register(outgoing=True, pattern=r"^\.magisk$")
async def magisk(request):
""" magisk latest releases """
magisk_dict = {
"Stable":
"https://raw.githubusercontent.com/topjohnwu/magisk_files/master/stable.json",
"Beta":
"https://raw.githubusercontent.com/topjohnwu/magisk_files/master/beta.json",
"Canary (Release)":
"https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/release.json",
"Canary (Debug)":
"https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/debug.json"
}
releases = 'Latest Magisk Releases:\n'
for name, release_url in magisk_dict.items():
data = get(release_url).json()
releases += f'{name}: [ZIP v{data["magisk"]["version"]}]({data["magisk"]["link"]}) | ' \
f'[APK v{data["app"]["version"]}]({data["app"]["link"]}) | ' \
f'[Uninstaller]({data["uninstaller"]["link"]})\n'
await request.edit(releases)
@register(outgoing=True, pattern=r"^\.device(?: |$)(\S*)")
async def device_info(request):
""" get android device basic info from its codename """
textx = await request.get_reply_message()
codename = request.pattern_match.group(1)
if codename:
pass
elif textx:
codename = textx.text
else:
await request.edit("`Usage: .device <codename> / <model>`")
return
data = json.loads(
get(
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_device.json"
).text
)
results = data.get(codename)
if results:
reply = f"**Search results for {codename}**:\n\n"
for item in results:
reply += (
f"**Brand**: {item['brand']}\n"
f"**Name**: {item['name']}\n"
f"**Model**: {item['model']}\n\n"
)
else:
reply = f"`Couldn't find info about {codename}!`\n"
await request.edit(reply)
@register(outgoing=True, pattern=r"^\.codename(?: |)([\S]*)(?: |)([\s\S]*)")
async def codename_info(request):
""" search for android codename """
textx = await request.get_reply_message()
brand = request.pattern_match.group(1).lower()
device = request.pattern_match.group(2).lower()
if brand and device:
pass
elif textx:
brand = textx.text.split(' ')[0]
device = ' '.join(textx.text.split(' ')[1:])
else:
await request.edit("`Usage: .codename <brand> <device>`")
return
data = json.loads(
get(
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_brand.json"
).text
)
devices_lower = {k.lower(): v for k, v in data.items()
} # Lower brand names in JSON
devices = devices_lower.get(brand)
results = [
i
for i in devices
if i["name"].lower() == device.lower() or i["model"].lower() == device.lower()
]
if results:
reply = f"**Search results for {brand} {device}**:\n\n"
if len(results) > 8:
results = results[:8]
for item in results:
reply += (
f"**Device**: {item['device']}\n"
f"**Name**: {item['name']}\n"
f"**Model**: {item['model']}\n\n"
)
else:
reply = f"`Couldn't find {device} codename!`\n"
await request.edit(reply)
@register(outgoing=True, pattern=r"^\.pixeldl(?: |$)(.*)")
async def download_api(dl):
await dl.edit("`Collecting information...`")
URL = dl.pattern_match.group(1)
URL_MSG = await dl.get_reply_message()
if URL:
pass
elif URL_MSG:
URL = URL_MSG.text
else:
await dl.edit("`Empty information...`")
return
if not re.findall(r'\bhttps?://download.*pixelexperience.*\.org\S+', URL):
await dl.edit("`Invalid information...`")
return
driver = await chrome()
await dl.edit("`Getting information...`")
driver.get(URL)
error = driver.find_elements_by_class_name("swal2-content")
if len(error) > 0:
if error[0].text == "File Not Found.":
await dl.edit(f"`FileNotFoundError`: {URL} is not found.")
return
datas = driver.find_elements_by_class_name('download__meta')
""" - enumerate data to make sure we download the matched version - """
md5_origin = None
i = None
for index, value in enumerate(datas):
for data in value.text.split("\n"):
if data.startswith("MD5"):
md5_origin = data.split(':')[1].strip()
i = index
break
if md5_origin is not None and i is not None:
break
    if md5_origin is None and i is None:
        await dl.edit("`There is no matching version available...`")
        driver.quit()
        return
if URL.endswith('/'):
file_name = URL.split("/")[-2]
else:
file_name = URL.split("/")[-1]
file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
download = driver.find_elements_by_class_name("download__btn")[i]
download.click()
await dl.edit("`Starting download...`")
file_size = human_to_bytes(download.text.split(None, 3)[-1].strip('()'))
display_message = None
complete = False
start = time.time()
while complete is False:
if os.path.isfile(file_path + '.crdownload'):
try:
downloaded = os.stat(file_path + '.crdownload').st_size
status = "Downloading"
except OSError: # Rare case
await asyncio.sleep(1)
continue
elif os.path.isfile(file_path):
downloaded = os.stat(file_path).st_size
file_size = downloaded
status = "Checking"
else:
await asyncio.sleep(0.3)
continue
diff = time.time() - start
percentage = downloaded / file_size * 100
speed = round(downloaded / diff, 2)
eta = round((file_size - downloaded) / speed)
prog_str = "`{0}` | [{1}{2}] `{3}%`".format(
status,
"".join(["●" for i in range(
math.floor(percentage / 10))]),
"".join(["○"for i in range(
10 - math.floor(percentage / 10))]),
round(percentage, 2))
current_message = (
"`[DOWNLOAD]`\n\n"
f"`{file_name}`\n"
f"`Status`\n{prog_str}\n"
f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
f" @ {humanbytes(speed)}`\n"
f"`ETA` -> {time_formatter(eta)}"
)
if round(diff % 15.00) == 0 and display_message != current_message or (
downloaded == file_size):
await dl.edit(current_message)
display_message = current_message
if downloaded == file_size:
if not os.path.isfile(file_path): # Rare case
await asyncio.sleep(1)
continue
MD5 = await md5(file_path)
if md5_origin == MD5:
complete = True
else:
await dl.edit("`Download corrupt...`")
os.remove(file_path)
driver.quit()
return
await dl.respond(
f"`{file_name}`\n\n"
f"Successfully downloaded to `{file_path}`."
)
await dl.delete()
driver.quit()
return
@register(outgoing=True, pattern=r"^\.specs(?: |)([\S]*)(?: |)([\s\S]*)")
async def devices_specifications(request):
""" Mobile devices specifications """
textx = await request.get_reply_message()
brand = request.pattern_match.group(1).lower()
device = request.pattern_match.group(2).lower()
await request.edit(f"`Searching for device specification...`")
if brand and device:
pass
elif textx:
brand = textx.text.split(' ')[0]
device = ' '.join(textx.text.split(' ')[1:])
else:
return await request.edit("`Usage: .specs <brand> <device>`")
all_brands = BeautifulSoup(
get('https://www.devicespecifications.com/en/brand-more').content,
'lxml').find('div', {
'class': 'brand-listing-container-news'
}).findAll('a')
brand_page_url = None
try:
brand_page_url = [
i['href'] for i in all_brands if brand == i.text.strip().lower()
][0]
except IndexError:
await request.edit(f'`{brand} is unknown brand!`')
devices = BeautifulSoup(get(brand_page_url).content, 'lxml') \
.findAll('div', {'class': 'model-listing-container-80'})
device_page_url = None
try:
device_page_url = [
i.a['href']
for i in BeautifulSoup(str(devices), 'lxml').findAll('h3')
if device in i.text.strip().lower()
]
except IndexError:
await request.edit(f"`can't find {device}!`")
if len(device_page_url) > 2:
device_page_url = device_page_url[:2]
reply = ''
for url in device_page_url:
info = BeautifulSoup(get(url).content, 'lxml')
reply = '\n**' + info.title.text.split('-')[0].strip() + '**\n\n'
info = info.find('div', {'id': 'model-brief-specifications'})
specifications = re.findall(r'<b>.*?<br/>', str(info))
for item in specifications:
title = re.findall(r'<b>(.*?)</b>', item)[0].strip()
data = re.findall(r'</b>: (.*?)<br/>', item)[0]\
.replace('<b>', '').replace('</b>', '').strip()
reply += f'**{title}**: {data}\n'
await request.edit(reply)
@register(outgoing=True, pattern=r"^\.twrp(?: |$)(\S*)")
async def twrp(request):
""" get android device twrp """
textx = await request.get_reply_message()
device = request.pattern_match.group(1)
if device:
pass
elif textx:
device = textx.text.split(' ')[0]
else:
return await request.edit("`Usage: .twrp <codename>`")
url = get(f'https://dl.twrp.me/{device}/')
if url.status_code == 404:
reply = f"`Couldn't find twrp downloads for {device}!`\n"
return await request.edit(reply)
page = BeautifulSoup(url.content, 'lxml')
download = page.find('table').find('tr').find('a')
dl_link = f"https://dl.twrp.me{download['href']}"
dl_file = download.text
size = page.find("span", {"class": "filesize"}).text
date = page.find("em").text.strip()
reply = f'**Latest TWRP for {device}:**\n' \
f'[{dl_file}]({dl_link}) - __{size}__\n' \
f'**Updated:** __{date}__\n'
await request.edit(reply)
CMD_HELP.update({
"android":
">`.magisk`"
"\nGet latest Magisk releases"
"\n\n>`.device <codename>`"
"\nUsage: Get info about android device codename or model."
"\n\n>`.codename <brand> <device>`"
"\nUsage: Search for android device codename."
"\n\n>`.pixeldl` **<download.pixelexperience.org>**"
"\nUsage: Download pixel experience ROM into your userbot server."
"\n\n>`.specs <brand> <device>`"
"\nUsage: Get device specifications info."
"\n\n>`.twrp <codename>`"
"\nUsage: Get latest twrp download for android device."
})
|
# VolScan is a Binance Volatility Bot(BVT Bot)
# compatible module that generates crypto buying signals based upon negative price change & volatility.
# It does this in two different ways,
# the main one being by calculating the aggregate price change within a user defined period,
# the second way being by use of the Coefficient Of Variation(CV),
# which is a statistical measure of the dispersion of data points in a data series around the mean,
# and is used in certain markets to ascertain the volatility of products:
# https://www.investopedia.com/terms/c/coefficientofvariation.asp.
#
# VolScan provides the option to use either signals generating method individually,
# or combined within user defined settings.
# Volscan will provide all the buying signals required for your bot,
# so other external signal generating modules should be disabled.
#
# The way that VolScan works is that it collects all the crypto coin/token data for all USDT coin
# pairings that appear on Binance into user defined "scanning periods" which are varying numbers of minutes in length,
# each period then being split into the number of individual scans that make up the period.
# Example: you decide you want your scanning period to be 3 minutes in duration,
# and within that period you want all coins scanned every 30 seconds,
# so in total VolScan will carry out 2 scans per minute for 3 minutes in total = 6 price check scans,
# it then checks the variables between the current price & the previous price all the way back through the total number
# of scans, coming up with an aggregate change in price % for the whole scanning period.
# It then removes all coins that have positive changes in price %,
# and creates a list of all the coins that had a negative change in price, the list is in sequential order,
# the highest negative price change at the top, the lowest negative price change at the bottom.
#
# The Coefficient of Variation method works along similar lines,
# but concentrates purely on standard deviation in price ranges,
# the mean or average price which then is calculated into the final CV score for the scanning period....
# the higher the CV score, the higher the volatility of the coins/tokens.
# The CV rated coins are then created into a tickers list in exactly
# the same way as the previously described negative change in price coins.
#
# Whichever way you choose to have your tickers lists created,
# they will then be dynamically updated at the end of every scanning period with a completely new list
# of the latest high-volatility coin results.
#
# The VolScan module is easy to format with most processes done automatically for you,
# below are the user defined settings you will need to create to get started using the module:
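# Worked example of the aggregate price-change signal (illustrative numbers): with
# SCANNING_PERIOD = 3 and TIME_SLEEP = 30 there are 6 scans per period; if a coin trades at
# 100 -> 99.8 -> 99.6 -> 99.7 -> 99.4 -> 99.2 across those scans, the per-scan percentage
# changes sum to roughly -0.80%, so the coin is kept and ranked among the negative movers.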
import os
import numpy as np
from time import sleep
from datetime import datetime
from binance.client import Client
from helpers.parameters import parse_args, load_config
# Load creds modules
from helpers.handle_creds import (
load_correct_creds
)
args = parse_args()
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_creds = load_config(creds_file)
parsed_config = load_config(config_file)
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
EX_PAIRS = parsed_config['trading_options']['FIATS']
# Load creds for correct environment
access_key, secret_key = load_correct_creds(parsed_creds)
client = Client(access_key, secret_key)
# SCANNING_PERIOD - by default, we check the price difference for each coin on Binance for the last 3 minutes,
# you can change this value for different results.
# This also determines how often each iteration of the code is executed.
SCANNING_PERIOD = 3 # minutes
# TIME_SLEEP - how many seconds do you want between each price scan.
# By default, the price is recorded every 30 seconds (TIME_SLEEP) throughout SCANNING_PERIOD (3 min),
# after which the aggregate price change is calculated.
TIME_SLEEP = 30 # seconds
# If True, an updated list of coins will be generated from the site - http://edgesforledges.com/watchlists/binance.
# If False, then the list you create in TICKERS_LIST ('tickers_all.txt' below) will be used.
CREATE_TICKER_LIST = False
# NUMBER_COINS_IN_LIST - Limit the number of coins that can be added to the dynamic list of volatile coins. For example,
# if NUMBER_COINS_IN_LIST = 20,
# then each period only 20 sorted coins will be added to the list (Above the lowest values with a minus sign).
NUMBER_COINS_IN_LIST = 20
# CV_INDEX - Coefficient of Variation. Only those coins with a COV greater than the specified value will be displayed.
CoV_INDEX = 0.0
# CREATE_LIST_BY_COV_AND_PRICE_CHANGE is a filter for creating dynamic lists of the most volatile coins.
# If COV_FILTER = True, lists of volatile coins will take into account the CoV parameter.
# For example,
# if CoV_INDEX = 0.5, then only coins with CoV above 0.5 and price change less than 0 will be added to list.
# If False will be used only Price Change.
CREATE_LIST_BY_COV_AND_PRICE_CHANGE = False
# CREATE_LIST_BY_ONLY_COV - If True - A dynamic list of volatile coins will be created only based on the CoV parameter.
# For example: If CoV_INDEX = 0.3 then the list will include coins with CoV_INDEX greater than 0.3 and the list will be
# sorted
# (At the top there will be coins with the highest CoV)
# If False The list will be created only based on the Price Change.
CREATE_LIST_BY_ONLY_COV = False
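# Summary of how the three flags combine inside do_work() below:
#   CREATE_LIST_BY_COV_AND_PRICE_CHANGE = True -> keep coins with price change < 0 AND CoV >= CoV_INDEX
#   CREATE_LIST_BY_ONLY_COV = True             -> keep coins with CoV >= CoV_INDEX (sorted by CoV)
#   both False (default)                       -> keep coins with price change < 0 (sorted by change)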
# When creating a ticker list from the source site:
# http://edgesforledges.com you can use the parameter (all or innovation-zone).
# ticker_type = 'innovation-zone'
ticker_type = 'all'
if CREATE_TICKER_LIST:
TICKERS_LIST = 'tickers_all_USDT.txt'
else:
TICKERS_LIST = 'tickers_all.txt'
# BTC_FILTER - This feature is still in development.
# Objective: Check the change in the price of bitcoin over the scanning period and,
# based upon the results, either halt the bot from buying, or allow it to continue
# making further purchases of coins.
# For example, if Bitcoin price change = 1.0 and coin price change is negative (-0.8), we give a buy signal....
# BTC_FILTER = False
SIGNAL_NAME = 'vyacheslav_signalbuy_VolScan'
SIGNAL_FILE_BUY = 'signals/' + SIGNAL_NAME + '.buy'
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
YELLOW = '\033[33m'
TURQUOISE = '\033[36m'
UNDERLINE = '\033[4m'
END = '\033[0m'
ITALICS = '\033[3m'
# get_price() function, takes 1 parameter (Binance client).
# And it returns a dictionary of coins,
# with the given keys ('symbol'(str), 'price'(float), 'time', 'price_list'(list), 'change_price'(float), 'cov'(float)).
def get_price(client_api):
initial_price = {}
tickers = [line.strip() for line in open(TICKERS_LIST)]
prices = client_api.get_all_tickers()
for coin in prices:
for item in tickers:
if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = {'symbol': coin['symbol'],
'price': coin['price'],
'time': datetime.now(),
'price_list': [],
'change_price': 0.0,
'cov': 0.0}
return initial_price
# Function c_o_v(), takes 1 parameter (List of coin prices for the period 'price_list': []).
# And it returns the Coefficient of Variation (float) of the coin.
def c_o_v(price_list):
if price_list:
a = np.array(price_list, float)
cov = round((a.std() / a.mean()) * 100, 2)
return cov
return 0.0
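# Example: c_o_v([100.0, 102.0, 98.0]) -> population std 1.633 / mean 100.0 * 100 -> 1.63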
# Percentage_price_change() function, takes 1 parameter (List of coin prices for the period 'price_list': []).
# And it returns the percentage of price change.
def percentage_price_change(price_list):
if price_list:
return round(sum([100 * (b - a) / a for a, b in zip(price_list[::1], price_list[1::1])]), 4)
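# Example: percentage_price_change([100.0, 99.0, 99.5]) -> (-1.0) + (+0.5051) -> -0.4949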
# sort_list_coins() function, takes 2 parameters (List of coins and sorting type).
# Based on the sorting type, sorts the coins in the list by their 'change_price' or 'cov'.
# And it returns a sorted list.
def sort_list_coins(list_coins, sort_type='change_price'):
if sort_type == 'cov':
sort_list = sorted(list_coins, key=lambda x: x[f'{sort_type}'], reverse=True)
else:
sort_list = sorted(list_coins, key=lambda x: x[f'{sort_type}'])
return sort_list
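# Example: entries with change_price -0.2 and -1.5 sort to [-1.5, -0.2] for 'change_price'
# (largest drop first); with sort_type='cov' the order is reversed so the highest CoV leads.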
# do_work () function, takes 1 parameter (Binance client). This is the main function of the module.
# Which, in an endless cycle, searches for coins with a negative indicator of price change,
# sorts them and gives buy signals.
def do_work():
# Initializing coins for data storage.
init_price = get_price(client)
list_volatility = []
count = 0
while True:
print(f'{txcolors.YELLOW}{SIGNAL_NAME} launched with a period of {SCANNING_PERIOD} minutes.')
print(f"{txcolors.YELLOW}Number of coins to scan - {len(init_price)}")
# We reset the data every period.
if count == (SCANNING_PERIOD * 60) / TIME_SLEEP:
init_price = get_price(client)
list_volatility = []
count = 0
# Start a cycle to collect prices for each coin within a period.
while count < (SCANNING_PERIOD * 60) / TIME_SLEEP:
count += 1
print(f'{txcolors.YELLOW}{SIGNAL_NAME} Round {count} complete. Next scan in {TIME_SLEEP} seconds.')
try:
# Requesting the latest coin prices
last_price = get_price(client)
for coin in last_price:
# if len(init_price[coin]['price_list']) == (SCANNING_PERIOD * 60) / TIME_SLEEP:
# del init_price[coin]['price_list'][0]
init_price[coin]['price_list'].append(float(last_price[coin]['price']))
if len(init_price[coin]['price_list']) == (SCANNING_PERIOD * 60) / TIME_SLEEP:
coin_price_list = init_price[coin]['price_list']
percent_change_price = percentage_price_change(coin_price_list)
cov = c_o_v(coin_price_list)
if CREATE_LIST_BY_COV_AND_PRICE_CHANGE:
condition = percent_change_price < 0 and cov >= CoV_INDEX
elif CREATE_LIST_BY_ONLY_COV:
condition = cov >= CoV_INDEX
else:
condition = percent_change_price < 0
if condition:
if init_price[coin] not in list_volatility:
init_price[coin]['time'] = datetime.now()
init_price[coin]['change_price'] = percent_change_price
init_price[coin]['cov'] = cov
list_volatility.append(init_price[coin])
if not list_volatility:
print(f'{txcolors.YELLOW}Stand by for next update ...')
else:
if os.path.exists(SIGNAL_FILE_BUY):
os.remove(SIGNAL_FILE_BUY)
if CREATE_LIST_BY_ONLY_COV:
sort_t = 'cov'
else:
sort_t = 'change_price'
sort_list_vol_coin = sort_list_coins(list_volatility, sort_type=sort_t)
for item in sort_list_vol_coin[:NUMBER_COINS_IN_LIST]:
print(f'{txcolors.YELLOW}{SIGNAL_NAME}: detected a signal on{txcolors.END} '
                          f'{txcolors.YELLOW}{item["symbol"]}{txcolors.END}'
)
with open(SIGNAL_FILE_BUY, 'a+') as f:
f.write(item["symbol"] + '\n')
sleep(TIME_SLEEP)
except Exception as e:
print(f'{SIGNAL_NAME}: Exception do_work() 1: {e}')
continue
except KeyboardInterrupt as ki:
continue
|
# VolScan is a Binance Volatility Bot(BVT Bot)
# compatible module that generates crypto buying signals based upon negative price change & volatility.
# It does this in two different ways,
# the main one being by calculating the aggregate price change within a user defined period,
# the second way being by use of the Coefficient Of Variation(CV),
# which is a statistical measure of the dispersion of data points in a data series around the mean,
# and is used in certain markets to ascertain the volatility of products:
# https://www.investopedia.com/terms/c/coefficientofvariation.asp.
#
# VolScan provides the option to use either signals generating method individually,
# or combined within user defined settings.
# Volscan will provide all the buying signals required for your bot,
# so other external signal generating modules should be disabled.
#
# The way that VolScan works is that it collects all the crypto coin/token data for all USDT coin
# pairings that appear on Binance into user defined "scanning periods" which are varying numbers of minutes in length,
# each period then being split into the number of individual scans that make up the period.
# Example: you decide you want your scanning period to be 3 minutes in duration,
# and within that period you want all coins scanned every 30 seconds,
# so in total VolScan will carry out 2 scans per minute for 3 minutes in total = 6 price check scans,
# it then checks the variables between the current price & the previous price all the way back through the total number
# of scans, coming up with an aggregate change in price % for the whole scanning period.
# It then removes all coins that have positive changes in price %,
# and creates a list of all the coins that had a negative change in price, the list is in sequential order,
# the highest negative price change at the top, the lowest negative price change at the bottom.
#
# The Coefficient of Variation method works along similar lines,
# but concentrates purely on standard deviation in price ranges,
# the mean or average price which then is calculated into the final CV score for the scanning period....
# the higher the CV score, the higher the volatility of the coins/tokens.
# The CV rated coins are then created into a tickers list in exactly
# the same way as the previously described negative change in price coins.
#
# Whichever way you choose to have your tickers lists created,
# they will then be dynamically updated at the end of every scanning period with a completely new list
# of the latest high-volatility coin results.
#
# The VolScan module is easy to format with most processes done automatically for you,
# below are the user defined settings you will need to create to get started using the module:
import os
import numpy as np
from time import sleep
from datetime import datetime
from binance.client import Client
from helpers.parameters import parse_args, load_config
# Load creds modules
from helpers.handle_creds import (
load_correct_creds
)
args = parse_args()
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_creds = load_config(creds_file)
parsed_config = load_config(config_file)
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
EX_PAIRS = parsed_config['trading_options']['FIATS']
# Load creds for correct environment
access_key, secret_key = load_correct_creds(parsed_creds)
client = Client(access_key, secret_key)
# SCANNING_PERIOD - by default, we check the price difference for each coin on Binance for the last 3 minutes,
# you can change this value for different results.
# This also determines how often each iteration of the code is executed.
SCANNING_PERIOD = 3 # minutes
# TIME_SLEEP - how many seconds do you want between each price scan.
# By default, the price is recorded every 30 seconds (TIME_SLEEP) throughout SCANNING_PERIOD (3 min),
# after which the aggregate price change is calculated.
TIME_SLEEP = 30 # seconds
# If True, an updated list of coins will be generated from the site - http://edgesforledges.com/watchlists/binance.
# If False, then the list you create in TICKERS_LIST ('tickers_all.txt' below) will be used.
CREATE_TICKER_LIST = False
# NUMBER_COINS_IN_LIST - Limit the number of coins that can be added to the dynamic list of volatile coins. For example,
# if NUMBER_COINS_IN_LIST = 20,
# then each period only 20 sorted coins will be added to the list (Above the lowest values with a minus sign).
NUMBER_COINS_IN_LIST = 20
# CV_INDEX - Coefficient of Variation. Only those coins with a COV greater than the specified value will be displayed.
CoV_INDEX = 0.0
# CREATE_LIST_BY_COV_AND_PRICE_CHANGE is a filter for creating dynamic lists of the most volatile coins.
# If COV_FILTER = True, lists of volatile coins will take into account the CoV parameter.
# For example,
# if CoV_INDEX = 0.5, then only coins with CoV above 0.5 and price change less than 0 will be added to list.
# If False will be used only Price Change.
CREATE_LIST_BY_COV_AND_PRICE_CHANGE = False
# CREATE_LIST_BY_ONLY_COV - If True - A dynamic list of volatile coins will be created only based on the CoV parameter.
# For example: If CoV_INDEX = 0.3 then the list will include coins with CoV_INDEX greater than 0.3 and the list will be
# sorted
# (At the top there will be coins with the highest CoV)
# If False The list will be created only based on the Price Change.
CREATE_LIST_BY_ONLY_COV = False
# When creating a ticker list from the source site:
# http://edgesforledges.com you can use the parameter (all or innovation-zone).
# ticker_type = 'innovation-zone'
ticker_type = 'all'
if CREATE_TICKER_LIST:
TICKERS_LIST = 'tickers_all_USDT.txt'
else:
TICKERS_LIST = 'tickers_all.txt'
# BTC_FILTER - This feature is still in development.
# Objective: Check the change in the price of bitcoin over the scanning period and,
# based upon the results, either halt the bot from buying, or allow it to continue
# making further purchases of coins.
# For example, if Bitcoin price change = 1.0 and coin price change is negative (-0.8), we give a buy signal....
# BTC_FILTER = False
SIGNAL_NAME = 'vyacheslav_signalbuy_VolScan'
SIGNAL_FILE_BUY = 'signals/' + SIGNAL_NAME + '.buy'
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
YELLOW = '\033[33m'
TURQUOISE = '\033[36m'
UNDERLINE = '\033[4m'
END = '\033[0m'
ITALICS = '\033[3m'
# get_price() function, takes 1 parameter (Binance client).
# And it returns a dictionary of coins,
# with the given keys ('symbol'(str), 'price'(float), 'time', 'price_list'(list), 'change_price'(float), 'cov'(float)).
def get_price(client_api):
initial_price = {}
tickers = [line.strip() for line in open(TICKERS_LIST)]
prices = client_api.get_all_tickers()
for coin in prices:
for item in tickers:
if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = {'symbol': coin['symbol'],
'price': coin['price'],
'time': datetime.now(),
'price_list': [],
'change_price': 0.0,
'cov': 0.0}
return initial_price
# Function c_o_v(), takes 1 parameter (List of coin prices for the period 'price_list': []).
# And it returns the Coefficient of Variation (float) of the coin.
def c_o_v(price_list):
if price_list:
a = np.array(price_list, float)
cov = round((a.std() / a.mean()) * 100, 2)
return cov
return 0.0
# Percentage_price_change() function, takes 1 parameter (List of coin prices for the period 'price_list': []).
# And it returns the percentage of price change.
def percentage_price_change(price_list):
if price_list:
return round(sum([100 * (b - a) / a for a, b in zip(price_list[::1], price_list[1::1])]), 4)
# sort_list_coins() function, takes 2 parameters (List of coins and sorting type).
# Based on the sorting type, sorts the coins in the list by their 'change_price' or 'cov'.
# And it returns a sorted list.
def sort_list_coins(list_coins, sort_type='change_price'):
if sort_type == 'cov':
sort_list = sorted(list_coins, key=lambda x: x[f'{sort_type}'], reverse=True)
else:
sort_list = sorted(list_coins, key=lambda x: x[f'{sort_type}'])
return sort_list
# do_work () function, takes 1 parameter (Binance client). This is the main function of the module.
# Which, in an endless cycle, searches for coins with a negative indicator of price change,
# sorts them and gives buy signals.
def do_work():
# Initializing coins for data storage.
init_price = get_price(client)
list_volatility = []
count = 0
while True:
print(f'{txcolors.YELLOW}{SIGNAL_NAME} launched with a period of {SCANNING_PERIOD} minutes.')
print(f"{txcolors.YELLOW}Number of coins to scan - {len(init_price)}")
# We reset the data every period.
if count == (SCANNING_PERIOD * 60) / TIME_SLEEP:
init_price = get_price(client)
list_volatility = []
count = 0
# Start a cycle to collect prices for each coin within a period.
while count < (SCANNING_PERIOD * 60) / TIME_SLEEP:
count += 1
print(f'{txcolors.YELLOW}{SIGNAL_NAME} Round {count} complete. Next scan in {TIME_SLEEP} seconds.')
try:
# Requesting the latest coin prices
last_price = get_price(client)
for coin in last_price:
# if len(init_price[coin]['price_list']) == (SCANNING_PERIOD * 60) / TIME_SLEEP:
# del init_price[coin]['price_list'][0]
init_price[coin]['price_list'].append(float(last_price[coin]['price']))
if len(init_price[coin]['price_list']) == (SCANNING_PERIOD * 60) / TIME_SLEEP:
coin_price_list = init_price[coin]['price_list']
percent_change_price = percentage_price_change(coin_price_list)
cov = c_o_v(coin_price_list)
if CREATE_LIST_BY_COV_AND_PRICE_CHANGE:
condition = percent_change_price < 0 and cov >= CoV_INDEX
elif CREATE_LIST_BY_ONLY_COV:
condition = cov >= CoV_INDEX
else:
condition = percent_change_price < 0
if condition:
if init_price[coin] not in list_volatility:
init_price[coin]['time'] = datetime.now()
init_price[coin]['change_price'] = percent_change_price
init_price[coin]['cov'] = cov
list_volatility.append(init_price[coin])
if not list_volatility:
print(f'{txcolors.YELLOW}Stand by for next update ...')
else:
if os.path.exists(SIGNAL_FILE_BUY):
os.remove(SIGNAL_FILE_BUY)
if CREATE_LIST_BY_ONLY_COV:
sort_t = 'cov'
else:
sort_t = 'change_price'
sort_list_vol_coin = sort_list_coins(list_volatility, sort_type=sort_t)
for item in sort_list_vol_coin[:NUMBER_COINS_IN_LIST]:
print(f'{txcolors.YELLOW}{SIGNAL_NAME}: detected a signal on{txcolors.END} '
f'{txcolors.YELLOW}{item["symbol"]}{txcolors.END}'
)
with open(SIGNAL_FILE_BUY, 'a+') as f:
f.write(item["symbol"] + '\n')
sleep(TIME_SLEEP)
except Exception as e:
print(f'{SIGNAL_NAME}: Exception do_work() 1: {e}')
continue
except KeyboardInterrupt as ki:
continue
|
# Copyright (c) 2019-2022 ThatRedKite and contributors
from typing import Optional
from discord.ext import commands
from wand.image import Image as WandImage
from wand.color import Color as WandColor
import discord
import si_prefix
from math import sin, atan
from io import BytesIO
from thatkitebot.backend import util
from thatkitebot.cogs.electronics import parse_input, TooFewArgsError
def wavelength_to_rgb(wavelength, gamma=0.98):
'''This converts a given wavelength of light to an
approximate RGB color value. The wavelength must be given
in nanometers in the range from 380 nm through 750 nm
(789 THz through 400 THz).
Based on code by Dan Bruton
http://www.physics.sfasu.edu/astro/color/spectra.html
'''
wavelength = float(wavelength)
if 380 <= wavelength <= 440:
attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)
R = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma
G = 0.0
B = (1.0 * attenuation) ** gamma
if 405 <= wavelength < 430:
R = R * (wavelength / 310)
elif 440 <= wavelength <= 490:
R = 0.0
G = ((wavelength - 440) / (490 - 440)) ** gamma
B = 1.0
elif 490 <= wavelength <= 510:
R = 0.0
G = 1.0
B = (-(wavelength - 510) / (510 - 490)) ** gamma
elif 510 <= wavelength <= 580:
R = ((wavelength - 510) / (580 - 510)) ** gamma
G = 1.0
B = 0.0
elif 580 <= wavelength <= 645:
R = 1.0
G = ((-(wavelength - 645) / (645 - 580)) ** gamma) * 0.9
B = 0.0
elif 645 <= wavelength <= 750:
attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)
R = (1 * attenuation) ** gamma
G = 0.0
B = 0.0
else:
R = 0.0
G = 0.0
B = 0.0
R *= 255
G *= 255
B *= 255
return int(R), int(G), int(B)
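# Example: wavelength_to_rgb(532) falls in the 510-580 nm branch and returns roughly
# (82, 255, 0) with the default gamma, i.e. the saturated green of a 532 nm laser.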
def calculate_diffraction(p):
if "lmm" in p:
lmm = si_prefix.si_parse(p["lmm"])
else:
raise TooFewArgsError()
if "l" in p:
l = si_prefix.si_parse(p["l"])
else:
raise TooFewArgsError()
if "d" in p:
d = si_prefix.si_parse(p["d"])
else:
raise TooFewArgsError()
res = 1 / lmm / 1000 * sin((atan(d / (2 * l))))
return dict(res=si_prefix.si_format(res), Lmm=lmm, L=si_prefix.si_format(l), D=si_prefix.si_format(d))
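# Worked example (assuming parse_input produced {'lmm': '1000', 'l': '6.78', 'd': '11.6'}):
# res = 1/1000/1000 * sin(atan(11.6 / (2 * 6.78))) ~= 6.5e-07 m, which si_format reports with
# a nano prefix, i.e. roughly 650 nm -- consistent with the example in the command's embed.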
class LaserCog(commands.Cog, name="Laser commands"):
def __init__(self, bot):
self.bot: commands.Bot = bot
@commands.cooldown(5, 10, commands.BucketType.channel)
@commands.command(aliases=["autism"])
async def spectrum(self, ctx):
"""
Returns a picture of visible light spectrum.
"""
embed = discord.Embed(title="Visible Light Spectrum")
embed.set_image(
url="https://media.discordapp.net/attachments/910895468001767484/913348594269036584/unknown.png")
await ctx.send(embed=embed)
@commands.cooldown(1, 5, commands.BucketType.channel)
@commands.group()
async def laser(self, ctx):
"""
General command for laser related things.
"""
if not ctx.subcommand_passed:
await self.goggles(ctx)
@laser.command(aliases=["glasses", "safety"])
async def goggles(self, ctx, section: str = ""):
"""
Returns a laser safety information.
"""
brands = section.lower() in ["brands", "companies", "manufacturers"]
od = section.lower() in ["od", "density"]
amazon = section.lower() in ["amazon", "wish"]
wavelength = section.lower() in ["wavelength", "nm", "nanometers"]
if not brands and not od and not amazon and not wavelength:
brands = True
od = True
amazon = True
wavelength = True
embed = discord.Embed(title="Lasers of all powers can pose a serious risk to your eyes.",
description="""5mW is the safety limit where your blink reflex should save you from any damage.
                                  Anything above that can cause permanent eye damage faster than you can blink and, in the worst case, permanent blindness.""")
else:
embed = discord.Embed(title="Laser safety guide")
embed.set_thumbnail(
url="https://cdn.discordapp.com/attachments/909159696798220400/912036244073115658/14429.png")
if brands:
embed.add_field(name="\nLaser safety equipment can be found here: ",
value="[Laserglow](https://www.laserglow.com/product/AGF-Laser-Safety-Goggles)\n"
"[Lasertack](https://lasertack.com/en/laser-safety-glasses)\n"
"[Thorlabs](https://www.thorlabs.com/newgrouppage9.cfm?objectgroup_id=762)",
inline=False)
embed.add_field(name="\nOther trusted brands include",
value="Honeywell, Glendale, Sperian,"
"Newport/MKS, Edmund Optics, Laservision/Uvex,"
"Laserglow, NoIR (LaserShield)",
inline=False)
if amazon:
embed.add_field(name="\nAnything from Amazon, AliExpress, Wish is **__unsafe!__**",
value="If you wish to see for the rest of your life, **__do not use them!__**", inline=True)
if od:
embed.add_field(name="\nWhat is OD?",
value="""OD stands for *Optical density*.
                            It’s the fraction of light (of a certain wavelength) that gets through the goggles, expressed in powers of 10.
OD1 means that *10%* of the light that hits the goggles gets through.
OD2 means *1%*,
OD3 is *0.1%*, and so on.""",
inline=False)
if wavelength:
embed.add_field(name="\nWhat is the wavelength or nm?",
value=f"""The wavelength in nanometers (nm) corresponds to the color.
If you are not sure the wavelength but you know the color,
you can ask someone, do `{self.bot.command_prefix}laser color (color)` or refer to `+spectrum`.""",
inline=True)
embed.set_footer(text=f"For a more in depth explanation, use {self.bot.command_prefix}laser safety")
await ctx.send(embed=embed)
@laser.command()
async def color(self, ctx, color: str):
"""
Returns an approximation of light color given a wavelength.
"""
color = int(color.lower().replace("nm", ""))
new_color = wavelength_to_rgb(color)
with WandImage(width=256, height=256, background=WandColor(f"rgb{new_color}")) as colorimg:
b = colorimg.make_blob(format="jpeg")
file = discord.File(BytesIO(b), filename="color.jpeg")
embed = discord.Embed(title=f"Approximated color for {color}nm")
embed.set_image(url="attachment://color.jpeg")
        embed.set_footer(text="This is not 100% accurate since your monitor and\neyes play a role but this is as close as it can get.\n"
                            "If the color is black, it is considered invisible.")
await ctx.send(file=file, embed=embed)
@laser.command(aliases=["diff"])
async def diffraction(self, ctx, *, args=None):
"""
Calculates the wavelength of a laser using a diffraction grating. Run command for more information.
"""
if not args:
embed = discord.Embed(title="Diffraction Grating Equation",
description="This is to calculate the wavelength of a laser using a diffraction grating")
embed.set_image(
url="https://cdn.discordapp.com/attachments/909159696798220400/912064371205738566/kitething5fff.png")
embed.add_field(name="Measurements and information you need",
value="The diffraction grating's slits per mm (L/mm) \n Distance from the diffraction grating to a wall (L) \n Distance between split beams (D) ",
inline=False)
embed.add_field(name="Use the bot for calculations.",
value="You can use this command to do the calculation, for example: `{}laser diffraction lmm=1000 L=6.78 D=11.6`".format(
self.bot.command_prefix))
embed.set_footer(text="This command accepts SI prefixes.")
await ctx.send(embed=embed)
else:
try:
p = parse_input(args)
p = {k.lower(): v for k, v in p.items()}
res = calculate_diffraction(p)
embed = discord.Embed(title="Diffraction Grating Equation")
embed.set_image(
url="https://cdn.discordapp.com/attachments/909159696798220400/912064371205738566/kitething5fff.png")
                embed.add_field(name="Values:", value=f"L/mm = {res['Lmm']}\nL = {res['L']}m\nD = {res['D']}m")
embed.add_field(name="Wavelength value:", value="{}m".format(str(res["res"])))
await ctx.send(embed=embed)
except TooFewArgsError:
await util.errormsg(ctx, "Not enough arguments to compute anything.")
return
def setup(bot):
bot.add_cog(LaserCog(bot))
|
# Copyright (c) 2019-2022 ThatRedKite and contributors
from typing import Optional
from discord.ext import commands
from wand.image import Image as WandImage
from wand.color import Color as WandColor
import discord
import si_prefix
from math import sin, atan
from io import BytesIO
from thatkitebot.backend import util
from thatkitebot.cogs.electronics import parse_input, TooFewArgsError
def wavelength_to_rgb(wavelength, gamma=0.98):
'''This converts a given wavelength of light to an
approximate RGB color value. The wavelength must be given
in nanometers in the range from 380 nm through 750 nm
(789 THz through 400 THz).
Based on code by Dan Bruton
http://www.physics.sfasu.edu/astro/color/spectra.html
'''
wavelength = float(wavelength)
if 380 <= wavelength <= 440:
attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)
R = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma
G = 0.0
B = (1.0 * attenuation) ** gamma
if 405 <= wavelength < 430:
R = R * (wavelength / 310)
elif 440 <= wavelength <= 490:
R = 0.0
G = ((wavelength - 440) / (490 - 440)) ** gamma
B = 1.0
elif 490 <= wavelength <= 510:
R = 0.0
G = 1.0
B = (-(wavelength - 510) / (510 - 490)) ** gamma
elif 510 <= wavelength <= 580:
R = ((wavelength - 510) / (580 - 510)) ** gamma
G = 1.0
B = 0.0
elif 580 <= wavelength <= 645:
R = 1.0
G = ((-(wavelength - 645) / (645 - 580)) ** gamma) * 0.9
B = 0.0
elif 645 <= wavelength <= 750:
attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)
R = (1 * attenuation) ** gamma
G = 0.0
B = 0.0
else:
R = 0.0
G = 0.0
B = 0.0
R *= 255
G *= 255
B *= 255
return int(R), int(G), int(B)
def calculate_diffraction(p):
if "lmm" in p:
lmm = si_prefix.si_parse(p["lmm"])
else:
raise TooFewArgsError()
if "l" in p:
l = si_prefix.si_parse(p["l"])
else:
raise TooFewArgsError()
if "d" in p:
d = si_prefix.si_parse(p["d"])
else:
raise TooFewArgsError()
res = 1 / lmm / 1000 * sin((atan(d / (2 * l))))
return dict(res=si_prefix.si_format(res), Lmm=lmm, L=si_prefix.si_format(l), D=si_prefix.si_format(d))
class LaserCog(commands.Cog, name="Laser commands"):
def __init__(self, bot):
self.bot: commands.Bot = bot
@commands.cooldown(5, 10, commands.BucketType.channel)
@commands.command(aliases=["autism"])
async def spectrum(self, ctx):
"""
Returns a picture of visible light spectrum.
"""
embed = discord.Embed(title="Visible Light Spectrum")
embed.set_image(
url="https://media.discordapp.net/attachments/910895468001767484/913348594269036584/unknown.png")
await ctx.send(embed=embed)
@commands.cooldown(1, 5, commands.BucketType.channel)
@commands.group()
async def laser(self, ctx):
"""
General command for laser related things.
"""
if not ctx.subcommand_passed:
await self.goggles(ctx)
@laser.command(aliases=["glasses", "safety"])
async def goggles(self, ctx, section: str = ""):
"""
Returns a laser safety information.
"""
brands = section.lower() in ["brands", "companies", "manufacturers"]
od = section.lower() in ["od", "density"]
amazon = section.lower() in ["amazon", "wish"]
wavelength = section.lower() in ["wavelength", "nm", "nanometers"]
if not brands and not od and not amazon and not wavelength:
brands = True
od = True
amazon = True
wavelength = True
embed = discord.Embed(title="Lasers of all powers can pose a serious risk to your eyes.",
description="""5mW is the safety limit where your blink reflex should save you from any damage.
                                  Anything above that can cause permanent eye damage faster than you can blink and, in the worst case, permanent blindness.""")
else:
embed = discord.Embed(title="Laser safety guide")
embed.set_thumbnail(
url="https://cdn.discordapp.com/attachments/909159696798220400/912036244073115658/14429.png")
if brands:
embed.add_field(name="\nLaser safety equipment can be found here: ",
value="[Laserglow](https://www.laserglow.com/product/AGF-Laser-Safety-Goggles)\n"
"[Lasertack](https://lasertack.com/en/laser-safety-glasses)\n"
"[Thorlabs](https://www.thorlabs.com/newgrouppage9.cfm?objectgroup_id=762)",
inline=False)
embed.add_field(name="\nOther trusted brands include",
value="Honeywell, Glendale, Sperian,"
"Newport/MKS, Edmund Optics, Laservision/Uvex,"
"Laserglow, NoIR (LaserShield)",
inline=False)
if amazon:
embed.add_field(name="\nAnything from Amazon, AliExpress, Wish is **__unsafe!__**",
value="If you wish to see for the rest of your life, **__do not use them!__**", inline=True)
if od:
embed.add_field(name="\nWhat is OD?",
value="""OD stands for *Optical density*.
                            It’s the fraction of light (of a certain wavelength) that gets through the goggles, expressed in powers of 10.
OD1 means that *10%* of the light that hits the goggles gets through.
OD2 means *1%*,
OD3 is *0.1%*, and so on.""",
inline=False)
if wavelength:
embed.add_field(name="\nWhat is the wavelength or nm?",
value=f"""The wavelength in nanometers (nm) corresponds to the color.
If you are not sure the wavelength but you know the color,
you can ask someone, do `{self.bot.command_prefix}laser color (color)` or refer to `+spectrum`.""",
inline=True)
embed.set_footer(text=f"For a more in depth explanation, use {self.bot.command_prefix}laser safety")
await ctx.send(embed=embed)
@laser.command()
async def color(self, ctx, color: str):
"""
Returns an approximation of light color given a wavelength.
"""
color = int(color.lower().replace("nm", ""))
new_color = wavelength_to_rgb(color)
with WandImage(width=256, height=256, background=WandColor(f"rgb{new_color}")) as colorimg:
b = colorimg.make_blob(format="jpeg")
file = discord.File(BytesIO(b), filename="color.jpeg")
embed = discord.Embed(title=f"Approximated color for {color}nm")
embed.set_image(url="attachment://color.jpeg")
        embed.set_footer(text="This is not 100% accurate since your monitor and\neyes play a role but this is as close as it can get.\n"
                            "If the color is black, it is considered invisible.")
await ctx.send(file=file, embed=embed)
@laser.command(aliases=["diff"])
async def diffraction(self, ctx, *, args=None):
"""
Calculates the wavelength of a laser using a diffraction grating. Run the command without arguments for usage details.
"""
if not args:
embed = discord.Embed(title="Diffraction Grating Equation",
description="This is to calculate the wavelength of a laser using a diffraction grating")
embed.set_image(
url="https://cdn.discordapp.com/attachments/909159696798220400/912064371205738566/kitething5fff.png")
embed.add_field(name="Measurements and information you need",
value="The diffraction grating's slits per mm (L/mm) \n Distance from the diffraction grating to a wall (L) \n Distance between split beams (D) ",
inline=False)
embed.add_field(name="Use the bot for calculations.",
value="You can use this command to do the calculation, for example: `{}laser diffraction lmm=1000 L=6.78 D=11.6`".format(
self.bot.command_prefix))
embed.set_footer(text="This command accepts SI prefixes.")
await ctx.send(embed=embed)
else:
try:
p = parse_input(args)
p = {k.lower(): v for k, v in p.items()}
res = calculate_diffraction(p)
embed = discord.Embed(title="Diffraction Grating Equation")
embed.set_image(
url="https://cdn.discordapp.com/attachments/909159696798220400/912064371205738566/kitething5fff.png")
embed.add_field(name="Values:", value=f"L/mm = {res['Lmm']}\nL = {res['L']}m\nD = {res['D']}m")
embed.add_field(name="Wavelength value:", value="{}m".format(str(res["res"])))
await ctx.send(embed=embed)
except TooFewArgsError:
await util.errormsg(ctx, "Not enough arguments to compute anything.")
return
def setup(bot):
bot.add_cog(LaserCog(bot))
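# --- Illustrative sketch (not part of the cog above) --------------------------
# The goggles/diffraction help text compresses two formulas. The helpers below
# are a minimal, hypothetical illustration of that math only; they are NOT the
# bot's actual parse_input/calculate_diffraction implementation, and they assume
# D is the full separation between the two first-order beams on the wall (which
# matches the help example: 1000 L/mm, L=6.78 m, D=11.6 m -> ~650 nm, i.e. red).
import math


def od_to_transmittance(od: float) -> float:
    """Fraction of light that passes through goggles of a given optical density."""
    return 10 ** (-od)  # OD1 -> 0.1 (10%), OD2 -> 0.01 (1%), OD3 -> 0.001


def grating_wavelength_m(lines_per_mm: float, L: float, D: float, order: int = 1) -> float:
    """Diffraction grating equation d * sin(theta) = m * lambda, solved for lambda in metres."""
    d = 1e-3 / lines_per_mm            # slit spacing in metres
    theta = math.atan((D / 2) / L)     # deflection angle of one first-order beam
    return d * math.sin(theta) / order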
|
"""
Módulo que representa um ticket, uma pessoa ou um serviço.
Não deve ser usado diretamente, mas obtido no retorno de algum método
da classe Entity ou Query.
Exemplo de uso:
>>> from pyvidesk.tickets import Tickets
>>> tickets = Tickets(token="my_token")
>>> ticket = tickets.get_by_id(3)
>>> print(ticket)
... <Model for Ticket(id=3)>
--------------------------------------------------------------------------
>>> from datetime import date, timedelta
>>> from pyvidesk.tickets import Tickets
>>> yesterday = date.today() - timedelta(days=1)
>>> tickets = Tickets(token="my_token")
>>> ticket_properties = tickets.get_properties()
>>> my_query = (
... tickets.query()
... .filter(ticket_properties["lastUpdate"] >= yesterday)
... .select("id")
... .top(5)
... )
>>> for ticket in my_query:
... print(ticket)
... <Model for Ticket(id=2336)>
... <Model for Ticket(id=3139)>
... <Model for Ticket(id=3807)>
... <Model for Ticket(id=3822)>
... <Model for Ticket(id=3843)>
--------------------------------------------------------------------------
>>> from pyvidesk.tickets import Tickets
>>> tickets = Tickets(token="my_token")
>>> ticket = tickets.get_empty_model()
>>> print(ticket)
... <Model for Ticket()>
"""
from .exceptions import (
PyvideskCannotSetReadOnlyProperty,
PyvideskPropertyNotValidError,
PyvideskPropertyWithWrongType,
PyvideskSaveWithoutIdError,
)
from .properties import ComplexProperty
class Model:
"""
Class that models an entity object.
Usage example:
>>> from datetime import date
>>> from pyvidesk.tickets import Tickets
>>> tickets = Tickets("my_token")
>>> today = date.today()
>>> ticket = tickets.get_by_id(3)
>>> for action in ticket.actions:
... for appointment in action.timeAppointments:
... appointment.date = today
>>> ticket.save()
-------------------------------------------------------------------------
>>> from pyvidesk.persons import Persons
>>> persons = Persons("my_token")
>>> person = persons.get_by_id(1)
>>> person.delete()
"""
__is_complex__ = False
def __init__(self, entity, name_=None, **properties):
"""
Args:
entity (): Object that represents the model's entity. Here, the entity can be
Tickets, Persons or Services, as well as the complex properties of those
entities. This is what lets us access the values of complex properties
as attributes;
name_ (str): The entity name. Important for complex properties.
properties (kwargs): The properties and values obtained by the query.
_state (dict): Represents the state of the query on the Movidesk server.
"""
self._entity = entity
self._entity_properties = self._entity.get_properties(
as_model=self.__is_complex__
)
self._properties = properties
self._name = name_
self._state = dict()
for prop, prop_value in self._properties.items():
try:
property_obj = self._entity_properties[prop]
except KeyError:
self._state[
prop
] = "Propriedade ainda não suportada por esta biblioteca."
continue
if isinstance(property_obj, ComplexProperty):
if isinstance(prop_value, dict):
self._state[prop] = _ComplexPropertyModel(
entity=property_obj,
name_=prop,
**prop_value,
)
if isinstance(prop_value, list):
self._state[prop] = []
for values in prop_value:
self._state[prop].append(
_ComplexPropertyModel(
entity=property_obj,
name_=prop[:-1],
**values,
)
)
continue
self._state[prop] = property_obj.deserialize(value=prop_value)
def __repr__(self):
if "id" in self._properties:
properties_text = f"id={self._properties["id"]}"
else:
properties_text = ", ".join(
[
f"{prop}={prop_value}"
for prop, prop_value in self._properties.items()
]
)
name = self._name or self._entity.__class__.__name__[:-1]
return f"<{self.__class__.__name__} for {name}({properties_text})>"
def __setattr__(self, attr, value):
if attr in (
"_entity",
"_entity_properties",
"_properties",
"_name",
"_state",
):
super().__setattr__(attr, value)
return
if attr not in self._entity_properties:
raise PyvideskPropertyNotValidError(param=attr, class_=self._entity)
if not isinstance(value, self._entity_properties[attr].alias):
raise PyvideskPropertyWithWrongType(
param=self._entity_properties[attr].full_name,
value=value,
correct_type=self._entity_properties[attr].alias,
)
if self._entity_properties[attr].is_read_only:
raise PyvideskCannotSetReadOnlyProperty(
f"{self._entity_properties[attr].full_name} é uma propriedade "
"que permite apenas leitura!"
)
super().__setattr__(attr, value)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
try:
return self._state[attr]
except KeyError as wrong_property:
if attr in self._entity_properties:
entity = self._entity_properties[attr]
if isinstance(entity, ComplexProperty):
self._state[attr] = _EmptyComplexPropertyModel(
entity=entity,
name_=attr,
)
return self._state[attr]
return
raise PyvideskPropertyNotValidError(
param=attr, class_=self._entity
) from wrong_property
@property
def _state_raw(self):
"""
Method that returns the model's state in JSON format. Useful for the save() method.
"""
state_raw = dict()
for prop, prop_value in self._state.items():
if isinstance(prop_value, Model):
state_raw[prop] = prop_value._state
elif _is_list_of_complex_propeties(prop_value, class_=self.__class__):
state_raw[prop] = []
for p in prop_value:
state_raw[prop].append(p._state_raw)
else:
state_raw[prop] = prop_value
return state_raw
def _get_changes(self):
"""
Method that collects the 'first-level' changes.
That is, this method does not collect changes made inside complex properties.
"""
changes = dict()
for prop, prop_value in self.__dict__.items():
if prop in self._entity_properties:
if prop in self._state:
if prop_value != self._state.get(prop, prop_value):
changes[prop] = self.__dict__[prop]
else:
changes[prop] = self.__dict__[prop]
return changes
def _get_all_changes(self):
"""
Method that collects all of the model's changes, including those in complex properties.
"""
changes = self._get_changes()
for prop, prop_value in self._state.items():
if prop in self._entity_properties:
if isinstance(prop_value, Model):
_changes = prop_value._get_changes()
if _changes:
changes[prop] = _changes
continue
if _is_list_of_complex_propeties(prop_value, class_=self.__class__):
_changes = _get_changes_on_children_properties(prop_value)
if _changes:
changes[prop] = _changes
continue
if prop in self._state:
if prop_value != self._state.get(prop, prop_value):
changes[prop] = self.__dict__[prop]
else:
changes[prop] = self.__dict__[prop]
return changes
def _do_change(self, prop_name, prop_changes):
"""
Method that prepares the changes for the PATCH request and applies
those changes to the model.
These changes only affect the _state attribute, so calling the raw()
method still returns the model values without the changes.
Once save() completes, raw() returns the values with the changes
applied.
Args:
prop_name (str): The property name;
prop_changes (str, list, int, datetime, _ComplexProperty): The changes for the property.
"""
if _is_list_of_complex_propeties(prop_changes):
prop_models = {
prop_model.id: prop_model for prop_model in self._state[prop_name]
}
for prop_change in prop_changes:
prop_obj = prop_models[prop_change.pop("id")]
for subprop_name, subprop_change in prop_change.items():
prop_obj._do_change(subprop_name, subprop_change)
else:
self._state[prop_name] = prop_changes
def _serialize_all_changes(self):
"""
Method that collects and prepares (by serializing the values)
the changes for the PATCH request.
Returns:
changes (dict): Dictionary with the properties that will be changed.
"""
changes = dict()
for prop, prop_changes in self._get_all_changes().items():
self._do_change(prop, prop_changes)
prop_value = getattr(self, prop)
if isinstance(prop_value, Model):
prop_value = prop_value._state_raw
if _is_list_of_complex_propeties(prop_value, class_=self.__class__):
prop_value = [p._state_raw for p in prop_value]
property_obj = self._entity_properties[prop]
changes[prop] = property_obj.serialize(value=prop_value)
return changes
def get_properties(self):
return self._entity.get_properties()
def save(self):
"""Metodo que salva as alteracoes feitas no modelo"""
if not self.id:
raise PyvideskSaveWithoutIdError(
f"Não é possível atualizar {self.__repr__()}, pois o ID não está definido!"
)
changes = self._serialize_all_changes()
if changes:
self._entity.api.patch(changes=changes, model_id=self.id)
model = self._entity.get_by_id(self.id)
self._properties = model._properties
self._state = model._state
def delete(self):
if not self.id:
raise PyvideskSaveWithoutIdError(
f"Não é possível deletar {self.__repr__()}, pois o ID não está definido!"
)
self._entity.api.delete(model_id=self.id)
self._properties = self._state = dict()
return self._entity.get_empty_model()
def raw(self):
"""Metodo que retorna o JSON "cru" do modelo"""
return self._properties
class _ComplexPropertyModel(Model):
__is_complex__ = True
def save(self):
pass
def delete(self):
pass
class EmptyModel(Model):
"""
Class that returns an empty model of the entity.
It should be used when creating Tickets, Persons or Services.
The main difference from the Model class is that we do not check whether an
attribute is 'readonly', and we only check that an attribute belongs to the
entity when the create() method is called.
Example:
>>> from pyvidesk.tickets import Tickets
>>> tickets = Tickets("meu_token_secreto")
>>> ticket = tickets.get_empty_model()
>>> ticket.subject = "Assunto"
>>> ticket.type = 1
>>> ticket.serviceFirstLevelId = 190853
>>> ticket.createdBy.id = "2263751"
>>> ticket.clients = [{"id": "917910092"}]
>>> ticket.actions = [{"description": "Descrição", "type": 1}]
>>> ticket.ownerTeam = "Administradores"
>>> ticket.owner.id = "2222"
>>> ticket.create()
"""
def __setattr__(self, attr, value):
if attr in (
"_entity",
"_entity_properties",
"_properties",
"_name",
"_state",
):
super().__setattr__(attr, value)
return
if attr not in self._entity_properties:
raise PyvideskPropertyNotValidError(param=attr, class_=self._entity)
if not isinstance(value, self._entity_properties[attr].alias):
raise PyvideskPropertyWithWrongType(
param=self._entity_properties[attr].full_name,
value=value,
correct_type=self._entity_properties[attr].alias,
)
self.__dict__[attr] = value
def _do_change(self, prop_name, prop_changes):
"""
Overriding this method from the Model class.
"""
self._state[prop_name] = prop_changes
def create(self):
"""
Function that creates the model.
Returns:
(pyvidesk.model.Model): Object of the Model class that represents the
model created on the server.
"""
changes = self._serialize_all_changes()
if changes:
model_id = self._entity.api.post(infos=changes)
return self._entity.get_by_id(model_id)
def save(self):
pass
def delete(self):
pass
class _EmptyComplexPropertyModel(EmptyModel):
__is_complex__ = True
def create(self):
pass
def _get_changes_on_children_properties(values):
"""Funcao que obtem as mudancas das propriedades complexas"""
changes = []
for property_obj in values:
_changes = property_obj._get_all_changes()
changes += [{**_changes, "id": property_obj.id}] if _changes else []
# The property 'id' is important so that we know which model to apply the change to
return changes
def _is_list_of_complex_propeties(property_obj, class_=dict):
return isinstance(property_obj, list) and all(
isinstance(p, class_) for p in property_obj
)
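# --- Illustrative sketch (not part of pyvidesk) --------------------------------
# The change tracking above boils down to: attribute writes land in __dict__,
# the server-side snapshot lives in _state, and _get_changes() is the diff of
# the two. A simplified, hypothetical illustration of that idea (it mirrors the
# comparison in Model._get_changes(), not the real serialization or PATCH flow):
class _TinyModel:
    def __init__(self, **server_state):
        self.__dict__["_state"] = dict(server_state)   # snapshot as returned by the server

    def __setattr__(self, attr, value):
        self.__dict__[attr] = value                    # local, not yet saved write

    def changes(self):
        # A property counts as changed if it was written locally and is either
        # new or different from the server snapshot.
        return {
            attr: value
            for attr, value in self.__dict__.items()
            if attr != "_state" and (attr not in self._state or self._state[attr] != value)
        }

# t = _TinyModel(subject="old", status="open")
# t.subject = "new"
# t.changes()  ->  {'subject': 'new'}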
|
"""
Módulo que representa um ticket, uma pessoa ou um serviço.
Não deve ser usado diretamente, mas obtido no retorno de algum método
da classe Entity ou Query.
Exemplo de uso:
>>> from pyvidesk.tickets import Tickets
>>> tickets = Tickets(token="my_token")
>>> ticket = ticket.get_by_id(3)
>>> print(ticket)
... <Model for Ticket(id=3)>
--------------------------------------------------------------------------
>>> from datetime import date, timedelta
>>> from pyvidesk.tickets import Tickets
>>> yesterday = date.today() - timedelta(days=1)
>>> tickets = Tickets(token="my_token")
>>> ticket_properties = tickets.get_properties()
>>> my_query = (
... tickets.query()
... .filter(ticket_properties["lastUpdate"] >= yesterday)
... .select("id")
... .top(5)
... )
>>> for ticket in my_query:
... print(ticket)
... <Model for Ticket(id=2336)>
... <Model for Ticket(id=3139)>
... <Model for Ticket(id=3807)>
... <Model for Ticket(id=3822)>
... <Model for Ticket(id=3843)>
--------------------------------------------------------------------------
>>> from pyvidesk.tickets import Ticket
>>> tickets = Tickets(token="my_token")
>>> ticket = tickets.get_empty_model()
>>> print(ticket)
... <Model for Ticket()>
"""
from .exceptions import (
PyvideskCannotSetReadOnlyProperty,
PyvideskPropertyNotValidError,
PyvideskPropertyWithWrongType,
PyvideskSaveWithoutIdError,
)
from .properties import ComplexProperty
class Model:
"""
Classe que modela um objeto da entity.
Exemplo de uso:
>>> from datetime import date
>>> from pyvidesk.tickets import Tickets
>>> tickets = Tickets("my_token")
>>> today = date.today()
>>> ticket = tickets.get_by_id(3)
>>> for action in ticket.actions:
... for appointment in action.timeAppointments:
... appointment.date = today
>>> ticket.save()
-------------------------------------------------------------------------
>>> from pyvidesk.persons import Persons
>>> persons = Persons("my_token")
>>> person = persons.get_by_id(1)
>>> person.delete()
"""
__is_complex__ = False
def __init__(self, entity, name_=None, **properties):
"""
Args:
entity (): Objeto que representa a entidade do modelo. Aqui, entidade pode ser
tanto Tickets, Persons e Services, como as propriedades complexas dessas
entidades. Assim, conseguimos acessar os valores das propriedades complexas
na forma de atributos;
name_ (str): O nome da entidade. Importante para propriedades complexas.
properties (kwargs): As propriedades e valores obtidos pela query.
_state (dict): Representa o estado da query no servidor do Movidesk.
"""
self._entity = entity
self._entity_properties = self._entity.get_properties(
as_model=self.__is_complex__
)
self._properties = properties
self._name = name_
self._state = dict()
for prop, prop_value in self._properties.items():
try:
property_obj = self._entity_properties[prop]
except KeyError:
self._state[
prop
] = "Propriedade ainda não suportada por esta biblioteca."
continue
if isinstance(property_obj, ComplexProperty):
if isinstance(prop_value, dict):
self._state[prop] = _ComplexPropertyModel(
entity=property_obj,
name_=prop,
**prop_value,
)
if isinstance(prop_value, list):
self._state[prop] = []
for values in prop_value:
self._state[prop].append(
_ComplexPropertyModel(
entity=property_obj,
name_=prop[:-1],
**values,
)
)
continue
self._state[prop] = property_obj.deserialize(value=prop_value)
def __repr__(self):
if "id" in self._properties:
properties_text = f"id={self._properties['id']}"
else:
properties_text = ", ".join(
[
f"{prop}={prop_value}"
for prop, prop_value in self._properties.items()
]
)
name = self._name or self._entity.__class__.__name__[:-1]
return f"<{self.__class__.__name__} for {name}({properties_text})>"
def __setattr__(self, attr, value):
if attr in (
"_entity",
"_entity_properties",
"_properties",
"_name",
"_state",
):
super().__setattr__(attr, value)
return
if attr not in self._entity_properties:
raise PyvideskPropertyNotValidError(param=attr, class_=self._entity)
if not isinstance(value, self._entity_properties[attr].alias):
raise PyvideskPropertyWithWrongType(
param=self._entity_properties[attr].full_name,
value=value,
correct_type=self._entity_properties[attr].alias,
)
if self._entity_properties[attr].is_read_only:
raise PyvideskCannotSetReadOnlyProperty(
f"{self._entity_properties[attr].full_name} é uma propriedade "
"que permite apenas leitura!"
)
super().__setattr__(attr, value)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
try:
return self._state[attr]
except KeyError as wrong_property:
if attr in self._entity_properties:
entity = self._entity_properties[attr]
if isinstance(entity, ComplexProperty):
self._state[attr] = _EmptyComplexPropertyModel(
entity=entity,
name_=attr,
)
return self._state[attr]
return
raise PyvideskPropertyNotValidError(
param=attr, class_=self._entity
) from wrong_property
@property
def _state_raw(self):
"""
Metodo que retorna o estado do modelo no formato JSON. Útil para o método save().
"""
state_raw = dict()
for prop, prop_value in self._state.items():
if isinstance(prop_value, Model):
state_raw[prop] = prop_value._state
elif _is_list_of_complex_propeties(prop_value, class_=self.__class__):
state_raw[prop] = []
for p in prop_value:
state_raw[prop].append(p._state_raw)
else:
state_raw[prop] = prop_value
return state_raw
def _get_changes(self):
"""
Metodo que obtem as mudancas de 'primeiro nivel'.
Ou seja, esse metodo nao obtem as mudancas em propriedades complexas.
"""
changes = dict()
for prop, prop_value in self.__dict__.items():
if prop in self._entity_properties:
if prop in self._state:
if prop_value != self._state.get(prop, prop_value):
changes[prop] = self.__dict__[prop]
else:
changes[prop] = self.__dict__[prop]
return changes
def _get_all_changes(self):
"""
Metodo que obtem todas as mudancas do modelo, incluindo as propriedades complexas.
"""
changes = self._get_changes()
for prop, prop_value in self._state.items():
if prop in self._entity_properties:
if isinstance(prop_value, Model):
_changes = prop_value._get_changes()
if _changes:
changes[prop] = _changes
continue
if _is_list_of_complex_propeties(prop_value, class_=self.__class__):
_changes = _get_changes_on_children_properties(prop_value)
if _changes:
changes[prop] = _changes
continue
if prop in self._state:
if prop_value != self._state.get(prop, prop_value):
changes[prop] = self.__dict__[prop]
else:
changes[prop] = self.__dict__[prop]
return changes
def _do_change(self, prop_name, prop_changes):
"""
Metodo que prepara as mudancas para a requisicao PATCH e realiza
tais mudancas no modelo.
Essas mudancas afetam apenas o atributo _state, logo, se chamarmos o metodo
raw() ainda obteremos os valores do modelo sem as mudancas.
Apos a conclusao do metodo save(), raw() retorna os valores com as mudancas
implementadas.
Args:
prop_name (str): O nome da propriedade;
prop_changes (str, list, int, datetime, _ComplexProperty): As mudancas da propriedade.
"""
if _is_list_of_complex_propeties(prop_changes):
prop_models = {
prop_model.id: prop_model for prop_model in self._state[prop_name]
}
for prop_change in prop_changes:
prop_obj = prop_models[prop_change.pop("id")]
for subprop_name, subprop_change in prop_change.items():
prop_obj._do_change(subprop_name, subprop_change)
else:
self._state[prop_name] = prop_changes
def _serialize_all_changes(self):
"""
Metodo que obtem e prepara (por meio da serializacao dos valores)
as mudancas para a requisicao PATCH.
Returns:
changes (dict): Dicionario com as propriedades que serao alteradas
"""
changes = dict()
for prop, prop_changes in self._get_all_changes().items():
self._do_change(prop, prop_changes)
prop_value = getattr(self, prop)
if isinstance(prop_value, Model):
prop_value = prop_value._state_raw
if _is_list_of_complex_propeties(prop_value, class_=self.__class__):
prop_value = [p._state_raw for p in prop_value]
property_obj = self._entity_properties[prop]
changes[prop] = property_obj.serialize(value=prop_value)
return changes
def get_properties(self):
return self._entity.get_properties()
def save(self):
"""Metodo que salva as alteracoes feitas no modelo"""
if not self.id:
raise PyvideskSaveWithoutIdError(
f"Não é possível atualizar {self.__repr__()}, pois o ID não está definido!"
)
changes = self._serialize_all_changes()
if changes:
self._entity.api.patch(changes=changes, model_id=self.id)
model = self._entity.get_by_id(self.id)
self._properties = model._properties
self._state = model._state
def delete(self):
if not self.id:
raise PyvideskSaveWithoutIdError(
f"Não é possível deletar {self.__repr__()}, pois o ID não está definido!"
)
self._entity.api.delete(model_id=self.id)
self._properties = self._state = dict()
return self._entity.get_empty_model()
def raw(self):
"""Metodo que retorna o JSON "cru" do modelo"""
return self._properties
class _ComplexPropertyModel(Model):
__is_complex__ = True
def save(self):
pass
def delete(self):
pass
class EmptyModel(Model):
"""
Classe que retorna um modelo vazio da entidade.
Deve ser usado na criacao de Tickets, Persons ou Services.
A principal diferença para a classe Model é que nao checamos se um atributo é
'readonly' e só checamos se um atributo pertence a entidade quando chamamos o metodo
create().
Exemplo:
>>> from pyvidesk.tickets import Tickets
>>> tickets = Tickets("meu_token_secreto")
>>> ticket = tickets.get_empty_model()
>>> ticket.subject = "Assunto"
>>> ticket.type = 1
>>> ticket.serviceFirstLevelId = 190853
>>> ticket.createdBy.id = "2263751"
>>> ticket.clients = [{"id": "917910092"}]
>>> ticket.actions = [{"description": "Descrição", "type": 1}]
>>> ticket.ownerTeam = "Administradores"
>>> ticket.owner.id = "2222"
>>> ticket.create()
"""
def __setattr__(self, attr, value):
if attr in (
"_entity",
"_entity_properties",
"_properties",
"_name",
"_state",
):
super().__setattr__(attr, value)
return
if attr not in self._entity_properties:
raise PyvideskPropertyNotValidError(param=attr, class_=self._entity)
if not isinstance(value, self._entity_properties[attr].alias):
raise PyvideskPropertyWithWrongType(
param=self._entity_properties[attr].full_name,
value=value,
correct_type=self._entity_properties[attr].alias,
)
self.__dict__[attr] = value
def _do_change(self, prop_name, prop_changes):
"""
Sobscrevendo este método da classe Model.
"""
self._state[prop_name] = prop_changes
def create(self):
"""
Funcao que cria o modelo.
Returns:
(pyvidesk.model.Model): Objeto da classe Model que representa o
modelo criado no servidor.
"""
changes = self._serialize_all_changes()
if changes:
model_id = self._entity.api.post(infos=changes)
return self._entity.get_by_id(model_id)
def save(self):
pass
def delete(self):
pass
class _EmptyComplexPropertyModel(EmptyModel):
__is_complex__ = True
def create(self):
pass
def _get_changes_on_children_properties(values):
"""Funcao que obtem as mudancas das propriedades complexas"""
changes = []
for property_obj in values:
_changes = property_obj._get_all_changes()
changes += [{**_changes, "id": property_obj.id}] if _changes else []
# O 'id' da propriedade é importante para sabermos em qual modelo aplicar a mudança
return changes
def _is_list_of_complex_propeties(property_obj, class_=dict):
return isinstance(property_obj, list) and all(
isinstance(p, class_) for p in property_obj
)
|
from typing import List, Tuple
import pytest
from conftest import DeSECAPIV1Client, query_replication, NSLordClient, assert_eventually
def generate_params(dict_value_lists_by_type: dict) -> List[Tuple[str, str]]:
return [
(rr_type, value)
for rr_type in dict_value_lists_by_type.keys()
for value in dict_value_lists_by_type[rr_type]
]
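# Illustrative example (not part of the original test suite): generate_params()
# flattens a {rr_type: [values]} mapping into (rr_type, value) pairs suitable
# for pytest.mark.parametrize, preserving dict insertion order.
assert generate_params({'A': ['127.0.0.1', '127.0.0.2'], 'NS': ['ns1.example.com.']}) == [
    ('A', '127.0.0.1'),
    ('A', '127.0.0.2'),
    ('NS', 'ns1.example.com.'),
]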
VALID_RECORDS_CANONICAL = {
'A': ['127.0.0.1', '127.0.0.2'],
'AAAA': ['::1', '::2'],
'AFSDB': ['2 turquoise.femto.edu.'],
'APL': [
# from RFC 3123 Sec. 4
'1:192.168.32.0/21 !1:192.168.38.0/28',
'1:192.168.42.0/26 1:192.168.42.64/26 1:192.168.42.128/25',
'1:127.0.0.1/32 1:172.16.64.0/22',
'1:224.0.0.0/4 2:ff00::/8',
],
'CAA': [
'128 issue "letsencrypt.org"', '128 iodef "mailto:desec@example.com"',
'1 issue "letsencrypt.org"'
],
'CDNSKEY': [
None,
'256 3 8 AwEAAday3UX323uVzQqtOMQ7EHQYfD5O fv4akjQGN2zY5AgB/2jmdR/+1PvXFqzK CAGJv4wjABEBNWLLFm7ew1hHMDZEKVL1 7aml0EBKI6Dsz6Mxt6n7ScvLtHaFRKax T4i2JxiuVhKdQR9XGMiWAPQKrRM5SLG0 P+2F+TLKl3D0L/cD',
'257 3 8 AwEAAcw5QLr0IjC0wKbGoBPQv4qmeqHy 9mvL5qGQTuaG5TSrNqEAR6b/qvxDx6my 4JmEmjUPA1JeEI9YfTUieMr2UZflu7aI bZFLw0vqiYrywCGrCHXLalOrEOmrvAxL vq4vHtuTlH7JIszzYBSes8g1vle6KG7x XiP3U5Ll96Qiu6bZ31rlMQSPB20xbqJJ h6psNSrQs41QvdcXAej+K2Hl1Wd8kPri ec4AgiBEh8sk5Pp8W9ROLQ7PcbqqttFa W2m7N/Wy4qcFU13roWKDEAstbxH5CHPo BfZSbIwK4KM6BK/uDHpSPIbiOvOCW+lv u9TAiZPc0oysY6aslO7jXv16Gws=',
'257 3 13 aCoEWYBBVsP9Fek2oC8yqU8ocKmnS1iD SFZNORnQuHKtJ9Wpyz+kNryquB78Pyk/ NTEoai5bxoipVQQXzHlzyg==',
],
'CDS': [
None,
'6454 8 1 24396e17e36d031f71c354b06a979a67a01f503e',
],
'CERT': ['6 0 0 sadfdQ=='],
'CNAME': ['example.com.'],
'DHCID': ['aaaaaaaaaaaa', 'xxxx'],
'DLV': ['6454 8 1 24396e17e36d031f71c354b06a979a67a01f503e'],
'DNAME': ['example.com.'],
'DNSKEY': [
None,
'256 3 8 AwEAAday3UX323uVzQqtOMQ7EHQYfD5O fv4akjQGN2zY5AgB/2jmdR/+1PvXFqzK CAGJv4wjABEBNWLLFm7ew1hHMDZEKVL1 7aml0EBKI6Dsz6Mxt6n7ScvLtHaFRKax T4i2JxiuVhKdQR9XGMiWAPQKrRM5SLG0 P+2F+TLKl3D0L/cD',
'257 3 8 AwEAAcw5QLr0IjC0wKbGoBPQv4qmeqHy 9mvL5qGQTuaG5TSrNqEAR6b/qvxDx6my 4JmEmjUPA1JeEI9YfTUieMr2UZflu7aI bZFLw0vqiYrywCGrCHXLalOrEOmrvAxL vq4vHtuTlH7JIszzYBSes8g1vle6KG7x XiP3U5Ll96Qiu6bZ31rlMQSPB20xbqJJ h6psNSrQs41QvdcXAej+K2Hl1Wd8kPri ec4AgiBEh8sk5Pp8W9ROLQ7PcbqqttFa W2m7N/Wy4qcFU13roWKDEAstbxH5CHPo BfZSbIwK4KM6BK/uDHpSPIbiOvOCW+lv u9TAiZPc0oysY6aslO7jXv16Gws=',
'257 3 13 aCoEWYBBVsP9Fek2oC8yqU8ocKmnS1iD SFZNORnQuHKtJ9Wpyz+kNryquB78Pyk/ NTEoai5bxoipVQQXzHlzyg==',
],
'DS': ['6454 8 1 24396e17e36d031f71c354b06a979a67a01f503e'],
'EUI48': ['aa-bb-cc-dd-ee-ff'],
'EUI64': ['aa-bb-cc-dd-ee-ff-00-11'],
'HINFO': ['"ARMv8-A" "Linux"'],
'HTTPS': ['1 h3POOL.exaMPLe. alpn=h2,h3 echconfig="MTIzLi4uCg=="'],
# 'IPSECKEY': ['12 0 2 . asdfdf==', '03 1 1 127.0.00.1 asdfdf==', '12 3 1 example.com. asdfdf==',],
'KX': ['4 example.com.', '28 io.', '0 .'],
'LOC': [
'23 12 59.000 N 42 22 48.500 W 65.00m 20.00m 10.00m 10.00m',
],
'MX': ['10 example.com.', '20 1.1.1.1.'],
'NAPTR': [
'100 50 "s" "z3950+I2L+I2C" "" _z3950._tcp.gatech.edu.',
],
'NS': ['ns1.example.com.'],
'OPENPGPKEY': [
'mQINBF3yev8BEADR9GxB6OJ5AJlXBWc3nWyWZ+yNNVBiy73XjgOs0uowbxph'
'dIw6l75M6xw3i9xAlcjAGG2710FJaye7EZHot3RTIgHpn4FrErQSpNPuJKjD'
'IedZZ4av5SRtz5FfnXhNkQGs7jAVi6FmjR9/0GWMxj0BdbcOmeePCUfIIH7T'
'ujQJ2c3XHOu/kZ1h4zsFVSslcLEi4KXy0I52pEz0E2CyJrxCLdBd7uU7wDCg'
'G8KrIP3UJ5EtukP/LMq4D1eZ4FmtVqzkuDYlJJo70XQytEK9UqDdaDvlUeS5'
'FrVj4Zf7OaC5YcSvQemVV4VYSBgJIPb+iFY21/1mXAxyYaunqaR0j5qNaMjr'
'E2g3ADRxJiLExhhzlqwJU8+Lc+0QajF/s3lc+dB5usSPqGk6Eb4hBEMaqQvg'
'5I0W8pFtHINYipNW5xGSrsX0pyWVai6EkoTXfjbBMC7khwmwsycJ8pYj3ipe'
'aNQuUP+XXqJKepoVOY2475Z7YT1NRRbGGEp743mbqKo4SnEKxS2kApo1UPd1'
'FbI50TZ62Vsv4tne3bR25eCycjdvIOp6zPm/Pf9LFVm5KF8Wd2U3vRi/uo4v'
'HPUK1RoIzjmirp3XUBGBgHd/mhlOADPWB9dE96eXK4yEHlbfomfFiKAisHDc'
'vUa0E/UbklYBhJjdWBaw1fDDyiSxsBCTsq4ObQARAQABtBFzdXBwb3J0QHBv'
'c3Rlby5kZYkCVAQTAQgAPhYhBJZxyBhcZRmrtOitn6TrgtJXP3x3BQJd8nr/'
'AhsDBQkDw7iABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEKTrgtJXP3x3'
'+UIP/jpw6Nkp5hLbXxpPRSL2TyyWDfEHPKkBQfU+jnAUIN+WgAV27HpOa+vZ'
'/hmTKOG6SlTOxHWACmDiUVfhLOYMV8QPDD3yPFCZWo4UxBKPZaai6GQwr44u'
'zCcU+E6AdFnb2nbzYSgACrErU5o5JoU2lPgleMI3FYsG8wb/kQAD7XGDX+Ev'
'tAbAQGK5EgevycJzot/hsR/S6EM/l0VsW74DIje3fbp3gaJY2fUG9fTdQu7a'
'gj6f9HuZAvXHIuSFeA/kwhUWuZfTcct8PV78gwQB4d6AOFMzoxLaFQAzxuTR'
'60kZxsyyi4U5km6D/XzI9rTd228PD8xkGr/2Kx1YRU0ixZnohv9xNc4GP/69'
'GNWbbOZcyJcSL+kvych+ddbP5VjHea+b4vT35KV++PMndj+78BE1u5sdqWir'
'X9pi09go7SW1BlaJsMHrkR0P8yFCaFWLyCmIC7C/KcSuHVwcjVYWHynLq6CK'
'kkv4r8BNM/QFzPCeozXjMk7zq9TkJjLVxsUVNcZaNqzlWO0JzCfE6ICpHhyI'
'g/1bO/VJQyk+6llyX1LwRKCeKQCp6KcLx4qnjgZ8g1ArNvazNot9fAssgAUz'
'yoyOBF1SYJxWnzu9GE1F47zU1iD6FB8mjspvE00voDs8t2e+xtZoqsM12WtC'
'8R4VbCY0LmTPGiWyxD9y7TnUlDfHuQINBF3yev8BEAC4dyN2BPiHCmwtKV/3'
'9ZUMVCjb39wnsAA8CH7WAAM5j+k8/uXKUmTcFoZ7+9ya6PZCLXbPC64FIAwl'
'YalzCEP5Jx25Ct/DPhVJPIFWHMOYbyUbLJ8tlC1vnnDhd8czeGmozkuyofMh'
'39QzR3SLzOqucJO3GC6Fx7eFNasajJsaAXaQToKx8YqKCGG4nHxn0Ucb79+G'
'/0wQhtR0Mk3CxcajYJAsTV2ulW05P9xqovblXImXDZpgv0bQ2TX43SdR17yk'
'QzL33HRNCT7clLblHLMPQVxYy1yGS6hOAQj/Rmp+BO7d3S082+oyAFWeb7a9'
'fwzedbxPeiE2VOLtZizQUWIHHqwKP0tNEWRvSfCbc6ktvZQnHCIKyhmTC8N7'
'kvS4T6WjWzpc1M+GOMlOqhtW6t3zV1i2tkcpujduBGRIZ8ZQY+yo/i1HSL5t'
'N98606YXN1s2JyqwAkBJfPYiMp67J2uaFsML3YQEKAxR64GhkjFR/OqYtlIB'
'cx1PvcrPbVWQzXZBfFyjbAd55MnWVk6GrbM3y1QATN3NNhXfbMzLLU6cw/8p'
'sJw0+hxv1W2bJTftrs/5PyLryNOKYHbPEtC6aIyuzbIFFKWxkNshUiasd82Q'
'Jafgx3pFNnCtB61UV46QeqPI7sVueLslurqVgEGb2dS6unKYWXedoIMELm3C'
'g0XdJQARAQABiQI8BBgBCAAmFiEElnHIGFxlGau06K2fpOuC0lc/fHcFAl3y'
'ev8CGwwFCQPDuIAACgkQpOuC0lc/fHc/PxAAj29SBqW6ZRG8zOOw0Dmg1sg4'
'ONYtJ4hEzqPv2WbtOKxgtdcjQS1gMadtfcrH0omZPn8YmeojdbJCd5b9UBYr'
'h4Km3usURy79ouqvyQdZOIBOCUuvNcAUX2xvgUEHQW+rDpkd2mxdASsay1I7'
'yx2S0xE/QP/L2dH0470JWJ+tCIz3WuW2BEi+wijy2tqJfzIkIWA5ND2jwl4n'
'roY7srmAwZfXlh97/T5oOPIUsupIp+vmtMd4B0qa1wLGFDch+VwVvklLN5/Q'
'Vfbedy1Y8yHYiRWSrd3pHvkdtE5rI8qCOWaU/271plT9MZiwHe5WzCWESbKi'
'dwHQanM0Y6+Y8rrvUWGXrlPDvVd3Gd6TjqNhA8+AEiG+BHsw7Azc5in97/yW'
'9cAYEldWv1tUjxgqvWWbGA8E6M/EuE3FuM48HNODfEh/b0ut+b2UAtuz3LzK'
'NVpqYZ9NIebpIMlUuJoQc9rPCWzMDNX37iGRBA016L7VizeJRpJ8VPRAQWHe'
'L5eC85dx9wcdK152fqlOUj729J2TZ5JYQdm9vF2cA6bsIB9m48j/UzNEeV3W'
'NZ3nuZqQ9VjVLYiPURbdkYxWfUvFdVawfqUZ4PGKbVWrFfod8WwHa+gsP4UJ'
'hLN/nxCalBbc3HnyYo0Inlytu4fumElS7kuUVNielOsJlyUr8kfxU3c6MPk=',
],
'PTR': ['example.com.', '*.example.com.'],
'RP': ['hostmaster.example.com. .'],
'SMIMEA': ['3 1 0 aabbccddeeff'],
'SPF': [
'"v=spf1 ip4:10.1" ".1.1 ip4:127" ".0.0.0/16 ip4:192.168.0.0/27 include:example.com -all"',
'"v=spf1 include:example.com ~all"',
'"v=spf1 ip4:10.1.1.1 ip4:127.0.0.0/16 ip4:192.168.0.0/27 include:example.com -all"',
'"spf2.0/pra,mfrom ip6:2001:558:fe14:76:68:87:28:0/120 -all"',
],
'SRV': ['0 0 0 .', '100 1 5061 example.com.'],
'SSHFP': ['2 2 aabbcceeddff'],
'SVCB': ['2 sVc2.example.NET. port=1234 echconfig="MjIyLi4uCg==" ipv6hint=2001:db8::2'],
'TLSA': ['3 0 2 696b8f6b92a913560b23ef5720c378881faffe74432d04eb35db957c0a93987b47adf26abb5dac10ba482597ae16edb069b511bec3e26010d1927bf6392760dd 696b8f6b92a913560b23ef5720c378881faffe74432d04eb35db957c0a93987b47adf26abb5dac10ba482597ae16edb069b511bec3e26010d1927bf6392760dd',],
'TXT': [
'"foobar"',
'"foo" "bar"',
'"foo" "" "bar"',
'"" "" "foo" "" "bar"',
r'"new\010line"',
r'"\000" "NUL byte yo"',
r'"\130\164name\164Boss\164type\1611"', # binary stuff with first bit 1
f'"{"a" * 255}" "{"a" * 243}"', # 500 byte total wire length
r'"\000\001\002\003\004\005\006\007\008\009\010\011\012\013\014\015\016\017\018\019\020\021\022\023\024\025\026\027\028\029\030\031 !\"#$%&' + "'" + r'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\127\128\129\130\131\132\133\134\135\136\137\138\139\140\141\142\143\144\145\146\147\148\149\150\151\152\153\154\155\156\157\158\159\160\161\162\163\164\165\166\167\168\169\170\171\172\173\174\175\176\177\178\179\180\181\182\183\184\185\186\187\188\189\190\191\192\193\194\195\196\197\198\199\200\201\202\203\204\205\206\207\208\209\210\211\212\213\214\215\216\217\218\219\220\221\222\223\224\225\226\227\228\229\230\231\232\233\234\235\236\237\238\239\240\241\242\243\244\245\246\247\248\249\250\251\252\253\254" "\255"',
],
'URI': ['10 1 "ftp://ftp1.example.com/public"'],
}
VALID_RECORDS_NON_CANONICAL = {
'A': ['127.0.0.3'],
'AAAA': ['0000::0000:0003', '2001:db8::128.2.129.4'],
'AFSDB': ['03 turquoise.FEMTO.edu.'],
'APL': ['2:FF00:0:0:0:0::/8 !1:192.168.38.0/28'],
'CAA': ['0128 "issue" "letsencrypt.org"'],
'CDNSKEY': [
'0256 3 8 AwEAAday3UX323uVzQqtOMQ7EHQYfD5Ofv4akjQGN2zY5AgB/2jmdR/+1PvXFqzKCAGJv4wjABEBNWLLFm7ew1hHMDZEKVL17aml0EBKI6Dsz6Mxt6n7ScvLtHaFRKaxT4i2JxiuVhKdQR9XGMiWAPQKrRM5SLG0P+2F+TLKl3D0L/cD',
'257 03 8 AwEAAcw5QLr0IjC0wKbGoBPQv4qmeqHy9mvL5qGQTuaG5TSrNqEAR6b/qvxDx6my4JmEmjUPA1JeEI9YfTUieMr2UZflu7aIbZFLw0vqiYrywCGrCHXLalOrEOmrvAxLvq4vHtuTlH7JIszzYBSes8g1vle6KG7xXiP3U5Ll96Qiu6bZ31rlMQSPB20xbqJJh6psNSrQs41QvdcXAej+K2Hl1Wd8kPriec4AgiBEh8sk5Pp8W9ROLQ7PcbqqttFaW2m7N/Wy4qcFU13roWKDEAstbxH5CHPoBfZSbIwK4KM6BK/uDHpSPIbiOvOCW+lvu9TAiZPc0oysY6aslO7jXv16Gws=',
'257 3 013 aCoEWYBBVsP9Fek2oC8yqU8ocKmnS1iDSFZNORnQuHKtJ9Wpyz+kNryquB78Pyk/NTEoai5bxoipVQQXzHlzyg==',
],
'CDS': [
'06454 08 01 24396e17e36d031f71c354b06a979a67a01f503e',
'6454 8 2 5C BA665A006F6487625C6218522F09BD3673C25FA10F25CB18459AA1 0DF1F520',
],
'CERT': ['06 00 00 sadfee=='],
'CNAME': ['EXAMPLE.TEST.'],
'DHCID': ['aa aaa aaaa a a a', 'xxxx'],
'DLV': [
'06454 08 01 24396e17e36d031f71c354b06a979a67a01f503e',
'6454 8 2 5C BA665A006F6487625C6218522F09BD3673C25FA10F25CB18459AA1 0DF1F520',
],
'DNAME': ['EXAMPLE.TEST.'],
'DNSKEY': [
'0256 3 8 AwEAAday3UX323uVzQqtOMQ7EHQYfD5Ofv4akjQGN2zY5AgB/2jmdR/+1PvXFqzKCAGJv4wjABEBNWLLFm7ew1hHMDZEKVL17aml0EBKI6Dsz6Mxt6n7ScvLtHaFRKaxT4i2JxiuVhKdQR9XGMiWAPQKrRM5SLG0P+2F+TLKl3D0L/cD',
'257 03 8 AwEAAcw5QLr0IjC0wKbGoBPQv4qmeqHy9mvL5qGQTuaG5TSrNqEAR6b/qvxDx6my4JmEmjUPA1JeEI9YfTUieMr2UZflu7aIbZFLw0vqiYrywCGrCHXLalOrEOmrvAxLvq4vHtuTlH7JIszzYBSes8g1vle6KG7xXiP3U5Ll96Qiu6bZ31rlMQSPB20xbqJJh6psNSrQs41QvdcXAej+K2Hl1Wd8kPriec4AgiBEh8sk5Pp8W9ROLQ7PcbqqttFaW2m7N/Wy4qcFU13roWKDEAstbxH5CHPoBfZSbIwK4KM6BK/uDHpSPIbiOvOCW+lvu9TAiZPc0oysY6aslO7jXv16Gws=',
'257 3 013 aCoEWYBBVsP9Fek2oC8yqU8ocKmnS1iDSFZNORnQuHKtJ9Wpyz+kNryquB78Pyk/NTEoai5bxoipVQQXzHlzyg==',
],
'DS': [
'06454 08 01 24396e17e36d031f71c354b06a979a67a01f503e',
'6454 8 2 5C BA665A006F6487625C6218522F09BD3673C25FA10F25CB18459AA1 0DF1F520',
],
'EUI48': ['AA-BB-CC-DD-EE-F1'],
'EUI64': ['AA-BB-CC-DD-EE-FF-00-12'],
'HINFO': ['cpu os'],
'HTTPS': [
# from https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-02#section-10.3, with echconfig base64'd
'1 . alpn=h3',
'0 pool.svc.example.',
'1 h3pool.example. alpn=h2,h3 echconfig="MTIzLi4uCg=="',
'2 . alpn=h2 echconfig="YWJjLi4uCg=="',
# made-up (not from RFC)
'1 pool.svc.example. no-default-alpn port=1234 ipv4hint=192.168.123.1',
'2 . echconfig=... key65333=ex1 key65444=ex2 mandatory=key65444,echconfig', # see #section-7
],
# 'IPSECKEY': ['12 0 2 . asdfdf==', '03 1 1 127.0.00.1 asdfdf==', '12 3 1 example.com. asdfdf==',],
'KX': ['012 example.TEST.'],
'LOC': [
'023 012 59 N 042 022 48.500 W 65.00m 20.00m 10.00m 10.00m',
],
'MX': ['10 010.1.1.1.'],
'NAPTR': [
'100 50 "s" "z3950+I2L+I2C" "" _z3950._tcp.gatech.edu.',
],
'NS': ['EXaMPLE.COM.'],
'OPENPGPKEY': [
'mG8EXtVIsRMFK4EEAC==',
'mQINBF3yev8BEADR9GxB6OJ5AJlXBWc3nWyWZ+yNNVBiy73XjgOs0uowbxph '
'dIw6l75M6xw3i9xAlcjAGG2710FJaye7EZHot3RTIgHpn4FrErQSpNPuJKjD '
'IedZZ4av5SRtz5FfnXhNkQGs7jAVi6FmjR9/0GWMxj0BdbcOmeePCUfIIH7T '
'ujQJ2c3XHOu/kZ1h4zsFVSslcLEi4KXy0I52pEz0E2CyJrxCLdBd7uU7wDCg '
'G8KrIP3UJ5EtukP/LMq4D1eZ4FmtVqzkuDYlJJo70XQytEK9UqDdaDvlUeS5 '
'FrVj4Zf7OaC5YcSvQemVV4VYSBgJIPb+iFY21/1mXAxyYaunqaR0j5qNaMjr '
'E2g3ADRxJiLExhhzlqwJU8+Lc+0QajF/s3lc+dB5usSPqGk6Eb4hBEMaqQvg '
'5I0W8pFtHINYipNW5xGSrsX0pyWVai6EkoTXfjbBMC7khwmwsycJ8pYj3ipe '
'aNQuUP+XXqJKepoVOY2475Z7YT1NRRbGGEp743mbqKo4SnEKxS2kApo1UPd1 '
'FbI50TZ62Vsv4tne3bR25eCycjdvIOp6zPm/Pf9LFVm5KF8Wd2U3vRi/uo4v '
'HPUK1RoIzjmirp3XUBGBgHd/mhlOADPWB9dE96eXK4yEHlbfomfFiKAisHDc '
'vUa0E/UbklYBhJjdWBaw1fDDyiSxsBCTsq4ObQARAQABtBFzdXBwb3J0QHBv '
'c3Rlby5kZYkCVAQTAQgAPhYhBJZxyBhcZRmrtOitn6TrgtJXP3x3BQJd8nr/ '
'AhsDBQkDw7iABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEKTrgtJXP3x3 '
'+UIP/jpw6Nkp5hLbXxpPRSL2TyyWDfEHPKkBQfU+jnAUIN+WgAV27HpOa+vZ '
'/hmTKOG6SlTOxHWACmDiUVfhLOYMV8QPDD3yPFCZWo4UxBKPZaai6GQwr44u '
'zCcU+E6AdFnb2nbzYSgACrErU5o5JoU2lPgleMI3FYsG8wb/kQAD7XGDX+Ev '
'tAbAQGK5EgevycJzot/hsR/S6EM/l0VsW74DIje3fbp3gaJY2fUG9fTdQu7a '
'gj6f9HuZAvXHIuSFeA/kwhUWuZfTcct8PV78gwQB4d6AOFMzoxLaFQAzxuTR '
'60kZxsyyi4U5km6D/XzI9rTd228PD8xkGr/2Kx1YRU0ixZnohv9xNc4GP/69 '
'GNWbbOZcyJcSL+kvych+ddbP5VjHea+b4vT35KV++PMndj+78BE1u5sdqWir '
'X9pi09go7SW1BlaJsMHrkR0P8yFCaFWLyCmIC7C/KcSuHVwcjVYWHynLq6CK '
'kkv4r8BNM/QFzPCeozXjMk7zq9TkJjLVxsUVNcZaNqzlWO0JzCfE6ICpHhyI '
'g/1bO/VJQyk+6llyX1LwRKCeKQCp6KcLx4qnjgZ8g1ArNvazNot9fAssgAUz '
'yoyOBF1SYJxWnzu9GE1F47zU1iD6FB8mjspvE00voDs8t2e+xtZoqsM12WtC '
'8R4VbCY0LmTPGiWyxD9y7TnUlDfHuQINBF3yev8BEAC4dyN2BPiHCmwtKV/3 '
'9ZUMVCjb39wnsAA8CH7WAAM5j+k8/uXKUmTcFoZ7+9ya6PZCLXbPC64FIAwl '
'YalzCEP5Jx25Ct/DPhVJPIFWHMOYbyUbLJ8tlC1vnnDhd8czeGmozkuyofMh '
'39QzR3SLzOqucJO3GC6Fx7eFNasajJsaAXaQToKx8YqKCGG4nHxn0Ucb79+G '
'/0wQhtR0Mk3CxcajYJAsTV2ulW05P9xqovblXImXDZpgv0bQ2TX43SdR17yk '
'QzL33HRNCT7clLblHLMPQVxYy1yGS6hOAQj/Rmp+BO7d3S082+oyAFWeb7a9 '
'fwzedbxPeiE2VOLtZizQUWIHHqwKP0tNEWRvSfCbc6ktvZQnHCIKyhmTC8N7 '
'kvS4T6WjWzpc1M+GOMlOqhtW6t3zV1i2tkcpujduBGRIZ8ZQY+yo/i1HSL5t '
'N98606YXN1s2JyqwAkBJfPYiMp67J2uaFsML3YQEKAxR64GhkjFR/OqYtlIB '
'cx1PvcrPbVWQzXZBfFyjbAd55MnWVk6GrbM3y1QATN3NNhXfbMzLLU6cw/8p '
'sJw0+hxv1W2bJTftrs/5PyLryNOKYHbPEtC6aIyuzbIFFKWxkNshUiasd82Q '
'Jafgx3pFNnCtB61UV46QeqPI7sVueLslurqVgEGb2dS6unKYWXedoIMELm3C '
'g0XdJQARAQABiQI8BBgBCAAmFiEElnHIGFxlGau06K2fpOuC0lc/fHcFAl3y '
'ev8CGwwFCQPDuIAACgkQpOuC0lc/fHc/PxAAj29SBqW6ZRG8zOOw0Dmg1sg4 '
'ONYtJ4hEzqPv2WbtOKxgtdcjQS1gMadtfcrH0omZPn8YmeojdbJCd5b9UBYr '
'h4Km3usURy79ouqvyQdZOIBOCUuvNcAUX2xvgUEHQW+rDpkd2mxdASsay1I7 '
'yx2S0xE/QP/L2dH0470JWJ+tCIz3WuW2BEi+wijy2tqJfzIkIWA5ND2jwl4n '
'roY7srmAwZfXlh97/T5oOPIUsupIp+vmtMd4B0qa1wLGFDch+VwVvklLN5/Q '
'Vfbedy1Y8yHYiRWSrd3pHvkdtE5rI8qCOWaU/271plT9MZiwHe5WzCWESbKi '
'dwHQanM0Y6+Y8rrvUWGXrlPDvVd3Gd6TjqNhA8+AEiG+BHsw7Azc5in97/yW '
'9cAYEldWv1tUjxgqvWWbGA8E6M/EuE3FuM48HNODfEh/b0ut+b2UAtuz3LzK '
'NVpqYZ9NIebpIMlUuJoQc9rPCWzMDNX37iGRBA016L7VizeJRpJ8VPRAQWHe '
'L5eC85dx9wcdK152fqlOUj729J2TZ5JYQdm9vF2cA6bsIB9m48j/UzNEeV3W '
'NZ3nuZqQ9VjVLYiPURbdkYxWfUvFdVawfqUZ4PGKbVWrFfod8WwHa+gsP4UJ '
'hLN/nxCalBbc3HnyYo0Inlytu4fumElS7kuUVNielOsJlyUr8kfxU3c6MPk=',
],
'PTR': ['EXAMPLE.TEST.'],
'RP': ['hostmaster.EXAMPLE.com. .'],
'SMIMEA': ['3 01 0 aabbccDDeeff'],
'SPF': [],
'SRV': ['100 01 5061 example.com.'],
'SSHFP': ['02 2 aabbcceeddff'],
'SVCB': [
'0 svc4-baz.example.net.',
'1 . key65333=...',
'2 svc2.example.net. echconfig="MjIyLi4uCg==" ipv6hint=2001:db8::2 port=1234',
],
'TLSA': ['003 00 002 696B8F6B92A913560b23ef5720c378881faffe74432d04eb35db957c0a93987b47adf26abb5dac10ba482597ae16edb069b511bec3e26010d1927bf6392760dd',],
'TXT': [
f'"{"a" * 498}" ',
'"' + 124 * '🧥' + '==="', # 501 byte total length
'"🧥 👚 👕 👖 👔 👗 👙 👘 👠 👡 👢 👞 👟 🥾 🥿 🧦 🧤 🧣 🎩 🧢 👒 🎓 ⛑ 👑 👝 👛 👜 💼 🎒 "',
'"🧥 👚 👕 👖 👔 👗 👙 👘 👠 👡 👢 👞 👟 🥾 🥿 🧦 🧤 🧣 🎩 🧢 👒 🎓 ⛑ 👑 👝 👛 👜 💼 🎒 👓 🕶 🥽 🥼 🌂 🧵"',
'"' + ''.join(fr'\{n:03}' for n in range(256)) + '"', # all bytes
],
'URI': ['10 01 "ftp://ftp1.example.test/public"',],
}
INVALID_RECORDS = {
'A': ['127.0.0.999', '127.000.0.01', '127.0.0.256', '::1', 'foobar', '10.0.1', '10!'],
'AAAA': ['::g', '1:1:1:1:1:1:1:1:', '1:1:1:1:1:1:1:1:1'],
'AFSDB': ['example.com.', '1 1', '1 de'],
'APL': [
'0:192.168.32.0/21 !1:192.168.38.0/28',
'1:192.168.32.0/21 !!1:192.168.38.0/28',
'1:192.168.32.0/33',
'18:12345/2',
'1:127.0.0.1',
'2:::/129',
],
'CAA': ['43235 issue "letsencrypt.org"'],
'CDNSKEY': ['a 3 13 aCoEWYBBVsP9Fek2oC8yqU8ocKmnS1iDSFZNORnQuHKtJ9Wpyz+kNryq uB78Pyk/NTEoai5bxoipVQQXzHlzyg=='],
'CDS': [
'a 8 1 24396E17E36D031F71C354B06A979A67A01F503E',
'6454 8 1 aabbccddeeff',
],
'CERT': ['6 0 sadfdd=='],
'CNAME': ['example.com', '10 example.com.'],
'DHCID': ['x', 'xx', 'xxx'],
'DLV': ['-34 13 1 aabbccddeeff'],
'DNAME': ['example.com', '10 example.com.'],
'DNSKEY': ['a 3 13 aCoEWYBBVsP9Fek2oC8yqU8ocKmnS1iDSFZNORnQuHKtJ9Wpyz+kNryq uB78Pyk/NTEoai5bxoipVQQXzHlzyg=='],
'DS': [
'-34 13 1 24396E17E36D031F71C354B06A979A67A01F503E',
'6454 8 1 aabbccddeeff',
],
'EUI48': ['aa-bb-ccdd-ee-ff', 'AA-BB-CC-DD-EE-GG'],
'EUI64': ['aa-bb-cc-dd-ee-ff-gg-11', 'AA-BB-C C-DD-EE-FF-00-11'],
'HINFO': ['"ARMv8-A"', f'"a" "{"b" * 256}"'],
'HTTPS': [
# from https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-02#section-10.3, with echconfig base64'd
'1 h3pool alpn=h2,h3 echconfig="MTIzLi4uCg=="',
# made-up (not from RFC)
'0 pool.svc.example. no-default-alpn port=1234 ipv4hint=192.168.123.1', # no keys in alias mode
'1 pool.svc.example. no-default-alpn port=1234 ipv4hint=192.168.123.1 ipv4hint=192.168.123.2', # dup
],
# 'IPSECKEY': [],
'KX': ['-1 example.com', '10 example.com'],
'LOC': ['23 12 61.000 N 42 22 48.500 W 65.00m 20.00m 10.00m 10.00m', 'foo', '1.1.1.1'],
    'MX': ['10 example.com', 'example.com.', '-5 asdf.', '65537 asdf.', '10 _foo.example.com.', '10 $url.'],
'NAPTR': ['100 50 "s" "z3950+I2L+I2C" "" _z3950._tcp.gatech.edu',
'100 50 "s" "" _z3950._tcp.gatech.edu.',
'100 50 3 2 "z3950+I2L+I2C" "" _z3950._tcp.gatech.edu.'],
'NS': ['ns1.example.com', '127.0.0.1'],
'OPENPGPKEY': ['1 2 3'],
'PTR': ['"example.com."', '10 *.example.com.'],
'RP': ['hostmaster.example.com.', '10 foo.'],
'SMIMEA': ['3 1 0 aGVsbG8gd29ybGQh', 'x 0 0 aabbccddeeff'],
'SPF': ['"v=spf1', 'v=spf1 include:example.com ~all'],
'SRV': ['0 0 0 0', '100 5061 example.com.', '0 0 16920 _foo.example.com.', '0 0 16920 $url.'],
'SSHFP': ['aabbcceeddff'],
'SVCB': [
'0 svc4-baz.example.net. keys=val',
'1 not.fully.qualified key65333=...',
'2 duplicate.key. echconfig="MjIyLi4uCg==" echconfig="MjIyLi4uCg=="',
],
'TLSA': ['3 1 1 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'],
'TXT': [
'foob"ar',
'v=spf1 include:example.com ~all',
'"foo\nbar"',
'"\x00" "Django rejects literal NUL byte"',
],
'URI': ['"1" "2" "3"'],
}
INVALID_RECORDS_PARAMS = [(rr_type, value) for rr_type in INVALID_RECORDS.keys() for value in INVALID_RECORDS[rr_type]]
def test_soundness():
assert INVALID_RECORDS.keys() == VALID_RECORDS_CANONICAL.keys() == VALID_RECORDS_NON_CANONICAL.keys()
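# Note (added for clarity): the "canonical" tests below assert that the nameserver returns
# exactly the value that was sent, while the "non-canonical" tests only compare set sizes,
# presumably because the nameserver answers with the canonicalized form of those records.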
@pytest.mark.parametrize("rr_type,value", generate_params(VALID_RECORDS_CANONICAL))
def test_create_valid_canonical(api_user_domain: DeSECAPIV1Client, rr_type: str, value: str):
domain_name = api_user_domain.domain
expected = set()
subname = 'a'
if rr_type in ('CDNSKEY', 'CDS', 'DNSKEY'):
expected |= api_user_domain.get_key_params(domain_name, rr_type)
subname = ''
if value is not None:
assert api_user_domain.rr_set_create(domain_name, rr_type, [value], subname=subname).status_code == 201
expected.add(value)
rrset = NSLordClient.query(f'{subname}.{domain_name}'.strip('.'), rr_type)
assert rrset == expected
assert_eventually(lambda: query_replication(domain_name, subname, rr_type) == expected)
@pytest.mark.parametrize("rr_type,value", generate_params(VALID_RECORDS_NON_CANONICAL))
def test_create_valid_non_canonical(api_user_domain: DeSECAPIV1Client, rr_type: str, value: str):
domain_name = api_user_domain.domain
expected = set()
subname = 'a'
if rr_type in ('CDNSKEY', 'CDS', 'DNSKEY'):
expected |= api_user_domain.get_key_params(domain_name, rr_type)
subname = ''
if value is not None:
assert api_user_domain.rr_set_create(domain_name, rr_type, [value], subname=subname).status_code == 201
expected.add(value)
rrset = NSLordClient.query(f'{subname}.{domain_name}'.strip('.'), rr_type)
assert len(rrset) == len(expected)
assert_eventually(lambda: len(query_replication(domain_name, subname, rr_type)) == len(expected))
@pytest.mark.parametrize("rr_type,value", INVALID_RECORDS_PARAMS)
def test_create_invalid(api_user_domain: DeSECAPIV1Client, rr_type: str, value: str):
assert api_user_domain.rr_set_create(api_user_domain.domain, rr_type, [value]).status_code == 400
def test_create_long_subname(api_user_domain: DeSECAPIV1Client):
subname = 'a' * 63
assert api_user_domain.rr_set_create(api_user_domain.domain, "AAAA", ["::1"], subname=subname).status_code == 201
assert NSLordClient.query(f"{subname}.{api_user_domain.domain}", "AAAA") == {"::1"}
assert_eventually(lambda: query_replication(api_user_domain.domain, subname, "AAAA") == {"::1"})
def test_add_remove_DNSKEY(api_user_domain: DeSECAPIV1Client):
domain_name = api_user_domain.domain
auto_dnskeys = api_user_domain.get_key_params(domain_name, 'DNSKEY')
# After adding another DNSKEY, we expect it to be part of the nameserver's response (along with the automatic ones)
value = '257 3 13 aCoEWYBBVsP9Fek2oC8yqU8ocKmnS1iD SFZNORnQuHKtJ9Wpyz+kNryquB78Pyk/ NTEoai5bxoipVQQXzHlzyg=='
assert api_user_domain.rr_set_create(domain_name, 'DNSKEY', [value], subname='').status_code == 201
assert NSLordClient.query(domain_name, 'DNSKEY') == auto_dnskeys | {value}
assert_eventually(lambda: query_replication(domain_name, '', 'DNSKEY') == auto_dnskeys | {value})
# After deleting it, we expect that the automatically managed ones are still there
assert api_user_domain.rr_set_delete(domain_name, "DNSKEY", subname='').status_code == 204
assert NSLordClient.query(domain_name, 'DNSKEY') == auto_dnskeys
assert_eventually(lambda: query_replication(domain_name, '', 'DNSKEY') == auto_dnskeys)
|
import numpy as np
def remove_values_from_list(the_list, val):
return [value for value in the_list if value != val]
def remove_values_from_list_to_float(the_list, val):
return [float(value) for value in the_list if value != val]
def load_3d_arr_from_string(arr):
arr = arr.replace('[', '').replace(']', '').split('\n')
count = arr.count('') + 1
arr = remove_values_from_list(arr, '')
group_size = len(arr) // count
groups = [remove_values_from_list_to_float(val.split(' '), '') for group in range(count) for val in
arr[group * group_size: (group + 1) * group_size]]
groups = [groups[group * group_size: (group + 1) * group_size] for group in range(count)]
return np.array(groups)
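# Illustrative note (not part of the original module): load_3d_arr_from_string expects the
# bracketed, newline-separated text of a printed numpy array, with blank lines separating the
# groups along the first axis. For example, the hypothetical input below yields shape (2, 2, 2):
#
#   load_3d_arr_from_string("[[1 2]\n [3 4]]\n\n[[5 6]\n [7 8]]")
#   # -> array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]])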
def normalize_config(config):
config['variances'] = load_3d_arr_from_string(config['variances'])
config['means'] = load_3d_arr_from_string(config['means'])[0, :]
config['counts'] = load_3d_arr_from_string(config['counts'])[0, 0, :]
config['layers'] = load_3d_arr_from_string(config['layers'])[0, 0, :]
config['layer'] = int(config['layer'])
config['batch_size'] = int(config['batch_size'])
config['iterations'] = int(config['iterations'])
config['epsilon'] = float(config['epsilon'])
config['eta'] = float(config['eta'])
config['beta1'] = float(config['beta1'])
config['beta2'] = float(config['beta2'])
config['a_func'] = config['a_func'][0].casefold()
config['optimizer'] = config['optimizer'][0]
return config
def validate_config(config):
errors = []
n_clusters = config['counts'].shape[0]
if config['means'].shape[0] != n_clusters or config['variances'].shape[0] != n_clusters:
errors.append(
f"Count of clusters differ in mean, count and variance field - {n_clusters}, {config["means"].shape[0]}, "
f"{config["variances"].shape[0]}.")
cluster_dimensionality = config['means'].shape[1]
if config['variances'].shape[1] != cluster_dimensionality or config['variances'].shape[2] != cluster_dimensionality:
errors.append(
f"Clusters differ in mean, and variance field - {cluster_dimensionality}, {config["variances"].shape[1:]}.")
if len(config['layers']) < 3:
errors.append(
f"Ensure to have at least 3 layers.")
if config['layer'] >= len(config['layers']):
errors.append(
f"Layer index out of range.")
elif config['layers'][config['layer']] != 2:
errors.append(
f"Selected layer does not have specified dimensionality (2).")
if config['layers'][0] != config['layers'][-1]:
errors.append(
f"Input and output layer dimensionality differs.")
for index, layer in enumerate(config['layers']):
if layer < 1:
errors.append(
f"Layer {index} has invalid dimensionality - {layer}.")
for key in ['layer', 'batch_size', 'iterations', 'epsilon', 'beta1', 'beta2', 'eta']:
if config[key] < 0:
errors.append(
f"Invalid option for {key} - {config[key]}.")
return errors
|
# Referenced from
# https://opensource.apple.com/tarballs/xnu-7195.60.75/bsd/kern/kdebug.c
# https://gitee.com/mirrors/darwin-xnu/blob/main/bsd/kern/kdebug.c kdebug.h
import enum
import io
from construct import Struct, Const, Padding, Int32ul, Int64ul, Array, GreedyRange, Byte, FixedSized, \
CString
KDBG_CLASS_MASK = 0xff000000
KDBG_CLASS_OFFSET = 24
KDBG_CLASS_MAX = 0xff
KDBG_SUBCLASS_MASK = 0x00ff0000
KDBG_SUBCLASS_OFFSET = 16
KDBG_SUBCLASS_MAX = 0xff
# ## /* class and subclass mask */
KDBG_CSC_MASK = 0xffff0000
KDBG_CSC_OFFSET = KDBG_SUBCLASS_OFFSET
KDBG_CSC_MAX = 0xffff
KDBG_CODE_MASK = 0x0000fffc
KDBG_CODE_OFFSET = 2
KDBG_CODE_MAX = 0x3fff
KDBG_EVENTID_MASK = 0xfffffffc
KDBG_FUNC_MASK = 0x00000003
def kdbg_extract_class(Debugid):
return (Debugid & KDBG_CLASS_MASK) >> KDBG_CLASS_OFFSET
def kdbg_extract_subclass(Debugid):
return (Debugid & KDBG_SUBCLASS_MASK) >> KDBG_SUBCLASS_OFFSET
def kdbg_extract_csc(Debugid):
return (Debugid & KDBG_CSC_MASK) >> KDBG_CSC_OFFSET
def kdbg_extract_code(Debugid):
return (Debugid & KDBG_CODE_MASK) >> KDBG_CODE_OFFSET
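# Quick illustration (added; the value is hypothetical): a debugid packs class, subclass, code
# and function qualifier into a single 32-bit word, e.g. for 0x01090008:
#   kdbg_extract_class(0x01090008)    -> 0x01  (DBG_MACH)
#   kdbg_extract_subclass(0x01090008) -> 0x09  (DBG_MACH_EXCP_DECI)
#   kdbg_extract_code(0x01090008)     -> 0x02
#   0x01090008 & KDBG_FUNC_MASK       -> 0x00  (DBG_FUNC_NONE)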
class DgbFuncQual(enum.Enum):
"""
## /* function qualifiers */
DBG_FUNC_START 1U
DBG_FUNC_END 2U
DBG_FUNC_NONE = 0U
Event's role in the trace.
"""
DBG_FUNC_NONE = 0
DBG_FUNC_START = 1
DBG_FUNC_END = 2
DBG_FUNC_ALL = 3
class DebugClasses(enum.Enum):
DBG_MACH = 1
DBG_NETWORK = 2
DBG_FSYSTEM = 3
DBG_BSD = 4
DBG_IOKIT = 5
DBG_DRIVERS = 6
DBG_TRACE = 7
DBG_DLIL = 8
DBG_PTHREAD = 9
DBG_CORESTORAGE = 10
DBG_CG = 11
DBG_MONOTONIC = 12
DBG_MISC = 20
DBG_SECURITY = 30
DBG_DYLD = 31
DBG_QT = 32
DBG_APPS = 33
DBG_LAUNCHD = 34
DBG_SILICON = 35
DBG_PERF = 37
DBG_IMPORTANCE = 38
DBG_BANK = 40
DBG_XPC = 41
DBG_ATM = 42
DBG_ARIADNE = 43
DBG_DAEMON = 44
DBG_ENERGYTRACE = 45
DBG_DISPATCH = 46
DBG_IMG = 49
DBG_UMALLOC = 51
DBG_TURNSTILE = 53
DBG_MIG = 255
class DBG_MACH(enum.Enum):
DBG_MACH_EXCP_KTRAP_x86= 0x02 ## /* Kernel Traps on x86 */
DBG_MACH_EXCP_DFLT = 0x03 ## /* deprecated name */
DBG_MACH_EXCP_SYNC_ARM = 0x03 ## /* arm/arm64 synchronous exception */
DBG_MACH_EXCP_IFLT = 0x04 ## /* deprecated name */
DBG_MACH_EXCP_SERR_ARM = 0x04 ## /* arm/arm64 SError (async) exception */
DBG_MACH_EXCP_INTR = 0x05 ## /* Interrupts */
DBG_MACH_EXCP_ALNG = 0x06 ## /* Alignment Exception */
DBG_MACH_EXCP_UTRAP_x86= 0x07 ## /* User Traps on x86 */
DBG_MACH_EXCP_FP = 0x08 ## /* FP Unavail */
DBG_MACH_EXCP_DECI = 0x09 ## /* Decrementer Interrupt */
DBG_MACH_CHUD = 0x0A ## /* deprecated name */
DBG_MACH_SIGNPOST = 0x0A ## /* kernel signposts */
DBG_MACH_EXCP_SC = 0x0C ## /* System Calls */
DBG_MACH_EXCP_TRACE = 0x0D ## /* Trace exception */
DBG_MACH_EXCP_EMUL = 0x0E ## /* Instruction emulated */
DBG_MACH_IHDLR = 0x10 ## /* Interrupt Handlers */
DBG_MACH_IPC = 0x20 ## /* Inter Process Comm */
DBG_MACH_RESOURCE = 0x25 ## /* tracing limits, etc */
DBG_MACH_VM = 0x30 ## /* Virtual Memory */
DBG_MACH_LEAKS = 0x31 ## /* alloc/free */
DBG_MACH_WORKINGSET = 0x32 ## /* private subclass for working set related debugging */
DBG_MACH_SCHED = 0x40 ## /* Scheduler */
DBG_MACH_MSGID_INVALID = 0x50 ## /* Messages - invalid */
DBG_MACH_LOCKS = 0x60 ## /* new lock APIs */
DBG_MACH_PMAP = 0x70 ## /* pmap */
DBG_MACH_CLOCK = 0x80 ## /* clock */
DBG_MACH_MP = 0x90 ## /* MP related */
DBG_MACH_VM_PRESSURE = 0xA0 ## /* Memory Pressure Events */
DBG_MACH_STACKSHOT = 0xA1 ## /* Stackshot/Microstackshot subsystem */
DBG_MACH_SFI = 0xA2 ## /* Selective Forced Idle (SFI) */
DBG_MACH_ENERGY_PERF = 0xA3 ## /* Energy/performance resource stats */
DBG_MACH_SYSDIAGNOSE = 0xA4 ## /* sysdiagnose */
DBG_MACH_ZALLOC = 0xA5 ## /* Zone allocator */
DBG_MACH_THREAD_GROUP = 0xA6 ## /* Thread groups */
DBG_MACH_COALITION = 0xA7 ## /* Coalitions */
DBG_MACH_SHAREDREGION = 0xA8 ## /* Shared region */
DBG_MACH_SCHED_CLUTCH = 0xA9 ## /* Clutch scheduler */
DBG_MACH_IO = 0xAA ## /* I/O */
DBG_MACH_WORKGROUP = 0xAB ## /* Workgroup subsystem */
class DBG_MACH_IO(enum.Enum):
DBC_MACH_IO_MMIO_READ = 0x1
DBC_MACH_IO_MMIO_WRITE = 0x2
DBC_MACH_IO_PHYS_READ = 0x3
DBC_MACH_IO_PHYS_WRITE = 0x4
DBC_MACH_IO_PORTIO_READ = 0x5
DBC_MACH_IO_PORTIO_WRITE = 0x6
class DBG_MACH_EXCP_INTR(enum.Enum):
DBG_INTR_TYPE_UNKNOWN = 0x0 ## /* default/unknown interrupt */
DBG_INTR_TYPE_IPI = 0x1 ## /* interprocessor interrupt */
DBG_INTR_TYPE_TIMER = 0x2 ## /* timer interrupt */
DBG_INTR_TYPE_OTHER = 0x3 ## /* other (usually external) interrupt */
DBG_INTR_TYPE_PMI = 0x4 ## /* performance monitor interrupt */
class DBG_MACH_SCHED(enum.Enum):
MACH_SCHED = 0x0 # /* Scheduler */
MACH_STACK_ATTACH = 0x1 # /* stack_attach() */
MACH_STACK_HANDOFF = 0x2 # /* stack_handoff() */
MACH_CALL_CONT = 0x3 # /* call_continuation() */
MACH_CALLOUT = 0x4 # /* callouts */
MACH_STACK_DETACH = 0x5
MACH_MAKE_RUNNABLE = 0x6 # /* make thread runnable */
MACH_PROMOTE = 0x7 # /* promoted due to resource (replaced by MACH_PROMOTED) */
MACH_DEMOTE = 0x8 # /* promotion undone (replaced by MACH_UNPROMOTED) */
MACH_IDLE = 0x9 # /* processor idling */
MACH_STACK_DEPTH = 0xa # /* stack depth at switch */
MACH_MOVED = 0xb # /* did not use original scheduling decision */
MACH_PSET_LOAD_AVERAGE = 0xc
MACH_AMP_DEBUG = 0xd
MACH_FAILSAFE = 0xe # /* tripped fixed-pri/RT failsafe */
MACH_BLOCK = 0xf # /* thread block */
MACH_WAIT = 0x10 # /* thread wait assertion */
MACH_GET_URGENCY = 0x14 # /* Urgency queried by platform */
    MACH_URGENCY = 0x15 # /* Urgency (RT/BG/NORMAL) communicated to platform */
MACH_REDISPATCH = 0x16 # /* "next thread" thread redispatched */
MACH_REMOTE_AST = 0x17 # /* AST signal issued to remote processor */
MACH_SCHED_CHOOSE_PROCESSOR = 0x18 # /* Result of choose_processor */
MACH_DEEP_IDLE = 0x19 # /* deep idle on master processor */
MACH_SCHED_DECAY_PRIORITY = 0x1a # /*was MACH_SCHED_DECAY_PRIORITY */
MACH_CPU_THROTTLE_DISABLE = 0x1b # /* Global CPU Throttle Disable */
MACH_RW_PROMOTE = 0x1c# /* promoted due to RW lock promotion */
MACH_RW_DEMOTE = 0x1d# /* promotion due to RW lock undone */
MACH_SCHED_MAINTENANCE = 0x1f# /* periodic maintenance thread */
MACH_DISPATCH = 0x20# /* context switch completed */
MACH_QUANTUM_HANDOFF = 0x21# /* quantum handoff occurred */
MACH_MULTIQ_DEQUEUE = 0x22# /* Result of multiq dequeue */
MACH_SCHED_THREAD_SWITCH= 0x23# /* attempt direct context switch to hinted thread */
MACH_SCHED_SMT_BALANCE = 0x24# /* SMT load balancing ASTs */
MACH_REMOTE_DEFERRED_AST= 0x25# /* Deferred AST started against remote processor */
MACH_REMOTE_CANCEL_AST = 0x26# /* Canceled deferred AST for remote processor */
MACH_SCHED_CHANGE_PRIORITY = 0x27# /* thread sched priority changed */
MACH_SCHED_UPDATE_REC_CORES = 0x28 # /* Change to recommended processor bitmask */
MACH_STACK_WAIT = 0x29# /* Thread could not be switched-to because of kernel stack shortage */
MACH_THREAD_BIND = 0x2a# /* Thread was bound (or unbound) to a processor */
MACH_WAITQ_PROMOTE = 0x2b# /* Thread promoted by waitq boost */
MACH_WAITQ_DEMOTE = 0x2c# /* Thread demoted from waitq boost */
MACH_SCHED_LOAD = 0x2d# /* load update */
MACH_REC_CORES_FAILSAFE = 0x2e# /* recommended processor failsafe kicked in */
MACH_SCHED_QUANTUM_EXPIRED = 0x2f# /* thread quantum expired */
MACH_EXEC_PROMOTE = 0x30# /* Thread promoted by exec boost */
MACH_EXEC_DEMOTE = 0x31# /* Thread demoted from exec boost */
MACH_AMP_SIGNAL_SPILL = 0x32# /* AMP spill signal sent to cpuid */
MACH_AMP_STEAL = 0x33# /* AMP thread stolen or spilled */
MACH_SCHED_LOAD_EFFECTIVE = 0x34# /* Effective scheduler load */
MACH_PROMOTED = 0x35# /* was: thread promoted due to mutex priority promotion */
MACH_UNPROMOTED = 0x36# /* was: thread unpromoted due to mutex priority promotion */
MACH_PROMOTED_UPDATE = 0x37# /* was: thread already promoted, but promotion priority changed */
MACH_QUIESCENT_COUNTER = 0x38# /* quiescent counter tick */
MACH_TURNSTILE_USER_CHANGE = 0x39# /* base priority change because of turnstile */
MACH_AMP_RECOMMENDATION_CHANGE = 0x3a# /* Thread group recommendation change */
MACH_AMP_PERFCTL_POLICY_CHANGE = 0x3b# /* AMP policy for perfctl cluster recommendation */
MACH_TURNSTILE_KERNEL_CHANGE = 0x40# /* sched priority change because of turnstile */
MACH_SCHED_WI_AUTO_JOIN = 0x41# /* work interval auto join events */
MACH_SCHED_WI_DEFERRED_FINISH = 0x42# /* work interval pending finish events for auto-join thread groups */
MACH_PSET_AVG_EXEC_TIME = 0x50
class DBG_MACH_SCHED_CLUTCH(enum.Enum):
MACH_SCHED_CLUTCH_ROOT_BUCKET_STATE = 0x0# /* __unused */
MACH_SCHED_CLUTCH_TG_BUCKET_STATE = 0x1# /* __unused */
MACH_SCHED_CLUTCH_THREAD_SELECT = 0x2# /* Thread selection events for Clutch scheduler */
MACH_SCHED_CLUTCH_THREAD_STATE = 0x3# /* __unused */
MACH_SCHED_CLUTCH_TG_BUCKET_PRI = 0x4# /* Clutch bucket priority update event */
MACH_SCHED_EDGE_CLUSTER_OVERLOAD = 0x5# /* Cluster experienced overload; migrating threads to other clusters */
MACH_SCHED_EDGE_STEAL = 0x6# /* Per-cluster avg. thread execution time */
MACH_SCHED_EDGE_REBAL_RUNNABLE = 0x7# /* Rebalance runnable threads on a foreign cluster */
MACH_SCHED_EDGE_REBAL_RUNNING = 0x8# /* Rebalance running threads on a foreign cluster */
MACH_SCHED_EDGE_SHOULD_YIELD = 0x9# /* Edge decisions for thread yield */
MACH_SCHED_CLUTCH_THR_COUNT = 0xa# /* Clutch scheduler runnable thread counts */
MACH_SCHED_EDGE_LOAD_AVG = 0xb# /* Per-cluster load average */
class DBG_MACH_WORKGROUP(enum.Enum):
WORKGROUP_INTERVAL_CREATE = 0x0# /* work interval creation */
WORKGROUP_INTERVAL_DESTROY = 0x1# /* work interval destruction */
WORKGROUP_INTERVAL_CHANGE = 0x2# /* thread work interval change */
WORKGROUP_INTERVAL_START = 0x3# /* work interval start call */
WORKGROUP_INTERVAL_UPDATE = 0x4# /* work interval update call */
WORKGROUP_INTERVAL_FINISH = 0x5# /* work interval finish call */
class DBG_MACH_IPC(enum.Enum):
MACH_TASK_SUSPEND = 0x0 # /* Suspended a task */
MACH_TASK_RESUME = 0x1 # /* Resumed a task */
MACH_THREAD_SET_VOUCHER = 0x2
MACH_IPC_MSG_SEND = 0x3 # /* mach msg send, uniq msg info */
MACH_IPC_MSG_RECV = 0x4 # /* mach_msg receive */
MACH_IPC_MSG_RECV_VOUCHER_REFUSED = 0x5 # /* mach_msg receive, voucher refused */
MACH_IPC_KMSG_FREE = 0x6 # /* kernel free of kmsg data */
MACH_IPC_VOUCHER_CREATE = 0x7 # /* Voucher added to global voucher hashtable */
MACH_IPC_VOUCHER_CREATE_ATTR_DATA = 0x8 # /* Attr data for newly created voucher */
MACH_IPC_VOUCHER_DESTROY = 0x9 # /* Voucher removed from global voucher hashtable */
MACH_IPC_KMSG_INFO = 0xa # /* Send/Receive info for a kmsg */
MACH_IPC_KMSG_LINK = 0xb # /* link a kernel kmsg pointer to user mach_msg_header_t */
MACH_IPC_PORT_ENTRY_MODIFY = 0xc # /* A port space gained or lost a port right (reference) */
MACH_IPC_DESTROY_GUARDED_DESC = 0xd # /* Unable to receive a guarded descriptor */
class DBG_MACH_THREAD_GROUP(enum.Enum):
MACH_THREAD_GROUP_NEW = 0x0
MACH_THREAD_GROUP_FREE = 0x1
MACH_THREAD_GROUP_SET = 0x2
MACH_THREAD_GROUP_NAME = 0x3
MACH_THREAD_GROUP_NAME_FREE = 0x4
MACH_THREAD_GROUP_FLAGS = 0x5
MACH_THREAD_GROUP_BLOCK = 0x6
class DBG_MACH_COALITION(enum.Enum):
MACH_COALITION_NEW = 0x0
MACH_COALITION_FREE = 0x1
MACH_COALITION_ADOPT = 0x2
MACH_COALITION_REMOVE = 0x3
MACH_COALITION_THREAD_GROUP_SET = 0x4
class DBG_MACH_PMAP(enum.Enum):
MACH_COALITION_NEW = 0x0
MACH_COALITION_FREE = 0x1
MACH_COALITION_ADOPT = 0x2
MACH_COALITION_REMOVE = 0x3
MACH_COALITION_THREAD_GROUP_SET = 0x4
class DBG_MACH_CLOCK(enum.Enum):
MACH_EPOCH_CHANGE = 0x0 # /* wake epoch change */
MACH_BRIDGE_RCV_TS = 0x1 # /* receive timestamp pair from interrupt handler */
MACH_BRIDGE_REMOTE_TIME = 0x2 # /* calculate remote timestamp */
MACH_BRIDGE_RESET_TS = 0x3 # /* reset timestamp conversion parameters */
MACH_BRIDGE_TS_PARAMS = 0x4 # /* recompute timestamp conversion parameters */
MACH_BRIDGE_SKIP_TS = 0x5 # /* skip timestamp */
MACH_BRIDGE_TS_MISMATCH = 0x6 # /* mismatch between predicted and received remote timestamp */
MACH_BRIDGE_OBSV_RATE= 0x7 # /* out of range observed rates */
class DBG_MACH_STACKSHOT(enum.Enum):
MICROSTACKSHOT_RECORD = 0x0
MICROSTACKSHOT_GATHER = 0x1
class DBG_MACH_SYSDIAGNOSE(enum.Enum):
SYSDIAGNOSE_NOTIFY_USER = 0x0
SYSDIAGNOSE_FULL = 0x1
SYSDIAGNOSE_STACKSHOT= 0x2
SYSDIAGNOSE_TAILSPIN = 0x3
class DBG_MACH_SFI(enum.Enum):
SFI_SET_WINDOW = 0x0
SFI_CANCEL_WINDOW = 0x1
SFI_SET_CLASS_OFFTIME = 0x2
SFI_CANCEL_CLASS_OFFTIME = 0x3
SFI_THREAD_DEFER = 0x4
SFI_OFF_TIMER = 0x5
SFI_ON_TIMER = 0x6
SFI_WAIT_CANCELED = 0x7
SFI_PID_SET_MANAGED = 0x8
SFI_PID_CLEAR_MANAGED = 0x9
SFI_GLOBAL_DEFER = 0xa
class DBG_MACH_ZALLOC(enum.Enum):
ZALLOC_ZCRAM = 0x0
class DBG_MACH_RESOURCE(enum.Enum):
RMON_ENABLE_CPUUSAGE_MONITOR = 0x001
RMON_CPUUSAGE_VIOLATED = 0x002
RMON_CPUUSAGE_SUSPENDED = 0x003
RMON_CPUUSAGE_VIOLATED_K32A = 0x004
RMON_CPUUSAGE_VIOLATED_K32B = 0x005
RMON_CPUUSAGE_RESUMED = 0x006
RMON_DISABLE_CPUUSAGE_MONITOR= 0x00f
RMON_ENABLE_CPUWAKES_MONITOR = 0x011
RMON_CPUWAKES_VIOLATED = 0x012
RMON_CPUWAKES_VIOLATED_K32A = 0x014
RMON_CPUWAKES_VIOLATED_K32B = 0x015
RMON_DISABLE_CPUWAKES_MONITOR= 0x01f
RMON_ENABLE_IO_MONITOR = 0x021
RMON_LOGWRITES_VIOLATED = 0x022
RMON_PHYSWRITES_VIOLATED = 0x023
RMON_LOGWRITES_VIOLATED_K32A = 0x024
RMON_LOGWRITES_VIOLATED_K32B = 0x025
RMON_DISABLE_IO_MONITOR = 0x02f
class DBG_NETWORK(enum.Enum):
DBG_NETIP = 1 # /* Internet Protocol */
DBG_NETARP = 2 # /* Address Resolution Protocol */
DBG_NETUDP = 3 # /* User Datagram Protocol */
DBG_NETTCP = 4 # /* Transmission Control Protocol */
DBG_NETICMP = 5 # /* Internet Control Message Protocol */
DBG_NETIGMP = 6 # /* Internet Group Management Protocol */
DBG_NETRIP = 7 # /* Routing Information Protocol */
DBG_NETOSPF = 8 # /* Open Shortest Path First */
DBG_NETISIS = 9 # /* Intermediate System to Intermediate System */
DBG_NETSNMP = 10 # /* Simple Network Management Protocol */
DBG_NETSOCK = 11 # /* Socket Layer */
DBG_NETAARP = 100 # /* Apple ARP */
DBG_NETDDP = 101 # /* Datagram Delivery Protocol */
DBG_NETNBP = 102 # /* Name Binding Protocol */
DBG_NETZIP = 103 # /* Zone Information Protocol */
DBG_NETADSP = 104 # /* Name Binding Protocol */
DBG_NETATP = 105 # /* Apple Transaction Protocol */
DBG_NETASP = 106 # /* Apple Session Protocol */
DBG_NETAFP = 107 # /* Apple Filing Protocol */
DBG_NETRTMP = 108 # /* Routing Table Maintenance Protocol */
DBG_NETAURP = 109 # /* Apple Update Routing Protocol */
DBG_NETIPSEC = 128 # /* IPsec Protocol */
DBG_NETVMNET = 129 # /* VMNet */
class DBG_IOKIT(enum.Enum):
DBG_IOINTC = 0 # /* Interrupt controller */
DBG_IOWORKLOOP = 1 # /* Work from work loop */
DBG_IOINTES = 2 # /* Interrupt event source */
DBG_IOCLKES = 3 # /* Clock event source */
DBG_IOCMDQ = 4 # /* Command queue latencies */
DBG_IOMCURS = 5 # /* Memory Cursor */
DBG_IOMDESC = 6 # /* Memory Descriptors */
DBG_IOPOWER = 7 # /* Power Managerment */
DBG_IOSERVICE = 8 # /* Matching etc. */
DBG_IOREGISTRY = 9 # /* Registry */
#**** 9-32 reserved for internal IOKit usage ****
DBG_IOSTORAGE = 32 # /* Storage layers */
DBG_IONETWORK = 33 # /* Network layers */
DBG_IOKEYBOARD = 34 # /* Keyboard */
DBG_IOHID = 35 # /* HID Devices */
DBG_IOAUDIO = 36 # /* Audio */
DBG_IOSERIAL = 37 # /* Serial */
DBG_IOTTY = 38 # /* TTY layers */
DBG_IOSAM = 39 # /* SCSI Architecture Model layers */
DBG_IOPARALLELATA = 40 # /* Parallel ATA */
DBG_IOPARALLELSCSI = 41 # /* Parallel SCSI */
DBG_IOSATA = 42 # /* Serial-ATA */
DBG_IOSAS = 43 # /* SAS */
DBG_IOFIBRECHANNEL = 44 # /* FiberChannel */
DBG_IOUSB = 45 # /* USB */
DBG_IOBLUETOOTH = 46 # /* Bluetooth */
DBG_IOFIREWIRE = 47 # /* FireWire */
DBG_IOINFINIBAND = 48 # /* Infiniband */
DBG_IOCPUPM = 49 # /* CPU Power Management */
DBG_IOGRAPHICS = 50 # /* Graphics */
DBG_HIBERNATE = 51 # /* hibernation related events */
DBG_IOTHUNDERBOLT = 52 # /* Thunderbolt */
DBG_BOOTER = 53 # /* booter related events */
DBG_IOAUDIO2 = 54 # /* Audio (extended) */
DBG_IOSURFACEPA = 64 # /* IOSurface page mappings */
DBG_IOMDPA = 65 # /* IOMemoryDescriptor page mappings */
DBG_IODARTPA = 66 # /* DART page mappings */
class DBG_DRIVERS(enum.Enum):
DBG_DRVSTORAGE = 1# /* Storage layers */
DBG_DRVNETWORK = 2# /* Network layers */
DBG_DRVKEYBOARD = 3# /* Keyboard */
DBG_DRVHID = 4# /* HID Devices */
DBG_DRVAUDIO = 5# /* Audio */
DBG_DRVSERIAL = 7# /* Serial */
DBG_DRVSAM = 8# /* SCSI Architecture Model layers */
DBG_DRVPARALLELATA = 9# /* Parallel ATA */
DBG_DRVPARALLELSCSI = 10# /* Parallel SCSI */
DBG_DRVSATA = 11# /* Serial ATA */
DBG_DRVSAS = 12# /* SAS */
DBG_DRVFIBRECHANNEL = 13# /* FiberChannel */
DBG_DRVUSB = 14# /* USB */
DBG_DRVBLUETOOTH = 15# /* Bluetooth */
DBG_DRVFIREWIRE = 16# /* FireWire */
DBG_DRVINFINIBAND = 17# /* Infiniband */
DBG_DRVGRAPHICS = 18# /* Graphics */
DBG_DRVSD = 19# /* Secure Digital */
DBG_DRVNAND = 20# /* NAND drivers and layers */
DBG_SSD = 21# /* SSD */
DBG_DRVSPI = 22# /* SPI */
DBG_DRVWLAN_802_11 = 23# /* WLAN 802.11 */
DBG_DRVSSM = 24# /* System State Manager(AppleSSM) */
DBG_DRVSMC = 25# /* System Management Controller */
DBG_DRVMACEFIMANAGER = 26# /* Mac EFI Manager */
DBG_DRVANE = 27# /* ANE */
DBG_DRVETHERNET = 28# /* Ethernet */
DBG_DRVMCC = 29# /* Memory Cache Controller */
DBG_DRVACCESSORY = 30# /* Accessories */
class DBG_DLIL(enum.Enum):
DBG_DLIL_STATIC = 1 # /* Static DLIL code */
DBG_DLIL_PR_MOD = 2 # /* DLIL Protocol Module */
DBG_DLIL_IF_MOD = 3 # /* DLIL Interface Module */
DBG_DLIL_PR_FLT = 4 # /* DLIL Protocol Filter */
DBG_DLIL_IF_FLT = 5 # /* DLIL Interface FIlter */
class DBG_FSYSTEM(enum.Enum):
DBG_FSRW = 0x1 # /* reads and writes to the filesystem */
DBG_DKRW = 0x2 # /* reads and writes to the disk */
DBG_FSVN = 0x3 # /* vnode operations (inc. locking/unlocking) */
DBG_FSLOOOKUP = 0x4 # /* namei and other lookup-related operations */
DBG_JOURNAL= 0x5 # /* journaling operations */
DBG_IOCTL = 0x6 # /* ioctl to the disk */
DBG_BOOTCACHE = 0x7 # /* bootcache operations */
DBG_HFS = 0x8 # /* HFS-specific events; see the hfs project */
DBG_APFS = 0x9 # /* APFS-specific events; see the apfs project */
DBG_SMB = 0xA # /* SMB-specific events; see the smb project */
DBG_MOUNT = 0xB # /* Mounting/unmounting operations */
DBG_EXFAT = 0xE # /* ExFAT-specific events; see the exfat project */
DBG_MSDOS = 0xF # /* FAT-specific events; see the msdosfs project */
DBG_ACFS = 0x10 # /* Xsan-specific events; see the XsanFS project */
DBG_THROTTLE = 0x11 # /* I/O Throttling events */
DBG_DECMP = 0x12 # /* Decmpfs-specific events */
DBG_VFS = 0x13 # /* VFS layer events */
DBG_LIVEFS = 0x14 # /* LiveFS events; see the UserFS project */
DBG_CONTENT_PROT = 0xCF# /* Content Protection Events: see bsd/sys/cprotect.h */
class DBG_BSD(enum.Enum):
DBG_BSD_PROC = 0x01# /* process/signals related */
DBG_BSD_MEMSTAT = 0x02# /* memorystatus / jetsam operations */
DBG_BSD_KEVENT = 0x03# /* kqueue / kevent related */
DBG_BSD_EXCP_SC = 0x0C# /* System Calls */
DBG_BSD_AIO = 0x0D# /* aio (POSIX async IO) */
DBG_BSD_SC_EXTENDED_INFO = 0x0E# /* System Calls, extended info */
DBG_BSD_SC_EXTENDED_INFO2 = 0x0F# /* System Calls, extended info */
DBG_BSD_KDEBUG_TEST = 0xFF# /* for testing kdebug */
class DBG_BSD_PROC(enum.Enum):
BSD_PROC_EXIT = 1 # /* process exit */
BSD_PROC_FRCEXIT = 2 # /* Kernel force termination */
BSD_PROC_EXEC = 3 # /* process spawn / exec */
BSD_PROC_EXITREASON_CREATE = 4 # /* exit reason creation */
BSD_PROC_EXITREASON_COMMIT = 5 # /* exit reason commited to a proc */
class DBG_BSD_MEMSTAT(enum.Enum):
BSD_MEMSTAT_SCAN = 1 # /* memorystatus thread awake */
BSD_MEMSTAT_JETSAM = 2 # /* LRU jetsam */
BSD_MEMSTAT_JETSAM_HIWAT = 3 # /* highwater jetsam */
BSD_MEMSTAT_FREEZE = 4 # /* freeze process */
BSD_MEMSTAT_FREEZE_SCAN = 5 # /* select a process to freeze and freeze it */
BSD_MEMSTAT_UPDATE = 6 # /* priority update */
BSD_MEMSTAT_IDLE_DEMOTE = 7 # /* idle demotion fired */
BSD_MEMSTAT_CLEAR_ERRORS = 8 # /* reset termination error state */
BSD_MEMSTAT_DIRTY_TRACK = 9 # /* track the process state */
BSD_MEMSTAT_DIRTY_SET = 10 # /* set the process state */
BSD_MEMSTAT_DIRTY_CLEAR = 11 # /* clear the process state */
BSD_MEMSTAT_GRP_SET_PROP = 12 # /* set group properties */
BSD_MEMSTAT_DO_KILL = 13 # /* memorystatus kills */
BSD_MEMSTAT_CHANGE_PRIORITY = 14 # /* priority changed */
BSD_MEMSTAT_FAST_JETSAM = 15 # /* Aggressive jetsam ("clear-the-deck") */
BSD_MEMSTAT_COMPACTOR_RUN = 16 # /* run VM compactor after process kill */
BSD_MEMSTAT_FREEZE_DISABLE = 17 # /* disable freeze and kill frozen processes */
BSD_MEMSTAT_RELAUNCH_FLAGS = 18 # /* flags representing jetsam behavior; based on launchd data */
class DBG_BSD_KEVENT(enum.Enum):
BSD_KEVENT_KQ_PROCESS_BEGIN = 1
BSD_KEVENT_KQ_PROCESS_END = 2
BSD_KEVENT_KQWQ_PROCESS_BEGIN = 3
BSD_KEVENT_KQWQ_PROCESS_END = 4
BSD_KEVENT_KQWQ_BIND = 5
BSD_KEVENT_KQWQ_UNBIND = 6
BSD_KEVENT_KQWQ_THREQUEST = 7
BSD_KEVENT_KQWL_PROCESS_BEGIN = 8
BSD_KEVENT_KQWL_PROCESS_END = 9
BSD_KEVENT_KQWL_THREQUEST = 10
BSD_KEVENT_KQWL_THADJUST = 11
BSD_KEVENT_KQ_REGISTER = 12
BSD_KEVENT_KQWQ_REGISTER = 13
BSD_KEVENT_KQWL_REGISTER = 14
BSD_KEVENT_KNOTE_ACTIVATE = 15
BSD_KEVENT_KQ_PROCESS = 16
BSD_KEVENT_KQWQ_PROCESS = 17
BSD_KEVENT_KQWL_PROCESS = 18
BSD_KEVENT_KQWL_BIND = 19
BSD_KEVENT_KQWL_UNBIND = 20
BSD_KEVENT_KNOTE_ENABLE = 21
BSD_KEVENT_KNOTE_VANISHED = 22
class DBG_TRACE(enum.Enum):
DBG_TRACE_DATA = 0
DBG_TRACE_STRING = 1
DBG_TRACE_INFO = 2
class DBG_CORESTORAGE(enum.Enum):
DBG_CS_IO = 0
class DBG_SECURITY(enum.Enum):
DBG_SEC_KERNEL = 0# /* raw entropy collected by the kernel */
DBG_SEC_SANDBOX = 1
class DBG_MONOTONIC(enum.Enum):
DBG_MT_INSTRS_CYCLES = 1
DBG_MT_DEBUG = 2
DBG_MT_RESOURCES_PROC_EXIT = 3
DBG_MT_RESOURCES_THR_EXIT = 4
DBG_MT_TMPTH = 0xfe
DBG_MT_TMPCPU = 0xff
class DBG_MISC(enum.Enum):
DBG_MISC_COREBRIGHTNESS = 0x01
DBG_MISC_VIDEOENG = 0x02
DBG_EVENT = 0x10
DBG_MISC_INSTRUMENTS = 0x11
DBG_MISC_INSTRUMENTSBT = 0x12
DBG_MISC_LAYOUT = 0x1a
DBG_BUFFER = 0x20
class DBG_DYLD(enum.Enum):
DBG_DYLD_UUID = 5
class DBG_DYLD_UUID(enum.Enum):
DBG_DYLD_UUID_MAP_A = (0)
DBG_DYLD_UUID_MAP_B = (1)
DBG_DYLD_UUID_MAP_32_A = (2)
DBG_DYLD_UUID_MAP_32_B = (3)
DBG_DYLD_UUID_MAP_32_C = (4)
DBG_DYLD_UUID_UNMAP_A = (5)
DBG_DYLD_UUID_UNMAP_B = (6)
DBG_DYLD_UUID_UNMAP_32_A = (7)
DBG_DYLD_UUID_UNMAP_32_B = (8)
DBG_DYLD_UUID_UNMAP_32_C = (9)
DBG_DYLD_UUID_SHARED_CACHE_A = (10)
DBG_DYLD_UUID_SHARED_CACHE_B = (11)
DBG_DYLD_UUID_SHARED_CACHE_32_A = (12)
DBG_DYLD_UUID_SHARED_CACHE_32_B = (13)
DBG_DYLD_UUID_SHARED_CACHE_32_C = (14)
DBG_DYLD_AOT_UUID_MAP_A = (15)
DBG_DYLD_AOT_UUID_MAP_B = (16)
class DBG_DKRW(enum.Enum):
DKIO_DONE = 0x01
DKIO_READ = 0x02
DKIO_ASYNC = 0x04
DKIO_META = 0x08
DKIO_PAGING = 0x10
DKIO_THROTTLE= 0x20# /* Deprecated, still provided so fs_usage doesn't break */
DKIO_PASSIVE = 0x40
DKIO_NOCACHE = 0x80
DKIO_TIER_MASK = 0xF00
DKIO_TIER_SHIFT = 8
DKIO_TIER_UPGRADE = 0x1000
class DBG_APPS(enum.Enum):
DBG_APP_LOGINWINDOW = 0x03
DBG_APP_AUDIO = 0x04
DBG_APP_SYSTEMUI = 0x05
DBG_APP_SIGNPOST = 0x0A
DBG_APP_APPKIT = 0x0C
DBG_APP_UIKIT = 0x0D
DBG_APP_DFR = 0x0E
DBG_APP_LAYOUT = 0x0F
DBG_APP_COREDATA = 0x10
DBG_APP_SAMBA = 0x80
DBG_APP_EOSSUPPORT = 0x81
DBG_APP_MACEFIMANAGER= 0x82
class DBG_THROTTLE(enum.Enum):
OPEN_THROTTLE_WINDOW = 0x1
PROCESS_THROTTLED = 0x2
IO_THROTTLE_DISABLE = 0x3
IO_TIER_UPL_MISMATCH = 0x4
class DBG_PERF(enum.Enum):
PERF_EVENT = 0
PERF_DATA = 1
PERF_STK = 2
class DBG_IMPORTANCE(enum.Enum):
IMP_ASSERTION = 0x10 # /* Task takes/drops a boost assertion */
IMP_BOOST = 0x11 # /* Task boost level changed */
IMP_MSG = 0x12 # /* boosting message sent by donating task on donating port */
IMP_WATCHPORT = 0x13 # /* port marked as watchport, and boost was transferred to the watched task */
IMP_TASK_SUPPRESSION = 0x17 # /* Task changed suppression behaviors */
IMP_TASK_APPTYPE = 0x18 # /* Task launched with apptype */
IMP_UPDATE = 0x19 # /* Requested -> effective calculation */
IMP_USYNCH_QOS_OVERRIDE = 0x1A # /* Userspace synchronization applied QoS override to resource owning thread */
IMP_DONOR_CHANGE = 0x1B # /* The iit_donor bit changed */
IMP_MAIN_THREAD_QOS = 0x1C # /* The task's main thread QoS was set */
IMP_SYNC_IPC_QOS = 0x1D # /* Sync IPC QOS override */
IMP_TASK_POLICY_DARWIN_BG = 0x21
IMP_TASK_POLICY_IOPOL = 0x22
IMP_TASK_POLICY_IO = 0x23
IMP_TASK_POLICY_PASSIVE_IO = 0x24
IMP_TASK_POLICY_DARWIN_BG_IOPOL = 0x27
IMP_TASK_POLICY_TAL = 0x28
IMP_TASK_POLICY_BOOST = 0x29
IMP_TASK_POLICY_ROLE = 0x2A
IMP_TASK_POLICY_TERMINATED = 0x2C
IMP_TASK_POLICY_NEW_SOCKETS_BG = 0x2D
IMP_TASK_POLICY_SUP_ACTIVE = 0x2E
IMP_TASK_POLICY_LATENCY_QOS = 0x2F
IMP_TASK_POLICY_THROUGH_QOS = 0x30
IMP_TASK_POLICY_WATCHERS_BG = 0x31
IMP_TASK_POLICY_SFI_MANAGED = 0x34
IMP_TASK_POLICY_ALL_SOCKETS_BG = 0x37
IMP_TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS = 0x39# /* latency as value1, throughput as value2 */
IMP_TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS = 0x3A# /* latency as value1, throughput as value2 */
IMP_TASK_POLICY_PIDBIND_BG = 0x32
IMP_TASK_POLICY_QOS_OVERRIDE = 0x36
IMP_TASK_POLICY_QOS_AND_RELPRIO = 0x38# /* QoS as value1, relative priority as value2 */
IMP_TASK_POLICY_QOS_WORKQ_OVERRIDE =0x3B
IMP_TASK_POLICY_QOS_PROMOTE = 0x3C
IMP_TASK_POLICY_QOS_KEVENT_OVERRIDE = 0x3D
IMP_TASK_POLICY_QOS_SERVICER_OVERRIDE = 0x3E
class IMP_ASSERTION(enum.Enum):
IMP_HOLD = 0x2 # /* Task holds a boost assertion */
IMP_DROP = 0x4 # /* Task drops a boost assertion */
IMP_EXTERN = 0x8 # /* boost assertion moved from kernel to userspace responsibility (externalized) */
class IMP_BOOST(enum.Enum):
IMP_BOOSTED = 0x1
IMP_UNBOOSTED = 0x2 # /* Task drops a boost assertion */
class IMP_MSG(enum.Enum):
IMP_MSG_SEND = 0x1 # /* boosting message sent by donating task on donating port */
IMP_MSG_DELV = 0x2 # /* boosting message delivered to task */
class IMP_UPDATE(enum.Enum):
IMP_UPDATE_TASK_CREATE = 0x1
class IMP_USYNCH_QOS_OVERRIDE(enum.Enum):
IMP_USYNCH_ADD_OVERRIDE = 0x0 # /* add override for a contended resource */
IMP_USYNCH_REMOVE_OVERRIDE = 0x1 # /* remove override for a contended resource */
class IMP_DONOR_CHANGE(enum.Enum):
IMP_DONOR_UPDATE_LIVE_DONOR_STATE = 0x0
IMP_DONOR_INIT_DONOR_STATE = 0x1
class IMP_SYNC_IPC_QOS(enum.Enum):
IMP_SYNC_IPC_QOS_APPLIED = 0x0
IMP_SYNC_IPC_QOS_REMOVED = 0x1
IMP_SYNC_IPC_QOS_OVERFLOW = 0x2
IMP_SYNC_IPC_QOS_UNDERFLOW = 0x3
class DBG_TURNSTILE(enum.Enum):
TURNSTILE_HEAP_OPERATIONS = 0x10
TURNSTILE_PRIORITY_OPERATIONS = 0x20
TURNSTILE_FREELIST_OPERATIONS = 0x30
class TURNSTILE_HEAP_OPERATIONS(enum.Enum):
THREAD_ADDED_TO_TURNSTILE_WAITQ = 0x1
THREAD_REMOVED_FROM_TURNSTILE_WAITQ = 0x2
THREAD_MOVED_IN_TURNSTILE_WAITQ = 0x3
TURNSTILE_ADDED_TO_TURNSTILE_HEAP = 0x4
TURNSTILE_REMOVED_FROM_TURNSTILE_HEAP= 0x5
TURNSTILE_MOVED_IN_TURNSTILE_HEAP = 0x6
TURNSTILE_ADDED_TO_THREAD_HEAP = 0x7
TURNSTILE_REMOVED_FROM_THREAD_HEAP = 0x8
TURNSTILE_MOVED_IN_THREAD_HEAP = 0x9
TURNSTILE_UPDATE_STOPPED_BY_LIMIT = 0xa
THREAD_NOT_WAITING_ON_TURNSTILE = 0xb
class TURNSTILE_PRIORITY_OPERATIONS(enum.Enum):
TURNSTILE_PRIORITY_CHANGE = 0x1
THREAD_USER_PROMOTION_CHANGE = 0x2
class TURNSTILE_FREELIST_OPERATIONS(enum.Enum):
TURNSTILE_PREPARE = 0x1
TURNSTILE_COMPLETE = 0x2
class DBG_BANK(enum.Enum):
BANK_ACCOUNT_INFO = 0x10 # /* Trace points related to bank account struct */
BANK_TASK_INFO = 0x11 # /* Trace points related to bank task struct */
class DBG_ATM(enum.Enum):
ATM_SUBAID_INFO = 0x10
ATM_GETVALUE_INFO = 0x20
ATM_UNREGISTER_INFO = 0x30
class BANK_ACCOUNT_INFO(enum.Enum):
BANK_SETTLE_CPU_TIME = 0x1 # /* Bank ledger(chit) rolled up to tasks. */
BANK_SECURE_ORIGINATOR_CHANGED = 0x2 # /* Secure Originator changed. */
BANK_SETTLE_ENERGY = 0x3 # /* Bank ledger(energy field) rolled up to tasks. */
class ATM_SUBAID_INFO(enum.Enum):
ATM_MIN_CALLED = 0x1
ATM_LINK_LIST_TRIM = 0x2
class ATM_GETVALUE_INFO(enum.Enum):
ATM_VALUE_REPLACED = 0x1
ATM_VALUE_ADDED = 0x2
class ATM_UNREGISTER_INFO(enum.Enum):
ATM_VALUE_UNREGISTERED = 0x1
ATM_VALUE_DIFF_MAILBOX = 0x2
class DBG_DAEMON(enum.Enum):
DBG_DAEMON_COREDUET = 0x1
DBG_DAEMON_POWERD = 0x2
# KD_BUF_FORMAT = '<Q32sQLLQ'
# '<QLLQQQQLLQ'
kperf_data = Struct(
'timestamp' / Int64ul,
'args' / Array(4, Int64ul),
'code' / Int64ul,
'debug_id' / Int32ul,
'cpu_id' / Int32ul,
'unused' / Int64ul,
)
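# Note (added): the fields above add up to 64 bytes (8 + 4*8 + 8 + 4 + 4 + 8), matching the
# fixed 64-byte reads in KdBufParser.decode below. Within this module the 'code' field is
# treated as the thread id: KperfData._format_process looks it up in threads_pids.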
# Kevent = namedtuple('Kevent', ['timestamp', 'data', 'values', 'tid', 'debugid', 'eventid', 'func_qualifier'])
#
# typedef struct {
# ## /* the thread ID */
# #if defined(__arm64__)
# uint64_t thread;
# #else
# uintptr_t thread;
# #endif
# ## /* 0 for invalid, otherwise the PID (or 1 for kernel_task) */
# int valid;
# ## /* the name of the process owning the thread */
# char command[20];
# } kd_threadmap;
#
kd_threadmap = Struct(
'tid' / Int64ul,
'pid' / Int32ul,
'process' / FixedSized(0x14, CString('utf8')),
)
kd_header_v2 = Struct(
    'number_of_threads' / Int32ul,
Padding(12),
'is_64bit' / Int32ul,
'tick_frequency' / Int64ul,
Padding(0x100),
    'threadmap' / Array(lambda ctx: ctx.number_of_threads, kd_threadmap),
'_pad' / GreedyRange(Const(0, Byte)),
)
# // Version 3 header
# // The header chunk has the tag 0x00001000 which also serves as a magic word
# // that identifies the file as a version 3 trace file. The header payload is
# // a set of fixed fields followed by a variable number of sub-chunks:
# ## /*
# * ____________________________________________________________________________
# | Offset | Size | Field |
# | ----------------------------------------------------------------------------
# | 0 | 4 | Tag (0x00001000) |
# | 4 | 4 | Sub-tag. Represents the version of the header. |
# | 8 | 8 | Length of header payload (40+8x) |
# | 16 | 8 | Time base info. Two 32-bit numbers, numer/denom, |
# | | | for converting timestamps to nanoseconds. |
# | 24 | 8 | Timestamp of trace start. |
# | 32 | 8 | Wall time seconds since Unix epoch. |
# | | | As returned by gettimeofday(). |
# | 40 | 4 | Wall time microseconds. As returned by gettimeofday(). |
# | 44 | 4 | Local time zone offset in minutes. ( " ) |
# | 48 | 4 | Type of daylight savings time correction to apply. ( " ) |
# | 52 | 4 | Flags. 1 = 64-bit. Remaining bits should be written |
# | | | as 0 and ignored when reading. |
# | 56 | 8x | Variable number of sub-chunks. None are required. |
# | | | Ignore unknown chunks. |
# | ----------------------------------------------------------------------------
# */
# // NOTE: The header sub-chunks are considered part of the header chunk,
# // so they must be included in the header chunk’s length field.
# // The CPU map is an optional sub-chunk of the header chunk. It provides
# // information about the CPUs that are referenced from the trace events.
# typedef struct {
# uint32_t tag;
# uint32_t sub_tag;
# uint64_t length;
# uint32_t timebase_numer;
# uint32_t timebase_denom;
# uint64_t timestamp;
# uint64_t walltime_secs;
# uint32_t walltime_usecs;
# uint32_t timezone_minuteswest;
# uint32_t timezone_dst;
# uint32_t flags;
# } __attribute__((packed)) kd_header_v3;
#
# kd_header_v3 header = {
# .tag = RAW_VERSION3,
# .sub_tag = V3_HEADER_VERSION,
# .length = (sizeof(kd_header_v3) + cpumap_size - sizeof(kd_cpumap_header)),
# .timebase_numer = timebase.numer,
# .timebase_denom = timebase.denom,
# .timestamp = 0,
# .walltime_secs = 0,
# .walltime_usecs = 0,
# .timezone_minuteswest = 0,
# .timezone_dst = 0,
#
kd_header_v3 = Struct(
'tag' / Int32ul,
'sub_tag' / Int32ul,
'length' / Int64ul,
'timebase_numer' / Int32ul,
'timebase_denom' / Int32ul,
'timestamp' / Int64ul,
'walltime_secs' / Int64ul,
'walltime_usecs' / Int32ul,
'timezone_minuteswest' / Int32ul,
'timezone_dst' / Int32ul,
'flags' / Int32ul,
'tag2' / Int32ul,
)
CLASS_DICT = vars()
class KdBufParser:
def __init__(self,data):
self.timestamp = data.timestamp
self.args = data.args
self.code = data.code
self.debug_id = data.debug_id
self.event_id = data.debug_id & KDBG_EVENTID_MASK
self.func_code = data.debug_id & KDBG_FUNC_MASK
self.class_code = kdbg_extract_class(data.debug_id)
self.subclass_code = kdbg_extract_subclass(data.debug_id)
self.final_code = kdbg_extract_code(data.debug_id)
@classmethod
def decode(cls,buf_io:io.BytesIO):
while True:
buf = buf_io.read(64)
if not buf:
return
data = kperf_data.parse(buf)
yield cls(data)
class KperfData:
def __init__(self,traceCodesFile={},filter_pid=None,filter_process=None):
self.trace_codes = traceCodesFile
self.threads_pids = {}
self.version = None
self.filter_tid = filter_pid
self.filter_process = filter_process
def set_threads_pids(self, threads):
for thread in threads:
self.threads_pids[thread.tid] = (thread.pid,thread.process)
def _format_process(self, tid):
pid,process_name = self.threads_pids.get(tid,(None,None))
return pid,process_name,f'{process_name}({pid})' if pid else f'Error: tid {tid}'
def _format_class(self,classes,code):
if classes:
try:
classes_name = classes(code).name
return classes_name, f'{classes_name:<18}'
except ValueError:
                return None, f'Error({code})'
else:
            return None, f'Error({code})'
def check_header(self,kd_buf):
if kd_buf.startswith(b'\x07X\xa2Y'):
return io.BytesIO()
buf_io = io.BytesIO(kd_buf)
if not self.threads_pids:
self.version = buf_io.read(4)
parsed_header = kd_header_v2.parse_stream(buf_io)
self.set_threads_pids(parsed_header.threadmap)
return buf_io
def to_dict(self,kd_buf):
buf_io = self.check_header(kd_buf)
for event in KdBufParser.decode(buf_io):
yield event
def to_str(self,kd_buf:bytes):
buf_io = self.check_header(kd_buf)
for event in KdBufParser.decode(buf_io):
pid,process_name,process_str = self._format_process(event.code)
if self.filter_tid and self.filter_tid != pid:
continue
if self.filter_process and self.filter_process != process_name:
continue
formatted_data = ''
formatted_data += f'{process_str:<27}'
if event.event_id in self.trace_codes:
name = self.trace_codes[event.event_id] + f' ({hex(event.event_id)})'
else:
name = hex(event.event_id)
formatted_data += f'{name:<60}'
classes_name,_str = self._format_class(DebugClasses,event.class_code)
formatted_data += f'{_str:<18}'
classes_name,_str = self._format_class(CLASS_DICT.get(classes_name),event.subclass_code)
formatted_data += f'{_str:<30}'
# classes_name,_str = self._format_class(CLASS_DICT.get(classes_name),event.final_code)
# formatted_data += f'{_str:<30}'
try:
formatted_data += f'{DgbFuncQual(event.func_code).name:<15}'
except ValueError:
                formatted_data += f'{"Error":<16}'
yield formatted_data
|
# Referenced from
# https://opensource.apple.com/tarballs/xnu-7195.60.75/bsd/kern/kdebug.c
# https://gitee.com/mirrors/darwin-xnu/blob/main/bsd/kern/kdebug.c kdebug.h
import enum
import io
from construct import Struct, Const, Padding, Int32ul, Int64ul, Array, GreedyRange, Byte, FixedSized, \
CString
KDBG_CLASS_MASK = 0xff000000
KDBG_CLASS_OFFSET = 24
KDBG_CLASS_MAX = 0xff
KDBG_SUBCLASS_MASK = 0x00ff0000
KDBG_SUBCLASS_OFFSET = 16
KDBG_SUBCLASS_MAX = 0xff
# ## /* class and subclass mask */
KDBG_CSC_MASK = 0xffff0000
KDBG_CSC_OFFSET = KDBG_SUBCLASS_OFFSET
KDBG_CSC_MAX = 0xffff
KDBG_CODE_MASK = 0x0000fffc
KDBG_CODE_OFFSET = 2
KDBG_CODE_MAX = 0x3fff
KDBG_EVENTID_MASK = 0xfffffffc
KDBG_FUNC_MASK = 0x00000003
def kdbg_extract_class(Debugid):
return (Debugid & KDBG_CLASS_MASK) >> KDBG_CLASS_OFFSET
def kdbg_extract_subclass(Debugid):
return (Debugid & KDBG_SUBCLASS_MASK) >> KDBG_SUBCLASS_OFFSET
def kdbg_extract_csc(Debugid):
return (Debugid & KDBG_CSC_MASK) >> KDBG_CSC_OFFSET
def kdbg_extract_code(Debugid):
return (Debugid & KDBG_CODE_MASK) >> KDBG_CODE_OFFSET
class DgbFuncQual(enum.Enum):
"""
## /* function qualifiers */
DBG_FUNC_START 1U
DBG_FUNC_END 2U
DBG_FUNC_NONE = 0U
Event's role in the trace.
"""
DBG_FUNC_NONE = 0
DBG_FUNC_START = 1
DBG_FUNC_END = 2
DBG_FUNC_ALL = 3
class DebugClasses(enum.Enum):
DBG_MACH = 1
DBG_NETWORK = 2
DBG_FSYSTEM = 3
DBG_BSD = 4
DBG_IOKIT = 5
DBG_DRIVERS = 6
DBG_TRACE = 7
DBG_DLIL = 8
DBG_PTHREAD = 9
DBG_CORESTORAGE = 10
DBG_CG = 11
DBG_MONOTONIC = 12
DBG_MISC = 20
DBG_SECURITY = 30
DBG_DYLD = 31
DBG_QT = 32
DBG_APPS = 33
DBG_LAUNCHD = 34
DBG_SILICON = 35
DBG_PERF = 37
DBG_IMPORTANCE = 38
DBG_BANK = 40
DBG_XPC = 41
DBG_ATM = 42
DBG_ARIADNE = 43
DBG_DAEMON = 44
DBG_ENERGYTRACE = 45
DBG_DISPATCH = 46
DBG_IMG = 49
DBG_UMALLOC = 51
DBG_TURNSTILE = 53
DBG_MIG = 255
class DBG_MACH(enum.Enum):
DBG_MACH_EXCP_KTRAP_x86= 0x02 ## /* Kernel Traps on x86 */
DBG_MACH_EXCP_DFLT = 0x03 ## /* deprecated name */
DBG_MACH_EXCP_SYNC_ARM = 0x03 ## /* arm/arm64 synchronous exception */
DBG_MACH_EXCP_IFLT = 0x04 ## /* deprecated name */
DBG_MACH_EXCP_SERR_ARM = 0x04 ## /* arm/arm64 SError (async) exception */
DBG_MACH_EXCP_INTR = 0x05 ## /* Interrupts */
DBG_MACH_EXCP_ALNG = 0x06 ## /* Alignment Exception */
DBG_MACH_EXCP_UTRAP_x86= 0x07 ## /* User Traps on x86 */
DBG_MACH_EXCP_FP = 0x08 ## /* FP Unavail */
DBG_MACH_EXCP_DECI = 0x09 ## /* Decrementer Interrupt */
DBG_MACH_CHUD = 0x0A ## /* deprecated name */
DBG_MACH_SIGNPOST = 0x0A ## /* kernel signposts */
DBG_MACH_EXCP_SC = 0x0C ## /* System Calls */
DBG_MACH_EXCP_TRACE = 0x0D ## /* Trace exception */
DBG_MACH_EXCP_EMUL = 0x0E ## /* Instruction emulated */
DBG_MACH_IHDLR = 0x10 ## /* Interrupt Handlers */
DBG_MACH_IPC = 0x20 ## /* Inter Process Comm */
DBG_MACH_RESOURCE = 0x25 ## /* tracing limits, etc */
DBG_MACH_VM = 0x30 ## /* Virtual Memory */
DBG_MACH_LEAKS = 0x31 ## /* alloc/free */
DBG_MACH_WORKINGSET = 0x32 ## /* private subclass for working set related debugging */
DBG_MACH_SCHED = 0x40 ## /* Scheduler */
DBG_MACH_MSGID_INVALID = 0x50 ## /* Messages - invalid */
DBG_MACH_LOCKS = 0x60 ## /* new lock APIs */
DBG_MACH_PMAP = 0x70 ## /* pmap */
DBG_MACH_CLOCK = 0x80 ## /* clock */
DBG_MACH_MP = 0x90 ## /* MP related */
DBG_MACH_VM_PRESSURE = 0xA0 ## /* Memory Pressure Events */
DBG_MACH_STACKSHOT = 0xA1 ## /* Stackshot/Microstackshot subsystem */
DBG_MACH_SFI = 0xA2 ## /* Selective Forced Idle (SFI) */
DBG_MACH_ENERGY_PERF = 0xA3 ## /* Energy/performance resource stats */
DBG_MACH_SYSDIAGNOSE = 0xA4 ## /* sysdiagnose */
DBG_MACH_ZALLOC = 0xA5 ## /* Zone allocator */
DBG_MACH_THREAD_GROUP = 0xA6 ## /* Thread groups */
DBG_MACH_COALITION = 0xA7 ## /* Coalitions */
DBG_MACH_SHAREDREGION = 0xA8 ## /* Shared region */
DBG_MACH_SCHED_CLUTCH = 0xA9 ## /* Clutch scheduler */
DBG_MACH_IO = 0xAA ## /* I/O */
DBG_MACH_WORKGROUP = 0xAB ## /* Workgroup subsystem */
class DBG_MACH_IO(enum.Enum):
DBC_MACH_IO_MMIO_READ = 0x1
DBC_MACH_IO_MMIO_WRITE = 0x2
DBC_MACH_IO_PHYS_READ = 0x3
DBC_MACH_IO_PHYS_WRITE = 0x4
DBC_MACH_IO_PORTIO_READ = 0x5
DBC_MACH_IO_PORTIO_WRITE = 0x6
class DBG_MACH_EXCP_INTR(enum.Enum):
DBG_INTR_TYPE_UNKNOWN = 0x0 ## /* default/unknown interrupt */
DBG_INTR_TYPE_IPI = 0x1 ## /* interprocessor interrupt */
DBG_INTR_TYPE_TIMER = 0x2 ## /* timer interrupt */
DBG_INTR_TYPE_OTHER = 0x3 ## /* other (usually external) interrupt */
DBG_INTR_TYPE_PMI = 0x4 ## /* performance monitor interrupt */
class DBG_MACH_SCHED(enum.Enum):
MACH_SCHED = 0x0 # /* Scheduler */
MACH_STACK_ATTACH = 0x1 # /* stack_attach() */
MACH_STACK_HANDOFF = 0x2 # /* stack_handoff() */
MACH_CALL_CONT = 0x3 # /* call_continuation() */
MACH_CALLOUT = 0x4 # /* callouts */
MACH_STACK_DETACH = 0x5
MACH_MAKE_RUNNABLE = 0x6 # /* make thread runnable */
MACH_PROMOTE = 0x7 # /* promoted due to resource (replaced by MACH_PROMOTED) */
MACH_DEMOTE = 0x8 # /* promotion undone (replaced by MACH_UNPROMOTED) */
MACH_IDLE = 0x9 # /* processor idling */
MACH_STACK_DEPTH = 0xa # /* stack depth at switch */
MACH_MOVED = 0xb # /* did not use original scheduling decision */
MACH_PSET_LOAD_AVERAGE = 0xc
MACH_AMP_DEBUG = 0xd
MACH_FAILSAFE = 0xe # /* tripped fixed-pri/RT failsafe */
MACH_BLOCK = 0xf # /* thread block */
MACH_WAIT = 0x10 # /* thread wait assertion */
MACH_GET_URGENCY = 0x14 # /* Urgency queried by platform */
MACH_URGENCY = 0x15 # /* Urgency (RT/BG/NORMAL) communicated
MACH_REDISPATCH = 0x16 # /* "next thread" thread redispatched */
MACH_REMOTE_AST = 0x17 # /* AST signal issued to remote processor */
MACH_SCHED_CHOOSE_PROCESSOR = 0x18 # /* Result of choose_processor */
MACH_DEEP_IDLE = 0x19 # /* deep idle on master processor */
MACH_SCHED_DECAY_PRIORITY = 0x1a # /*was MACH_SCHED_DECAY_PRIORITY */
MACH_CPU_THROTTLE_DISABLE = 0x1b # /* Global CPU Throttle Disable */
MACH_RW_PROMOTE = 0x1c# /* promoted due to RW lock promotion */
MACH_RW_DEMOTE = 0x1d# /* promotion due to RW lock undone */
MACH_SCHED_MAINTENANCE = 0x1f# /* periodic maintenance thread */
MACH_DISPATCH = 0x20# /* context switch completed */
MACH_QUANTUM_HANDOFF = 0x21# /* quantum handoff occurred */
MACH_MULTIQ_DEQUEUE = 0x22# /* Result of multiq dequeue */
MACH_SCHED_THREAD_SWITCH= 0x23# /* attempt direct context switch to hinted thread */
MACH_SCHED_SMT_BALANCE = 0x24# /* SMT load balancing ASTs */
MACH_REMOTE_DEFERRED_AST= 0x25# /* Deferred AST started against remote processor */
MACH_REMOTE_CANCEL_AST = 0x26# /* Canceled deferred AST for remote processor */
MACH_SCHED_CHANGE_PRIORITY = 0x27# /* thread sched priority changed */
MACH_SCHED_UPDATE_REC_CORES = 0x28 # /* Change to recommended processor bitmask */
MACH_STACK_WAIT = 0x29# /* Thread could not be switched-to because of kernel stack shortage */
MACH_THREAD_BIND = 0x2a# /* Thread was bound (or unbound) to a processor */
MACH_WAITQ_PROMOTE = 0x2b# /* Thread promoted by waitq boost */
MACH_WAITQ_DEMOTE = 0x2c# /* Thread demoted from waitq boost */
MACH_SCHED_LOAD = 0x2d# /* load update */
MACH_REC_CORES_FAILSAFE = 0x2e# /* recommended processor failsafe kicked in */
MACH_SCHED_QUANTUM_EXPIRED = 0x2f# /* thread quantum expired */
MACH_EXEC_PROMOTE = 0x30# /* Thread promoted by exec boost */
MACH_EXEC_DEMOTE = 0x31# /* Thread demoted from exec boost */
MACH_AMP_SIGNAL_SPILL = 0x32# /* AMP spill signal sent to cpuid */
MACH_AMP_STEAL = 0x33# /* AMP thread stolen or spilled */
MACH_SCHED_LOAD_EFFECTIVE = 0x34# /* Effective scheduler load */
MACH_PROMOTED = 0x35# /* was: thread promoted due to mutex priority promotion */
MACH_UNPROMOTED = 0x36# /* was: thread unpromoted due to mutex priority promotion */
MACH_PROMOTED_UPDATE = 0x37# /* was: thread already promoted, but promotion priority changed */
MACH_QUIESCENT_COUNTER = 0x38# /* quiescent counter tick */
MACH_TURNSTILE_USER_CHANGE = 0x39# /* base priority change because of turnstile */
MACH_AMP_RECOMMENDATION_CHANGE = 0x3a# /* Thread group recommendation change */
MACH_AMP_PERFCTL_POLICY_CHANGE = 0x3b# /* AMP policy for perfctl cluster recommendation */
MACH_TURNSTILE_KERNEL_CHANGE = 0x40# /* sched priority change because of turnstile */
MACH_SCHED_WI_AUTO_JOIN = 0x41# /* work interval auto join events */
MACH_SCHED_WI_DEFERRED_FINISH = 0x42# /* work interval pending finish events for auto-join thread groups */
MACH_PSET_AVG_EXEC_TIME = 0x50
class DBG_MACH_SCHED_CLUTCH(enum.Enum):
MACH_SCHED_CLUTCH_ROOT_BUCKET_STATE = 0x0# /* __unused */
MACH_SCHED_CLUTCH_TG_BUCKET_STATE = 0x1# /* __unused */
MACH_SCHED_CLUTCH_THREAD_SELECT = 0x2# /* Thread selection events for Clutch scheduler */
MACH_SCHED_CLUTCH_THREAD_STATE = 0x3# /* __unused */
MACH_SCHED_CLUTCH_TG_BUCKET_PRI = 0x4# /* Clutch bucket priority update event */
MACH_SCHED_EDGE_CLUSTER_OVERLOAD = 0x5# /* Cluster experienced overload; migrating threads to other clusters */
MACH_SCHED_EDGE_STEAL = 0x6# /* Per-cluster avg. thread execution time */
MACH_SCHED_EDGE_REBAL_RUNNABLE = 0x7# /* Rebalance runnable threads on a foreign cluster */
MACH_SCHED_EDGE_REBAL_RUNNING = 0x8# /* Rebalance running threads on a foreign cluster */
MACH_SCHED_EDGE_SHOULD_YIELD = 0x9# /* Edge decisions for thread yield */
MACH_SCHED_CLUTCH_THR_COUNT = 0xa# /* Clutch scheduler runnable thread counts */
MACH_SCHED_EDGE_LOAD_AVG = 0xb# /* Per-cluster load average */
class DBG_MACH_WORKGROUP(enum.Enum):
WORKGROUP_INTERVAL_CREATE = 0x0# /* work interval creation */
WORKGROUP_INTERVAL_DESTROY = 0x1# /* work interval destruction */
WORKGROUP_INTERVAL_CHANGE = 0x2# /* thread work interval change */
WORKGROUP_INTERVAL_START = 0x3# /* work interval start call */
WORKGROUP_INTERVAL_UPDATE = 0x4# /* work interval update call */
WORKGROUP_INTERVAL_FINISH = 0x5# /* work interval finish call */
class DBG_MACH_IPC(enum.Enum):
MACH_TASK_SUSPEND = 0x0 # /* Suspended a task */
MACH_TASK_RESUME = 0x1 # /* Resumed a task */
MACH_THREAD_SET_VOUCHER = 0x2
MACH_IPC_MSG_SEND = 0x3 # /* mach msg send, uniq msg info */
MACH_IPC_MSG_RECV = 0x4 # /* mach_msg receive */
MACH_IPC_MSG_RECV_VOUCHER_REFUSED = 0x5 # /* mach_msg receive, voucher refused */
MACH_IPC_KMSG_FREE = 0x6 # /* kernel free of kmsg data */
MACH_IPC_VOUCHER_CREATE = 0x7 # /* Voucher added to global voucher hashtable */
MACH_IPC_VOUCHER_CREATE_ATTR_DATA = 0x8 # /* Attr data for newly created voucher */
MACH_IPC_VOUCHER_DESTROY = 0x9 # /* Voucher removed from global voucher hashtable */
MACH_IPC_KMSG_INFO = 0xa # /* Send/Receive info for a kmsg */
MACH_IPC_KMSG_LINK = 0xb # /* link a kernel kmsg pointer to user mach_msg_header_t */
MACH_IPC_PORT_ENTRY_MODIFY = 0xc # /* A port space gained or lost a port right (reference) */
MACH_IPC_DESTROY_GUARDED_DESC = 0xd # /* Unable to receive a guarded descriptor */
class DBG_MACH_THREAD_GROUP(enum.Enum):
MACH_THREAD_GROUP_NEW = 0x0
MACH_THREAD_GROUP_FREE = 0x1
MACH_THREAD_GROUP_SET = 0x2
MACH_THREAD_GROUP_NAME = 0x3
MACH_THREAD_GROUP_NAME_FREE = 0x4
MACH_THREAD_GROUP_FLAGS = 0x5
MACH_THREAD_GROUP_BLOCK = 0x6
class DBG_MACH_COALITION(enum.Enum):
MACH_COALITION_NEW = 0x0
MACH_COALITION_FREE = 0x1
MACH_COALITION_ADOPT = 0x2
MACH_COALITION_REMOVE = 0x3
MACH_COALITION_THREAD_GROUP_SET = 0x4
class DBG_MACH_PMAP(enum.Enum):
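# NOTE: the members below mirror DBG_MACH_COALITION above and look like a
# copy-paste; XNU defines separate PMAP__* subclass codes for DBG_MACH_PMAP.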
MACH_COALITION_NEW = 0x0
MACH_COALITION_FREE = 0x1
MACH_COALITION_ADOPT = 0x2
MACH_COALITION_REMOVE = 0x3
MACH_COALITION_THREAD_GROUP_SET = 0x4
class DBG_MACH_CLOCK(enum.Enum):
MACH_EPOCH_CHANGE = 0x0 # /* wake epoch change */
MACH_BRIDGE_RCV_TS = 0x1 # /* receive timestamp pair from interrupt handler */
MACH_BRIDGE_REMOTE_TIME = 0x2 # /* calculate remote timestamp */
MACH_BRIDGE_RESET_TS = 0x3 # /* reset timestamp conversion parameters */
MACH_BRIDGE_TS_PARAMS = 0x4 # /* recompute timestamp conversion parameters */
MACH_BRIDGE_SKIP_TS = 0x5 # /* skip timestamp */
MACH_BRIDGE_TS_MISMATCH = 0x6 # /* mismatch between predicted and received remote timestamp */
MACH_BRIDGE_OBSV_RATE= 0x7 # /* out of range observed rates */
class DBG_MACH_STACKSHOT(enum.Enum):
MICROSTACKSHOT_RECORD = 0x0
MICROSTACKSHOT_GATHER = 0x1
class DBG_MACH_SYSDIAGNOSE(enum.Enum):
SYSDIAGNOSE_NOTIFY_USER = 0x0
SYSDIAGNOSE_FULL = 0x1
SYSDIAGNOSE_STACKSHOT= 0x2
SYSDIAGNOSE_TAILSPIN = 0x3
class DBG_MACH_SFI(enum.Enum):
SFI_SET_WINDOW = 0x0
SFI_CANCEL_WINDOW = 0x1
SFI_SET_CLASS_OFFTIME = 0x2
SFI_CANCEL_CLASS_OFFTIME = 0x3
SFI_THREAD_DEFER = 0x4
SFI_OFF_TIMER = 0x5
SFI_ON_TIMER = 0x6
SFI_WAIT_CANCELED = 0x7
SFI_PID_SET_MANAGED = 0x8
SFI_PID_CLEAR_MANAGED = 0x9
SFI_GLOBAL_DEFER = 0xa
class DBG_MACH_ZALLOC(enum.Enum):
ZALLOC_ZCRAM = 0x0
class DBG_MACH_RESOURCE(enum.Enum):
RMON_ENABLE_CPUUSAGE_MONITOR = 0x001
RMON_CPUUSAGE_VIOLATED = 0x002
RMON_CPUUSAGE_SUSPENDED = 0x003
RMON_CPUUSAGE_VIOLATED_K32A = 0x004
RMON_CPUUSAGE_VIOLATED_K32B = 0x005
RMON_CPUUSAGE_RESUMED = 0x006
RMON_DISABLE_CPUUSAGE_MONITOR= 0x00f
RMON_ENABLE_CPUWAKES_MONITOR = 0x011
RMON_CPUWAKES_VIOLATED = 0x012
RMON_CPUWAKES_VIOLATED_K32A = 0x014
RMON_CPUWAKES_VIOLATED_K32B = 0x015
RMON_DISABLE_CPUWAKES_MONITOR= 0x01f
RMON_ENABLE_IO_MONITOR = 0x021
RMON_LOGWRITES_VIOLATED = 0x022
RMON_PHYSWRITES_VIOLATED = 0x023
RMON_LOGWRITES_VIOLATED_K32A = 0x024
RMON_LOGWRITES_VIOLATED_K32B = 0x025
RMON_DISABLE_IO_MONITOR = 0x02f
class DBG_NETWORK(enum.Enum):
DBG_NETIP = 1 # /* Internet Protocol */
DBG_NETARP = 2 # /* Address Resolution Protocol */
DBG_NETUDP = 3 # /* User Datagram Protocol */
DBG_NETTCP = 4 # /* Transmission Control Protocol */
DBG_NETICMP = 5 # /* Internet Control Message Protocol */
DBG_NETIGMP = 6 # /* Internet Group Management Protocol */
DBG_NETRIP = 7 # /* Routing Information Protocol */
DBG_NETOSPF = 8 # /* Open Shortest Path First */
DBG_NETISIS = 9 # /* Intermediate System to Intermediate System */
DBG_NETSNMP = 10 # /* Simple Network Management Protocol */
DBG_NETSOCK = 11 # /* Socket Layer */
DBG_NETAARP = 100 # /* Apple ARP */
DBG_NETDDP = 101 # /* Datagram Delivery Protocol */
DBG_NETNBP = 102 # /* Name Binding Protocol */
DBG_NETZIP = 103 # /* Zone Information Protocol */
DBG_NETADSP = 104 # /* AppleTalk Data Stream Protocol */
DBG_NETATP = 105 # /* Apple Transaction Protocol */
DBG_NETASP = 106 # /* Apple Session Protocol */
DBG_NETAFP = 107 # /* Apple Filing Protocol */
DBG_NETRTMP = 108 # /* Routing Table Maintenance Protocol */
DBG_NETAURP = 109 # /* Apple Update Routing Protocol */
DBG_NETIPSEC = 128 # /* IPsec Protocol */
DBG_NETVMNET = 129 # /* VMNet */
class DBG_IOKIT(enum.Enum):
DBG_IOINTC = 0 # /* Interrupt controller */
DBG_IOWORKLOOP = 1 # /* Work from work loop */
DBG_IOINTES = 2 # /* Interrupt event source */
DBG_IOCLKES = 3 # /* Clock event source */
DBG_IOCMDQ = 4 # /* Command queue latencies */
DBG_IOMCURS = 5 # /* Memory Cursor */
DBG_IOMDESC = 6 # /* Memory Descriptors */
DBG_IOPOWER = 7 # /* Power Management */
DBG_IOSERVICE = 8 # /* Matching etc. */
DBG_IOREGISTRY = 9 # /* Registry */
#**** 9-32 reserved for internal IOKit usage ****
DBG_IOSTORAGE = 32 # /* Storage layers */
DBG_IONETWORK = 33 # /* Network layers */
DBG_IOKEYBOARD = 34 # /* Keyboard */
DBG_IOHID = 35 # /* HID Devices */
DBG_IOAUDIO = 36 # /* Audio */
DBG_IOSERIAL = 37 # /* Serial */
DBG_IOTTY = 38 # /* TTY layers */
DBG_IOSAM = 39 # /* SCSI Architecture Model layers */
DBG_IOPARALLELATA = 40 # /* Parallel ATA */
DBG_IOPARALLELSCSI = 41 # /* Parallel SCSI */
DBG_IOSATA = 42 # /* Serial-ATA */
DBG_IOSAS = 43 # /* SAS */
DBG_IOFIBRECHANNEL = 44 # /* FiberChannel */
DBG_IOUSB = 45 # /* USB */
DBG_IOBLUETOOTH = 46 # /* Bluetooth */
DBG_IOFIREWIRE = 47 # /* FireWire */
DBG_IOINFINIBAND = 48 # /* Infiniband */
DBG_IOCPUPM = 49 # /* CPU Power Management */
DBG_IOGRAPHICS = 50 # /* Graphics */
DBG_HIBERNATE = 51 # /* hibernation related events */
DBG_IOTHUNDERBOLT = 52 # /* Thunderbolt */
DBG_BOOTER = 53 # /* booter related events */
DBG_IOAUDIO2 = 54 # /* Audio (extended) */
DBG_IOSURFACEPA = 64 # /* IOSurface page mappings */
DBG_IOMDPA = 65 # /* IOMemoryDescriptor page mappings */
DBG_IODARTPA = 66 # /* DART page mappings */
class DBG_DRIVERS(enum.Enum):
DBG_DRVSTORAGE = 1# /* Storage layers */
DBG_DRVNETWORK = 2# /* Network layers */
DBG_DRVKEYBOARD = 3# /* Keyboard */
DBG_DRVHID = 4# /* HID Devices */
DBG_DRVAUDIO = 5# /* Audio */
DBG_DRVSERIAL = 7# /* Serial */
DBG_DRVSAM = 8# /* SCSI Architecture Model layers */
DBG_DRVPARALLELATA = 9# /* Parallel ATA */
DBG_DRVPARALLELSCSI = 10# /* Parallel SCSI */
DBG_DRVSATA = 11# /* Serial ATA */
DBG_DRVSAS = 12# /* SAS */
DBG_DRVFIBRECHANNEL = 13# /* FiberChannel */
DBG_DRVUSB = 14# /* USB */
DBG_DRVBLUETOOTH = 15# /* Bluetooth */
DBG_DRVFIREWIRE = 16# /* FireWire */
DBG_DRVINFINIBAND = 17# /* Infiniband */
DBG_DRVGRAPHICS = 18# /* Graphics */
DBG_DRVSD = 19# /* Secure Digital */
DBG_DRVNAND = 20# /* NAND drivers and layers */
DBG_SSD = 21# /* SSD */
DBG_DRVSPI = 22# /* SPI */
DBG_DRVWLAN_802_11 = 23# /* WLAN 802.11 */
DBG_DRVSSM = 24# /* System State Manager(AppleSSM) */
DBG_DRVSMC = 25# /* System Management Controller */
DBG_DRVMACEFIMANAGER = 26# /* Mac EFI Manager */
DBG_DRVANE = 27# /* ANE */
DBG_DRVETHERNET = 28# /* Ethernet */
DBG_DRVMCC = 29# /* Memory Cache Controller */
DBG_DRVACCESSORY = 30# /* Accessories */
class DBG_DLIL(enum.Enum):
DBG_DLIL_STATIC = 1 # /* Static DLIL code */
DBG_DLIL_PR_MOD = 2 # /* DLIL Protocol Module */
DBG_DLIL_IF_MOD = 3 # /* DLIL Interface Module */
DBG_DLIL_PR_FLT = 4 # /* DLIL Protocol Filter */
DBG_DLIL_IF_FLT = 5 # /* DLIL Interface Filter */
class DBG_FSYSTEM(enum.Enum):
DBG_FSRW = 0x1 # /* reads and writes to the filesystem */
DBG_DKRW = 0x2 # /* reads and writes to the disk */
DBG_FSVN = 0x3 # /* vnode operations (inc. locking/unlocking) */
DBG_FSLOOKUP = 0x4 # /* namei and other lookup-related operations */
DBG_JOURNAL= 0x5 # /* journaling operations */
DBG_IOCTL = 0x6 # /* ioctl to the disk */
DBG_BOOTCACHE = 0x7 # /* bootcache operations */
DBG_HFS = 0x8 # /* HFS-specific events; see the hfs project */
DBG_APFS = 0x9 # /* APFS-specific events; see the apfs project */
DBG_SMB = 0xA # /* SMB-specific events; see the smb project */
DBG_MOUNT = 0xB # /* Mounting/unmounting operations */
DBG_EXFAT = 0xE # /* ExFAT-specific events; see the exfat project */
DBG_MSDOS = 0xF # /* FAT-specific events; see the msdosfs project */
DBG_ACFS = 0x10 # /* Xsan-specific events; see the XsanFS project */
DBG_THROTTLE = 0x11 # /* I/O Throttling events */
DBG_DECMP = 0x12 # /* Decmpfs-specific events */
DBG_VFS = 0x13 # /* VFS layer events */
DBG_LIVEFS = 0x14 # /* LiveFS events; see the UserFS project */
DBG_CONTENT_PROT = 0xCF# /* Content Protection Events: see bsd/sys/cprotect.h */
class DBG_BSD(enum.Enum):
DBG_BSD_PROC = 0x01# /* process/signals related */
DBG_BSD_MEMSTAT = 0x02# /* memorystatus / jetsam operations */
DBG_BSD_KEVENT = 0x03# /* kqueue / kevent related */
DBG_BSD_EXCP_SC = 0x0C# /* System Calls */
DBG_BSD_AIO = 0x0D# /* aio (POSIX async IO) */
DBG_BSD_SC_EXTENDED_INFO = 0x0E# /* System Calls, extended info */
DBG_BSD_SC_EXTENDED_INFO2 = 0x0F# /* System Calls, extended info */
DBG_BSD_KDEBUG_TEST = 0xFF# /* for testing kdebug */
class DBG_BSD_PROC(enum.Enum):
BSD_PROC_EXIT = 1 # /* process exit */
BSD_PROC_FRCEXIT = 2 # /* Kernel force termination */
BSD_PROC_EXEC = 3 # /* process spawn / exec */
BSD_PROC_EXITREASON_CREATE = 4 # /* exit reason creation */
BSD_PROC_EXITREASON_COMMIT = 5 # /* exit reason committed to a proc */
class DBG_BSD_MEMSTAT(enum.Enum):
BSD_MEMSTAT_SCAN = 1 # /* memorystatus thread awake */
BSD_MEMSTAT_JETSAM = 2 # /* LRU jetsam */
BSD_MEMSTAT_JETSAM_HIWAT = 3 # /* highwater jetsam */
BSD_MEMSTAT_FREEZE = 4 # /* freeze process */
BSD_MEMSTAT_FREEZE_SCAN = 5 # /* select a process to freeze and freeze it */
BSD_MEMSTAT_UPDATE = 6 # /* priority update */
BSD_MEMSTAT_IDLE_DEMOTE = 7 # /* idle demotion fired */
BSD_MEMSTAT_CLEAR_ERRORS = 8 # /* reset termination error state */
BSD_MEMSTAT_DIRTY_TRACK = 9 # /* track the process state */
BSD_MEMSTAT_DIRTY_SET = 10 # /* set the process state */
BSD_MEMSTAT_DIRTY_CLEAR = 11 # /* clear the process state */
BSD_MEMSTAT_GRP_SET_PROP = 12 # /* set group properties */
BSD_MEMSTAT_DO_KILL = 13 # /* memorystatus kills */
BSD_MEMSTAT_CHANGE_PRIORITY = 14 # /* priority changed */
BSD_MEMSTAT_FAST_JETSAM = 15 # /* Aggressive jetsam ("clear-the-deck") */
BSD_MEMSTAT_COMPACTOR_RUN = 16 # /* run VM compactor after process kill */
BSD_MEMSTAT_FREEZE_DISABLE = 17 # /* disable freeze and kill frozen processes */
BSD_MEMSTAT_RELAUNCH_FLAGS = 18 # /* flags representing jetsam behavior; based on launchd data */
class DBG_BSD_KEVENT(enum.Enum):
BSD_KEVENT_KQ_PROCESS_BEGIN = 1
BSD_KEVENT_KQ_PROCESS_END = 2
BSD_KEVENT_KQWQ_PROCESS_BEGIN = 3
BSD_KEVENT_KQWQ_PROCESS_END = 4
BSD_KEVENT_KQWQ_BIND = 5
BSD_KEVENT_KQWQ_UNBIND = 6
BSD_KEVENT_KQWQ_THREQUEST = 7
BSD_KEVENT_KQWL_PROCESS_BEGIN = 8
BSD_KEVENT_KQWL_PROCESS_END = 9
BSD_KEVENT_KQWL_THREQUEST = 10
BSD_KEVENT_KQWL_THADJUST = 11
BSD_KEVENT_KQ_REGISTER = 12
BSD_KEVENT_KQWQ_REGISTER = 13
BSD_KEVENT_KQWL_REGISTER = 14
BSD_KEVENT_KNOTE_ACTIVATE = 15
BSD_KEVENT_KQ_PROCESS = 16
BSD_KEVENT_KQWQ_PROCESS = 17
BSD_KEVENT_KQWL_PROCESS = 18
BSD_KEVENT_KQWL_BIND = 19
BSD_KEVENT_KQWL_UNBIND = 20
BSD_KEVENT_KNOTE_ENABLE = 21
BSD_KEVENT_KNOTE_VANISHED = 22
class DBG_TRACE(enum.Enum):
DBG_TRACE_DATA = 0
DBG_TRACE_STRING = 1
DBG_TRACE_INFO = 2
class DBG_CORESTORAGE(enum.Enum):
DBG_CS_IO = 0
class DBG_SECURITY(enum.Enum):
DBG_SEC_KERNEL = 0# /* raw entropy collected by the kernel */
DBG_SEC_SANDBOX = 1
class DBG_MONOTONIC(enum.Enum):
DBG_MT_INSTRS_CYCLES = 1
DBG_MT_DEBUG = 2
DBG_MT_RESOURCES_PROC_EXIT = 3
DBG_MT_RESOURCES_THR_EXIT = 4
DBG_MT_TMPTH = 0xfe
DBG_MT_TMPCPU = 0xff
class DBG_MISC(enum.Enum):
DBG_MISC_COREBRIGHTNESS = 0x01
DBG_MISC_VIDEOENG = 0x02
DBG_EVENT = 0x10
DBG_MISC_INSTRUMENTS = 0x11
DBG_MISC_INSTRUMENTSBT = 0x12
DBG_MISC_LAYOUT = 0x1a
DBG_BUFFER = 0x20
class DBG_DYLD(enum.Enum):
DBG_DYLD_UUID = 5
class DBG_DYLD_UUID(enum.Enum):
DBG_DYLD_UUID_MAP_A = (0)
DBG_DYLD_UUID_MAP_B = (1)
DBG_DYLD_UUID_MAP_32_A = (2)
DBG_DYLD_UUID_MAP_32_B = (3)
DBG_DYLD_UUID_MAP_32_C = (4)
DBG_DYLD_UUID_UNMAP_A = (5)
DBG_DYLD_UUID_UNMAP_B = (6)
DBG_DYLD_UUID_UNMAP_32_A = (7)
DBG_DYLD_UUID_UNMAP_32_B = (8)
DBG_DYLD_UUID_UNMAP_32_C = (9)
DBG_DYLD_UUID_SHARED_CACHE_A = (10)
DBG_DYLD_UUID_SHARED_CACHE_B = (11)
DBG_DYLD_UUID_SHARED_CACHE_32_A = (12)
DBG_DYLD_UUID_SHARED_CACHE_32_B = (13)
DBG_DYLD_UUID_SHARED_CACHE_32_C = (14)
DBG_DYLD_AOT_UUID_MAP_A = (15)
DBG_DYLD_AOT_UUID_MAP_B = (16)
class DBG_DKRW(enum.Enum):
DKIO_DONE = 0x01
DKIO_READ = 0x02
DKIO_ASYNC = 0x04
DKIO_META = 0x08
DKIO_PAGING = 0x10
DKIO_THROTTLE= 0x20# /* Deprecated, still provided so fs_usage doesn't break */
DKIO_PASSIVE = 0x40
DKIO_NOCACHE = 0x80
DKIO_TIER_MASK = 0xF00
DKIO_TIER_SHIFT = 8
DKIO_TIER_UPGRADE = 0x1000
class DBG_APPS(enum.Enum):
DBG_APP_LOGINWINDOW = 0x03
DBG_APP_AUDIO = 0x04
DBG_APP_SYSTEMUI = 0x05
DBG_APP_SIGNPOST = 0x0A
DBG_APP_APPKIT = 0x0C
DBG_APP_UIKIT = 0x0D
DBG_APP_DFR = 0x0E
DBG_APP_LAYOUT = 0x0F
DBG_APP_COREDATA = 0x10
DBG_APP_SAMBA = 0x80
DBG_APP_EOSSUPPORT = 0x81
DBG_APP_MACEFIMANAGER= 0x82
class DBG_THROTTLE(enum.Enum):
OPEN_THROTTLE_WINDOW = 0x1
PROCESS_THROTTLED = 0x2
IO_THROTTLE_DISABLE = 0x3
IO_TIER_UPL_MISMATCH = 0x4
class DBG_PERF(enum.Enum):
PERF_EVENT = 0
PERF_DATA = 1
PERF_STK = 2
class DBG_IMPORTANCE(enum.Enum):
IMP_ASSERTION = 0x10 # /* Task takes/drops a boost assertion */
IMP_BOOST = 0x11 # /* Task boost level changed */
IMP_MSG = 0x12 # /* boosting message sent by donating task on donating port */
IMP_WATCHPORT = 0x13 # /* port marked as watchport, and boost was transferred to the watched task */
IMP_TASK_SUPPRESSION = 0x17 # /* Task changed suppression behaviors */
IMP_TASK_APPTYPE = 0x18 # /* Task launched with apptype */
IMP_UPDATE = 0x19 # /* Requested -> effective calculation */
IMP_USYNCH_QOS_OVERRIDE = 0x1A # /* Userspace synchronization applied QoS override to resource owning thread */
IMP_DONOR_CHANGE = 0x1B # /* The iit_donor bit changed */
IMP_MAIN_THREAD_QOS = 0x1C # /* The task's main thread QoS was set */
IMP_SYNC_IPC_QOS = 0x1D # /* Sync IPC QOS override */
IMP_TASK_POLICY_DARWIN_BG = 0x21
IMP_TASK_POLICY_IOPOL = 0x22
IMP_TASK_POLICY_IO = 0x23
IMP_TASK_POLICY_PASSIVE_IO = 0x24
IMP_TASK_POLICY_DARWIN_BG_IOPOL = 0x27
IMP_TASK_POLICY_TAL = 0x28
IMP_TASK_POLICY_BOOST = 0x29
IMP_TASK_POLICY_ROLE = 0x2A
IMP_TASK_POLICY_TERMINATED = 0x2C
IMP_TASK_POLICY_NEW_SOCKETS_BG = 0x2D
IMP_TASK_POLICY_SUP_ACTIVE = 0x2E
IMP_TASK_POLICY_LATENCY_QOS = 0x2F
IMP_TASK_POLICY_THROUGH_QOS = 0x30
IMP_TASK_POLICY_WATCHERS_BG = 0x31
IMP_TASK_POLICY_SFI_MANAGED = 0x34
IMP_TASK_POLICY_ALL_SOCKETS_BG = 0x37
IMP_TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS = 0x39# /* latency as value1, throughput as value2 */
IMP_TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS = 0x3A# /* latency as value1, throughput as value2 */
IMP_TASK_POLICY_PIDBIND_BG = 0x32
IMP_TASK_POLICY_QOS_OVERRIDE = 0x36
IMP_TASK_POLICY_QOS_AND_RELPRIO = 0x38# /* QoS as value1, relative priority as value2 */
IMP_TASK_POLICY_QOS_WORKQ_OVERRIDE =0x3B
IMP_TASK_POLICY_QOS_PROMOTE = 0x3C
IMP_TASK_POLICY_QOS_KEVENT_OVERRIDE = 0x3D
IMP_TASK_POLICY_QOS_SERVICER_OVERRIDE = 0x3E
class IMP_ASSERTION(enum.Enum):
IMP_HOLD = 0x2 # /* Task holds a boost assertion */
IMP_DROP = 0x4 # /* Task drops a boost assertion */
IMP_EXTERN = 0x8 # /* boost assertion moved from kernel to userspace responsibility (externalized) */
class IMP_BOOST(enum.Enum):
IMP_BOOSTED = 0x1
IMP_UNBOOSTED = 0x2 # /* Task drops a boost assertion */
class IMP_MSG(enum.Enum):
IMP_MSG_SEND = 0x1 # /* boosting message sent by donating task on donating port */
IMP_MSG_DELV = 0x2 # /* boosting message delivered to task */
class IMP_UPDATE(enum.Enum):
IMP_UPDATE_TASK_CREATE = 0x1
class IMP_USYNCH_QOS_OVERRIDE(enum.Enum):
IMP_USYNCH_ADD_OVERRIDE = 0x0 # /* add override for a contended resource */
IMP_USYNCH_REMOVE_OVERRIDE = 0x1 # /* remove override for a contended resource */
class IMP_DONOR_CHANGE(enum.Enum):
IMP_DONOR_UPDATE_LIVE_DONOR_STATE = 0x0
IMP_DONOR_INIT_DONOR_STATE = 0x1
class IMP_SYNC_IPC_QOS(enum.Enum):
IMP_SYNC_IPC_QOS_APPLIED = 0x0
IMP_SYNC_IPC_QOS_REMOVED = 0x1
IMP_SYNC_IPC_QOS_OVERFLOW = 0x2
IMP_SYNC_IPC_QOS_UNDERFLOW = 0x3
class DBG_TURNSTILE(enum.Enum):
TURNSTILE_HEAP_OPERATIONS = 0x10
TURNSTILE_PRIORITY_OPERATIONS = 0x20
TURNSTILE_FREELIST_OPERATIONS = 0x30
class TURNSTILE_HEAP_OPERATIONS(enum.Enum):
THREAD_ADDED_TO_TURNSTILE_WAITQ = 0x1
THREAD_REMOVED_FROM_TURNSTILE_WAITQ = 0x2
THREAD_MOVED_IN_TURNSTILE_WAITQ = 0x3
TURNSTILE_ADDED_TO_TURNSTILE_HEAP = 0x4
TURNSTILE_REMOVED_FROM_TURNSTILE_HEAP= 0x5
TURNSTILE_MOVED_IN_TURNSTILE_HEAP = 0x6
TURNSTILE_ADDED_TO_THREAD_HEAP = 0x7
TURNSTILE_REMOVED_FROM_THREAD_HEAP = 0x8
TURNSTILE_MOVED_IN_THREAD_HEAP = 0x9
TURNSTILE_UPDATE_STOPPED_BY_LIMIT = 0xa
THREAD_NOT_WAITING_ON_TURNSTILE = 0xb
class TURNSTILE_PRIORITY_OPERATIONS(enum.Enum):
TURNSTILE_PRIORITY_CHANGE = 0x1
THREAD_USER_PROMOTION_CHANGE = 0x2
class TURNSTILE_FREELIST_OPERATIONS(enum.Enum):
TURNSTILE_PREPARE = 0x1
TURNSTILE_COMPLETE = 0x2
class DBG_BANK(enum.Enum):
BANK_ACCOUNT_INFO = 0x10 # /* Trace points related to bank account struct */
BANK_TASK_INFO = 0x11 # /* Trace points related to bank task struct */
class DBG_ATM(enum.Enum):
ATM_SUBAID_INFO = 0x10
ATM_GETVALUE_INFO = 0x20
ATM_UNREGISTER_INFO = 0x30
class BANK_ACCOUNT_INFO(enum.Enum):
BANK_SETTLE_CPU_TIME = 0x1 # /* Bank ledger(chit) rolled up to tasks. */
BANK_SECURE_ORIGINATOR_CHANGED = 0x2 # /* Secure Originator changed. */
BANK_SETTLE_ENERGY = 0x3 # /* Bank ledger(energy field) rolled up to tasks. */
class ATM_SUBAID_INFO(enum.Enum):
ATM_MIN_CALLED = 0x1
ATM_LINK_LIST_TRIM = 0x2
class ATM_GETVALUE_INFO(enum.Enum):
ATM_VALUE_REPLACED = 0x1
ATM_VALUE_ADDED = 0x2
class ATM_UNREGISTER_INFO(enum.Enum):
ATM_VALUE_UNREGISTERED = 0x1
ATM_VALUE_DIFF_MAILBOX = 0x2
class DBG_DAEMON(enum.Enum):
DBG_DAEMON_COREDUET = 0x1
DBG_DAEMON_POWERD = 0x2
# KD_BUF_FORMAT = '<Q32sQLLQ'
# '<QLLQQQQLLQ'
kperf_data = Struct(
'timestamp' / Int64ul,
'args' / Array(4, Int64ul),
'code' / Int64ul,
'debug_id' / Int32ul,
'cpu_id' / Int32ul,
'unused' / Int64ul,
)
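# Each trace record is 64 bytes: timestamp, four argument words, the emitting
# thread ID, the debug (event) ID, the CPU ID and an unused trailer -- which is
# why KdBufParser.decode() below reads the stream in 64-byte slices. Note that
# the field named 'code' carries the thread ID; KperfData resolves the owning
# process from it via the threadmap.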
# Kevent = namedtuple('Kevent', ['timestamp', 'data', 'values', 'tid', 'debugid', 'eventid', 'func_qualifier'])
#
# typedef struct {
# /* the thread ID */
# #if defined(__arm64__)
# uint64_t thread;
# #else
# uintptr_t thread;
# #endif
# /* 0 for invalid, otherwise the PID (or 1 for kernel_task) */
# int valid;
# /* the name of the process owning the thread */
# char command[20];
# } kd_threadmap;
#
kd_threadmap = Struct(
'tid' / Int64ul,
'pid' / Int32ul,
'process' / FixedSized(0x14, CString('utf8')),
)
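# 8 + 4 + 0x14 = 32 bytes per threadmap entry, matching the (64-bit) kd_threadmap
# struct described in the comment above.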
kd_header_v2 = Struct(
'number_of_threads' / Int32ul,
Padding(12),
'is_64bit' / Int32ul,
'tick_frequency' / Int64ul,
Padding(0x100),
'threadmap' / Array(lambda ctx: ctx.number_of_threads, kd_threadmap),
'_pad' / GreedyRange(Const(0, Byte)),
)
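# The version 2 header embeds one kd_threadmap entry per thread
# ('number_of_threads' of them) and then lets GreedyRange consume the run of
# trailing zero padding before event data begins.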
# // Version 3 header
# // The header chunk has the tag 0x00001000 which also serves as a magic word
# // that identifies the file as a version 3 trace file. The header payload is
# // a set of fixed fields followed by a variable number of sub-chunks:
# /*
# * ____________________________________________________________________________
# | Offset | Size | Field                                                        |
# | ----------------------------------------------------------------------------
# |   0    |  4   | Tag (0x00001000)                                             |
# |   4    |  4   | Sub-tag. Represents the version of the header.               |
# |   8    |  8   | Length of header payload (40+8x)                             |
# |  16    |  8   | Time base info. Two 32-bit numbers, numer/denom,             |
# |        |      | for converting timestamps to nanoseconds.                    |
# |  24    |  8   | Timestamp of trace start.                                    |
# |  32    |  8   | Wall time seconds since Unix epoch.                          |
# |        |      | As returned by gettimeofday().                               |
# |  40    |  4   | Wall time microseconds. As returned by gettimeofday().       |
# |  44    |  4   | Local time zone offset in minutes. ( " )                     |
# |  48    |  4   | Type of daylight savings time correction to apply. ( " )     |
# |  52    |  4   | Flags. 1 = 64-bit. Remaining bits should be written          |
# |        |      | as 0 and ignored when reading.                               |
# |  56    |  8x  | Variable number of sub-chunks. None are required.            |
# |        |      | Ignore unknown chunks.                                       |
# | ----------------------------------------------------------------------------
# */
# // NOTE: The header sub-chunks are considered part of the header chunk,
# // so they must be included in the header chunk’s length field.
# // The CPU map is an optional sub-chunk of the header chunk. It provides
# // information about the CPUs that are referenced from the trace events.
# typedef struct {
# uint32_t tag;
# uint32_t sub_tag;
# uint64_t length;
# uint32_t timebase_numer;
# uint32_t timebase_denom;
# uint64_t timestamp;
# uint64_t walltime_secs;
# uint32_t walltime_usecs;
# uint32_t timezone_minuteswest;
# uint32_t timezone_dst;
# uint32_t flags;
# } __attribute__((packed)) kd_header_v3;
#
# kd_header_v3 header = {
# .tag = RAW_VERSION3,
# .sub_tag = V3_HEADER_VERSION,
# .length = (sizeof(kd_header_v3) + cpumap_size - sizeof(kd_cpumap_header)),
# .timebase_numer = timebase.numer,
# .timebase_denom = timebase.denom,
# .timestamp = 0,
# .walltime_secs = 0,
# .walltime_usecs = 0,
# .timezone_minuteswest = 0,
# .timezone_dst = 0,
#
kd_header_v3 = Struct(
'tag' / Int32ul,
'sub_tag' / Int32ul,
'length' / Int64ul,
'timebase_numer' / Int32ul,
'timebase_denom' / Int32ul,
'timestamp' / Int64ul,
'walltime_secs' / Int64ul,
'walltime_usecs' / Int32ul,
'timezone_minuteswest' / Int32ul,
'timezone_dst' / Int32ul,
'flags' / Int32ul,
'tag2' / Int32ul,
)
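# Note: 'tag2' is not part of the packed kd_header_v3 C struct quoted above; it
# reads one extra 32-bit word that follows the fixed fields (presumably the tag
# of the first sub-chunk, such as the CPU map).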
CLASS_DICT = vars()
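# Snapshot of the module namespace: KperfData._format_class() below uses it to
# resolve the per-class enum of subclass codes (e.g. DBG_BSD, DBG_IOKIT) from
# the class name reported by DebugClasses.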
class KdBufParser:
def __init__(self,data):
self.timestamp = data.timestamp
self.args = data.args
self.code = data.code
self.debug_id = data.debug_id
self.event_id = data.debug_id & KDBG_EVENTID_MASK
self.func_code = data.debug_id & KDBG_FUNC_MASK
self.class_code = kdbg_extract_class(data.debug_id)
self.subclass_code = kdbg_extract_subclass(data.debug_id)
self.final_code = kdbg_extract_code(data.debug_id)
@classmethod
def decode(cls,buf_io:io.BytesIO):
while True:
buf = buf_io.read(64)
if not buf:
return
data = kperf_data.parse(buf)
yield cls(data)
class KperfData:
def __init__(self, traceCodesFile=None, filter_pid=None, filter_process=None):
self.trace_codes = traceCodesFile or {}
self.threads_pids = {}
self.version = None
self.filter_pid = filter_pid
self.filter_process = filter_process
def set_threads_pids(self, threads):
for thread in threads:
self.threads_pids[thread.tid] = (thread.pid,thread.process)
def _format_process(self, tid):
pid,process_name = self.threads_pids.get(tid,(None,None))
return pid, process_name, (f'{process_name}({pid})' if pid else f'Error: tid {tid}')
def _format_class(self,classes,code):
if classes:
try:
classes_name = classes(code).name
return classes_name, f'{classes_name:<18}'
except ValueError:
return None, f'Error({code})'
else:
return None, f'Error({code})'
def check_header(self,kd_buf):
if kd_buf.startswith(b'\x07X\xa2Y'):
return io.BytesIO()
buf_io = io.BytesIO(kd_buf)
if not self.threads_pids:
self.version = buf_io.read(4)
parsed_header = kd_header_v2.parse_stream(buf_io)
self.set_threads_pids(parsed_header.threadmap)
return buf_io
def to_dict(self,kd_buf):
buf_io = self.check_header(kd_buf)
for event in KdBufParser.decode(buf_io):
yield event
def to_str(self,kd_buf:bytes):
buf_io = self.check_header(kd_buf)
for event in KdBufParser.decode(buf_io):
pid,process_name,process_str = self._format_process(event.code)
if self.filter_pid and self.filter_pid != pid:
continue
if self.filter_process and self.filter_process != process_name:
continue
formatted_data = ''
formatted_data += f'{process_str:<27}'
if event.event_id in self.trace_codes:
name = self.trace_codes[event.event_id] + f' ({hex(event.event_id)})'
else:
name = hex(event.event_id)
formatted_data += f'{name:<60}'
classes_name,_str = self._format_class(DebugClasses,event.class_code)
formatted_data += f'{_str:<18}'
classes_name,_str = self._format_class(CLASS_DICT.get(classes_name),event.subclass_code)
formatted_data += f'{_str:<30}'
# classes_name,_str = self._format_class(CLASS_DICT.get(classes_name),event.final_code)
# formatted_data += f'{_str:<30}'
try:
formatted_data += f'{DgbFuncQual(event.func_code).name:<15}'
except ValueError:
formatted_data += f'{"Error":<16}'
yield formatted_data
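# Minimal usage sketch (names here are hypothetical placeholders: `trace_codes`
# is a dict mapping event ids to names and `raw_kdebug_bytes` is a kdebug
# buffer obtained elsewhere):
#
#     parser = KperfData(trace_codes, filter_process='backboardd')
#     for line in parser.to_str(raw_kdebug_bytes):
#         print(line)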
|