| lca_index (int64, 0-223) | idx (string, 7-11 chars) | line_type (string, 6 classes) | ground_truth (string, 2-35 chars) | completions (sequence, 3-1.16k items) | prefix (string, 298-32.8k chars) | postfix (string, 0-28.6k chars) | repo (string, 34 classes) |
|---|---|---|---|---|---|---|---|
0 | 0-46-56 | inproject | MergedProjectProfile | [
"add_func_to_reached_and_clone",
"Any",
"copy",
"Dict",
"FunctionProfile",
"fuzz_cfg_load",
"fuzz_cov_load",
"fuzz_utils",
"FuzzerProfile",
"List",
"load_all_profiles",
"logger",
"logging",
"MergedProjectProfile",
"Optional",
"os",
"read_fuzzer_data_file_to_profile",
"Set",
"Tuple",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader. | ,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that has forward reds. Extract maximum max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
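Each row above splits one source file around a single completion point: prefix holds the code before the point, ground_truth is the identifier that was removed, and postfix holds the code after it (the first row, for example, masks MergedProjectProfile inside a type annotation). Below is a minimal sketch of how a row can be reassembled; the field names mirror the table columns, while the dict's string values are shortened, illustrative stand-ins for the real row content:

```python
# Illustrative row: field names mirror the table columns above;
# the string values are shortened stand-ins for the real row content.
row = {
    "prefix": "project_profile: fuzz_data_loader.",
    "ground_truth": "MergedProjectProfile",
    "postfix": ",\nprofiles: List[fuzz_data_loader.FuzzerProfile],",
}

# The completed code is simply prefix + ground_truth + postfix.
completed = row["prefix"] + row["ground_truth"] + row["postfix"]
print(completed)
```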
0 | 0-47-54 | inproject | FuzzerProfile | [
"add_func_to_reached_and_clone",
"Any",
"copy",
"Dict",
"FunctionProfile",
"fuzz_cfg_load",
"fuzz_cov_load",
"fuzz_utils",
"FuzzerProfile",
"List",
"load_all_profiles",
"logger",
"logging",
"MergedProjectProfile",
"Optional",
"os",
"read_fuzzer_data_file_to_profile",
"Set",
"Tuple",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader. | ],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that has forward reds. Extract maximum max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-62-30 | inproject | extract_all_callsites | [
"CalltreeCallsite",
"data_file_read_calltree",
"extract_all_callsites",
"extract_all_callsites_recursive",
"List",
"logger",
"logging",
"Optional",
"print_ctcs_tree",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load. | (profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that has forward reds. Extract maximum max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-62-60 | inproject | function_call_depths | [
"accummulate_profile",
"all_class_functions",
"binary_executable",
"correlate_executable_name",
"coverage",
"file_targets",
"function_call_depths",
"functions_reached_by_fuzzer",
"functions_unreached_by_fuzzer",
"fuzzer_source_file",
"get_cov_metrics",
"get_cov_uncovered_reachable_funcs",
"get_file_targets",
"get_function_coverage",
"get_key",
"get_target_fuzzer_filename",
"get_total_basic_blocks",
"get_total_cyclomatic_complexity",
"introspector_data_file",
"load_coverage",
"reaches",
"refine_paths",
"set_all_reached_functions",
"set_all_unreached_functions",
"total_basic_blocks",
"total_cyclomatic_complexity",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile. | )
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that has forward reds. Extract maximum max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-66-40 | inproject | demangle_cpp_func | [
"Any",
"cxxfilt",
"data_file_read_yaml",
"demangle_cpp_func",
"Dict",
"get_all_files_in_tree_with_regex",
"get_target_coverage_url",
"List",
"logger",
"logging",
"longest_common_prefix",
"normalise_str",
"Optional",
"os",
"re",
"safe_decode",
"scan_executables_for_fuzz_introspector_logs",
"yaml",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils. | (node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that has forward reds. Extract maximum max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-66-63 | inproject | dst_function_name | [
"children",
"cov_callsite_link",
"cov_color",
"cov_ct_idx",
"cov_forward_reds",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_link",
"cov_parent",
"depth",
"dst_function_name",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node. | )
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that has forward reds. Extract maximum max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
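One note on the code shown in the prefix and postfix columns: the create_str_node_ctx_idx helper left-pads the call-tree index string to five digits. A minimal equivalent sketch using Python's built-in str.zfill, included purely as an observation about the displayed code rather than as part of the dataset:

```python
def create_str_node_ctx_idx(cov_ct_idx: str) -> str:
    # Left-pad the call-tree index to five characters, e.g. "42" -> "00042".
    return cov_ct_idx.zfill(5)

assert create_str_node_ctx_idx("42") == "00042"
```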
0 | 0-77-33 | random | cov_callsite_link | [
"children",
"cov_callsite_link",
"cov_color",
"cov_ct_idx",
"cov_forward_reds",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_link",
"cov_parent",
"depth",
"dst_function_name",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node. |
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector.
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-79-30 | infile | create_str_node_ctx_idx | [
"analysis_func",
"create_calltree",
"create_fuzz_blocker_table",
"create_str_node_ctx_idx",
"get_fuzz_blockers",
"html_create_dedicated_calltree_file",
"name",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self. | (str(node.cov_ct_idx))
# Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector.
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-79-63 | infile | cov_ct_idx | [
"children",
"cov_callsite_link",
"cov_color",
"cov_ct_idx",
"cov_forward_reds",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_link",
"cov_parent",
"depth",
"dst_function_name",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node. | ))
# Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector.
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-133-13 | infile | html_create_dedicated_calltree_file | [
"analysis_func",
"create_calltree",
"create_fuzz_blocker_table",
"create_str_node_ctx_idx",
"get_fuzz_blockers",
"html_create_dedicated_calltree_file",
"name",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self. | (
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector.
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-144-38 | inproject | FuzzerProfile | [
"add_func_to_reached_and_clone",
"Any",
"copy",
"Dict",
"FunctionProfile",
"fuzz_cfg_load",
"fuzz_cov_load",
"fuzz_utils",
"FuzzerProfile",
"List",
"load_all_profiles",
"logger",
"logging",
"MergedProjectProfile",
"Optional",
"os",
"read_fuzzer_data_file_to_profile",
"Set",
"Tuple",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader. | ):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector.
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-153-40 | inproject | html_get_header | [
"Any",
"html_add_header_with_link",
"html_create_table_head",
"html_get_header",
"html_get_navbar",
"html_get_table_of_contents",
"html_table_add_row",
"List",
"Tuple",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector.
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers. | (
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-155-49 | inproject | get_key | [
"accummulate_profile",
"all_class_functions",
"binary_executable",
"correlate_executable_name",
"coverage",
"file_targets",
"function_call_depths",
"functions_reached_by_fuzzer",
"functions_unreached_by_fuzzer",
"fuzzer_source_file",
"get_cov_metrics",
"get_cov_uncovered_reachable_funcs",
"get_file_targets",
"get_function_coverage",
"get_key",
"get_target_fuzzer_filename",
"get_total_basic_blocks",
"get_total_cyclomatic_complexity",
"introspector_data_file",
"load_coverage",
"reaches",
"refine_paths",
"set_all_reached_functions",
"set_all_unreached_functions",
"total_basic_blocks",
"total_cyclomatic_complexity",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector.
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile. | () }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
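        # The list is sorted by descending cov_forward_reds, so we can stop at the
        # first node with no blocked callsites ahead (or once the cap is reached).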
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
        html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
                ("Largest blocked function",
                 "This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-166-34 | infile | create_fuzz_blocker_table | [
"analysis_func",
"create_calltree",
"create_fuzz_blocker_table",
"create_str_node_ctx_idx",
"get_fuzz_blockers",
"html_create_dedicated_calltree_file",
"name",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
            # Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self. | (profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
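        # Collect the zero-padded indices of the fuzz blockers; when non-empty,
        # they are exposed below as the fuzz_blocker_idxs JS variable.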
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
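        # Zero-pad the calltree index to five digits so it lines up with the
        # data-calltree-idx anchors emitted in the calltree HTML.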
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
        # Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
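        # The list is sorted by descending cov_forward_reds, so we can stop at the
        # first node with no blocked callsites ahead (or once the cap is reached).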
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
        html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
                ("Largest blocked function",
                 "This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-181-25 | infile | append | [
"append",
"clear",
"copy",
"count",
"extend",
"index",
"insert",
"pop",
"remove",
"reverse",
"sort",
"__add__",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getitem__",
"__gt__",
"__hash__",
"__iadd__",
"__imul__",
"__init__",
"__init_subclass__",
"__iter__",
"__le__",
"__len__",
"__lt__",
"__module__",
"__mul__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rmul__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
            # Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
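        # Collect the zero-padded indices of the fuzz blockers; when non-empty,
        # they are exposed below as the fuzz_blocker_idxs JS variable.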
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs. | (self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
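        # Zero-pad the calltree index to five digits so it lines up with the
        # data-calltree-idx anchors emitted in the calltree HTML.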
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
        # Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
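        # The list is sorted by descending cov_forward_reds, so we can stop at the
        # first node with no blocked callsites ahead (or once the cap is reached).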
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
        html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
                ("Largest blocked function",
                 "This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-181-37 | infile | create_str_node_ctx_idx | [
"analysis_func",
"create_calltree",
"create_fuzz_blocker_table",
"create_str_node_ctx_idx",
"get_fuzz_blockers",
"html_create_dedicated_calltree_file",
"name",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
            # Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
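        # Collect the zero-padded indices of the fuzz blockers; when non-empty,
        # they are exposed below as the fuzz_blocker_idxs JS variable.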
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self. | (str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
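        # Zero-pad the calltree index to five digits so it lines up with the
        # data-calltree-idx anchors emitted in the calltree HTML.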
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
        # Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
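        # The list is sorted by descending cov_forward_reds, so we can stop at the
        # first node with no blocked callsites ahead (or once the cap is reached).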
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
        html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
                ("Largest blocked function",
                 "This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-181-70 | infile | cov_ct_idx | [
"children",
"cov_callsite_link",
"cov_color",
"cov_ct_idx",
"cov_forward_reds",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_link",
"cov_parent",
"depth",
"dst_function_name",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
            # Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
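        # Collect the zero-padded indices of the fuzz blockers; when non-empty,
        # they are exposed below as the fuzz_blocker_idxs JS variable.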
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node. | )))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
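        # Zero-pad the calltree index to five digits so it lines up with the
        # data-calltree-idx anchors emitted in the calltree HTML.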
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
        # Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
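        # The list is sorted by descending cov_forward_reds, so we can stop at the
        # first node with no blocked callsites ahead (or once the cap is reached).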
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
        html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
                ("Largest blocked function",
                 "This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-207-38 | inproject | FuzzerProfile | [
"add_func_to_reached_and_clone",
"Any",
"copy",
"Dict",
"FunctionProfile",
"fuzz_cfg_load",
"fuzz_cov_load",
"fuzz_utils",
"FuzzerProfile",
"List",
"load_all_profiles",
"logger",
"logging",
"MergedProjectProfile",
"Optional",
"os",
"read_fuzzer_data_file_to_profile",
"Set",
"Tuple",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
            # Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
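        # Collect the zero-padded indices of the fuzz blockers; when non-empty,
        # they are exposed below as the fuzz_blocker_idxs JS variable.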
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
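        # Zero-pad the calltree index to five digits so it lines up with the
        # data-calltree-idx anchors emitted in the calltree HTML.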
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader. | ,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
        # Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
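        # The list is sorted by descending cov_forward_reds, so we can stop at the
        # first node with no blocked callsites ahead (or once the cap is reached).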
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
        html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
                ("Largest blocked function",
                 "This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-210-41 | inproject | CalltreeCallsite | [
"CalltreeCallsite",
"data_file_read_calltree",
"extract_all_callsites",
"extract_all_callsites_recursive",
"List",
"logger",
"logging",
"Optional",
"print_ctcs_tree",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
            # Only display the [function] link if we have one, otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
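        # Collect the zero-padded indices of the fuzz blockers; when non-empty,
        # they are exposed below as the fuzz_blocker_idxs JS variable.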
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
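        # Zero-pad the calltree index to five digits so it lines up with the
        # data-calltree-idx anchors emitted in the calltree HTML.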
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load. | ] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
        # Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
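        # The list is sorted by descending cov_forward_reds, so we can stop at the
        # first node with no blocked callsites ahead (or once the cap is reached).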
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
        html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
                ("Largest blocked function",
                 "This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-213-38 | inproject | extract_all_callsites | [
"CalltreeCallsite",
"data_file_read_calltree",
"extract_all_callsites",
"extract_all_callsites_recursive",
"List",
"logger",
"logging",
"Optional",
"print_ctcs_tree",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions,
# as they can bloat the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
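# The page assembled below has four parts: the standard fuzz-introspector
# header, an optional fuzz-blocker table, the calltree HTML itself, and the
# prism.js/clike.js/calltree.js scripts used by the calltree page.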
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
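# Left-pad the callsite index to five characters so it matches the
# data-calltree-idx values used in the calltree HTML, e.g. "42" -> "00042".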
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
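# A fuzz blocker is a callsite with uncovered ("red") callsites ahead of it.
# Callsites are sorted by cov_forward_reds in descending order and collected
# until a node with no forward reds is met or max_blockers_to_extract nodes
# have been gathered.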
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load. | (profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
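# Each row in the table below holds: the number of callsites blocked, the
# calltree index, the parent function, a link that scrolls the calltree page
# to the callsite, and the largest blocked function (by cyclomatic complexity).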
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-213-68 | inproject | function_call_depths | [
"accummulate_profile",
"all_class_functions",
"binary_executable",
"correlate_executable_name",
"coverage",
"file_targets",
"function_call_depths",
"functions_reached_by_fuzzer",
"functions_unreached_by_fuzzer",
"fuzzer_source_file",
"get_cov_metrics",
"get_cov_uncovered_reachable_funcs",
"get_file_targets",
"get_function_coverage",
"get_key",
"get_target_fuzzer_filename",
"get_total_basic_blocks",
"get_total_cyclomatic_complexity",
"introspector_data_file",
"load_coverage",
"reaches",
"refine_paths",
"set_all_reached_functions",
"set_all_unreached_functions",
"total_basic_blocks",
"total_cyclomatic_complexity",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions,
# as they can bloat the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile. | )
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-240-33 | infile | get_fuzz_blockers | [
"analysis_func",
"create_calltree",
"create_fuzz_blocker_table",
"create_str_node_ctx_idx",
"get_fuzz_blockers",
"html_create_dedicated_calltree_file",
"name",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions,
# as they can bloat the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self. | (
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-249-15 | random | append | [
"append",
"clear",
"copy",
"count",
"extend",
"index",
"insert",
"pop",
"remove",
"reverse",
"sort",
"__add__",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getitem__",
"__gt__",
"__hash__",
"__iadd__",
"__imul__",
"__init__",
"__init_subclass__",
"__iter__",
"__le__",
"__len__",
"__lt__",
"__module__",
"__mul__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rmul__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions,
# as they can bloat the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables. | (f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-250-47 | inproject | html_create_table_head | [
"Any",
"html_add_header_with_link",
"html_create_table_head",
"html_get_header",
"html_get_navbar",
"html_get_table_of_contents",
"html_table_add_row",
"List",
"Tuple",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions,
# as they can bloat the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers. | (
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.html_table_add_row([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
0 | 0-276-51 | inproject | html_table_add_row | [
"Any",
"html_add_header_with_link",
"html_create_table_head",
"html_get_header",
"html_get_navbar",
"html_get_table_of_contents",
"html_table_add_row",
"List",
"Tuple",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions,
# as they can bloat the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The following nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with the highest cyclomatic complexity amongst "
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers. | ([
str(node.cov_forward_reds),
str(node.cov_ct_idx),
node.cov_parent,
f"<a href={node_link}>call site</a>",
node.cov_largest_blocked_func
])
html_table_string += "</table>"
return html_table_string
| ossf__fuzz-introspector |
1 | 1-95-26 | inproject | search | [
"findall",
"finditer",
"flags",
"fullmatch",
"groupindex",
"groups",
"match",
"pattern",
"search",
"split",
"sub",
"subn",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
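# Every remaining line is either an edge ("a -> b") or a node definition.
# Node lines are expected to look roughly like this illustrative example:
#   4[label = "multiqc", color = "0.63 0.6 0.85", style="rounded"];
# which is what the three regexes above capture.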
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l. | (line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
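# Connect every parent of this node directly to every daughter, so that
# paths running through the node survive its removal.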
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
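# Remove edges implied by longer paths (e.g. drop a -> c when a -> b -> c
# exists). networkx's transitive_reduction requires a directed acyclic graph,
# which a workflow rule graph should be.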
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-95-39 | inproject | groups | [
"end",
"endpos",
"expand",
"group",
"groupdict",
"groups",
"lastgroup",
"lastindex",
"pos",
"re",
"regs",
"span",
"start",
"string",
"__annotations__",
"__bool__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getitem__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line). | ()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-96-26 | inproject | search | [
"findall",
"finditer",
"flags",
"fullmatch",
"groupindex",
"groups",
"match",
"pattern",
"search",
"split",
"sub",
"subn",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c. | (line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-96-39 | inproject | groups | [
"end",
"endpos",
"expand",
"group",
"groupdict",
"groups",
"lastgroup",
"lastindex",
"pos",
"re",
"regs",
"span",
"start",
"string",
"__annotations__",
"__bool__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getitem__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line). | ()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-97-26 | inproject | search | [
"findall",
"finditer",
"flags",
"fullmatch",
"groupindex",
"groups",
"match",
"pattern",
"search",
"split",
"sub",
"subn",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s. | (line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-97-39 | inproject | groups | [
"end",
"endpos",
"expand",
"group",
"groupdict",
"groups",
"lastgroup",
"lastindex",
"pos",
"re",
"regs",
"span",
"start",
"string",
"__annotations__",
"__bool__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getitem__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line). | ()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-123-14 | infile | write | [
"close",
"closed",
"fileno",
"flush",
"isatty",
"mode",
"name",
"read",
"readable",
"readline",
"readlines",
"seek",
"seekable",
"tell",
"truncate",
"writable",
"write",
"writelines",
"_is_protocol",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__module__",
"__ne__",
"__new__",
"__next__",
"__parameters__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f. | (" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-123-24 | infile | join | [
"capitalize",
"casefold",
"center",
"count",
"encode",
"endswith",
"expandtabs",
"find",
"format",
"format_map",
"index",
"isalnum",
"isalpha",
"isascii",
"isdecimal",
"isdigit",
"isidentifier",
"islower",
"isnumeric",
"isprintable",
"isspace",
"istitle",
"isupper",
"join",
"ljust",
"lower",
"lstrip",
"maketrans",
"partition",
"removeprefix",
"removesuffix",
"replace",
"rfind",
"rindex",
"rjust",
"rpartition",
"rsplit",
"rstrip",
"split",
"splitlines",
"startswith",
"strip",
"swapcase",
"title",
"translate",
"upper",
"zfill",
"__add__",
"__annotations__",
"__class__",
"__contains__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getitem__",
"__getnewargs__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__le__",
"__len__",
"__lt__",
"__mod__",
"__module__",
"__mul__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rmul__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ". | ([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-123-35 | infile | type | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self. | , self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-123-46 | infile | name | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self. | , "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-124-14 | infile | write | [
"close",
"closed",
"fileno",
"flush",
"isatty",
"mode",
"name",
"read",
"readable",
"readline",
"readlines",
"seek",
"seekable",
"tell",
"truncate",
"writable",
"write",
"writelines",
"_is_protocol",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__module__",
"__ne__",
"__new__",
"__next__",
"__parameters__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f. | (self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
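The _order_edges docstring above states the sort rule in words: targets ascending, and within a given target, sources descending. An equivalent one-liner on a toy edge set; this is a paraphrase for illustration, not the repository's implementation:

# Same ordering as _order_edges, expressed as a sort key on (source, target) tuples.
edges = {("1", "2"), ("1", "3"), ("2", "3"), ("3", "4")}
ordered = sorted(edges, key=lambda e: (int(e[1]), -int(e[0])))
print(ordered)  # [('1', '2'), ('2', '3'), ('1', '3'), ('3', '4')]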
1 | 1-124-25 | infile | graph_style | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self. | )
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
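The transitive_reduction method above hands the edge set to networkx and keeps only the reduced edges. A short standalone check of what that call does on a small DAG (requires networkx installed; the call assumes the graph is acyclic, as the method does):

import networkx as nx

# 1 -> 3 is implied by the path 1 -> 2 -> 3, so the reduction drops it.
g = nx.DiGraph([("1", "2"), ("2", "3"), ("1", "3")])
print(sorted(nx.transitive_reduction(g).edges()))  # [('1', '2'), ('2', '3')]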
1 | 1-125-14 | infile | write | [
"close",
"closed",
"fileno",
"flush",
"isatty",
"mode",
"name",
"read",
"readable",
"readline",
"readlines",
"seek",
"seekable",
"tell",
"truncate",
"writable",
"write",
"writelines",
"_is_protocol",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__module__",
"__ne__",
"__new__",
"__next__",
"__parameters__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f. | (self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
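For orientation, a rough end-to-end sketch of how the Digraph class above might be used: write a minimal DOT file in the layout the parser expects (header line, then graph/node/edge style lines, then node and edge lines), load it, recolor a rule by label, and write it back out. The file names and the two rule labels are made up for illustration, and the sketch assumes the Digraph class from the surrounding code is in scope:

dot = """digraph snakemake_dag {
    graph[bgcolor=white, margin=0];
    node[shape=box, style=rounded];
    edge[penwidth=2, color=grey];
    0[label = "multiqc", color = "0.63 0.6 0.85", style="rounded"];
    1[label = "fastp_SE", color = "0.13 0.6 0.85", style="rounded"];
    1 -> 0
}
"""
with open("dag.dot", "w") as f:
    f.write(dot)

dag = Digraph("dag.dot")                      # parse nodes, edges and styles
dag.color_node("fastp_SE", "0.49 0.6 0.85")   # recolor a rule by its label
dag.write("dag_recolored.dot")                # emit the edited DOT file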
1 | 1-125-25 | infile | node_style | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self. | )
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-126-14 | infile | write | [
"close",
"closed",
"fileno",
"flush",
"isatty",
"mode",
"name",
"read",
"readable",
"readline",
"readlines",
"seek",
"seekable",
"tell",
"truncate",
"writable",
"write",
"writelines",
"_is_protocol",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__module__",
"__ne__",
"__new__",
"__next__",
"__parameters__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f. | (self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-126-25 | infile | edge_style | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self. | )
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-133-29 | infile | _order_edges | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self. | ():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-135-18 | infile | write | [
"close",
"closed",
"fileno",
"flush",
"isatty",
"mode",
"name",
"read",
"readable",
"readline",
"readlines",
"seek",
"seekable",
"tell",
"truncate",
"writable",
"write",
"writelines",
"_is_protocol",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__module__",
"__ne__",
"__new__",
"__next__",
"__parameters__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f. | (line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-136-14 | infile | write | [
"close",
"closed",
"fileno",
"flush",
"isatty",
"mode",
"name",
"read",
"readable",
"readline",
"readlines",
"seek",
"seekable",
"tell",
"truncate",
"writable",
"write",
"writelines",
"_is_protocol",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__module__",
"__ne__",
"__new__",
"__next__",
"__parameters__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f. | ("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
    def hide_node(self, node):
        """Remove a node and connect its parent nodes directly to its daughter nodes."""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-142-20 | common | label2id | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self. | .get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
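The ground-truth token in the row above is the label2id lookup inside _get_node_id, which accepts either a numeric dot id or a rule label and falls back to dict.get so an unknown name yields None rather than a KeyError. A small self-contained sketch of that resolution step; the sample tables below are made up for illustration.

# hypothetical lookup tables mirroring Digraph.nodes / Digraph.label2id
nodes = {"0": {"label": "get_genome"}, "3": {"label": "bwa_mem"}}
label2id = {"get_genome": "0", "bwa_mem": "3"}

def get_node_id(node):
    """Resolve a node given either its numeric id or its rule label."""
    node = str(node)
    if node.isdigit() and node in nodes:
        return node               # already a known numeric id
    return label2id.get(node)     # label lookup; None when the label is unknown

print(get_node_id(0))               # '0'
print(get_node_id("bwa_mem"))       # '3'
print(get_node_id("no_such_rule"))  # None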
1 | 1-142-29 | common | get | [
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id. | (node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-152-23 | infile | _get_node_id | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self. | (node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
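The masked token in the row above is another _get_node_id call, but the detail worth noting in remove_node is the iteration over self.edges.copy(): discarding from the live set while looping over a snapshot avoids a "set changed size during iteration" RuntimeError. A minimal sketch of that pattern with made-up ids.

edges = {("1", "2"), ("2", "3"), ("3", "4")}
node_id = "2"

# loop over a snapshot so the real set can be mutated inside the loop
for edge in edges.copy():
    if node_id in edge:
        edges.remove(edge)

print(edges)  # {('3', '4')}: every edge touching node '2' is gone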
1 | 1-166-25 | infile | _get_node_id | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self. | (source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-167-25 | infile | _get_node_id | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self. | (target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
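Every prefix in these rows repeats the Digraph.__init__ parser, which tells edge lines ("a -> b") apart from node declarations and pulls label, color and style out with three small regexes. A stand-alone sketch of that parsing step on two hand-written dot lines; the sample lines are illustrative, formatted the way Digraph.write emits them.

import re

label_re = re.compile(r'label = "(.*?)"')
color_re = re.compile(r'color = "(.*?)"')
style_re = re.compile(r'style="(.*?)"')

lines = [
    '    0[label = "get_genome", color = "0.49 0.6 0.85", style="rounded"];',
    "    0 -> 1",
]

nodes, edges = {}, set()
for line in lines:
    line = line.strip()
    edge = tuple(line.split(" -> "))
    if len(edge) == 2:        # edge line: "source -> target"
        edges.add(edge)
    elif "[" in line[:5]:     # node line: id[label = ..., color = ..., style=...]
        node_id = line.split("[")[0]
        nodes[node_id] = {
            "label": label_re.search(line).group(1),
            "color": color_re.search(line).group(1),
            "style": style_re.search(line).group(1),
        }

print(nodes)  # {'0': {'label': 'get_genome', 'color': '0.49 0.6 0.85', 'style': 'rounded'}}
print(edges)  # {('0', '1')}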
1 | 1-188-13 | infile | remove_node | [
"color_node",
"edge_style",
"edges",
"graph_style",
"hide_node",
"label2id",
"name",
"node_style",
"nodes",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"_get_node_id",
"_order_edges",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self. | (node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms.transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
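The masked call in the row above is remove_node inside hide_node; the purpose of that method is to splice a node out of the graph while keeping every parent connected to every daughter. A compact sketch of the reconnection step with toy ids, not tied to any particular workflow graph.

edges = {("1", "2"), ("2", "3"), ("2", "4")}
node_id = "2"  # node to hide

# collect neighbours before touching the edge set
parents = [a for a, b in edges if b == node_id]
daughters = [b for a, b in edges if a == node_id]

# drop every edge touching the hidden node, then bridge parents to daughters
edges = {e for e in edges if node_id not in e}
for parent in parents:
    for daughter in daughters:
        edges.add((parent, daughter))

print(sorted(edges))  # [('1', '3'), ('1', '4')]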
1 | 1-197-15 | infile | algorithms | [
"accumulate",
"adamic_adar_index",
"add_cycle",
"add_path",
"add_star",
"adjacency_data",
"adjacency_graph",
"adjacency_matrix",
"adjacency_spectrum",
"algebraic_connectivity",
"algorithms",
"ALGORITHMS",
"all_neighbors",
"all_node_cuts",
"all_pairs_all_shortest_paths",
"all_pairs_bellman_ford_path",
"all_pairs_bellman_ford_path_length",
"all_pairs_dijkstra",
"all_pairs_dijkstra_path",
"all_pairs_dijkstra_path_length",
"all_pairs_lowest_common_ancestor",
"all_pairs_node_connectivity",
"all_pairs_shortest_path",
"all_pairs_shortest_path_length",
"all_shortest_paths",
"all_simple_edge_paths",
"all_simple_paths",
"all_topological_sorts",
"all_triads",
"all_triplets",
"AmbiguousSolution",
"ancestors",
"antichains",
"Any",
"apply_alpha",
"approximate_current_flow_betweenness_centrality",
"approximation",
"arbitrary_element",
"ArborescenceIterator",
"arf_layout",
"articulation_points",
"AS_graph_generator",
"AsciiBaseGlyphs",
"AsciiDirectedGlyphs",
"AsciiUndirectedGlyphs",
"assortativity",
"astar_path",
"astar_path_length",
"ATLAS_FILE",
"attr_matrix",
"attr_sparse_matrix",
"attracting_components",
"attribute_ac",
"attribute_assortativity_coefficient",
"attribute_mixing_dict",
"attribute_mixing_matrix",
"attrmatrix",
"average_clustering",
"average_degree_connectivity",
"average_neighbor_degree",
"average_node_connectivity",
"average_shortest_path_length",
"balanced_tree",
"barabasi_albert_graph",
"barbell_graph",
"barycenter",
"BaseGlyphs",
"bellman_ford_path",
"bellman_ford_path_length",
"bellman_ford_predecessor_and_distance",
"bethe_hessian_matrix",
"bethe_hessian_spectrum",
"bethehessianmatrix",
"betweenness_centrality",
"betweenness_centrality_subset",
"bfs_beam_edges",
"bfs_edges",
"bfs_labeled_edges",
"bfs_layers",
"bfs_layout",
"bfs_predecessors",
"bfs_successors",
"bfs_tree",
"biconnected_component_edges",
"biconnected_components",
"bidirectional_dijkstra",
"bidirectional_shortest_path",
"binomial_graph",
"binomial_tree",
"bipartite",
"bipartite_layout",
"bisect_left",
"blake2b",
"boruvka_mst_edges",
"boundary_expansion",
"bridges",
"bull_graph",
"capacity_scaling",
"cartesian_product",
"caveman_graph",
"cd_index",
"center",
"centrality",
"CGInverseLaplacian",
"chain",
"chain_decomposition",
"chaini",
"check_create_using",
"check_planarity",
"check_planarity_recursive",
"choose_pref_attach",
"chordal",
"chordal_cycle_graph",
"chordal_graph_cliques",
"chordal_graph_treewidth",
"chordless_cycles",
"chromatic_polynomial",
"chvatal_graph",
"circulant_graph",
"circular_ladder_graph",
"circular_layout",
"classes",
"clique",
"closeness_centrality",
"closeness_vitality",
"cluster",
"clustering",
"cn_soundarajan_hopcroft",
"Collection",
"collections",
"colliders",
"coloring",
"comb",
"combinations",
"combinations_with_replacement",
"combinatorial_embedding_to_pos",
"common_neighbor_centrality",
"common_neighbors",
"communicability",
"communicability_betweenness_centrality",
"communicability_exp",
"community",
"complement",
"complete_bipartite_graph",
"complete_graph",
"complete_multipartite_graph",
"complete_to_chordal_graph",
"components",
"compose",
"compose_all",
"compute_v_structures",
"condensation",
"conductance",
"config",
"configuration_model",
"ConflictPair",
"conftest",
"connected_caveman_graph",
"connected_components",
"connected_double_edge_swap",
"connected_watts_strogatz_graph",
"connectivity",
"constraint",
"contracted_edge",
"contracted_nodes",
"convert",
"convert_matrix",
"convert_node_labels_to_integers",
"copy",
"core_number",
"coreviews",
"corona_product",
"cost_of_flow",
"could_be_isomorphic",
"count",
"Counter",
"create_component_structure",
"create_empty_copy",
"cubical_graph",
"current_flow_betweenness_centrality",
"current_flow_betweenness_centrality_subset",
"current_flow_closeness_centrality",
"cut_size",
"cycle_basis",
"cycle_graph",
"cytoscape_data",
"cytoscape_graph",
"d_separated",
"dag_longest_path",
"dag_longest_path_length",
"dag_to_branching",
"data_to_n",
"dataclass",
"davis_southern_women_graph",
"debug_print",
"dedensify",
"deepcopy",
"defaultdict",
"degree",
"degree_assortativity_coefficient",
"degree_centrality",
"degree_histogram",
"degree_mixing_dict",
"degree_mixing_matrix",
"degree_pearson_correlation_coefficient",
"degree_sequence_tree",
"DegreeSequenceRandomGraph",
"dense_gnm_random_graph",
"density",
"deque",
"desargues_graph",
"descendants",
"descendants_at_distance",
"dfs_edges",
"dfs_labeled_edges",
"dfs_postorder_nodes",
"dfs_predecessors",
"dfs_preorder_nodes",
"dfs_successors",
"dfs_tree",
"diameter",
"diamond_graph",
"dict_to_numpy_array",
"difference",
"DiGraph",
"dijkstra",
"dijkstra_path",
"dijkstra_path_length",
"dijkstra_predecessor_and_distance",
"directed_combinatorial_laplacian_matrix",
"directed_configuration_model",
"directed_edge_swap",
"directed_havel_hakimi_graph",
"directed_joint_degree_graph",
"directed_laplacian_matrix",
"directed_modularity_matrix",
"discrete_sequence",
"disjoint_union",
"disjoint_union_all",
"dispersion",
"dodecahedral_graph",
"dominance_frontiers",
"dominating_set",
"dorogovtsev_goltsev_mendes_graph",
"double_edge_swap",
"draw",
"draw_circular",
"draw_forceatlas2",
"draw_kamada_kawai",
"draw_networkx",
"draw_networkx_edge_labels",
"draw_networkx_edges",
"draw_networkx_labels",
"draw_networkx_nodes",
"draw_planar",
"draw_random",
"draw_shell",
"draw_spectral",
"draw_spring",
"drawing",
"dual_barabasi_albert_graph",
"duplication_divergence_graph",
"eccentricity",
"edge_betweenness_centrality",
"edge_betweenness_centrality_subset",
"edge_bfs",
"edge_boundary",
"edge_connectivity",
"edge_current_flow_betweenness_centrality",
"edge_current_flow_betweenness_centrality_subset",
"edge_dfs",
"edge_disjoint_paths",
"edge_expansion",
"edge_load_centrality",
"edge_subgraph",
"EdgePartition",
"edges",
"effective_graph_resistance",
"effective_size",
"efficiency",
"ego_graph",
"eigenvector_centrality",
"eigenvector_centrality_numpy",
"Element",
"ElementTree",
"empty_graph",
"Enum",
"enumerate_all_cliques",
"equitable_color",
"equivalence_classes",
"erdos_renyi_graph",
"escape",
"estrada_index",
"eulerian_circuit",
"eulerian_path",
"eulerize",
"ExceededMaxIterations",
"exception",
"expected_degree_graph",
"extended_barabasi_albert_graph",
"factorial",
"FancyArrowFactory",
"fast_could_be_isomorphic",
"fast_gnp_random_graph",
"faster_could_be_isomorphic",
"fiedler_vector",
"field",
"filters",
"find_asteroidal_triple",
"find_cliques",
"find_cliques_recursive",
"find_cycle",
"find_induced_nodes",
"find_minimal_d_separator",
"find_negative_cycle",
"flatten",
"florentine_families_graph",
"flow",
"flow_hierarchy",
"flow_matrix_row",
"floyd_warshall",
"floyd_warshall_numpy",
"floyd_warshall_predecessor_and_distance",
"forceatlas2_layout",
"FORWARD",
"FORWARD_EDGE",
"freeze",
"from_dict_of_dicts",
"from_dict_of_lists",
"from_edgelist",
"from_graph6_bytes",
"from_nested_tuple",
"from_numpy_array",
"from_pandas_adjacency",
"from_pandas_edgelist",
"from_prufer_sequence",
"from_scipy_sparse_array",
"from_sparse6_bytes",
"frozen",
"frucht_graph",
"fruchterman_reingold_layout",
"full_join",
"full_rary_tree",
"FullInverseLaplacian",
"functools",
"gaussian_random_partition_graph",
"gcd",
"general_random_intersection_graph",
"generalized_degree",
"generate_adjlist",
"generate_edgelist",
"generate_gexf",
"generate_gml",
"generate_graphml",
"generate_multiline_adjlist",
"generate_network_text",
"generate_pajek",
"generate_random_paths",
"Generator",
"generators",
"generic_bfs_edges",
"geographical_threshold_graph",
"geometric_edges",
"geometric_soft_configuration_graph",
"get_canonical_ordering",
"get_counterexample",
"get_counterexample_recursive",
"get_edge_attributes",
"get_node_attributes",
"GEXF",
"GEXFReader",
"GEXFWriter",
"girth",
"global_efficiency",
"global_parameters",
"global_reaching_centrality",
"gn_graph",
"gnc_graph",
"gnm_random_graph",
"gnp_random_graph",
"gnr_graph",
"goldberg_radzik",
"gomory_hu_tree",
"google_matrix",
"Graph",
"graph_atlas",
"graph_atlas_g",
"graph_edit_distance",
"graphmatrix",
"GraphML",
"GraphMLReader",
"GraphMLWriter",
"GraphMLWriterLxml",
"graphviews",
"greedy_color",
"grid_2d_graph",
"grid_graph",
"group_betweenness_centrality",
"group_closeness_centrality",
"group_degree_centrality",
"group_in_degree_centrality",
"group_out_degree_centrality",
"groups",
"gutman_index",
"gzip",
"harmonic_centrality",
"harmonic_diameter",
"has_bridges",
"has_cycle",
"has_eulerian_path",
"has_path",
"HasACycle",
"havel_hakimi_graph",
"heappop",
"heappush",
"heapq",
"heawood_graph",
"hexagonal_lattice_graph",
"hits",
"hkn_harary_graph",
"hnm_harary_graph",
"hoffman_singleton_graph",
"house_graph",
"house_x_graph",
"htmlentitydefs",
"hypercube_graph",
"icosahedral_graph",
"identified_nodes",
"immediate_dominators",
"importlib",
"in_degree_centrality",
"incidence_matrix",
"incremental_closeness_centrality",
"IncrementalElement",
"induced_subgraph",
"inf",
"information_centrality",
"intersection",
"intersection_all",
"intersection_array",
"Interval",
"interval_graph",
"inverse_line_graph",
"is_aperiodic",
"is_arborescence",
"is_at_free",
"is_attracting_component",
"is_biconnected",
"is_bipartite",
"is_branching",
"is_chordal",
"is_connected",
"is_d_separator",
"is_digraphical",
"is_directed",
"is_directed_acyclic_graph",
"is_distance_regular",
"is_dominating_set",
"is_edge_cover",
"is_empty",
"is_eulerian",
"is_forest",
"is_frozen",
"is_graphical",
"is_isolate",
"is_isomorphic",
"is_k_edge_connected",
"is_k_regular",
"is_kl_connected",
"is_matching",
"is_maximal_matching",
"is_minimal_d_separator",
"is_multigraphical",
"is_negatively_weighted",
"is_path",
"is_perfect_matching",
"is_planar",
"is_pseudographical",
"is_regular",
"is_regular_expander",
"is_semiconnected",
"is_semieulerian",
"is_simple_path",
"is_strongly_connected",
"is_strongly_regular",
"is_tournament",
"is_tree",
"is_triad",
"is_valid_degree_sequence_erdos_gallai",
"is_valid_degree_sequence_havel_hakimi",
"is_valid_directed_joint_degree",
"is_valid_joint_degree",
"is_weakly_connected",
"is_weighted",
"islice",
"isnan",
"isolates",
"isomorphism",
"it",
"itemgetter",
"Iterator",
"itertools",
"jaccard_coefficient",
"johnson",
"join_trees",
"joint_degree_graph",
"junction_tree",
"k_components",
"k_core",
"k_corona",
"k_crust",
"k_edge_augmentation",
"k_edge_components",
"k_edge_subgraphs",
"k_factor",
"k_random_intersection_graph",
"k_shell",
"k_truss",
"kamada_kawai_layout",
"karate_club_graph",
"katz_centrality",
"katz_centrality_numpy",
"kemeny_constant",
"kl_connected_subgraph",
"kneser_graph",
"kosaraju_strongly_connected_components",
"krackhardt_kite_graph",
"kruskal_mst_edges",
"ladder_graph",
"laplacian_centrality",
"laplacian_matrix",
"laplacian_spectrum",
"laplacianmatrix",
"lattice_reference",
"lazy_imports",
"LCF_graph",
"les_miserables_graph",
"LEVEL_EDGE",
"lexicographic_product",
"lexicographical_topological_sort",
"LFR_benchmark_graph",
"linalg",
"line_graph",
"link_analysis",
"LIST_START_VALUE",
"literal_destringizer",
"literal_eval",
"literal_stringizer",
"load_centrality",
"local_bridges",
"local_constraint",
"local_efficiency",
"local_reaching_centrality",
"log",
"lollipop_graph",
"lowest_common_ancestor",
"lowest_common_ancestors",
"LRPlanarity",
"make_bi_connected",
"make_clique_bipartite",
"make_max_clique_graph",
"make_qstr",
"Mapping",
"margulis_gabber_galil_graph",
"matching_dict_to_set",
"math",
"max_flow_min_cost",
"max_weight_clique",
"max_weight_matching",
"maximal_independent_set",
"maximal_matching",
"maximum_branching",
"maximum_flow",
"maximum_flow_value",
"maximum_spanning_arborescence",
"maximum_spanning_edges",
"maximum_spanning_tree",
"MaxWeightClique",
"maybe_regular_expander",
"min_cost_flow",
"min_cost_flow_cost",
"min_edge_cover",
"min_weight_matching",
"minimal_d_separator",
"minimum_branching",
"minimum_cut",
"minimum_cut_value",
"minimum_cycle_basis",
"minimum_edge_cut",
"minimum_node_cut",
"minimum_spanning_arborescence",
"minimum_spanning_edges",
"minimum_spanning_tree",
"mixing_dict",
"mixing_expansion",
"modular_product",
"modularity_matrix",
"modularity_spectrum",
"modularitymatrix",
"moebius_kantor_graph",
"moral",
"moral_graph",
"multi_source_dijkstra",
"multi_source_dijkstra_path",
"multi_source_dijkstra_path_length",
"MultiDiGraph",
"MultiGraph",
"multipartite_layout",
"mutual_weight",
"mycielski_graph",
"mycielskian",
"n_to_data",
"NamedTuple",
"navigable_small_world_graph",
"negative_edge_cycle",
"neighbors",
"network_simplex",
"NetworkXAlgorithmError",
"NetworkXError",
"NetworkXException",
"NetworkXNoCycle",
"NetworkXNoPath",
"NetworkXNotImplemented",
"NetworkXPointlessConcept",
"NetworkXTreewidthBoundExceeded",
"NetworkXUnbounded",
"NetworkXUnfeasible",
"newman_betweenness_centrality",
"newman_watts_strogatz_graph",
"node_attribute_xy",
"node_boundary",
"node_classification",
"node_clique_number",
"node_connected_component",
"node_connectivity",
"node_degree_xy",
"node_disjoint_paths",
"node_expansion",
"node_link_data",
"node_link_graph",
"NodeNotFound",
"nodes",
"nodes_or_number",
"nodes_with_selfloops",
"non_edges",
"non_neighbors",
"non_randomness",
"nonisomorphic_trees",
"normalized_cut_size",
"normalized_laplacian_matrix",
"normalized_laplacian_spectrum",
"normalized_mutual_weight",
"not_implemented_for",
"NotATree",
"np_random_state",
"null_graph",
"NUM_GRAPHS",
"Number",
"number_attracting_components",
"number_connected_components",
"number_of_cliques",
"number_of_edges",
"number_of_isolates",
"number_of_nodes",
"number_of_nonisomorphic_trees",
"number_of_selfloops",
"number_of_spanning_trees",
"number_of_walks",
"number_strongly_connected_components",
"number_weakly_connected_components",
"numbers",
"numeric_assortativity_coefficient",
"nx",
"nx_agraph",
"nx_pydot",
"octahedral_graph",
"omega",
"onion_layers",
"open_file",
"operators",
"optimal_edit_paths",
"optimize_edit_paths",
"optimize_graph_edit_distance",
"os",
"out_degree_centrality",
"overall_reciprocity",
"pagerank",
"pairwise",
"paley_graph",
"panther_similarity",
"pappus_graph",
"parse_adjlist",
"parse_edgelist",
"parse_gml",
"parse_gml_lines",
"parse_graphml",
"parse_leda",
"parse_multiline_adjlist",
"parse_pajek",
"partial",
"partial_duplication_graph",
"partition_spanning_tree",
"path_graph",
"path_weight",
"PathBuffer",
"Pattern",
"percolation_centrality",
"periphery",
"permutations",
"petersen_graph",
"planar_layout",
"PlanarEmbedding",
"planted_partition_graph",
"power",
"PowerIterationFailedConvergence",
"powerlaw_cluster_graph",
"predecessor",
"preferential_attachment",
"prefix_tree",
"prefix_tree_recursive",
"prim_mst_edges",
"PriorityQueue",
"product",
"projected_graph",
"prominent_group",
"py_random_state",
"quotient_graph",
"ra_index_soundarajan_hopcroft",
"radius",
"random_clustered_graph",
"random_cograph",
"random_degree_sequence_graph",
"random_geometric_graph",
"random_internet_as_graph",
"random_k_out_graph",
"random_kernel_graph",
"random_labeled_rooted_forest",
"random_labeled_rooted_tree",
"random_labeled_tree",
"random_layout",
"random_lobster",
"random_partition_graph",
"random_powerlaw_tree",
"random_powerlaw_tree_sequence",
"random_reference",
"random_regular_expander_graph",
"random_regular_graph",
"random_shell_graph",
"random_spanning_tree",
"random_triad",
"random_uniform_k_out_graph",
"random_unlabeled_rooted_forest",
"random_unlabeled_rooted_tree",
"random_unlabeled_tree",
"random_weighted_sample",
"re",
"read_adjlist",
"read_edgelist",
"read_gexf",
"read_gml",
"read_graph6",
"read_graphml",
"read_leda",
"read_multiline_adjlist",
"read_pajek",
"read_sparse6",
"read_weighted_edgelist",
"readwrite",
"reciprocity",
"reconstruct_path",
"recursive_simple_cycles",
"reduce",
"register_namespace",
"relabel",
"relabel_gexf_graph",
"relabel_nodes",
"relaxed_caveman_graph",
"remove_edge_attributes",
"remove_node_attributes",
"repeat",
"reportviews",
"rescale_layout",
"rescale_layout_dict",
"resistance_distance",
"resource_allocation_index",
"restricted_view",
"reverse",
"REVERSE",
"reverse_cuthill_mckee_ordering",
"REVERSE_EDGE",
"reverse_view",
"rich_club_coefficient",
"ring_of_cliques",
"root_to_leaf_paths",
"rooted_product",
"s_metric",
"scale_free_graph",
"schultz_index",
"second_order_centrality",
"sedgewick_maze_graph",
"selfloop_edges",
"Sequence",
"Set",
"set_edge_attributes",
"set_node_attributes",
"set_position",
"shell_layout",
"shortest_path",
"shortest_path_length",
"shortest_paths",
"shortest_simple_paths",
"sigma",
"simple_cycles",
"simrank_similarity",
"single_source_all_shortest_paths",
"single_source_bellman_ford",
"single_source_bellman_ford_path",
"single_source_bellman_ford_path_length",
"single_source_dijkstra",
"single_source_dijkstra_path",
"single_source_dijkstra_path_length",
"single_source_shortest_path",
"single_source_shortest_path_length",
"single_target_shortest_path",
"single_target_shortest_path_length",
"snap_aggregation",
"soft_random_geometric_graph",
"spanner",
"SpanningTreeIterator",
"spectral_bisection",
"spectral_graph_forge",
"spectral_layout",
"spectral_ordering",
"spectrum",
"spiral_layout",
"spring_layout",
"sqrt",
"square_clustering",
"star_graph",
"starmap",
"stochastic_block_model",
"stochastic_graph",
"stoer_wagner",
"STRATEGIES",
"strategy_connected_sequential",
"strategy_connected_sequential_bfs",
"strategy_connected_sequential_dfs",
"strategy_independent_set",
"strategy_largest_first",
"strategy_random_sequential",
"strategy_saturation_largest_first",
"strategy_smallest_last",
"StringIO",
"strong_product",
"strongly_connected_components",
"SubElement",
"subgraph",
"subgraph_centrality",
"subgraph_centrality_exp",
"subgraph_view",
"sudoku_graph",
"SuperLUInverseLaplacian",
"symmetric_difference",
"sys",
"tadpole_graph",
"tensor_product",
"tests",
"tetrahedral_graph",
"thresholded_random_geometric_graph",
"time",
"to_dict_of_dicts",
"to_dict_of_lists",
"to_directed",
"to_edgelist",
"to_graph6_bytes",
"to_latex",
"to_latex_raw",
"to_nested_tuple",
"to_networkx_graph",
"to_numpy_array",
"to_pandas_adjacency",
"to_pandas_edgelist",
"to_prufer_sequence",
"to_scipy_sparse_array",
"to_sparse6_bytes",
"to_undirected",
"Token",
"top_of_stack",
"topological_generations",
"topological_sort",
"tostring",
"total_spanning_tree_weight",
"tournament",
"transitive_closure",
"transitive_closure_dag",
"transitive_reduction",
"transitivity",
"traversal",
"tree",
"tree_all_pairs_lowest_common_ancestor",
"tree_broadcast_center",
"tree_broadcast_time",
"tree_data",
"TREE_EDGE",
"tree_graph",
"TRIAD_EDGES",
"triad_graph",
"TRIAD_NAMES",
"triad_type",
"triadic_census",
"triads_by_type",
"triangles",
"triangular_lattice_graph",
"triangulate_embedding",
"triangulate_face",
"TRICODE_TO_NAME",
"TRICODES",
"trivial_graph",
"trophic_differences",
"trophic_incoherence_parameter",
"trophic_levels",
"truncated_cube_graph",
"truncated_tetrahedron_graph",
"turan_graph",
"tutte_graph",
"tutte_polynomial",
"unescape",
"uniform_int_from_avg",
"uniform_random_intersection_graph",
"union",
"union_all",
"UnionFind",
"UtfBaseGlyphs",
"UtfDirectedGlyphs",
"UtfUndirectedGlyphs",
"utils",
"v_structures",
"vf2pp_all_isomorphisms",
"vf2pp_is_isomorphic",
"vf2pp_isomorphism",
"visibility_graph",
"volume",
"voronoi_cells",
"voterank",
"warn",
"warnings",
"watts_strogatz_graph",
"waxman_graph",
"weakly_connected_components",
"weighted_choice",
"weisfeiler_lehman_graph_hash",
"weisfeiler_lehman_subgraph_hashes",
"wheel_graph",
"wiener_index",
"windmill_graph",
"within_inter_cluster",
"wraps",
"write_adjlist",
"write_edgelist",
"write_gexf",
"write_gml",
"write_graph6",
"write_graph6_file",
"write_graphml",
"write_graphml_lxml",
"write_graphml_xml",
"write_latex",
"write_multiline_adjlist",
"write_network_text",
"write_pajek",
"write_sparse6",
"write_weighted_edgelist",
"zip_longest",
"_accumulate_basic",
"_accumulate_edges",
"_accumulate_edges_subset",
"_accumulate_endpoints",
"_accumulate_percolation",
"_accumulate_subset",
"_add_edge_keys",
"_add_edge_to_spanner",
"_AdjEntry",
"_all_simple_edge_paths",
"_apply_prediction",
"_attrs",
"_average_weight",
"_basic_graphical_tests",
"_bellman_ford",
"_bicenter",
"_biconnected_dfs",
"_bidirectional_dijkstra",
"_bidirectional_pred_succ",
"_bidirectional_shortest_path",
"_bounded_cycle_search",
"_build_paths_from_predecessors",
"_chordless_cycle_search",
"_clear_cache",
"_community",
"_compute_rc",
"_configuration_model",
"_consistent_PT",
"_coo_gen_triples",
"_core_subgraph",
"_csc_gen_triples",
"_csr_gen_triples",
"_cut_PT",
"_dfbnb",
"_dict_product",
"_dijkstra",
"_dijkstra_multisource",
"_directed_cycle_search",
"_directed_edges_cross_edges",
"_directed_neighbor_switch",
"_directed_neighbor_switch_rev",
"_directed_triangles_and_degree_iter",
"_directed_weighted_triangles_and_degree_iter",
"_dispatchable",
"_DOC_WRAPPER_TIKZ",
"_dok_gen_triples",
"_edge_betweenness",
"_edge_value",
"_edges_cross_nodes",
"_edges_cross_nodes_and_nodes",
"_extrema_bounding",
"_feasibility",
"_FIG_WRAPPER",
"_find_candidates",
"_find_candidates_Di",
"_find_chordality_breaker",
"_find_missing_edge",
"_find_partition",
"_find_path_start",
"_find_sources",
"_fruchterman_reingold",
"_G_H",
"_generate_communities",
"_generate_graph6_bytes",
"_generate_graphs",
"_generate_min_degree",
"_generate_sparse6_bytes",
"_generate_weighted_edges",
"_geometric_edges",
"_get_broadcast_centers",
"_get_fiedler_func",
"_get_max_broadcast_value",
"_GraphParameters",
"_greedy_coloring_with_interchange",
"_group_preprocessing",
"_hash_label",
"_heuristic",
"_hits_numpy",
"_hits_python",
"_hits_scipy",
"_hurwitz_zeta",
"_init_node_labels",
"_init_product_graph",
"_initialize_parameters",
"_inner_bellman_ford",
"_is_complete_graph",
"_johnson_cycle_search",
"_kamada_kawai_costfn",
"_kamada_kawai_solve",
"_layout_to_graph",
"_layout_to_matrix",
"_lazy_import",
"_lg_directed",
"_lg_undirected",
"_lightest_edge_dicts",
"_LUSolver",
"_matching_order",
"_max_cardinality_node",
"_maximal_independent_set",
"_min_cycle",
"_min_cycle_basis",
"_multigraph_eulerian_circuit",
"_neighbor_switch",
"_neighborhood_aggregate",
"_NeighborhoodCache",
"_next_rooted_tree",
"_next_tree",
"_Node",
"_node_betweenness",
"_node_product",
"_node_value",
"_nodes_cross_edges",
"_num_rooted_forests",
"_num_rooted_trees",
"_num_trees",
"_numeric_ac",
"_odd_triangle",
"_pagerank_numpy",
"_pagerank_python",
"_pagerank_scipy",
"_parse_network_text",
"_PCGSolver",
"_plain_bfs",
"_powerlaw_sequence",
"_precheck_label_properties",
"_preprocess_graph",
"_process_params",
"_raise_on_directed",
"_random_subset",
"_random_unlabeled_rooted_forest",
"_random_unlabeled_rooted_tree",
"_random_unlabeled_tree",
"_rcm_estimate",
"_reachable",
"_reciprocity_iter",
"_relabel_copy",
"_relabel_inplace",
"_rescale",
"_rescale_e",
"_restore_Tinout",
"_restore_Tinout_Di",
"_select_jd_forests",
"_select_jd_trees",
"_select_starting_cell",
"_setup_residual_graph",
"_simplegraph_eulerian_circuit",
"_simrank_similarity_numpy",
"_simrank_similarity_python",
"_single_shortest_path",
"_single_shortest_path_length",
"_single_source_dijkstra_path_basic",
"_single_source_shortest_path_basic",
"_snap_build_graph",
"_snap_eligible_group",
"_snap_split",
"_sparse_fruchterman_reingold",
"_sparse_spectral",
"_spectral",
"_split_tree",
"_StateParameters",
"_SUBFIG_WRAPPER",
"_to_nx",
"_to_stublist",
"_to_tuple",
"_tracemin_fiedler",
"_transition_matrix",
"_tree_edges",
"_triangles",
"_triangles_and_degree_iter",
"_tricode",
"_undirected_cycle_search",
"_undirected_edges_cross_edges",
"_update_Tinout",
"_weight_function",
"_weighted_triangles_and_degree_iter",
"_zipf_rv_below",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__version__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx. | .transitive_reduction(g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
1 | 1-197-26 | infile | transitive_reduction | [
"accumulate",
"adamic_adar_index",
"ALGORITHMS",
"all_node_cuts",
"all_pairs_all_shortest_paths",
"all_pairs_bellman_ford_path",
"all_pairs_bellman_ford_path_length",
"all_pairs_dijkstra",
"all_pairs_dijkstra_path",
"all_pairs_dijkstra_path_length",
"all_pairs_lowest_common_ancestor",
"all_pairs_node_connectivity",
"all_pairs_shortest_path",
"all_pairs_shortest_path_length",
"all_shortest_paths",
"all_simple_edge_paths",
"all_simple_paths",
"all_topological_sorts",
"all_triads",
"all_triplets",
"ancestors",
"antichains",
"approximate_current_flow_betweenness_centrality",
"approximation",
"arbitrary_element",
"ArborescenceIterator",
"articulation_points",
"assortativity",
"astar_path",
"astar_path_length",
"asteroidal",
"attracting_components",
"attribute_ac",
"attribute_assortativity_coefficient",
"attribute_mixing_dict",
"attribute_mixing_matrix",
"average_clustering",
"average_degree_connectivity",
"average_neighbor_degree",
"average_node_connectivity",
"average_shortest_path_length",
"barycenter",
"bellman_ford_path",
"bellman_ford_path_length",
"bellman_ford_predecessor_and_distance",
"betweenness_centrality",
"betweenness_centrality_subset",
"bfs_beam_edges",
"bfs_edges",
"bfs_labeled_edges",
"bfs_layers",
"bfs_predecessors",
"bfs_successors",
"bfs_tree",
"biconnected_component_edges",
"biconnected_components",
"bidirectional_dijkstra",
"bidirectional_shortest_path",
"bipartite",
"blake2b",
"boruvka_mst_edges",
"boundary",
"boundary_expansion",
"bridges",
"broadcasting",
"capacity_scaling",
"cartesian_product",
"cd_index",
"center",
"centrality",
"CGInverseLaplacian",
"chain",
"chain_decomposition",
"chaini",
"chains",
"check_planarity",
"check_planarity_recursive",
"chordal",
"chordal_graph_cliques",
"chordal_graph_treewidth",
"chordless_cycles",
"chromatic_polynomial",
"clique",
"closeness_centrality",
"closeness_vitality",
"cluster",
"clustering",
"cn_soundarajan_hopcroft",
"collections",
"colliders",
"coloring",
"combinations",
"combinations_with_replacement",
"combinatorial_embedding_to_pos",
"common_neighbor_centrality",
"communicability",
"communicability_alg",
"communicability_betweenness_centrality",
"communicability_exp",
"community",
"complement",
"complete_bipartite_graph",
"complete_to_chordal_graph",
"components",
"compose",
"compose_all",
"compute_v_structures",
"condensation",
"conductance",
"ConflictPair",
"connected_components",
"connected_double_edge_swap",
"connectivity",
"constraint",
"contracted_edge",
"contracted_nodes",
"copy",
"core",
"core_number",
"corona_product",
"cost_of_flow",
"could_be_isomorphic",
"count",
"Counter",
"covering",
"create_component_structure",
"current_flow_betweenness_centrality",
"current_flow_betweenness_centrality_subset",
"current_flow_closeness_centrality",
"cut_size",
"cuts",
"cycle_basis",
"cycles",
"d_separated",
"d_separation",
"dag",
"dag_longest_path",
"dag_longest_path_length",
"dag_to_branching",
"dataclass",
"debug_print",
"dedensify",
"deepcopy",
"defaultdict",
"degree_assortativity_coefficient",
"degree_centrality",
"degree_mixing_dict",
"degree_mixing_matrix",
"degree_pearson_correlation_coefficient",
"deque",
"descendants",
"descendants_at_distance",
"dfs_edges",
"dfs_labeled_edges",
"dfs_postorder_nodes",
"dfs_predecessors",
"dfs_preorder_nodes",
"dfs_successors",
"dfs_tree",
"diameter",
"dict_to_numpy_array",
"difference",
"dijkstra",
"dijkstra_path",
"dijkstra_path_length",
"dijkstra_predecessor_and_distance",
"directed_edge_swap",
"disjoint_union",
"disjoint_union_all",
"dispersion",
"distance_measures",
"distance_regular",
"dominance",
"dominance_frontiers",
"dominating",
"dominating_set",
"double_edge_swap",
"eccentricity",
"edge_betweenness_centrality",
"edge_betweenness_centrality_subset",
"edge_bfs",
"edge_boundary",
"edge_connectivity",
"edge_current_flow_betweenness_centrality",
"edge_current_flow_betweenness_centrality_subset",
"edge_dfs",
"edge_disjoint_paths",
"edge_expansion",
"edge_load_centrality",
"EdgePartition",
"effective_graph_resistance",
"effective_size",
"efficiency",
"efficiency_measures",
"eigenvector_centrality",
"eigenvector_centrality_numpy",
"Enum",
"enumerate_all_cliques",
"equitable_color",
"equivalence_classes",
"estrada_index",
"euler",
"eulerian_circuit",
"eulerian_path",
"eulerize",
"fast_could_be_isomorphic",
"faster_could_be_isomorphic",
"field",
"find_asteroidal_triple",
"find_cliques",
"find_cliques_recursive",
"find_cycle",
"find_induced_nodes",
"find_minimal_d_separator",
"find_negative_cycle",
"flow",
"flow_hierarchy",
"flow_matrix_row",
"floyd_warshall",
"floyd_warshall_numpy",
"floyd_warshall_predecessor_and_distance",
"FORWARD",
"FORWARD_EDGE",
"from_nested_tuple",
"from_prufer_sequence",
"full_join",
"FullInverseLaplacian",
"functools",
"gcd",
"generalized_degree",
"generate_random_paths",
"generic_bfs_edges",
"get_canonical_ordering",
"get_counterexample",
"get_counterexample_recursive",
"girth",
"global_efficiency",
"global_parameters",
"global_reaching_centrality",
"goldberg_radzik",
"gomory_hu_tree",
"google_matrix",
"graph_edit_distance",
"graph_hashing",
"graphical",
"greedy_color",
"group_betweenness_centrality",
"group_closeness_centrality",
"group_degree_centrality",
"group_in_degree_centrality",
"group_out_degree_centrality",
"groups",
"gutman_index",
"harmonic_centrality",
"harmonic_diameter",
"has_bridges",
"has_cycle",
"has_eulerian_path",
"has_path",
"heappop",
"heappush",
"heapq",
"hierarchy",
"hits",
"hybrid",
"identified_nodes",
"immediate_dominators",
"in_degree_centrality",
"incremental_closeness_centrality",
"inf",
"information_centrality",
"intersection",
"intersection_all",
"intersection_array",
"Interval",
"is_aperiodic",
"is_arborescence",
"is_at_free",
"is_attracting_component",
"is_biconnected",
"is_bipartite",
"is_branching",
"is_chordal",
"is_connected",
"is_d_separator",
"is_digraphical",
"is_directed_acyclic_graph",
"is_distance_regular",
"is_dominating_set",
"is_edge_cover",
"is_eulerian",
"is_forest",
"is_graphical",
"is_isolate",
"is_isomorphic",
"is_k_edge_connected",
"is_k_regular",
"is_kl_connected",
"is_matching",
"is_maximal_matching",
"is_minimal_d_separator",
"is_multigraphical",
"is_perfect_matching",
"is_planar",
"is_pseudographical",
"is_regular",
"is_semiconnected",
"is_semieulerian",
"is_simple_path",
"is_strongly_connected",
"is_strongly_regular",
"is_tournament",
"is_tree",
"is_triad",
"is_valid_degree_sequence_erdos_gallai",
"is_valid_degree_sequence_havel_hakimi",
"is_weakly_connected",
"islice",
"isnan",
"isolate",
"isolates",
"isomorphism",
"it",
"itemgetter",
"itertools",
"jaccard_coefficient",
"johnson",
"join_trees",
"junction_tree",
"k_components",
"k_core",
"k_corona",
"k_crust",
"k_edge_augmentation",
"k_edge_components",
"k_edge_subgraphs",
"k_factor",
"k_shell",
"k_truss",
"katz_centrality",
"katz_centrality_numpy",
"kemeny_constant",
"kl_connected_subgraph",
"kosaraju_strongly_connected_components",
"kruskal_mst_edges",
"laplacian_centrality",
"lattice_reference",
"LEVEL_EDGE",
"lexicographic_product",
"lexicographical_topological_sort",
"link_analysis",
"link_prediction",
"load_centrality",
"local_bridges",
"local_constraint",
"local_efficiency",
"local_reaching_centrality",
"log",
"lowest_common_ancestor",
"lowest_common_ancestors",
"LRPlanarity",
"make_bi_connected",
"make_clique_bipartite",
"make_max_clique_graph",
"Mapping",
"matching",
"matching_dict_to_set",
"math",
"max_flow_min_cost",
"max_weight_clique",
"max_weight_matching",
"maximal_independent_set",
"maximal_matching",
"maximum_branching",
"maximum_flow",
"maximum_flow_value",
"maximum_spanning_arborescence",
"maximum_spanning_edges",
"maximum_spanning_tree",
"MaxWeightClique",
"min_cost_flow",
"min_cost_flow_cost",
"min_edge_cover",
"min_weight_matching",
"minimal_d_separator",
"minimum_branching",
"minimum_cut",
"minimum_cut_value",
"minimum_cycle_basis",
"minimum_edge_cut",
"minimum_node_cut",
"minimum_spanning_arborescence",
"minimum_spanning_edges",
"minimum_spanning_tree",
"minors",
"mis",
"mixing_dict",
"mixing_expansion",
"modular_product",
"moral",
"moral_graph",
"multi_source_dijkstra",
"multi_source_dijkstra_path",
"multi_source_dijkstra_path_length",
"mutual_weight",
"negative_edge_cycle",
"network_simplex",
"NetworkXError",
"NetworkXNoPath",
"NetworkXTreewidthBoundExceeded",
"newman_betweenness_centrality",
"node_attribute_xy",
"node_boundary",
"node_classification",
"node_clique_number",
"node_connected_component",
"node_connectivity",
"node_degree_xy",
"node_disjoint_paths",
"node_expansion",
"non_randomness",
"normalized_cut_size",
"normalized_mutual_weight",
"not_implemented_for",
"NotATree",
"np_random_state",
"number_attracting_components",
"number_connected_components",
"number_of_cliques",
"number_of_isolates",
"number_of_spanning_trees",
"number_of_walks",
"number_strongly_connected_components",
"number_weakly_connected_components",
"numeric_assortativity_coefficient",
"nx",
"omega",
"onion_layers",
"operators",
"optimal_edit_paths",
"optimize_edit_paths",
"optimize_graph_edit_distance",
"out_degree_centrality",
"overall_reciprocity",
"pagerank",
"pairwise",
"panther_similarity",
"partial",
"partition_spanning_tree",
"PathBuffer",
"percolation_centrality",
"periphery",
"permutations",
"planar_drawing",
"PlanarEmbedding",
"planarity",
"polynomials",
"power",
"predecessor",
"preferential_attachment",
"prim_mst_edges",
"PriorityQueue",
"product",
"projected_graph",
"prominent_group",
"py_random_state",
"quotient_graph",
"ra_index_soundarajan_hopcroft",
"radius",
"random_reference",
"random_spanning_tree",
"random_triad",
"reciprocity",
"reconstruct_path",
"recursive_simple_cycles",
"reduce",
"regular",
"repeat",
"resistance_distance",
"resource_allocation_index",
"reverse",
"REVERSE",
"reverse_cuthill_mckee_ordering",
"REVERSE_EDGE",
"rich_club_coefficient",
"richclub",
"root_to_leaf_paths",
"rooted_product",
"s_metric",
"schultz_index",
"second_order_centrality",
"Set",
"set_position",
"shortest_path",
"shortest_path_length",
"shortest_paths",
"shortest_simple_paths",
"sigma",
"similarity",
"simple_cycles",
"simple_paths",
"simrank_similarity",
"single_source_all_shortest_paths",
"single_source_bellman_ford",
"single_source_bellman_ford_path",
"single_source_bellman_ford_path_length",
"single_source_dijkstra",
"single_source_dijkstra_path",
"single_source_dijkstra_path_length",
"single_source_shortest_path",
"single_source_shortest_path_length",
"single_target_shortest_path",
"single_target_shortest_path_length",
"smallworld",
"smetric",
"snap_aggregation",
"spanner",
"SpanningTreeIterator",
"sparsifiers",
"square_clustering",
"starmap",
"stoer_wagner",
"STRATEGIES",
"strategy_connected_sequential",
"strategy_connected_sequential_bfs",
"strategy_connected_sequential_dfs",
"strategy_independent_set",
"strategy_largest_first",
"strategy_random_sequential",
"strategy_saturation_largest_first",
"strategy_smallest_last",
"strong_product",
"strongly_connected_components",
"structuralholes",
"subgraph_centrality",
"subgraph_centrality_exp",
"summarization",
"SuperLUInverseLaplacian",
"swap",
"symmetric_difference",
"sys",
"tensor_product",
"tests",
"threshold",
"time",
"time_dependent",
"to_nested_tuple",
"to_prufer_sequence",
"top_of_stack",
"topological_generations",
"topological_sort",
"tournament",
"transitive_closure",
"transitive_closure_dag",
"transitive_reduction",
"transitivity",
"traversal",
"tree",
"tree_all_pairs_lowest_common_ancestor",
"tree_broadcast_center",
"tree_broadcast_time",
"TREE_EDGE",
"TRIAD_NAMES",
"triad_type",
"triadic_census",
"triads",
"triads_by_type",
"triangles",
"triangulate_embedding",
"triangulate_face",
"TRICODE_TO_NAME",
"TRICODES",
"trophic_differences",
"trophic_incoherence_parameter",
"trophic_levels",
"tutte_polynomial",
"union",
"union_all",
"UnionFind",
"v_structures",
"vf2pp_all_isomorphisms",
"vf2pp_is_isomorphic",
"vf2pp_isomorphism",
"vitality",
"volume",
"voronoi",
"voronoi_cells",
"voterank",
"walks",
"warn",
"warnings",
"weakly_connected_components",
"weisfeiler_lehman_graph_hash",
"weisfeiler_lehman_subgraph_hashes",
"wiener",
"wiener_index",
"within_inter_cluster",
"_accumulate_basic",
"_accumulate_edges",
"_accumulate_edges_subset",
"_accumulate_endpoints",
"_accumulate_percolation",
"_accumulate_subset",
"_add_edge_keys",
"_add_edge_to_spanner",
"_AdjEntry",
"_all_simple_edge_paths",
"_apply_prediction",
"_average_weight",
"_basic_graphical_tests",
"_bellman_ford",
"_biconnected_dfs",
"_bidirectional_dijkstra",
"_bidirectional_pred_succ",
"_bidirectional_shortest_path",
"_bounded_cycle_search",
"_build_paths_from_predecessors",
"_chordless_cycle_search",
"_community",
"_compute_rc",
"_consistent_PT",
"_core_subgraph",
"_cut_PT",
"_dfbnb",
"_dict_product",
"_dijkstra",
"_dijkstra_multisource",
"_directed_cycle_search",
"_directed_edges_cross_edges",
"_directed_triangles_and_degree_iter",
"_directed_weighted_triangles_and_degree_iter",
"_edge_betweenness",
"_edges_cross_nodes",
"_edges_cross_nodes_and_nodes",
"_extrema_bounding",
"_feasibility",
"_find_candidates",
"_find_candidates_Di",
"_find_chordality_breaker",
"_find_missing_edge",
"_find_path_start",
"_G_H",
"_get_broadcast_centers",
"_get_max_broadcast_value",
"_GraphParameters",
"_greedy_coloring_with_interchange",
"_group_preprocessing",
"_hash_label",
"_heuristic",
"_hits_numpy",
"_hits_python",
"_hits_scipy",
"_init_node_labels",
"_init_product_graph",
"_initialize_parameters",
"_inner_bellman_ford",
"_is_complete_graph",
"_johnson_cycle_search",
"_lightest_edge_dicts",
"_matching_order",
"_max_cardinality_node",
"_maximal_independent_set",
"_min_cycle",
"_min_cycle_basis",
"_multigraph_eulerian_circuit",
"_neighborhood_aggregate",
"_NeighborhoodCache",
"_Node",
"_node_betweenness",
"_node_product",
"_nodes_cross_edges",
"_numeric_ac",
"_pagerank_numpy",
"_pagerank_python",
"_pagerank_scipy",
"_plain_bfs",
"_precheck_label_properties",
"_reachable",
"_reciprocity_iter",
"_rescale",
"_rescale_e",
"_restore_Tinout",
"_restore_Tinout_Di",
"_setup_residual_graph",
"_simplegraph_eulerian_circuit",
"_simrank_similarity_numpy",
"_simrank_similarity_python",
"_single_shortest_path",
"_single_shortest_path_length",
"_single_source_dijkstra_path_basic",
"_single_source_shortest_path_basic",
"_snap_build_graph",
"_snap_eligible_group",
"_snap_split",
"_StateParameters",
"_triangles_and_degree_iter",
"_tricode",
"_undirected_cycle_search",
"_undirected_edges_cross_edges",
"_update_Tinout",
"_weight_function",
"_weighted_triangles_and_degree_iter",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__"
] | import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.remove_node(node)
# connect the neighboring nodes
for parent in parents:
for daughter in daughters:
edge = (parent, daughter)
self.edges.add(edge)
def transitive_reduction(self):
g = nx.DiGraph(self.edges)
g = nx.algorithms. | (g)
self.edges = set(g.edges())
# def _get_edges(self, node_id, kind="any"):
# if kind == "any":
# return [e for e in self.edges if node_id in e]
# if kind == "parents":
# return [e for e in self.edges if e[1] == node_id]
# if kind == "daughters":
# return [e for e in self.edges if e[0] == node_id]
# raise ValueError
| vanheeringen-lab__seq2science |
2 | 2-34-13 | inproject | post | [
"add_api_route",
"add_api_websocket_route",
"add_event_handler",
"add_route",
"add_websocket_route",
"api_route",
"app",
"callbacks",
"default",
"default_response_class",
"delete",
"dependencies",
"dependency_overrides_provider",
"deprecated",
"generate_unique_id_function",
"get",
"head",
"host",
"include_in_schema",
"include_router",
"lifespan",
"lifespan_context",
"middleware_stack",
"mount",
"not_found",
"on_event",
"on_shutdown",
"on_startup",
"options",
"patch",
"post",
"prefix",
"put",
"redirect_slashes",
"responses",
"route",
"route_class",
"routes",
"shutdown",
"startup",
"tags",
"trace",
"url_path_for",
"websocket",
"websocket_route",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Routes for the Agent Service.
This module defines the API routes for the Agent service. While there are multiple endpoints provided by the service,
the ones that require special attention due to their complexity are:
1. `execute_agent_task_step`:
This route is significant because this is where the agent actually performs the work. The function handles
executing the next step for a task based on its current state, and it requires careful implementation to ensure
all scenarios (like the presence or absence of steps or a step marked as `last_step`) are handled correctly.
2. `upload_agent_task_artifacts`:
This route allows for the upload of artifacts, supporting various URI types (e.g., s3, gcs, ftp, http).
The support for different URI types makes it a bit more complex, and it's important to ensure that all
supported URI types are correctly managed. NOTE: The Auto-GPT team will eventually handle the most common
uri types for you.
3. `create_agent_task`:
While this is a simpler route, it plays a crucial role in the workflow, as it's responsible for the creation
of a new task.
Developers and contributors should be especially careful when making modifications to these routes to ensure
consistency and correctness in the system's behavior.
"""
from typing import List
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import FileResponse
from autogpt.schema import Artifact, Step, StepRequestBody, Task, TaskRequestBody
base_router = APIRouter()
@base_router. | ("/agent/tasks", tags=["agent"], response_model=Task)
async def create_agent_task(request: Request, task_request: TaskRequestBody) -> Task:
"""
Creates a new task using the provided TaskRequestBody and returns a Task.
Args:
request (Request): FastAPI request object.
task (TaskRequestBody): The task request containing input and additional input data.
Returns:
Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
Example:
Request (TaskRequestBody defined in schema.py):
{
"input": "Write the words you receive to the file 'output.txt'.",
"additional_input": "python/code"
}
Response (Task defined in schema.py):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "python/code",
"artifacts": [],
"steps": []
}
"""
agent = request["agent"]
task_request = await agent.create_task(task_request)
return task_request
@base_router.get("/agent/tasks", tags=["agent"], response_model=List[str])
async def list_agent_tasks_ids(request: Request) -> List[str]:
"""
Gets a list of all task IDs.
Args:
request (Request): FastAPI request object.
Returns:
List[str]: A list of all task IDs.
Example:
Request:
GET /agent/tasks
Response:
[
"50da533e-3904-4401-8a07-c49adf88b5eb",
"b7d3c70a-7266-4b3a-818e-1327679f0117",
...
]
"""
agent = request["agent"]
return await agent.list_tasks()
@base_router.get("/agent/tasks/{task_id}", tags=["agent"], response_model=Task)
async def get_agent_task(request: Request, task_id: str):
"""
Gets the details of a task by ID.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
Task: The task with the given ID.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb
Response (Task defined in schema.py):
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"steps": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "6bb1801a-fd80-45e8-899a-4dd723cc602e",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "challenge:write_to_file",
"name": "Write to file",
"status": "completed",
"output": "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
"additional_output": "Do you want me to continue?",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"is_last": true
}
]
}
"""
agent = request["agent"]
return await agent.get_task(task_id)
@base_router.get(
"/agent/tasks/{task_id}/steps", tags=["agent"], response_model=List[str]
)
async def list_agent_task_steps(request: Request, task_id: str) -> List[str]:
"""
Retrieves a list of step IDs associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[str]: A list of step IDs.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
Response:
["step1_id", "step2_id", ...]
"""
agent = request["agent"]
return await agent.list_steps(task_id)
@base_router.post("/agent/tasks/{task_id}/steps", tags=["agent"], response_model=Step)
async def execute_agent_task_step(
request: Request, task_id: str, step: StepRequestBody
) -> Step:
"""
Executes the next step for a specified task based on the current task status and returns the
executed step with additional feedback fields.
Depending on the current state of the task, the following scenarios are supported:
1. No steps exist for the task.
2. There is at least one step already for the task, and the task does not have a completed step marked as `last_step`.
3. There is a completed step marked as `last_step` already on the task.
In each of these scenarios, a step object will be returned with two additional fields: `output` and `additional_output`.
- `output`: Provides the primary response or feedback to the user.
- `additional_output`: Supplementary information or data. Its specific content is not strictly defined and can vary based on the step or agent's implementation.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step (StepRequestBody): The details for executing the step.
Returns:
Step: Details of the executed step with additional feedback.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
{
"input": "Step input details...",
...
}
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
"output": "Primary feedback...",
"additional_output": "Supplementary details...",
...
}
"""
agent = request["agent"]
return await agent.create_and_execute_step(task_id, step)
@base_router.get(
"/agent/tasks/{task_id}/steps/{step_id}", tags=["agent"], response_model=Step
)
async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> Step:
"""
Retrieves the details of a specific step for a given task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step_id (str): The ID of the step.
Returns:
Step: Details of the specific step.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps/step1_id
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
...
}
"""
agent = request["agent"]
return await agent.get_step(task_id, step_id)
@base_router.get(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=List[Artifact]
)
async def list_agent_task_artifacts(request: Request, task_id: str) -> List[Artifact]:
"""
Retrieves a list of artifacts associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[Artifact]: A list of artifacts.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
Response:
[
{"artifact_id": "artifact1_id", ...},
{"artifact_id": "artifact2_id", ...},
...
]
"""
agent = request["agent"]
return await agent.list_artifacts(task_id)
@base_router.post(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=Artifact
)
async def upload_agent_task_artifacts(
request: Request,
task_id: str,
file: UploadFile | None = None,
uri: str | None = None,
) -> Artifact:
"""
Uploads an artifact for a specific task using either a provided file or a URI.
At least one of the parameters, `file` or `uri`, must be specified. The `uri` can point to
cloud storage resources such as S3, GCS, etc., or to other resources like FTP or HTTP.
To check the supported URI types for the agent, use the `/agent/artifacts/uris` endpoint.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
file (UploadFile, optional): The uploaded file. Defaults to None.
uri (str, optional): The URI pointing to the resource. Defaults to None.
Returns:
Artifact: Details of the uploaded artifact.
Note:
Either `file` or `uri` must be provided. If both are provided, the behavior depends on
the agent's implementation. If neither is provided, the function will return an error.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
File: <uploaded_file>
OR
URI: "s3://path/to/artifact"
Response:
{
"artifact_id": "artifact1_id",
...
}
"""
agent = request["agent"]
return await agent.create_artifact(task_id, file, uri)
@base_router.get(
"/agent/tasks/{task_id}/artifacts/{artifact_id}", tags=["agent"], response_model=str
)
async def download_agent_task_artifact(
request: Request, task_id: str, artifact_id: str
) -> FileResponse:
"""
Downloads an artifact associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
artifact_id (str): The ID of the artifact.
Returns:
FileResponse: The downloaded artifact file.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts/artifact1_id
Response:
<file_content_of_artifact>
"""
agent = request["agent"]
print(f"task_id: {task_id}, artifact_id: {artifact_id}")
return await agent.get_artifact(task_id, artifact_id)
| Significant-Gravitas__Auto-GPT |
2 | 2-67-13 | common | get | [
"add_api_route",
"add_api_websocket_route",
"add_event_handler",
"add_route",
"add_websocket_route",
"api_route",
"app",
"callbacks",
"default",
"default_response_class",
"delete",
"dependencies",
"dependency_overrides_provider",
"deprecated",
"generate_unique_id_function",
"get",
"head",
"host",
"include_in_schema",
"include_router",
"lifespan",
"lifespan_context",
"middleware_stack",
"mount",
"not_found",
"on_event",
"on_shutdown",
"on_startup",
"options",
"patch",
"post",
"prefix",
"put",
"redirect_slashes",
"responses",
"route",
"route_class",
"routes",
"shutdown",
"startup",
"tags",
"trace",
"url_path_for",
"websocket",
"websocket_route",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Routes for the Agent Service.
This module defines the API routes for the Agent service. While there are multiple endpoints provided by the service,
the ones that require special attention due to their complexity are:
1. `execute_agent_task_step`:
This route is significant because this is where the agent actually performs the work. The function handles
executing the next step for a task based on its current state, and it requires careful implementation to ensure
all scenarios (like the presence or absence of steps or a step marked as `last_step`) are handled correctly.
2. `upload_agent_task_artifacts`:
This route allows for the upload of artifacts, supporting various URI types (e.g., s3, gcs, ftp, http).
The support for different URI types makes it a bit more complex, and it's important to ensure that all
supported URI types are correctly managed. NOTE: The Auto-GPT team will eventually handle the most common
uri types for you.
3. `create_agent_task`:
While this is a simpler route, it plays a crucial role in the workflow, as it's responsible for the creation
of a new task.
Developers and contributors should be especially careful when making modifications to these routes to ensure
consistency and correctness in the system's behavior.
"""
from typing import List
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import FileResponse
from autogpt.schema import Artifact, Step, StepRequestBody, Task, TaskRequestBody
base_router = APIRouter()
@base_router.post("/agent/tasks", tags=["agent"], response_model=Task)
async def create_agent_task(request: Request, task_request: TaskRequestBody) -> Task:
"""
Creates a new task using the provided TaskRequestBody and returns a Task.
Args:
request (Request): FastAPI request object.
task (TaskRequestBody): The task request containing input and additional input data.
Returns:
Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
Example:
Request (TaskRequestBody defined in schema.py):
{
"input": "Write the words you receive to the file 'output.txt'.",
"additional_input": "python/code"
}
Response (Task defined in schema.py):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "python/code",
"artifacts": [],
"steps": []
}
"""
agent = request["agent"]
task_request = await agent.create_task(task_request)
return task_request
@base_router. | ("/agent/tasks", tags=["agent"], response_model=List[str])
async def list_agent_tasks_ids(request: Request) -> List[str]:
"""
Gets a list of all task IDs.
Args:
request (Request): FastAPI request object.
Returns:
List[str]: A list of all task IDs.
Example:
Request:
GET /agent/tasks
Response:
[
"50da533e-3904-4401-8a07-c49adf88b5eb",
"b7d3c70a-7266-4b3a-818e-1327679f0117",
...
]
"""
agent = request["agent"]
return await agent.list_tasks()
@base_router.get("/agent/tasks/{task_id}", tags=["agent"], response_model=Task)
async def get_agent_task(request: Request, task_id: str):
"""
Gets the details of a task by ID.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
Task: The task with the given ID.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb
Response (Task defined in schema.py):
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"steps": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "6bb1801a-fd80-45e8-899a-4dd723cc602e",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "challenge:write_to_file",
"name": "Write to file",
"status": "completed",
"output": "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
"additional_output": "Do you want me to continue?",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"is_last": true
}
]
}
"""
agent = request["agent"]
return await agent.get_task(task_id)
@base_router.get(
"/agent/tasks/{task_id}/steps", tags=["agent"], response_model=List[str]
)
async def list_agent_task_steps(request: Request, task_id: str) -> List[str]:
"""
Retrieves a list of step IDs associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[str]: A list of step IDs.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
Response:
["step1_id", "step2_id", ...]
"""
agent = request["agent"]
return await agent.list_steps(task_id)
@base_router.post("/agent/tasks/{task_id}/steps", tags=["agent"], response_model=Step)
async def execute_agent_task_step(
request: Request, task_id: str, step: StepRequestBody
) -> Step:
"""
Executes the next step for a specified task based on the current task status and returns the
executed step with additional feedback fields.
Depending on the current state of the task, the following scenarios are supported:
1. No steps exist for the task.
2. There is at least one step already for the task, and the task does not have a completed step marked as `last_step`.
3. There is a completed step marked as `last_step` already on the task.
In each of these scenarios, a step object will be returned with two additional fields: `output` and `additional_output`.
- `output`: Provides the primary response or feedback to the user.
- `additional_output`: Supplementary information or data. Its specific content is not strictly defined and can vary based on the step or agent's implementation.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step (StepRequestBody): The details for executing the step.
Returns:
Step: Details of the executed step with additional feedback.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
{
"input": "Step input details...",
...
}
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
"output": "Primary feedback...",
"additional_output": "Supplementary details...",
...
}
"""
agent = request["agent"]
return await agent.create_and_execute_step(task_id, step)
@base_router.get(
"/agent/tasks/{task_id}/steps/{step_id}", tags=["agent"], response_model=Step
)
async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> Step:
"""
Retrieves the details of a specific step for a given task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step_id (str): The ID of the step.
Returns:
Step: Details of the specific step.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps/step1_id
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
...
}
"""
agent = request["agent"]
return await agent.get_step(task_id, step_id)
@base_router.get(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=List[Artifact]
)
async def list_agent_task_artifacts(request: Request, task_id: str) -> List[Artifact]:
"""
Retrieves a list of artifacts associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[Artifact]: A list of artifacts.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
Response:
[
{"artifact_id": "artifact1_id", ...},
{"artifact_id": "artifact2_id", ...},
...
]
"""
agent = request["agent"]
return await agent.list_artifacts(task_id)
@base_router.post(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=Artifact
)
async def upload_agent_task_artifacts(
request: Request,
task_id: str,
file: UploadFile | None = None,
uri: str | None = None,
) -> Artifact:
"""
Uploads an artifact for a specific task using either a provided file or a URI.
At least one of the parameters, `file` or `uri`, must be specified. The `uri` can point to
cloud storage resources such as S3, GCS, etc., or to other resources like FTP or HTTP.
To check the supported URI types for the agent, use the `/agent/artifacts/uris` endpoint.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
file (UploadFile, optional): The uploaded file. Defaults to None.
uri (str, optional): The URI pointing to the resource. Defaults to None.
Returns:
Artifact: Details of the uploaded artifact.
Note:
Either `file` or `uri` must be provided. If both are provided, the behavior depends on
the agent's implementation. If neither is provided, the function will return an error.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
File: <uploaded_file>
OR
URI: "s3://path/to/artifact"
Response:
{
"artifact_id": "artifact1_id",
...
}
"""
agent = request["agent"]
return await agent.create_artifact(task_id, file, uri)
@base_router.get(
"/agent/tasks/{task_id}/artifacts/{artifact_id}", tags=["agent"], response_model=str
)
async def download_agent_task_artifact(
request: Request, task_id: str, artifact_id: str
) -> FileResponse:
"""
Downloads an artifact associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
artifact_id (str): The ID of the artifact.
Returns:
FileResponse: The downloaded artifact file.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts/artifact1_id
Response:
<file_content_of_artifact>
"""
agent = request["agent"]
print(f"task_id: {task_id}, artifact_id: {artifact_id}")
return await agent.get_artifact(task_id, artifact_id)
| Significant-Gravitas__Auto-GPT |
2 | 2-93-13 | inproject | get | [
"add_api_route",
"add_api_websocket_route",
"add_event_handler",
"add_route",
"add_websocket_route",
"api_route",
"app",
"callbacks",
"default",
"default_response_class",
"delete",
"dependencies",
"dependency_overrides_provider",
"deprecated",
"generate_unique_id_function",
"get",
"head",
"host",
"include_in_schema",
"include_router",
"lifespan",
"lifespan_context",
"middleware_stack",
"mount",
"not_found",
"on_event",
"on_shutdown",
"on_startup",
"options",
"patch",
"post",
"prefix",
"put",
"redirect_slashes",
"responses",
"route",
"route_class",
"routes",
"shutdown",
"startup",
"tags",
"trace",
"url_path_for",
"websocket",
"websocket_route",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Routes for the Agent Service.
This module defines the API routes for the Agent service. While there are multiple endpoints provided by the service,
the ones that require special attention due to their complexity are:
1. `execute_agent_task_step`:
This route is significant because this is where the agent actually performs the work. The function handles
executing the next step for a task based on its current state, and it requires careful implementation to ensure
all scenarios (like the presence or absence of steps or a step marked as `last_step`) are handled correctly.
2. `upload_agent_task_artifacts`:
This route allows for the upload of artifacts, supporting various URI types (e.g., s3, gcs, ftp, http).
The support for different URI types makes it a bit more complex, and it's important to ensure that all
supported URI types are correctly managed. NOTE: The Auto-GPT team will eventually handle the most common
uri types for you.
3. `create_agent_task`:
While this is a simpler route, it plays a crucial role in the workflow, as it's responsible for the creation
of a new task.
Developers and contributors should be especially careful when making modifications to these routes to ensure
consistency and correctness in the system's behavior.
"""
from typing import List
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import FileResponse
from autogpt.schema import Artifact, Step, StepRequestBody, Task, TaskRequestBody
base_router = APIRouter()
@base_router.post("/agent/tasks", tags=["agent"], response_model=Task)
async def create_agent_task(request: Request, task_request: TaskRequestBody) -> Task:
"""
Creates a new task using the provided TaskRequestBody and returns a Task.
Args:
request (Request): FastAPI request object.
task (TaskRequestBody): The task request containing input and additional input data.
Returns:
Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
Example:
Request (TaskRequestBody defined in schema.py):
{
"input": "Write the words you receive to the file 'output.txt'.",
"additional_input": "python/code"
}
Response (Task defined in schema.py):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "python/code",
"artifacts": [],
"steps": []
}
"""
agent = request["agent"]
task_request = await agent.create_task(task_request)
return task_request
@base_router.get("/agent/tasks", tags=["agent"], response_model=List[str])
async def list_agent_tasks_ids(request: Request) -> List[str]:
"""
Gets a list of all task IDs.
Args:
request (Request): FastAPI request object.
Returns:
List[str]: A list of all task IDs.
Example:
Request:
GET /agent/tasks
Response:
[
"50da533e-3904-4401-8a07-c49adf88b5eb",
"b7d3c70a-7266-4b3a-818e-1327679f0117",
...
]
"""
agent = request["agent"]
return await agent.list_tasks()
@base_router. | ("/agent/tasks/{task_id}", tags=["agent"], response_model=Task)
async def get_agent_task(request: Request, task_id: str):
"""
Gets the details of a task by ID.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
Task: The task with the given ID.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb
Response (Task defined in schema.py):
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"steps": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "6bb1801a-fd80-45e8-899a-4dd723cc602e",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "challenge:write_to_file",
"name": "Write to file",
"status": "completed",
"output": "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
"additional_output": "Do you want me to continue?",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"is_last": true
}
]
}
"""
agent = request["agent"]
return await agent.get_task(task_id)
@base_router.get(
"/agent/tasks/{task_id}/steps", tags=["agent"], response_model=List[str]
)
async def list_agent_task_steps(request: Request, task_id: str) -> List[str]:
"""
Retrieves a list of step IDs associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[str]: A list of step IDs.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
Response:
["step1_id", "step2_id", ...]
"""
agent = request["agent"]
return await agent.list_steps(task_id)
@base_router.post("/agent/tasks/{task_id}/steps", tags=["agent"], response_model=Step)
async def execute_agent_task_step(
request: Request, task_id: str, step: StepRequestBody
) -> Step:
"""
Executes the next step for a specified task based on the current task status and returns the
executed step with additional feedback fields.
Depending on the current state of the task, the following scenarios are supported:
1. No steps exist for the task.
2. There is at least one step already for the task, and the task does not have a completed step marked as `last_step`.
3. There is a completed step marked as `last_step` already on the task.
In each of these scenarios, a step object will be returned with two additional fields: `output` and `additional_output`.
- `output`: Provides the primary response or feedback to the user.
- `additional_output`: Supplementary information or data. Its specific content is not strictly defined and can vary based on the step or agent's implementation.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step (StepRequestBody): The details for executing the step.
Returns:
Step: Details of the executed step with additional feedback.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
{
"input": "Step input details...",
...
}
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
"output": "Primary feedback...",
"additional_output": "Supplementary details...",
...
}
"""
agent = request["agent"]
return await agent.create_and_execute_step(task_id, step)
@base_router.get(
"/agent/tasks/{task_id}/steps/{step_id}", tags=["agent"], response_model=Step
)
async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> Step:
"""
Retrieves the details of a specific step for a given task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step_id (str): The ID of the step.
Returns:
Step: Details of the specific step.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps/step1_id
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
...
}
"""
agent = request["agent"]
return await agent.get_step(task_id, step_id)
@base_router.get(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=List[Artifact]
)
async def list_agent_task_artifacts(request: Request, task_id: str) -> List[Artifact]:
"""
Retrieves a list of artifacts associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[Artifact]: A list of artifacts.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
Response:
[
{"artifact_id": "artifact1_id", ...},
{"artifact_id": "artifact2_id", ...},
...
]
"""
agent = request["agent"]
return await agent.list_artifacts(task_id)
@base_router.post(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=Artifact
)
async def upload_agent_task_artifacts(
request: Request,
task_id: str,
file: UploadFile | None = None,
uri: str | None = None,
) -> Artifact:
"""
Uploads an artifact for a specific task using either a provided file or a URI.
At least one of the parameters, `file` or `uri`, must be specified. The `uri` can point to
cloud storage resources such as S3, GCS, etc., or to other resources like FTP or HTTP.
To check the supported URI types for the agent, use the `/agent/artifacts/uris` endpoint.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
file (UploadFile, optional): The uploaded file. Defaults to None.
uri (str, optional): The URI pointing to the resource. Defaults to None.
Returns:
Artifact: Details of the uploaded artifact.
Note:
Either `file` or `uri` must be provided. If both are provided, the behavior depends on
the agent's implementation. If neither is provided, the function will return an error.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
File: <uploaded_file>
OR
URI: "s3://path/to/artifact"
Response:
{
"artifact_id": "artifact1_id",
...
}
"""
agent = request["agent"]
return await agent.create_artifact(task_id, file, uri)
@base_router.get(
"/agent/tasks/{task_id}/artifacts/{artifact_id}", tags=["agent"], response_model=str
)
async def download_agent_task_artifact(
request: Request, task_id: str, artifact_id: str
) -> FileResponse:
"""
Downloads an artifact associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
artifact_id (str): The ID of the artifact.
Returns:
FileResponse: The downloaded artifact file.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts/artifact1_id
Response:
<file_content_of_artifact>
"""
agent = request["agent"]
print(f"task_id: {task_id}, artifact_id: {artifact_id}")
return await agent.get_artifact(task_id, artifact_id)
| Significant-Gravitas__Auto-GPT |
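The upload route quoted in the record above accepts either a multipart file or a URI. The following is a minimal client-side sketch, not part of the dataset, of how that route could be exercised with httpx; the base URL, task id, file contents, and the choice of sending `uri` as a query parameter are all assumptions.
import httpx

BASE_URL = "http://localhost:8000"  # assumed address of a locally running agent service
TASK_ID = "50da533e-3904-4401-8a07-c49adf88b5eb"  # example id reused from the docstrings

with httpx.Client(base_url=BASE_URL) as client:
    # Option 1: upload the artifact as a multipart file.
    created = client.post(
        f"/agent/tasks/{TASK_ID}/artifacts",
        files={"file": ("output.txt", b"Washington", "text/plain")},
    )
    print(created.json())

    # Option 2: point the agent at an external resource instead.
    # Sending `uri` as a query parameter is a guess about the server's parameter handling.
    referenced = client.post(
        f"/agent/tasks/{TASK_ID}/artifacts",
        params={"uri": "s3://path/to/artifact"},
    )
    print(referenced.json())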
2 | 2-174-13 | inproject | post | [
"add_api_route",
"add_api_websocket_route",
"add_event_handler",
"add_route",
"add_websocket_route",
"api_route",
"app",
"callbacks",
"default",
"default_response_class",
"delete",
"dependencies",
"dependency_overrides_provider",
"deprecated",
"generate_unique_id_function",
"get",
"head",
"host",
"include_in_schema",
"include_router",
"lifespan",
"lifespan_context",
"middleware_stack",
"mount",
"not_found",
"on_event",
"on_shutdown",
"on_startup",
"options",
"patch",
"post",
"prefix",
"put",
"redirect_slashes",
"responses",
"route",
"route_class",
"routes",
"shutdown",
"startup",
"tags",
"trace",
"url_path_for",
"websocket",
"websocket_route",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Routes for the Agent Service.
This module defines the API routes for the Agent service. While there are multiple endpoints provided by the service,
the ones that require special attention due to their complexity are:
1. `execute_agent_task_step`:
This route is significant because this is where the agent actually performs the work. The function handles
executing the next step for a task based on its current state, and it requires careful implementation to ensure
all scenarios (like the presence or absence of steps or a step marked as `last_step`) are handled correctly.
2. `upload_agent_task_artifacts`:
This route allows for the upload of artifacts, supporting various URI types (e.g., s3, gcs, ftp, http).
The support for different URI types makes it a bit more complex, and it's important to ensure that all
supported URI types are correctly managed. NOTE: The Auto-GPT team will eventually handle the most common
    URI types for you.
3. `create_agent_task`:
While this is a simpler route, it plays a crucial role in the workflow, as it's responsible for the creation
of a new task.
Developers and contributors should be especially careful when making modifications to these routes to ensure
consistency and correctness in the system's behavior.
"""
from typing import List
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import FileResponse
from autogpt.schema import Artifact, Step, StepRequestBody, Task, TaskRequestBody
base_router = APIRouter()
@base_router.post("/agent/tasks", tags=["agent"], response_model=Task)
async def create_agent_task(request: Request, task_request: TaskRequestBody) -> Task:
"""
Creates a new task using the provided TaskRequestBody and returns a Task.
Args:
request (Request): FastAPI request object.
        task_request (TaskRequestBody): The task request containing input and additional input data.
Returns:
Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
Example:
Request (TaskRequestBody defined in schema.py):
{
"input": "Write the words you receive to the file 'output.txt'.",
"additional_input": "python/code"
}
Response (Task defined in schema.py):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "python/code",
"artifacts": [],
"steps": []
}
"""
agent = request["agent"]
task_request = await agent.create_task(task_request)
return task_request
@base_router.get("/agent/tasks", tags=["agent"], response_model=List[str])
async def list_agent_tasks_ids(request: Request) -> List[str]:
"""
Gets a list of all task IDs.
Args:
request (Request): FastAPI request object.
Returns:
List[str]: A list of all task IDs.
Example:
Request:
GET /agent/tasks
Response:
[
"50da533e-3904-4401-8a07-c49adf88b5eb",
"b7d3c70a-7266-4b3a-818e-1327679f0117",
...
]
"""
agent = request["agent"]
return await agent.list_tasks()
@base_router.get("/agent/tasks/{task_id}", tags=["agent"], response_model=Task)
async def get_agent_task(request: Request, task_id: str):
"""
Gets the details of a task by ID.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
Task: The task with the given ID.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb
Response (Task defined in schema.py):
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"steps": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "6bb1801a-fd80-45e8-899a-4dd723cc602e",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "challenge:write_to_file",
"name": "Write to file",
"status": "completed",
"output": "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
"additional_output": "Do you want me to continue?",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"is_last": true
}
]
}
"""
agent = request["agent"]
return await agent.get_task(task_id)
@base_router.get(
"/agent/tasks/{task_id}/steps", tags=["agent"], response_model=List[str]
)
async def list_agent_task_steps(request: Request, task_id: str) -> List[str]:
"""
Retrieves a list of step IDs associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[str]: A list of step IDs.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
Response:
["step1_id", "step2_id", ...]
"""
agent = request["agent"]
return await agent.list_steps(task_id)
@base_router. | ("/agent/tasks/{task_id}/steps", tags=["agent"], response_model=Step)
async def execute_agent_task_step(
request: Request, task_id: str, step: StepRequestBody
) -> Step:
"""
Executes the next step for a specified task based on the current task status and returns the
executed step with additional feedback fields.
Depending on the current state of the task, the following scenarios are supported:
1. No steps exist for the task.
2. There is at least one step already for the task, and the task does not have a completed step marked as `last_step`.
3. There is a completed step marked as `last_step` already on the task.
In each of these scenarios, a step object will be returned with two additional fields: `output` and `additional_output`.
- `output`: Provides the primary response or feedback to the user.
- `additional_output`: Supplementary information or data. Its specific content is not strictly defined and can vary based on the step or agent's implementation.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step (StepRequestBody): The details for executing the step.
Returns:
Step: Details of the executed step with additional feedback.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
{
"input": "Step input details...",
...
}
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
"output": "Primary feedback...",
"additional_output": "Supplementary details...",
...
}
"""
agent = request["agent"]
return await agent.create_and_execute_step(task_id, step)
@base_router.get(
"/agent/tasks/{task_id}/steps/{step_id}", tags=["agent"], response_model=Step
)
async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> Step:
"""
Retrieves the details of a specific step for a given task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step_id (str): The ID of the step.
Returns:
Step: Details of the specific step.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps/step1_id
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
...
}
"""
agent = request["agent"]
return await agent.get_step(task_id, step_id)
@base_router.get(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=List[Artifact]
)
async def list_agent_task_artifacts(request: Request, task_id: str) -> List[Artifact]:
"""
Retrieves a list of artifacts associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[Artifact]: A list of artifacts.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
Response:
[
{"artifact_id": "artifact1_id", ...},
{"artifact_id": "artifact2_id", ...},
...
]
"""
agent = request["agent"]
return await agent.list_artifacts(task_id)
@base_router.post(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=Artifact
)
async def upload_agent_task_artifacts(
request: Request,
task_id: str,
file: UploadFile | None = None,
uri: str | None = None,
) -> Artifact:
"""
Uploads an artifact for a specific task using either a provided file or a URI.
At least one of the parameters, `file` or `uri`, must be specified. The `uri` can point to
cloud storage resources such as S3, GCS, etc., or to other resources like FTP or HTTP.
To check the supported URI types for the agent, use the `/agent/artifacts/uris` endpoint.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
file (UploadFile, optional): The uploaded file. Defaults to None.
uri (str, optional): The URI pointing to the resource. Defaults to None.
Returns:
Artifact: Details of the uploaded artifact.
Note:
Either `file` or `uri` must be provided. If both are provided, the behavior depends on
the agent's implementation. If neither is provided, the function will return an error.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
File: <uploaded_file>
OR
URI: "s3://path/to/artifact"
Response:
{
"artifact_id": "artifact1_id",
...
}
"""
agent = request["agent"]
return await agent.create_artifact(task_id, file, uri)
@base_router.get(
"/agent/tasks/{task_id}/artifacts/{artifact_id}", tags=["agent"], response_model=str
)
async def download_agent_task_artifact(
request: Request, task_id: str, artifact_id: str
) -> FileResponse:
"""
Downloads an artifact associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
artifact_id (str): The ID of the artifact.
Returns:
FileResponse: The downloaded artifact file.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts/artifact1_id
Response:
<file_content_of_artifact>
"""
agent = request["agent"]
print(f"task_id: {task_id}, artifact_id: {artifact_id}")
return await agent.get_artifact(task_id, artifact_id)
| Significant-Gravitas__Auto-GPT |
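The record above centres on the task-creation and step-execution routes. As a rough illustration (not taken from the dataset), a client could drive them as below; the base URL is assumed, the step input is arbitrary, and the `is_last` termination field is borrowed from the get_agent_task docstring example.
import httpx

BASE_URL = "http://localhost:8000"  # assumed local agent service

with httpx.Client(base_url=BASE_URL) as client:
    # Create a task (payload mirrors the TaskRequestBody example in the docstring).
    task = client.post(
        "/agent/tasks",
        json={"input": "Write the word 'Washington' to a .txt file"},
    ).json()

    # Execute steps until the agent reports the last one.
    while True:
        step = client.post(
            f"/agent/tasks/{task['task_id']}/steps",
            json={"input": "y"},  # illustrative step input
        ).json()
        print(step["output"])
        if step.get("is_last"):
            break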
2 | 2-280-13 | common | post | [
"add_api_route",
"add_api_websocket_route",
"add_event_handler",
"add_route",
"add_websocket_route",
"api_route",
"app",
"callbacks",
"default",
"default_response_class",
"delete",
"dependencies",
"dependency_overrides_provider",
"deprecated",
"generate_unique_id_function",
"get",
"head",
"host",
"include_in_schema",
"include_router",
"lifespan",
"lifespan_context",
"middleware_stack",
"mount",
"not_found",
"on_event",
"on_shutdown",
"on_startup",
"options",
"patch",
"post",
"prefix",
"put",
"redirect_slashes",
"responses",
"route",
"route_class",
"routes",
"shutdown",
"startup",
"tags",
"trace",
"url_path_for",
"websocket",
"websocket_route",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Routes for the Agent Service.
This module defines the API routes for the Agent service. While there are multiple endpoints provided by the service,
the ones that require special attention due to their complexity are:
1. `execute_agent_task_step`:
This route is significant because this is where the agent actually performs the work. The function handles
executing the next step for a task based on its current state, and it requires careful implementation to ensure
all scenarios (like the presence or absence of steps or a step marked as `last_step`) are handled correctly.
2. `upload_agent_task_artifacts`:
This route allows for the upload of artifacts, supporting various URI types (e.g., s3, gcs, ftp, http).
The support for different URI types makes it a bit more complex, and it's important to ensure that all
supported URI types are correctly managed. NOTE: The Auto-GPT team will eventually handle the most common
    URI types for you.
3. `create_agent_task`:
While this is a simpler route, it plays a crucial role in the workflow, as it's responsible for the creation
of a new task.
Developers and contributors should be especially careful when making modifications to these routes to ensure
consistency and correctness in the system's behavior.
"""
from typing import List
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import FileResponse
from autogpt.schema import Artifact, Step, StepRequestBody, Task, TaskRequestBody
base_router = APIRouter()
@base_router.post("/agent/tasks", tags=["agent"], response_model=Task)
async def create_agent_task(request: Request, task_request: TaskRequestBody) -> Task:
"""
Creates a new task using the provided TaskRequestBody and returns a Task.
Args:
request (Request): FastAPI request object.
        task_request (TaskRequestBody): The task request containing input and additional input data.
Returns:
Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
Example:
Request (TaskRequestBody defined in schema.py):
{
"input": "Write the words you receive to the file 'output.txt'.",
"additional_input": "python/code"
}
Response (Task defined in schema.py):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "python/code",
"artifacts": [],
"steps": []
}
"""
agent = request["agent"]
task_request = await agent.create_task(task_request)
return task_request
@base_router.get("/agent/tasks", tags=["agent"], response_model=List[str])
async def list_agent_tasks_ids(request: Request) -> List[str]:
"""
Gets a list of all task IDs.
Args:
request (Request): FastAPI request object.
Returns:
List[str]: A list of all task IDs.
Example:
Request:
GET /agent/tasks
Response:
[
"50da533e-3904-4401-8a07-c49adf88b5eb",
"b7d3c70a-7266-4b3a-818e-1327679f0117",
...
]
"""
agent = request["agent"]
return await agent.list_tasks()
@base_router.get("/agent/tasks/{task_id}", tags=["agent"], response_model=Task)
async def get_agent_task(request: Request, task_id: str):
"""
Gets the details of a task by ID.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
Task: The task with the given ID.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb
Response (Task defined in schema.py):
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"steps": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "6bb1801a-fd80-45e8-899a-4dd723cc602e",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "challenge:write_to_file",
"name": "Write to file",
"status": "completed",
"output": "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
"additional_output": "Do you want me to continue?",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"is_last": true
}
]
}
"""
agent = request["agent"]
return await agent.get_task(task_id)
@base_router.get(
"/agent/tasks/{task_id}/steps", tags=["agent"], response_model=List[str]
)
async def list_agent_task_steps(request: Request, task_id: str) -> List[str]:
"""
Retrieves a list of step IDs associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[str]: A list of step IDs.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
Response:
["step1_id", "step2_id", ...]
"""
agent = request["agent"]
return await agent.list_steps(task_id)
@base_router.post("/agent/tasks/{task_id}/steps", tags=["agent"], response_model=Step)
async def execute_agent_task_step(
request: Request, task_id: str, step: StepRequestBody
) -> Step:
"""
Executes the next step for a specified task based on the current task status and returns the
executed step with additional feedback fields.
Depending on the current state of the task, the following scenarios are supported:
1. No steps exist for the task.
2. There is at least one step already for the task, and the task does not have a completed step marked as `last_step`.
3. There is a completed step marked as `last_step` already on the task.
In each of these scenarios, a step object will be returned with two additional fields: `output` and `additional_output`.
- `output`: Provides the primary response or feedback to the user.
- `additional_output`: Supplementary information or data. Its specific content is not strictly defined and can vary based on the step or agent's implementation.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step (StepRequestBody): The details for executing the step.
Returns:
Step: Details of the executed step with additional feedback.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
{
"input": "Step input details...",
...
}
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
"output": "Primary feedback...",
"additional_output": "Supplementary details...",
...
}
"""
agent = request["agent"]
return await agent.create_and_execute_step(task_id, step)
@base_router.get(
"/agent/tasks/{task_id}/steps/{step_id}", tags=["agent"], response_model=Step
)
async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> Step:
"""
Retrieves the details of a specific step for a given task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step_id (str): The ID of the step.
Returns:
Step: Details of the specific step.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps/step1_id
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
...
}
"""
agent = request["agent"]
return await agent.get_step(task_id, step_id)
@base_router.get(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=List[Artifact]
)
async def list_agent_task_artifacts(request: Request, task_id: str) -> List[Artifact]:
"""
Retrieves a list of artifacts associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[Artifact]: A list of artifacts.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
Response:
[
{"artifact_id": "artifact1_id", ...},
{"artifact_id": "artifact2_id", ...},
...
]
"""
agent = request["agent"]
return await agent.list_artifacts(task_id)
@base_router. | (
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=Artifact
)
async def upload_agent_task_artifacts(
request: Request,
task_id: str,
file: UploadFile | None = None,
uri: str | None = None,
) -> Artifact:
"""
Uploads an artifact for a specific task using either a provided file or a URI.
At least one of the parameters, `file` or `uri`, must be specified. The `uri` can point to
cloud storage resources such as S3, GCS, etc., or to other resources like FTP or HTTP.
To check the supported URI types for the agent, use the `/agent/artifacts/uris` endpoint.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
file (UploadFile, optional): The uploaded file. Defaults to None.
uri (str, optional): The URI pointing to the resource. Defaults to None.
Returns:
Artifact: Details of the uploaded artifact.
Note:
Either `file` or `uri` must be provided. If both are provided, the behavior depends on
the agent's implementation. If neither is provided, the function will return an error.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
File: <uploaded_file>
OR
URI: "s3://path/to/artifact"
Response:
{
"artifact_id": "artifact1_id",
...
}
"""
agent = request["agent"]
return await agent.create_artifact(task_id, file, uri)
@base_router.get(
"/agent/tasks/{task_id}/artifacts/{artifact_id}", tags=["agent"], response_model=str
)
async def download_agent_task_artifact(
request: Request, task_id: str, artifact_id: str
) -> FileResponse:
"""
Downloads an artifact associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
artifact_id (str): The ID of the artifact.
Returns:
FileResponse: The downloaded artifact file.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts/artifact1_id
Response:
<file_content_of_artifact>
"""
agent = request["agent"]
print(f"task_id: {task_id}, artifact_id: {artifact_id}")
return await agent.get_artifact(task_id, artifact_id)
| Significant-Gravitas__Auto-GPT |
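Every handler in these routes delegates to request["agent"]. The sketch below is derived purely from those calls and shows the interface an agent implementation would need to satisfy; the Protocol name and the unannotated return type of get_artifact are assumptions, not part of the source.
from typing import List, Protocol

from fastapi import UploadFile

from autogpt.schema import Artifact, Step, StepRequestBody, Task, TaskRequestBody


class AgentProtocol(Protocol):
    """Interface implied by the route handlers; the name is illustrative."""

    async def create_task(self, task_request: TaskRequestBody) -> Task: ...
    async def list_tasks(self) -> List[str]: ...
    async def get_task(self, task_id: str) -> Task: ...
    async def list_steps(self, task_id: str) -> List[str]: ...
    async def create_and_execute_step(self, task_id: str, step: StepRequestBody) -> Step: ...
    async def get_step(self, task_id: str, step_id: str) -> Step: ...
    async def list_artifacts(self, task_id: str) -> List[Artifact]: ...
    async def create_artifact(
        self, task_id: str, file: UploadFile | None, uri: str | None
    ) -> Artifact: ...
    async def get_artifact(self, task_id: str, artifact_id: str): ...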
2 | 2-326-13 | common | get | [
"add_api_route",
"add_api_websocket_route",
"add_event_handler",
"add_route",
"add_websocket_route",
"api_route",
"app",
"callbacks",
"default",
"default_response_class",
"delete",
"dependencies",
"dependency_overrides_provider",
"deprecated",
"generate_unique_id_function",
"get",
"head",
"host",
"include_in_schema",
"include_router",
"lifespan",
"lifespan_context",
"middleware_stack",
"mount",
"not_found",
"on_event",
"on_shutdown",
"on_startup",
"options",
"patch",
"post",
"prefix",
"put",
"redirect_slashes",
"responses",
"route",
"route_class",
"routes",
"shutdown",
"startup",
"tags",
"trace",
"url_path_for",
"websocket",
"websocket_route",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Routes for the Agent Service.
This module defines the API routes for the Agent service. While there are multiple endpoints provided by the service,
the ones that require special attention due to their complexity are:
1. `execute_agent_task_step`:
This route is significant because this is where the agent actually performs the work. The function handles
executing the next step for a task based on its current state, and it requires careful implementation to ensure
all scenarios (like the presence or absence of steps or a step marked as `last_step`) are handled correctly.
2. `upload_agent_task_artifacts`:
This route allows for the upload of artifacts, supporting various URI types (e.g., s3, gcs, ftp, http).
The support for different URI types makes it a bit more complex, and it's important to ensure that all
supported URI types are correctly managed. NOTE: The Auto-GPT team will eventually handle the most common
    URI types for you.
3. `create_agent_task`:
While this is a simpler route, it plays a crucial role in the workflow, as it's responsible for the creation
of a new task.
Developers and contributors should be especially careful when making modifications to these routes to ensure
consistency and correctness in the system's behavior.
"""
from typing import List
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import FileResponse
from autogpt.schema import Artifact, Step, StepRequestBody, Task, TaskRequestBody
base_router = APIRouter()
@base_router.post("/agent/tasks", tags=["agent"], response_model=Task)
async def create_agent_task(request: Request, task_request: TaskRequestBody) -> Task:
"""
Creates a new task using the provided TaskRequestBody and returns a Task.
Args:
request (Request): FastAPI request object.
        task_request (TaskRequestBody): The task request containing input and additional input data.
Returns:
Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
Example:
Request (TaskRequestBody defined in schema.py):
{
"input": "Write the words you receive to the file 'output.txt'.",
"additional_input": "python/code"
}
Response (Task defined in schema.py):
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "python/code",
"artifacts": [],
"steps": []
}
"""
agent = request["agent"]
task_request = await agent.create_task(task_request)
return task_request
@base_router.get("/agent/tasks", tags=["agent"], response_model=List[str])
async def list_agent_tasks_ids(request: Request) -> List[str]:
"""
Gets a list of all task IDs.
Args:
request (Request): FastAPI request object.
Returns:
List[str]: A list of all task IDs.
Example:
Request:
GET /agent/tasks
Response:
[
"50da533e-3904-4401-8a07-c49adf88b5eb",
"b7d3c70a-7266-4b3a-818e-1327679f0117",
...
]
"""
agent = request["agent"]
return await agent.list_tasks()
@base_router.get("/agent/tasks/{task_id}", tags=["agent"], response_model=Task)
async def get_agent_task(request: Request, task_id: str):
"""
Gets the details of a task by ID.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
Task: The task with the given ID.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb
Response (Task defined in schema.py):
{
"input": "Write the word 'Washington' to a .txt file",
"additional_input": null,
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"steps": [
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "6bb1801a-fd80-45e8-899a-4dd723cc602e",
"input": "Write the word 'Washington' to a .txt file",
"additional_input": "challenge:write_to_file",
"name": "Write to file",
"status": "completed",
"output": "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
"additional_output": "Do you want me to continue?",
"artifacts": [
{
"artifact_id": "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
"file_name": "output.txt",
"agent_created": true,
"uri": "file://50da533e-3904-4401-8a07-c49adf88b5eb/output.txt"
}
],
"is_last": true
}
]
}
"""
agent = request["agent"]
return await agent.get_task(task_id)
@base_router.get(
"/agent/tasks/{task_id}/steps", tags=["agent"], response_model=List[str]
)
async def list_agent_task_steps(request: Request, task_id: str) -> List[str]:
"""
Retrieves a list of step IDs associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[str]: A list of step IDs.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
Response:
["step1_id", "step2_id", ...]
"""
agent = request["agent"]
return await agent.list_steps(task_id)
@base_router.post("/agent/tasks/{task_id}/steps", tags=["agent"], response_model=Step)
async def execute_agent_task_step(
request: Request, task_id: str, step: StepRequestBody
) -> Step:
"""
Executes the next step for a specified task based on the current task status and returns the
executed step with additional feedback fields.
Depending on the current state of the task, the following scenarios are supported:
1. No steps exist for the task.
2. There is at least one step already for the task, and the task does not have a completed step marked as `last_step`.
3. There is a completed step marked as `last_step` already on the task.
In each of these scenarios, a step object will be returned with two additional fields: `output` and `additional_output`.
- `output`: Provides the primary response or feedback to the user.
- `additional_output`: Supplementary information or data. Its specific content is not strictly defined and can vary based on the step or agent's implementation.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step (StepRequestBody): The details for executing the step.
Returns:
Step: Details of the executed step with additional feedback.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps
{
"input": "Step input details...",
...
}
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
"output": "Primary feedback...",
"additional_output": "Supplementary details...",
...
}
"""
agent = request["agent"]
return await agent.create_and_execute_step(task_id, step)
@base_router.get(
"/agent/tasks/{task_id}/steps/{step_id}", tags=["agent"], response_model=Step
)
async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> Step:
"""
Retrieves the details of a specific step for a given task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
step_id (str): The ID of the step.
Returns:
Step: Details of the specific step.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/steps/step1_id
Response:
{
"task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
"step_id": "step1_id",
...
}
"""
agent = request["agent"]
return await agent.get_step(task_id, step_id)
@base_router.get(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=List[Artifact]
)
async def list_agent_task_artifacts(request: Request, task_id: str) -> List[Artifact]:
"""
Retrieves a list of artifacts associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
Returns:
List[Artifact]: A list of artifacts.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
Response:
[
{"artifact_id": "artifact1_id", ...},
{"artifact_id": "artifact2_id", ...},
...
]
"""
agent = request["agent"]
return await agent.list_artifacts(task_id)
@base_router.post(
"/agent/tasks/{task_id}/artifacts", tags=["agent"], response_model=Artifact
)
async def upload_agent_task_artifacts(
request: Request,
task_id: str,
file: UploadFile | None = None,
uri: str | None = None,
) -> Artifact:
"""
Uploads an artifact for a specific task using either a provided file or a URI.
At least one of the parameters, `file` or `uri`, must be specified. The `uri` can point to
cloud storage resources such as S3, GCS, etc., or to other resources like FTP or HTTP.
To check the supported URI types for the agent, use the `/agent/artifacts/uris` endpoint.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
file (UploadFile, optional): The uploaded file. Defaults to None.
uri (str, optional): The URI pointing to the resource. Defaults to None.
Returns:
Artifact: Details of the uploaded artifact.
Note:
Either `file` or `uri` must be provided. If both are provided, the behavior depends on
the agent's implementation. If neither is provided, the function will return an error.
Example:
Request:
POST /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts
File: <uploaded_file>
OR
URI: "s3://path/to/artifact"
Response:
{
"artifact_id": "artifact1_id",
...
}
"""
agent = request["agent"]
return await agent.create_artifact(task_id, file, uri)
@base_router. | (
"/agent/tasks/{task_id}/artifacts/{artifact_id}", tags=["agent"], response_model=str
)
async def download_agent_task_artifact(
request: Request, task_id: str, artifact_id: str
) -> FileResponse:
"""
Downloads an artifact associated with a specific task.
Args:
request (Request): FastAPI request object.
task_id (str): The ID of the task.
artifact_id (str): The ID of the artifact.
Returns:
FileResponse: The downloaded artifact file.
Example:
Request:
GET /agent/tasks/50da533e-3904-4401-8a07-c49adf88b5eb/artifacts/artifact1_id
Response:
<file_content_of_artifact>
"""
agent = request["agent"]
print(f"task_id: {task_id}, artifact_id: {artifact_id}")
return await agent.get_artifact(task_id, artifact_id)
| Significant-Gravitas__Auto-GPT |
3 | 3-84-16 | commited | __init__ | [
"converter",
"datefmt",
"default_msec_format",
"default_time_format",
"format",
"formatException",
"formatMessage",
"formatStack",
"formatTime",
"usesTime",
"_fmt",
"_style",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
    Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
    This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, date | le)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
    This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
        Parse the content and log the message at CHAT level
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
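A possible way to wire up the CustomLogger and setup_logger defined in the record above; the import path is not given in the source, so this assumes the two names are already in scope, and the logger name and chat payload are illustrative.
import logging

# Assumes CustomLogger and setup_logger from the module above are importable in this scope.
logging.setLoggerClass(CustomLogger)   # subsequently created loggers use the custom class
setup_logger()                         # applies the dictConfig-based logging_config

log = logging.getLogger("autogpt.agent")  # child of the configured "autogpt" logger
log.info("agent started")

# CHAT-level helper added by CustomLogger; it json.loads the response string.
log.chat("assistant", '{"choices": [{"message": {"content": "Hello"}}]}')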
3 | 3-100-23 | infile | Formatter | [
"addLevelName",
"atexit",
"BASIC_FORMAT",
"basicConfig",
"BufferingFormatter",
"captureWarnings",
"collections",
"config",
"critical",
"CRITICAL",
"currentframe",
"debug",
"DEBUG",
"disable",
"error",
"ERROR",
"exception",
"fatal",
"FATAL",
"FileHandler",
"Filter",
"Filterer",
"Formatter",
"getLevelName",
"getLogger",
"getLoggerClass",
"getLogRecordFactory",
"Handler",
"handlers",
"info",
"INFO",
"io",
"lastResort",
"log",
"Logger",
"LoggerAdapter",
"logMultiprocessing",
"logProcesses",
"LogRecord",
"logThreads",
"makeLogRecord",
"Manager",
"NOTSET",
"NullHandler",
"os",
"PercentStyle",
"PlaceHolder",
"raiseExceptions",
"re",
"root",
"RootLogger",
"setLoggerClass",
"setLogRecordFactory",
"shutdown",
"StreamHandler",
"StrFormatStyle",
"StringTemplateStyle",
"sys",
"Template",
"threading",
"time",
"traceback",
"warn",
"WARN",
"warning",
"WARNING",
"warnings",
"weakref",
"_acquireLock",
"_addHandlerRef",
"_after_at_fork_child_reinit_locks",
"_at_fork_reinit_lock_weakset",
"_checkLevel",
"_defaultFormatter",
"_defaultLastResort",
"_handlerList",
"_handlers",
"_levelToName",
"_lock",
"_loggerClass",
"_logRecordFactory",
"_nameToLevel",
"_register_at_fork_reinit_lock",
"_releaseLock",
"_removeHandlerRef",
"_showwarning",
"_srcfile",
"_startTime",
"_StderrHandler",
"_str_formatter",
"_STYLES",
"_warnings_showwarning",
"__all__",
"__author__",
"__date__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__status__",
"__version__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
    Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
    This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(s |
class CustomLogger(logging.Logger):
"""
    This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
        Parse the content and log the message at CHAT level
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
3 | 3-100-33 | infile | format | [
"converter",
"datefmt",
"default_msec_format",
"default_time_format",
"format",
"formatException",
"formatMessage",
"formatStack",
"formatTime",
"mro",
"usesTime",
"_fmt",
"_style",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
    Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
    This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
| s CustomLogger(logging.Logger):
"""
    This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
        Parse the content and log the message at CHAT level
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
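One detail worth noting in the logging code repeated in these records: both CustomLogger and QueueLogger attach a QueueHandler backed by an in-memory queue, but no QueueListener is ever started, so those queued records are not forwarded anywhere by the code shown. A minimal sketch of the standard stdlib pairing follows (all names here are illustrative, not from the source).
import logging
import logging.handlers
import queue

log_queue: queue.Queue = queue.Queue(-1)

# Producer side: the handler only enqueues records, so logging stays cheap.
worker_logger = logging.getLogger("autogpt.worker")
worker_logger.setLevel(logging.DEBUG)
worker_logger.addHandler(logging.handlers.QueueHandler(log_queue))

# Consumer side: a QueueListener drains the queue on a background thread
# and hands records to the real handlers.
listener = logging.handlers.QueueListener(log_queue, logging.StreamHandler())
listener.start()

worker_logger.info("processed through the queue")
listener.stop()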
3 | 3-117-16 | commited | Logger | [
"addLevelName",
"atexit",
"BASIC_FORMAT",
"basicConfig",
"BufferingFormatter",
"captureWarnings",
"collections",
"config",
"critical",
"CRITICAL",
"currentframe",
"debug",
"DEBUG",
"disable",
"error",
"ERROR",
"exception",
"fatal",
"FATAL",
"FileHandler",
"Filter",
"Filterer",
"Formatter",
"getLevelName",
"getLogger",
"getLoggerClass",
"getLogRecordFactory",
"Handler",
"handlers",
"info",
"INFO",
"io",
"lastResort",
"log",
"Logger",
"LoggerAdapter",
"logMultiprocessing",
"logProcesses",
"LogRecord",
"logThreads",
"makeLogRecord",
"Manager",
"NOTSET",
"NullHandler",
"os",
"PercentStyle",
"PlaceHolder",
"raiseExceptions",
"re",
"root",
"RootLogger",
"setLoggerClass",
"setLogRecordFactory",
"shutdown",
"StreamHandler",
"StrFormatStyle",
"StringTemplateStyle",
"sys",
"Template",
"threading",
"time",
"traceback",
"warn",
"WARN",
"warning",
"WARNING",
"warnings",
"weakref",
"_acquireLock",
"_addHandlerRef",
"_after_at_fork_child_reinit_locks",
"_at_fork_reinit_lock_weakset",
"_checkLevel",
"_defaultFormatter",
"_defaultLastResort",
"_handlerList",
"_handlers",
"_levelToName",
"_lock",
"_loggerClass",
"_logRecordFactory",
"_nameToLevel",
"_register_at_fork_reinit_lock",
"_releaseLock",
"_removeHandlerRef",
"_showwarning",
"_srcfile",
"_startTime",
"_StderrHandler",
"_str_formatter",
"_STYLES",
"_warnings_showwarning",
"__all__",
"__author__",
"__date__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__status__",
"__version__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
    Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
    This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
    This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(se | me, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
        Parse the content and log the message at CHAT level
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
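The rows above and below share the same Auto-GPT logging module as their context. As a minimal sketch of what formatter_message() does with the $BOLD/$RESET placeholders used in CONSOLE_FORMAT, assuming that module is importable as autogpt_log (the module name is an assumption):

import autogpt_log  # assumed name for the logging module shown in these rows

fmt = "[$BOLD%(name)s$RESET] %(message)s"
# With use_color=True the placeholders are replaced by ANSI escape sequences...
print(autogpt_log.formatter_message(fmt, use_color=True))
# ...and with use_color=False they are stripped out entirely.
print(autogpt_log.formatter_message(fmt, use_color=False))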
3 | 3-117-23 | commited | __init__ | [
"addFilter",
"addHandler",
"callHandlers",
"critical",
"debug",
"disabled",
"error",
"exception",
"fatal",
"filter",
"filters",
"findCaller",
"getChild",
"getEffectiveLevel",
"handle",
"handlers",
"hasHandlers",
"info",
"isEnabledFor",
"level",
"log",
"makeRecord",
"mro",
"name",
"parent",
"propagate",
"removeFilter",
"removeHandler",
"setLevel",
"warn",
"warning",
"_log",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, nam | vel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
3 | 3-128-54 | infile | COLOR_FORMAT | [
"addFilter",
"addHandler",
"chat",
"COLOR_FORMAT",
"CONSOLE_FORMAT",
"critical",
"debug",
"disabled",
"error",
"exception",
"fatal",
"filter",
"filters",
"findCaller",
"FORMAT",
"getChild",
"getEffectiveLevel",
"handle",
"handlers",
"hasHandlers",
"info",
"isEnabledFor",
"JSON_FORMAT",
"level",
"log",
"makeRecord",
"name",
"parent",
"propagate",
"removeFilter",
"removeHandler",
"setLevel",
"warn",
"warning",
"_log",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
| = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
3 | 3-131-13 | random | addHandler | [
"addFilter",
"addHandler",
"chat",
"COLOR_FORMAT",
"CONSOLE_FORMAT",
"critical",
"debug",
"disabled",
"error",
"exception",
"fatal",
"filter",
"filters",
"findCaller",
"FORMAT",
"getChild",
"getEffectiveLevel",
"handle",
"handlers",
"hasHandlers",
"info",
"isEnabledFor",
"JSON_FORMAT",
"level",
"log",
"makeRecord",
"name",
"parent",
"propagate",
"removeFilter",
"removeHandler",
"setLevel",
"warn",
"warning",
"_log",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console | chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
3 | 3-148-39 | common | get | [
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {mess | ['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
3 | 3-155-35 | common | get | [
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices' | ]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
3 | 3-164-55 | non_informative | NOTSET | [
"addLevelName",
"atexit",
"BASIC_FORMAT",
"basicConfig",
"BufferingFormatter",
"captureWarnings",
"collections",
"config",
"critical",
"CRITICAL",
"currentframe",
"debug",
"DEBUG",
"disable",
"error",
"ERROR",
"exception",
"fatal",
"FATAL",
"FileHandler",
"Filter",
"Filterer",
"Formatter",
"getLevelName",
"getLogger",
"getLoggerClass",
"getLogRecordFactory",
"Handler",
"handlers",
"info",
"INFO",
"io",
"lastResort",
"log",
"Logger",
"LoggerAdapter",
"logMultiprocessing",
"logProcesses",
"LogRecord",
"logThreads",
"makeLogRecord",
"Manager",
"NOTSET",
"NullHandler",
"os",
"PercentStyle",
"PlaceHolder",
"raiseExceptions",
"re",
"root",
"RootLogger",
"setLoggerClass",
"setLogRecordFactory",
"shutdown",
"StreamHandler",
"StrFormatStyle",
"StringTemplateStyle",
"sys",
"Template",
"threading",
"time",
"traceback",
"warn",
"WARN",
"warning",
"WARNING",
"warnings",
"weakref",
"_acquireLock",
"_addHandlerRef",
"_after_at_fork_child_reinit_locks",
"_at_fork_reinit_lock_weakset",
"_checkLevel",
"_defaultFormatter",
"_defaultLastResort",
"_handlerList",
"_handlers",
"_levelToName",
"_lock",
"_loggerClass",
"_logRecordFactory",
"_nameToLevel",
"_register_at_fork_reinit_lock",
"_releaseLock",
"_removeHandlerRef",
"_showwarning",
"_srcfile",
"_startTime",
"_StderrHandler",
"_str_formatter",
"_STYLES",
"_warnings_showwarning",
"__all__",
"__author__",
"__date__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__status__",
"__version__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, | )
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
3 | 3-165-16 | commited | __init__ | [
"addFilter",
"addHandler",
"callHandlers",
"critical",
"debug",
"disabled",
"error",
"exception",
"fatal",
"filter",
"filters",
"findCaller",
"getChild",
"getEffectiveLevel",
"handle",
"handlers",
"hasHandlers",
"info",
"isEnabledFor",
"level",
"log",
"makeRecord",
"name",
"parent",
"propagate",
"removeFilter",
"removeHandler",
"setLevel",
"warn",
"warning",
"_cache",
"_log",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_han | ogging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
handlers={
"h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
3 | 3-175-35 | infile | COLOR_FORMAT | [
"addFilter",
"addHandler",
"chat",
"COLOR_FORMAT",
"CONSOLE_FORMAT",
"critical",
"debug",
"disabled",
"error",
"exception",
"fatal",
"filter",
"filters",
"findCaller",
"FORMAT",
"getChild",
"getEffectiveLevel",
"handle",
"handlers",
"hasHandlers",
"info",
"isEnabledFor",
"JSON_FORMAT",
"level",
"log",
"makeRecord",
"mro",
"name",
"parent",
"propagate",
"removeFilter",
"removeHandler",
"setLevel",
"warn",
"warning",
"_log",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | import json
import logging
import logging.config
import logging.handlers
import os
import queue
ENABLE_TRACING = os.environ.get("ENABLE_TRACING", "false").lower() == "true"
JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"
CHAT = 29
logging.addLevelName(CHAT, "CHAT")
RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"
ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"
EMOJIS: dict[str, str] = {
"DEBUG": "🐛",
"INFO": "📝",
"CHAT": "💬",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "💥",
}
KEYWORD_COLORS: dict[str, str] = {
"DEBUG": WHITE,
"INFO": LIGHT_BLUE,
"CHAT": GREEN,
"WARNING": YELLOW,
"ERROR": ORANGE,
"CRITICAL": RED,
}
class JsonFormatter(logging.Formatter):
def format(self, record):
return json.dumps(record.__dict__)
def formatter_message(message: str, use_color: bool = True) -> str:
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(
message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class ConsoleFormatter(logging.Formatter):
"""
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
"""
def __init__(
self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
):
super().__init__(fmt, datefmt, style)
self.use_color = use_color
def format(self, record: logging.LogRecord) -> str:
"""
Format and highlight certain keywords
"""
rec = record
levelname = rec.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
rec.levelname = levelname_color
rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
rec.msg = (
KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
)
return logging.Formatter.format(self, rec)
class CustomLogger(logging.Logger):
"""
This adds extra logging functions such as logger.chat and also
sets the logger to use the custom formatter
"""
CONSOLE_FORMAT: str = (
"[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
)
FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'
def __init__(self, name: str, logLevel: str = "DEBUG"):
logging.Logger.__init__(self, name, logLevel)
# Queue Handler
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
json_formatter = logging.Formatter(self.JSON_FORMAT)
queue_handler.setFormatter(json_formatter)
self.addHandler(queue_handler)
if JSON_LOGGING:
console_formatter = JsonFormatter()
else:
console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(console_formatter)
self.addHandler(console)
def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
"""
Parse the content, log the message and extract the usage into prometheus metrics
"""
role_emojis = {
"system": "🖥️",
"user": "👤",
"assistant": "🤖",
"function": "⚙️",
}
if self.isEnabledFor(CHAT):
if messages:
for message in messages:
self._log(
CHAT,
f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
)
else:
response = json.loads(openai_repsonse)
self._log(
CHAT,
f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
)
class QueueLogger(logging.Logger):
"""
Custom logger class with queue
"""
def __init__(self, name: str, level: int = logging.NOTSET):
super().__init__(name, level)
queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
self.addHandler(queue_handler)
logging_config: dict = dict(
version=1,
formatters={
"console": {
"()": ConsoleFormatter,
"format": CustomLogger.COLOR_FORMAT,
},
},
han | "h": {
"class": "logging.StreamHandler",
"formatter": "console",
"level": logging.DEBUG,
},
},
root={
"handlers": ["h"],
"level": logging.WARNING,
},
loggers={
"autogpt": {
"handlers": ["h"],
"level": logging.DEBUG,
"propagate": False,
},
},
)
def setup_logger():
"""
Setup the logger with the specified format
"""
logging.config.dictConfig(logging_config)
| Significant-Gravitas__Auto-GPT |
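Before the rows move on to the versioneer _version.py file, a short usage sketch of the logging module that the Auto-GPT rows above all carry as context; it assumes that module is importable as autogpt_log (an assumed name) and only calls names defined in it:

import logging

import autogpt_log  # assumed name for the logging module shown in the rows above

autogpt_log.setup_logger()                  # applies logging_config via dictConfig
logger = logging.getLogger("autogpt")       # configured at DEBUG in logging_config

logger.debug("starting up")                 # rendered by ConsoleFormatter (emoji + color)
logger.log(autogpt_log.CHAT, "chat line")   # CHAT (29) was registered via addLevelName

# The extra .chat() helper only exists on CustomLogger, which must be installed
# before a new logger object is created:
logging.setLoggerClass(autogpt_log.CustomLogger)
chat_logger = logging.getLogger("agent")    # "agent" is a hypothetical logger name
chat_logger.setLevel(autogpt_log.CHAT)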
5 | 5-162-20 | common | get | [
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.20 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig: # pylint: disable=too-few-public-methods
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "xopt/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
# pylint:disable=too-many-arguments,consider-using-with # noqa
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords. | ("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| christophermayes__xopt |
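Editor's note (not part of the dataset rows): the record above closes with versioneer's rendering helpers and get_versions(). As a reading aid, here is a minimal, self-contained sketch of the render_pep440 logic contained in that prefix, showing the TAG[+DISTANCE.gHEX[.dirty]] format it builds. The pieces values below are hypothetical, not taken from the repository.
# Minimal sketch (not from the dataset): reproduces the render_pep440 logic
# shown above for a hypothetical pieces dict.
pieces = {
    "closest-tag": "1.2.3",  # tag with tag_prefix already stripped
    "distance": 4,           # commits since that tag
    "short": "abc1234",      # short commit hash
    "dirty": True,           # uncommitted changes present
}
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
    # plus_or_dot(): use "+" unless the tag already carries a local segment
    rendered += "." if "+" in pieces["closest-tag"] else "+"
    rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        rendered += ".dirty"
print(rendered)  # -> 1.2.3+4.gabc1234.dirty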
5 | 5-349-21 | common | get | [
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.20 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig: # pylint: disable=too-few-public-methods
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "xopt/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
# pylint:disable=too-many-arguments,consider-using-with # noqa
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces. | ("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| christophermayes__xopt |
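Editor's note (not part of the dataset rows): the record above asks for the completion at pieces.get("closest-tag", "") inside plus_or_dot. A short illustration of why dict.get with a default is the ground truth: it never raises KeyError, so the "+" membership test stays well defined. The dict contents below are made up for illustration.
# Minimal sketch of dict.get with a default, mirroring plus_or_dot's check.
pieces = {"closest-tag": "1.2.3+local"}
print(pieces.get("closest-tag", ""))         # -> 1.2.3+local
print(pieces.get("not-present", ""))         # -> '' (no KeyError raised)
print("+" in pieces.get("closest-tag", ""))  # -> True, so plus_or_dot returns "."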
5 | 5-547-42 | common | get | [
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.20 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig: # pylint: disable=too-few-public-methods
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "xopt/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
# pylint:disable=too-many-arguments,consider-using-with # noqa
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces. | ("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| christophermayes__xopt |
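Editor's note (not part of the dataset rows): the completion in the record above again resolves to dict.get, this time in render()'s error branch (pieces.get("long")). The sketch below, with hypothetical values, shows that branch in isolation: when git-describe output could not be parsed, render() reports version "unknown", and .get keeps the lookup safe even if "long" was never filled in.
# Minimal sketch of render()'s error branch from the prefix above.
pieces = {
    "long": "abc1234def5678",
    "dirty": True,
    "error": "unable to parse git-describe output: 'garbage'",
}
if pieces["error"]:
    result = {
        "version": "unknown",
        "full-revisionid": pieces.get("long"),  # .get tolerates an unfilled key
        "dirty": None,
        "error": pieces["error"],
        "date": None,
    }
print(result["version"])  # -> unknown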
5 | 5-576-27 | common | get | [
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.20 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig: # pylint: disable=too-few-public-methods
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "xopt/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
# pylint:disable=too-many-arguments,consider-using-with # noqa
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
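# Worked examples (illustrative, derived from the renderers above): given pieces for a
# dirty checkout 4 commits past tag "1.2.3" at short hash abc1234 on a non-master branch:
#   render_pep440(pieces)        -> "1.2.3+4.gabc1234.dirty"
#   render_pep440_branch(pieces) -> "1.2.3.dev0+4.gabc1234.dirty"
#   render_pep440_post(pieces)   -> "1.2.3.post4.dev0+gabc1234"
#   render_git_describe(pieces)  -> "1.2.3-4-gabc1234-dirty"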
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces. | ("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| christophermayes__xopt |
7 | 7-88-20 | inproject | raises | [
"annotations",
"approx",
"Cache",
"CallInfo",
"CaptureFixture",
"Class",
"cmdline",
"Collector",
"CollectReport",
"Config",
"console_main",
"deprecated_call",
"Dir",
"Directory",
"DoctestItem",
"ExceptionInfo",
"exit",
"ExitCode",
"fail",
"File",
"fixture",
"FixtureDef",
"FixtureLookupError",
"FixtureRequest",
"freeze_includes",
"Function",
"hookimpl",
"HookRecorder",
"hookspec",
"importorskip",
"Item",
"LineMatcher",
"LogCaptureFixture",
"main",
"mark",
"Mark",
"MarkDecorator",
"MarkGenerator",
"Metafunc",
"Module",
"MonkeyPatch",
"OptionGroup",
"Package",
"param",
"Parser",
"PytestAssertRewriteWarning",
"PytestCacheWarning",
"PytestCollectionWarning",
"PytestConfigWarning",
"PytestDeprecationWarning",
"Pytester",
"PytestExperimentalApiWarning",
"PytestPluginManager",
"PytestRemovedIn9Warning",
"PytestReturnNotNoneWarning",
"PytestUnhandledCoroutineWarning",
"PytestUnhandledThreadExceptionWarning",
"PytestUnknownMarkWarning",
"PytestUnraisableExceptionWarning",
"PytestWarning",
"raises",
"RecordedHookCall",
"register_assert_rewrite",
"RunResult",
"Session",
"set_trace",
"skip",
"Stash",
"StashKey",
"TempdirFactory",
"TempPathFactory",
"Testdir",
"TestReport",
"TestShortLogReport",
"UsageError",
"version_tuple",
"WarningsRecorder",
"warns",
"xfail",
"yield_fixture",
"__all__",
"__doc__",
"__file__",
"__main__",
"__name__",
"__package__",
"__pytestPDB",
"__version__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
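# The assertions above pin down the expected contract of update_to_standard_coords:
# non-standard coordinate names are renamed to lat/lon/time, and an inverse mapping is
# returned so callers can restore the original names. An illustrative sketch of that
# contract (hypothetical helper, not icclim's actual implementation):
def _standardize_coords_sketch(ds):
    rename_map = {old: new
                  for old, new in {"latitude": "lat", "longitude": "lon", "t": "time"}.items()
                  if old in ds.coords}
    return ds.rename(rename_map), {new: old for old, new in rename_map.items()}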
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest. | (InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
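To make the row layout concrete: each record masks a single completion target in its source file, so joining the tail of the prefix, the ground-truth token, and the head of the postfix reconstructs the original line (for the row above, "with pytest." + "raises" + "(InvalidIcclimArgumentError):"). A minimal sketch, assuming the records expose prefix, ground_truth, and postfix string fields:

    def reconstruct_masked_line(row):
        # last physical line of the prefix + masked token + first physical line of the postfix
        return (row["prefix"].rsplit("\n", 1)[-1]
                + row["ground_truth"]
                + row["postfix"].split("\n", 1)[0])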
7 | 7-89-39 | inproject | WW | [
"CD",
"CDD",
"CFD",
"CSDI",
"CSU",
"CW",
"CWD",
"DTR",
"ETR",
"FD",
"GD4",
"HD17",
"ID",
"lookup",
"mro",
"name",
"PRCPTOT",
"R10MM",
"R20MM",
"R75P",
"R75PTOT",
"R95P",
"R95PTOT",
"R99P",
"R99PTOT",
"RR1",
"RX1DAY",
"RX5DAY",
"SD",
"SD1",
"SD50CM",
"SD5CM",
"SDII",
"SU",
"TG",
"TG10P",
"TG90P",
"TN",
"TN10P",
"TN90P",
"TNN",
"TNX",
"TR",
"TX",
"TX10P",
"TX90P",
"TXN",
"TXX",
"value",
"VDTR",
"WD",
"WSDI",
"WW",
"_generate_next_value_",
"_ignore_",
"_member_map_",
"_member_names_",
"_missing_",
"_name_",
"_order_",
"_value2member_map_",
"_value_",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__order__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex. | )
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
7 | 7-103-54 | inproject | TX90P | [
"CD",
"CDD",
"CFD",
"CSDI",
"CSU",
"CW",
"CWD",
"DTR",
"ETR",
"FD",
"GD4",
"HD17",
"ID",
"lookup",
"mro",
"name",
"PRCPTOT",
"R10MM",
"R20MM",
"R75P",
"R75PTOT",
"R95P",
"R95PTOT",
"R99P",
"R99PTOT",
"RR1",
"RX1DAY",
"RX5DAY",
"SD",
"SD1",
"SD50CM",
"SD5CM",
"SDII",
"SU",
"TG",
"TG10P",
"TG90P",
"TN",
"TN10P",
"TN90P",
"TNN",
"TNX",
"TR",
"TX",
"TX10P",
"TX90P",
"TXN",
"TXX",
"value",
"VDTR",
"WD",
"WSDI",
"WW",
"_generate_next_value_",
"_ignore_",
"_member_map_",
"_member_names_",
"_missing_",
"_name_",
"_order_",
"_value2member_map_",
"_value_",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__order__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex. | )
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
7 | 7-104-11 | inproject | testing | [
"align",
"ALL_DIMS",
"apply_ufunc",
"as_variable",
"backends",
"broadcast",
"cftime_range",
"CFTimeIndex",
"coding",
"combine_by_coords",
"combine_nested",
"concat",
"Context",
"conventions",
"convert",
"Coordinates",
"core",
"corr",
"cov",
"cross",
"DataArray",
"Dataset",
"DataTree",
"date_range",
"date_range_like",
"decode_cf",
"dot",
"full_like",
"get_options",
"group_subtrees",
"groupers",
"Index",
"indexes",
"IndexSelResult",
"IndexVariable",
"infer_freq",
"InvalidTreeError",
"load_dataarray",
"load_dataset",
"map_blocks",
"map_over_datasets",
"merge",
"MergeError",
"NamedArray",
"namedarray",
"NotFoundInTreeError",
"ones_like",
"open_dataarray",
"open_dataset",
"open_datatree",
"open_groups",
"open_mfdataset",
"open_zarr",
"plot",
"polyval",
"register_dataarray_accessor",
"register_dataset_accessor",
"register_datatree_accessor",
"save_mfdataset",
"SerializationWarning",
"set_options",
"show_versions",
"static",
"testing",
"tests",
"TreeIsomorphismError",
"tutorial",
"ufuncs",
"unify_chunks",
"util",
"Variable",
"where",
"zeros_like",
"_version",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__version__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr. | .assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
7 | 7-104-19 | inproject | assert_equal | [
"assert_allclose",
"assert_chunks_equal",
"assert_duckarray_allclose",
"assert_duckarray_equal",
"assert_equal",
"assert_identical",
"assert_isomorphic",
"assertions",
"strategies",
"_assert_dataarray_invariants",
"_assert_dataset_invariants",
"_assert_indexes_invariants_checks",
"_assert_internal_invariants",
"_assert_variable_invariants",
"_data_allclose_or_equiv",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing. | (ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
7 | 7-146-28 | random | DataArray | [
"align",
"ALL_DIMS",
"apply_ufunc",
"as_variable",
"backends",
"broadcast",
"cftime_range",
"CFTimeIndex",
"coding",
"combine_by_coords",
"combine_nested",
"concat",
"Context",
"conventions",
"convert",
"Coordinates",
"core",
"corr",
"cov",
"cross",
"DataArray",
"Dataset",
"DataTree",
"date_range",
"date_range_like",
"decode_cf",
"dot",
"full_like",
"get_options",
"group_subtrees",
"groupers",
"Index",
"indexes",
"IndexSelResult",
"IndexVariable",
"infer_freq",
"InvalidTreeError",
"load_dataarray",
"load_dataset",
"map_blocks",
"map_over_datasets",
"merge",
"MergeError",
"NamedArray",
"namedarray",
"NotFoundInTreeError",
"ones_like",
"open_dataarray",
"open_dataset",
"open_datatree",
"open_groups",
"open_mfdataset",
"open_zarr",
"plot",
"polyval",
"register_dataarray_accessor",
"register_dataset_accessor",
"register_datatree_accessor",
"save_mfdataset",
"SerializationWarning",
"set_options",
"show_versions",
"static",
"testing",
"tests",
"TreeIsomorphismError",
"tutorial",
"ufuncs",
"unify_chunks",
"util",
"Variable",
"where",
"zeros_like",
"_version",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__version__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr. | (
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
7 | 7-160-45 | inproject | OUTPUT_NC_FILE | [
"cleanup",
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"OUTPUT_ZARR_STORE",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self. | )
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
7 | 7-183-46 | inproject | OUTPUT_NC_FILE | [
"cleanup",
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"OUTPUT_ZARR_STORE",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self. | , self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
7 | 7-183-67 | inproject | OUTPUT_NC_FILE_2 | [
"cleanup",
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"OUTPUT_ZARR_STORE",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self. | ])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.OUTPUT_ZARR_STORE)
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
7 | 7-207-45 | inproject | OUTPUT_ZARR_STORE | [
"cleanup",
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"OUTPUT_ZARR_STORE",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self. | )
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_not_implemented_error(self):
# WHEN
with pytest.raises(NotImplementedError):
read_dataset(42) # noqa
| cerfacs-globc__icclim |
12 | 12-83-27 | random | JDBCTOJDBC_OUTPUT_URL | [
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants. | }',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.JDBC_DRIVER, output_jdbc_driver) \
.option(constants.JDBC_TABLE, output_jdbc_table) \
.option(constants.JDBC_CREATE_TABLE_OPTIONS, output_jdbc_create_table_option) \
.option(constants.JDBC_BATCH_SIZE, output_jdbc_batch_size) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.mode(output_jdbc_mode) \
.save() | googlecloudplatform__dataproc-templates |
12 | 12-94-15 | random | add_argument | [
"add_argument",
"add_argument_group",
"add_help",
"add_mutually_exclusive_group",
"add_subparsers",
"allow_abbrev",
"argument_default",
"conflict_handler",
"convert_arg_line_to_args",
"description",
"epilog",
"error",
"exit",
"exit_on_error",
"format_help",
"format_usage",
"formatter_class",
"fromfile_prefix_chars",
"get_default",
"parse_args",
"parse_intermixed_args",
"parse_known_args",
"parse_known_intermixed_args",
"prefix_chars",
"print_help",
"print_usage",
"prog",
"register",
"set_defaults",
"usage",
"_action_groups",
"_actions",
"_add_action",
"_add_container_actions",
"_check_conflict",
"_check_value",
"_defaults",
"_get_args",
"_get_formatter",
"_get_handler",
"_get_kwargs",
"_get_nargs_pattern",
"_get_option_tuples",
"_get_optional_actions",
"_get_optional_kwargs",
"_get_positional_actions",
"_get_positional_kwargs",
"_get_value",
"_get_values",
"_handle_conflict_error",
"_handle_conflict_resolve",
"_has_negative_number_optionals",
"_match_argument",
"_match_arguments_partial",
"_mutually_exclusive_groups",
"_negative_number_matcher",
"_option_string_actions",
"_optionals",
"_parse_known_args",
"_parse_optional",
"_pop_action_class",
"_positionals",
"_print_message",
"_read_args_from_files",
"_registries",
"_registry_get",
"_remove_action",
"_subparsers",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser. | (
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.JDBC_DRIVER, output_jdbc_driver) \
.option(constants.JDBC_TABLE, output_jdbc_table) \
.option(constants.JDBC_CREATE_TABLE_OPTIONS, output_jdbc_create_table_option) \
.option(constants.JDBC_BATCH_SIZE, output_jdbc_batch_size) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.mode(output_jdbc_mode) \
.save() | googlecloudplatform__dataproc-templates |
12 | 12-119-26 | random | OUTPUT_MODE_APPEND | [
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants. | ,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.JDBC_DRIVER, output_jdbc_driver) \
.option(constants.JDBC_TABLE, output_jdbc_table) \
.option(constants.JDBC_CREATE_TABLE_OPTIONS, output_jdbc_create_table_option) \
.option(constants.JDBC_BATCH_SIZE, output_jdbc_batch_size) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.mode(output_jdbc_mode) \
.save() | googlecloudplatform__dataproc-templates |
12 | 12-140-30 | inproject | get_logger | [
"build",
"get_logger",
"parse_args",
"run",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self. | (spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.JDBC_DRIVER, output_jdbc_driver) \
.option(constants.JDBC_TABLE, output_jdbc_table) \
.option(constants.JDBC_CREATE_TABLE_OPTIONS, output_jdbc_create_table_option) \
.option(constants.JDBC_BATCH_SIZE, output_jdbc_batch_size) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.mode(output_jdbc_mode) \
.save() | googlecloudplatform__dataproc-templates |
12 | 12-160-22 | non_informative | pformat | [
"isreadable",
"isrecursive",
"pformat",
"pp",
"pprint",
"PrettyPrinter",
"re",
"saferepr",
"_builtin_scalars",
"_collections",
"_dataclasses",
"_perfcheck",
"_recursion",
"_safe_key",
"_safe_tuple",
"_StringIO",
"_sys",
"_types",
"_wrap_bytes_repr",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__"
] | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint. | (args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.JDBC_DRIVER, output_jdbc_driver) \
.option(constants.JDBC_TABLE, output_jdbc_table) \
.option(constants.JDBC_CREATE_TABLE_OPTIONS, output_jdbc_create_table_option) \
.option(constants.JDBC_BATCH_SIZE, output_jdbc_batch_size) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.mode(output_jdbc_mode) \
.save() | googlecloudplatform__dataproc-templates |
12 | 12-168-19 | non_informative | error | [
"addFilter",
"addHandler",
"callHandlers",
"critical",
"debug",
"disabled",
"error",
"exception",
"fatal",
"filter",
"filters",
"findCaller",
"getChild",
"getEffectiveLevel",
"handle",
"handlers",
"hasHandlers",
"info",
"isEnabledFor",
"level",
"log",
"makeRecord",
"name",
"parent",
"propagate",
"removeFilter",
"removeHandler",
"setLevel",
"warn",
"warning",
"_cache",
"_log",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger. | ("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.JDBC_DRIVER, output_jdbc_driver) \
.option(constants.JDBC_TABLE, output_jdbc_table) \
.option(constants.JDBC_CREATE_TABLE_OPTIONS, output_jdbc_create_table_option) \
.option(constants.JDBC_BATCH_SIZE, output_jdbc_batch_size) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.mode(output_jdbc_mode) \
.save() | googlecloudplatform__dataproc-templates |
12 | 12-177-17 | common | load | [
"csv",
"format",
"jdbc",
"json",
"load",
"option",
"options",
"orc",
"parquet",
"schema",
"table",
"text",
"_df",
"_jreader",
"_set_opts",
"_spark",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
. | ()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.JDBC_DRIVER, output_jdbc_driver) \
.option(constants.JDBC_TABLE, output_jdbc_table) \
.option(constants.JDBC_CREATE_TABLE_OPTIONS, output_jdbc_create_table_option) \
.option(constants.JDBC_BATCH_SIZE, output_jdbc_batch_size) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.mode(output_jdbc_mode) \
.save() | googlecloudplatform__dataproc-templates |
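Illustrative aside (not a dataset record): the ground truth `load` in the record above completes a partitioned Spark JDBC read. The sketch below shows that chain in a self-contained form using Spark's standard JDBC option keys; the connection details are placeholders, and mapping the dataset's constants.JDBC_* names onto these keys is an assumption.

```python
# Sketch of a partitioned Spark JDBC read like the one the masked `load` call
# completes above. Option keys are Spark's standard JDBC keys; connection
# details are placeholders.
from pyspark.sql import DataFrame, SparkSession


def read_partitioned_jdbc(spark: SparkSession, url: str, driver: str, table: str) -> DataFrame:
    return (
        spark.read.format("jdbc")
        .option("url", url)               # e.g. jdbc:postgresql://host:5432/db
        .option("driver", driver)         # e.g. org.postgresql.Driver
        .option("dbtable", table)
        .option("partitionColumn", "id")  # numeric/date/timestamp column
        .option("lowerBound", "1")
        .option("upperBound", "1000")
        .option("numPartitions", "10")
        .load()                           # triggers the JDBC read
    )
```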
13 | 13-32-43 | inproject | parse_args | [
"build",
"get_logger",
"parse_args",
"run",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template. | (
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
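Illustrative aside (not a dataset record): the tests above lean on MagicMock's lazy child creation — calling a mock always returns the same return_value object, so the argument-free configuration chain and the real call chain land on the same leaf mock, and assert_called_with checks only the most recent call. A minimal standalone demonstration:

```python
# Editor's sketch of the MagicMock chaining idiom used in the tests above.
from unittest import mock

session = mock.MagicMock()
# Configure the leaf of the chain; the argument-free calls create child mocks.
session.read.format().option().load.return_value = "sentinel-df"

# The "real" call chain with arguments reaches the same child mocks.
df = session.read.format("jdbc").option("url", "jdbc:x").load()
assert df == "sentinel-df"

# assert_called_with verifies the most recent call on each mock in the chain.
session.read.format.assert_called_with("jdbc")
session.read.format().option.assert_called_with("url", "jdbc:x")
```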
13 | 13-145-86 | random | JDBC_DRIVER | [
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"__doc__",
"__file__",
"__name__",
"__package__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants. | , "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
13 | 13-150-131 | non_informative | JDBC_NUMPARTITIONS | [
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"__doc__",
"__file__",
"__name__",
"__package__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants. | , "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
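Illustrative aside (not a dataset record): several ground truths in this stretch (JDBC_DRIVER, JDBC_NUMPARTITIONS) are names drawn from the dataproc_templates.util.template_constants module listed in the completions. A hypothetical module of that shape is sketched below; the string values are assumed to be Spark's standard JDBC option keys and are not the repository's verbatim definitions.

```python
# Hypothetical template_constants-style module; names mirror the completion
# lists above, values are assumptions (Spark's standard JDBC option keys).
FORMAT_JDBC = "jdbc"
JDBC_URL = "url"
JDBC_DRIVER = "driver"
JDBC_TABLE = "dbtable"
JDBC_PARTITIONCOLUMN = "partitionColumn"
JDBC_LOWERBOUND = "lowerBound"
JDBC_UPPERBOUND = "upperBound"
JDBC_NUMPARTITIONS = "numPartitions"
OUTPUT_MODE_APPEND = "append"
CSV_HEADER = "header"

# Usage mirrors the masked lines in the records, e.g.:
#   spark.read.format(FORMAT_JDBC).option(JDBC_DRIVER, "org.postgresql.Driver")
```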
13 | 13-175-29 | common | run | [
"build",
"get_logger",
"parse_args",
"run",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template. | (mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
13 | 13-194-48 | inproject | parse_args | [
"build",
"get_logger",
"parse_args",
"run",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template. | (
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
14 | 14-32-43 | inproject | parse_args | [
"build",
"get_logger",
"parse_args",
"run",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template. | (
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
14 | 14-83-113 | random | JDBC_LOWERBOUND | [
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER",
"__doc__",
"__file__",
"__name__",
"__package__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants. | , "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
14 | 14-123-10 | random | patch | [
"absolute_import",
"ANY",
"asyncio",
"AsyncMagicMixin",
"AsyncMock",
"AsyncMockMixin",
"backports",
"Base",
"builtins",
"call",
"CallableMixin",
"CodeType",
"contextlib",
"create_autospec",
"DEFAULT",
"file_spec",
"FILTER_DIR",
"FunctionTypes",
"inplace",
"inspect",
"InvalidSpecError",
"io",
"IS_PYPY",
"iscoroutinefunction",
"magic_methods",
"MagicMixin",
"MagicMock",
"MagicProxy",
"MethodType",
"Mock",
"mock",
"mock_open",
"ModuleType",
"NonCallableMagicMock",
"NonCallableMock",
"numerics",
"open_spec",
"partial",
"patch",
"pprint",
"PropertyMock",
"re",
"removeprefix",
"right",
"RLock",
"safe_repr",
"seal",
"sentinel",
"sys",
"threading",
"ThreadingMixin",
"ThreadingMock",
"version_info",
"wraps",
"_all_magics",
"_all_sync_magics",
"_allowed_names",
"_ANY",
"_AnyComparer",
"_async_magics",
"_async_method_magics",
"_AsyncIterator",
"_ATTRIB_DENY_LIST",
"_builtins",
"_calculate_return_value",
"_Call",
"_callable",
"_CallList",
"_check_and_set_parent",
"_check_signature",
"_check_spec_arg_typos",
"_clear_dict",
"_CODE_ATTRS",
"_CODE_SIG",
"_copy_func_details",
"_delegating_property",
"_deleted",
"_dot_lookup",
"_extract_mock",
"_format_call_signature",
"_get_async_iter",
"_get_eq",
"_get_iter",
"_get_method",
"_get_ne",
"_get_signature_object",
"_get_target",
"_importer",
"_instance_callable",
"_is_async_func",
"_is_async_obj",
"_is_exception",
"_is_instance_mock",
"_is_list",
"_is_magic",
"_magics",
"_missing",
"_mock",
"_MockIter",
"_must_skip",
"_non_defaults",
"_patch",
"_patch_dict",
"_patch_multiple",
"_patch_object",
"_patch_stopall",
"_return_values",
"_safe_super",
"_Sentinel",
"_SentinelObject",
"_set_async_signature",
"_set_return_value",
"_set_signature",
"_setup_async_mock",
"_setup_func",
"_side_effect_methods",
"_SpecState",
"_sync_async_magics",
"_timeout_unset",
"_to_stream",
"_try_iter",
"_unsupported_magics",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__version__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock. | .object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
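The assertions in the row above lean on a property of mocking that is easy to miss: a MagicMock returns the same cached child mock for every access to a given attribute and for every call, regardless of arguments, so `spark.read.format()` written in the test resolves to the very object the template obtained as `spark.read.format("jdbc")`. That is why configuring `...load.return_value` before calling `run()` and then asserting on each `.option` link both touch the objects the code under test used, and why the assertion order in the tests (format first, then each option in turn) matters. The self-contained sketch below illustrates the pattern with a hypothetical `FakeReader` that is not from the repository; it uses the stdlib `unittest.mock`, whereas the tests above import the standalone `mock` package, which behaves identically for this purpose.

# Minimal sketch (hypothetical FakeReader, stdlib unittest.mock) of the
# chained-mock pattern used by the JDBCToGCSTemplate tests above.
from unittest import mock


class FakeReader:
    """Hypothetical stand-in for the template's read logic."""

    def read_table(self, spark, url, table):
        return (
            spark.read.format("jdbc")
            .option("url", url)
            .option("dbtable", table)
            .load()
        )


def test_read_table_builds_jdbc_chain():
    spark = mock.MagicMock()
    # format()/option() collapse onto the same cached child mocks, so this
    # return_value is exactly what read_table() hands back.
    spark.read.format().option().option().load.return_value = "df"

    result = FakeReader().read_table(spark, "jdbc:url", "table1")

    assert result == "df"
    # Assert in chain order: each later line re-walks the chain with no-arg
    # calls, so the innermost assertion must come first.
    spark.read.format.assert_called_with("jdbc")
    spark.read.format().option.assert_called_with("url", "jdbc:url")
    spark.read.format().option().option.assert_called_with("dbtable", "table1")


if __name__ == "__main__":
    test_read_table_builds_jdbc_chain()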
14 | 14-123-16 | random | object | [
"dict",
"multiple",
"object",
"stopall",
"TEST_PREFIX",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch. | (pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy.assert_called_once_with("column")
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().partitionBy().option().csv.assert_called_once_with("gs://test") | googlecloudplatform__dataproc-templates |
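Taken together, the assertions in these rows pin down the behaviour exercised in JDBCToGCSTemplate.run(): build a JDBC reader from the input options (adding the partition column and lower/upper bounds only when a partition column is supplied, and falling back to "10" partitions otherwise), load the DataFrame, then write it to the GCS location in the requested format and mode, with an optional partitionBy on the output. The sketch below reconstructs that flow from the assertions alone; it is an assumption-labelled approximation, not the repository's implementation, and the literal option names used here (Spark's "url", "dbtable", "numPartitions", etc.) may differ from the constants defined in dataproc_templates.util.template_constants.

# Hedged sketch of the flow the tests above exercise. NOT the repository's
# JDBCToGCSTemplate; argument keys, the "10" partition default, and the
# header=True CSV option are inferred from the test assertions only.
from pyspark.sql import SparkSession


def run_jdbc_to_gcs(spark: SparkSession, args: dict) -> None:
    reader = (
        spark.read.format("jdbc")
        .option("url", args["jdbctogcs.input.url"])
        .option("driver", args["jdbctogcs.input.driver"])
        .option("dbtable", args["jdbctogcs.input.table"])
    )
    # Partitioned reads are configured only when a partition column is given.
    if args.get("jdbctogcs.input.partitioncolumn"):
        reader = (
            reader.option("partitionColumn", args["jdbctogcs.input.partitioncolumn"])
            .option("lowerBound", args["jdbctogcs.input.lowerbound"])
            .option("upperBound", args["jdbctogcs.input.upperbound"])
        )
    reader = reader.option("numPartitions", args.get("jdbctogcs.numpartitions", "10"))
    df = reader.load()

    writer = df.write.mode(args["jdbctogcs.output.mode"])
    if args.get("jdbctogcs.output.partitioncolumn"):
        writer = writer.partitionBy(args["jdbctogcs.output.partitioncolumn"])

    out_format = args["jdbctogcs.output.format"]
    location = args["jdbctogcs.output.location"]
    if out_format == "parquet":
        writer.parquet(location)
    elif out_format == "avro":
        writer.format("avro").save(location)
    elif out_format == "csv":
        writer.option("header", True).csv(location)
    else:  # json
        writer.json(location)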