max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (string, 111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5) |
---|---|---|---|---|---|---|---|---|---|---|
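Each row below follows this schema: repository path and name, GitHub star count, a row id, the raw and cleaned file contents, the detected natural language of the comments with its confidence, the extracted comments, and an educational-value score (continuous and integer-binned). As a rough sketch of how such a table might be consumed with pandas (the file name `code_dataset.parquet` is an assumption, not part of the dataset):

```python
import pandas as pd

# Hypothetical export of the table below; the file name is an assumption.
df = pd.read_parquet("code_dataset.parquet")

# Keep rows whose comments were detected as English and that received an
# integer educational score of at least 2.
subset = df[(df["language"] == "en") & (df["edu_int_score"] >= 2)]
print(subset[["max_stars_repo_path", "max_stars_count", "edu_score"]].head())
```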
report_writer/report_writer.py | DoubleBridges/door-order-parser | 0 | 500 | from reportlab.lib.units import inch
from reportlab.platypus import SimpleDocTemplate, Spacer
from reportlab.rl_config import defaultPageSize
from reportlab.platypus.flowables import Flowable
def generate_order(job, path, door_style, doors=None, drawers=None):
    # Avoid mutable default arguments; fall back to fresh lists per call.
    doors = doors if doors is not None else []
    drawers = drawers if drawers is not None else []
PAGE_HEIGHT = defaultPageSize[1]
PAGE_WIDTH = defaultPageSize[0]
LEFT_MARGIN = 30
LINE_HEIGHT = 18
BACKGROUND_COLOR = (33 / 255, 80 / 255, 156 / 255)
CURSOR_HEIGHT = PAGE_HEIGHT - 60
INPUT_HEIGHT = LINE_HEIGHT - (LINE_HEIGHT * 0.1)
SPECIES = door_style.species
STYLE = door_style.name
INSIDE_PROFILE = door_style.inside_profile
OUTSIDE_PROFILE = door_style.outside_profile
TOTAL_DRS = len(doors)
TOTAL_DWRS = len(drawers)
def myFirstPage(c, doc):
cursor = CURSOR_HEIGHT
c.saveState()
c.setStrokeColorRGB(
BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]
)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(
LEFT_MARGIN, PAGE_HEIGHT - 40, PAGE_WIDTH - (LEFT_MARGIN * 2), 24, fill=1
)
c.setFillColorRGB(1, 1, 1)
c.setFont("Helvetica-Bold", 16)
c.drawCentredString(PAGE_WIDTH / 2.0, PAGE_HEIGHT - 34, "DOOR ORDER FORM")
c.setFont("Helvetica", 12)
c.setFillColorRGB(0, 0, 0)
c.drawString(LEFT_MARGIN, cursor, f"Customer : JS Designs Shop, LLC")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
f"Order Date : {job.order_date}",
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"PO # : {job.name}-{STYLE}-{SPECIES}")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Delivery Date : ASAP"
)
cursor -= LINE_HEIGHT
c.setFont("Helvetica-Bold", 12)
c.drawString(LEFT_MARGIN, cursor, f"Door Style : {STYLE}")
c.setFont("Helvetica", 12)
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Phone : 901-853-7568"
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Panel : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 40,
y=cursor - 4,
name="Panel",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 60,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Comments : ")
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Wood Type : {SPECIES}")
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Inside Profile : {INSIDE_PROFILE}")
# c.acroForm.textfield(
# x=LEFT_MARGIN + 78,
# y=cursor - 4,
# name="inside_profile",
# value=" N/A ",
# height=INPUT_HEIGHT,
# width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 98,
# borderWidth=0,
# # fillColor=([1, 1, 1]),
# relative=True,
# )
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Outside Profile : {OUTSIDE_PROFILE}")
# c.acroForm.textfield(
# x=LEFT_MARGIN + 88,
# y=cursor - 4,
# name="outside_profile",
# value=" N/A ",
# height=INPUT_HEIGHT,
# width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 108,
# borderWidth=0,
# # fillColor=([1, 1, 1]),
# relative=True,
# )
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Stile/Rails : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 62,
y=cursor - 4,
name="stiles_rails",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 82,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.setFont("Helvetica-Bold", 12)
c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f"Drawer Fronts : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 375,
y=cursor - 4,
name="drawer_fronts",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 92,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.setFont("Helvetica", 12)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Boring For Hinges : No")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f"Outside Profile : "
)
c.acroForm.textfield(
x=LEFT_MARGIN + 370,
y=cursor - 4,
name="out_profile",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 87,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Add Hinges : No")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
f" 5 PC Front: Slab:",
)
c.acroForm.textfield(
x=LEFT_MARGIN + 350,
y=cursor - 4,
name="5_pc_front",
value=" N/A ",
height=INPUT_HEIGHT,
width=30,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.acroForm.textfield(
x=LEFT_MARGIN + 430,
y=cursor - 4,
name="slab_front",
value=" N/A ",
height=INPUT_HEIGHT,
width=30,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
cursor -= 12
c.setFont("Times-Italic", 10)
c.drawString(
LEFT_MARGIN,
cursor,
f"Boring not available in arched doors, applied mould doors",
)
cursor -= 10
c.drawString(
LEFT_MARGIN,
cursor,
f"and raised bead profile mitered doors",
)
cursor -= 14
c.setFont("Times-BoldItalic", 12)
c.drawString(
LEFT_MARGIN, cursor, f'Cullman will not bore any door with 2" stiles'
)
cursor -= 20
c.setFont("Helvetica-Bold", 14)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, f"Total Doors: {TOTAL_DRS}")
c.drawCentredString(
((PAGE_WIDTH / 4) * 3) + 10, cursor, f"Total Drawer Fronts: {TOTAL_DWRS}"
)
cursor -= 24
c.setStrokeColorRGB(0, 0, 0)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)
c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1)
c.setFont("Helvetica-Bold", 12)
c.setFillColorRGB(1, 1, 1)
string_center = LEFT_MARGIN + 68
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
string_center += 155
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
c.setFont("Helvetica", 9)
c.setFillColorRGB(0, 0, 0)
c.drawCentredString(
PAGE_WIDTH / 2, 40, f"Page 1 of {job.name}-{STYLE}-{SPECIES}"
)
c.drawCentredString(
PAGE_WIDTH / 2,
20,
'Reminder : Any doors 46" and over in height will automatically receive a horizontal center rail unless otherwise noted.',
)
c.restoreState()
def myLaterPages(c, doc):
cursor = PAGE_HEIGHT - 54
c.saveState()
c.setFont("Helvetica-Bold", 14)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, "Doors")
c.drawCentredString(((PAGE_WIDTH / 4) * 3) + 10, cursor, "Drawer Fronts")
cursor -= 24
c.setStrokeColorRGB(0, 0, 0)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)
c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1)
c.setFont("Helvetica-Bold", 12)
c.setFillColorRGB(1, 1, 1)
string_center = LEFT_MARGIN + 68
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
string_center += 155
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
c.setFont("Helvetica", 9)
c.setFillColorRGB(0, 0, 0)
c.drawCentredString(
PAGE_WIDTH / 2, 40, f"Page {doc.page} of {job.name}-{STYLE}-{SPECIES}"
)
c.drawCentredString(
PAGE_WIDTH / 2,
20,
'Reminder : Any doors 46" and over in height will automatically receive a horizontal center rail unless otherwise noted.',
)
c.restoreState()
class OrderEntry(Flowable):
"""Draws table entry for each item in list of door sizes."""
def __init__(
self,
xoffset=0,
height=20,
dr_qty="",
dr_size="",
dwr_qty="",
dwr_size="",
index=0,
):
Flowable.__init__(self)
self.dr_qty = dr_qty
self.dr_size = dr_size
self.dwr_qty = dwr_qty
self.dwr_size = dwr_size
self.index = index
self.height = height
self.idx_box_x = xoffset
self.idx_box_width = 40
self.string_center = xoffset + (self.idx_box_width / 2)
self.qty_box_x = self.idx_box_width + xoffset
self.qty_box_width = 60
self.size_box_x = self.qty_box_width - 10
self.size_box_width = 170
self.second_column_offset = 270
def draw(self):
# Door
self.canv.setStrokeColorRGB(0, 0, 0)
self.canv.setFillColorRGB(
BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]
)
self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height, fill=1)
self.canv.setFillColorRGB(1, 1, 1)
self.canv.setFont("Helvetica", 12)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, str(self.index)
)
self.canv.setFillColorRGB(0, 0, 0)
self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height)
self.string_center += (self.idx_box_width / 2) + (self.qty_box_width / 2)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dr_qty
)
self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height)
self.string_center += (self.qty_box_width / 2) + (self.size_box_width / 2)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dr_size
)
# Drawer
if self.dwr_qty != "" and self.dwr_size != "":
self.canv.rect(
self.second_column_offset + self.qty_box_x,
0,
self.qty_box_width,
self.height,
)
self.string_center += 155
self.canv.drawCentredString(
self.string_center,
0.25 * self.height,
self.dwr_qty,
)
self.canv.rect(
self.second_column_offset + self.size_box_x,
0,
self.size_box_width,
self.height,
)
self.string_center += (self.qty_box_width / 2) + (
self.size_box_width / 2
)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dwr_size
)
def build_pdf(path, name, door_list, drawer_list):
doc = SimpleDocTemplate(f"{path}/{name}-{STYLE}.pdf")
Story = [Spacer(1, 3.11 * inch)]
num_of_doors = len(door_list)
num_of_drawers = len(drawer_list)
num_of_entries = max(num_of_doors, num_of_drawers)
for i in range(0, num_of_entries):
try:
door_qty, door_size = door_list[i]["qty"], door_list[i]["size"]
except IndexError:
door_qty, door_size = "", ""
try:
drawer_qty, drawer_size = drawer_list[i]["qty"], drawer_list[i]["size"]
except IndexError:
drawer_qty, drawer_size = "", ""
p = OrderEntry(
xoffset=-50,
dr_qty=door_qty,
dr_size=door_size,
dwr_qty=drawer_qty,
dwr_size=drawer_size,
index=i + 1,
)
Story.append(p)
doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)
build_pdf(path, job.name, doors, drawers)
| en | 0.33643 | | 2.417624 | 2 |
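For reference, `generate_order` above only needs a few attributes on its `job` and `door_style` arguments, plus door and drawer lists of `{"qty", "size"}` dicts. A minimal invocation sketch; the `SimpleNamespace` stand-ins and every value are hypothetical, not taken from the project:

```python
from types import SimpleNamespace

job = SimpleNamespace(name="JOB-101", order_date="2020-01-15")
door_style = SimpleNamespace(
    species="Maple",
    name="Shaker",
    inside_profile="Square",
    outside_profile="Square",
)
doors = [{"qty": "2", "size": '14 3/4" X 24 1/2"'}]
drawers = [{"qty": "1", "size": '14 3/4" X 5 3/4"'}]

# Writes ./JOB-101-Shaker.pdf via the nested build_pdf helper.
generate_order(job, ".", door_style, doors=doors, drawers=drawers)
```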
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py | s2t2/tweet-analyzer-py | 5 | 501 |
from app.bq_service import BigQueryService
if __name__ == "__main__":
bq_service = BigQueryService()
bq_service.migrate_daily_bot_probabilities_table()
print("MIGRATION SUCCESSFUL!")
| none | 1 | 1.636442 | 2
|
colosseum/mdps/minigrid_doorkey/minigrid_doorkey.py | MichelangeloConserva/Colosseum | 0 | 502 |
from copy import deepcopy
from dataclasses import asdict, dataclass
from enum import IntEnum
from colosseum.utils.random_vars import deterministic, get_dist
try:
from functools import cached_property
except ImportError:
from backports.cached_property import cached_property
from typing import Any, Dict, List, Tuple, Type, Union
import numpy as np
from scipy.stats import beta, rv_continuous
from colosseum.mdps import MDP
from colosseum.mdps.base_mdp import NextStateSampler
from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous
from colosseum.utils.mdps import check_distributions
class MiniGridDoorKeyAction(IntEnum):
"""The action available in the MiniGridDoorKey MDP."""
MoveForward = 0
TurnRight = 1
TurnLeft = 2
PickObject = 3
DropObject = 4
UseObject = 5
class MiniGridDoorKeyDirection(IntEnum):
"""The possible agent direction in the MiniGridDoorKey MDP."""
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
@dataclass(frozen=True)
class MiniGridDoorKeyNode:
X: int
Y: int
Dir: MiniGridDoorKeyDirection
XKey: int
YKey: int
IsDoorOpened: bool
def __str__(self):
return f"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}"
class MiniGridDoorKeyMDP(MDP):
@staticmethod
def testing_parameters() -> Dict[str, Tuple]:
t_params = MDP.testing_parameters()
t_params["size"] = (3, 5, 7)
t_params["make_reward_stochastic"] = (True, False)
t_params["n_starting_states"] = (1, 4)
return t_params
@staticmethod
def get_node_class() -> Type[MiniGridDoorKeyNode]:
return MiniGridDoorKeyNode
def __init__(
self,
seed: int,
size: int,
randomize_actions: bool = True,
lazy: float = None,
make_reward_stochastic=False,
n_starting_states: int = 2,
optimal_distribution: Union[Tuple, rv_continuous] = None,
other_distribution: Union[Tuple, rv_continuous] = None,
**kwargs,
):
"""
Parameters
----------
seed : int
the seed used for sampling rewards and next states.
randomize_actions : bool, optional
whether the effect of the actions changes for every node. It is particularly important to set this value to
true when doing experiments to avoid immediately reaching highly rewarding states in some MDPs by just
selecting the same action repeatedly. By default, it is set to true.
lazy : float
the probability of an action not producing any effect on the MDP.
size : int
the size of the grid.
make_reward_stochastic : bool, optional
checks whether the rewards are to be made stochastic. By default, it is set to False.
n_starting_states : int, optional
the number of states in the starting distribution. By default, it is set to two.
optimal_distribution : Union[Tuple, rv_continuous], optional
The distribution of the highly rewarding state. It can be either passed as a tuple containing Beta parameters
or as a rv_continuous object.
other_distribution : Union[Tuple, rv_continuous]
The distribution of the non highly rewarding states. It can be either passed as a tuple containing Beta parameters
or as a rv_continuous object.
"""
if type(optimal_distribution) == tuple:
optimal_distribution = get_dist(
optimal_distribution[0], optimal_distribution[1:]
)
if type(other_distribution) == tuple:
other_distribution = get_dist(other_distribution[0], other_distribution[1:])
self.n_starting_states = n_starting_states
self.size = size
self.make_reward_stochastic = make_reward_stochastic
dists = [
optimal_distribution,
other_distribution,
]
if dists.count(None) == 0:
self.optimal_distribution = optimal_distribution
self.other_distribution = other_distribution
else:
if make_reward_stochastic:
self.other_distribution = beta(1, size ** 2 - 1)
self.optimal_distribution = beta(size ** 2 - 1, 1)
else:
self.optimal_distribution = deterministic(1.0)
self.other_distribution = deterministic(0.0)
super().__init__(
seed=seed,
randomize_actions=randomize_actions,
lazy=lazy,
**kwargs,
)
@property
def parameters(self) -> Dict[str, Any]:
return {
**super(MiniGridDoorKeyMDP, self).parameters,
**dict(
size=self.size,
n_starting_states=self.n_starting_states,
optimal_distribution=self.optimal_distribution,
other_distribution=self.other_distribution,
),
}
@property
def possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]:
return self._possible_starting_nodes
@cached_property
def coordinates_available(self):
coords = (
MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0))
.ravel()
.tolist()
)
for i in range(self.size):
if self.is_wall_horizontal:
coords.remove((i, self.wall_position))
else:
coords.remove((self.wall_position, i))
return tuple(coords)
@property
def num_actions(self):
return len(MiniGridDoorKeyAction)
def _calculate_next_nodes_prms(
self, node: MiniGridDoorKeyNode, action: int
) -> Tuple[Tuple[dict, float], ...]:
newnode_prms = deepcopy(asdict(node))
if action == MiniGridDoorKeyAction.TurnRight:
newnode_prms["Dir"] = (node.Dir + 1) % 4
if action == MiniGridDoorKeyAction.TurnLeft:
newnode_prms["Dir"] = (node.Dir - 1) % 4
if action == MiniGridDoorKeyAction.MoveForward:
if node.Dir == MiniGridDoorKeyDirection.UP:
next_coord = (node.X, node.Y + 1)
if node.Dir == MiniGridDoorKeyDirection.RIGHT:
next_coord = node.X + 1, node.Y
if node.Dir == MiniGridDoorKeyDirection.DOWN:
next_coord = node.X, node.Y - 1
if node.Dir == MiniGridDoorKeyDirection.LEFT:
next_coord = node.X - 1, node.Y
if next_coord in self.coordinates_available or (
node.IsDoorOpened and next_coord == self.door_position
):
newnode_prms["X"], newnode_prms["Y"] = next_coord
if action == MiniGridDoorKeyAction.PickObject:
if node.X == node.XKey and node.Y == node.YKey:
newnode_prms["XKey"] = newnode_prms["YKey"] = -1
if node.XKey == -1 and not node.IsDoorOpened:
if action == MiniGridDoorKeyAction.DropObject:
newnode_prms["XKey"] = node.X
newnode_prms["YKey"] = node.Y
if action == MiniGridDoorKeyAction.UseObject:
if node.Dir == MiniGridDoorKeyDirection.UP:
next_coord = (node.X, node.Y + 1)
if node.Dir == MiniGridDoorKeyDirection.RIGHT:
next_coord = node.X + 1, node.Y
if node.Dir == MiniGridDoorKeyDirection.DOWN:
next_coord = node.X, node.Y - 1
if node.Dir == MiniGridDoorKeyDirection.LEFT:
next_coord = node.X - 1, node.Y
if next_coord == self.door_position:
newnode_prms["IsDoorOpened"] = True
return ((newnode_prms, 1.0),)
def _calculate_reward_distribution(
self, node: Any, action: IntEnum, next_node: Any
) -> rv_continuous:
return (
self.optimal_distribution
if next_node.X == self.goal_position[0]
and next_node.Y == self.goal_position[1]
else self.other_distribution
)
def _check_input_parameters(self):
super(MiniGridDoorKeyMDP, self)._check_input_parameters()
assert self.size >= 3
check_distributions(
[
self.optimal_distribution,
self.other_distribution,
],
self.make_reward_stochastic,
)
def _instantiate_starting_node_sampler(self) -> NextStateSampler:
# noinspection PyAttributeOutsideInit
self.wall_position = self._rng.randint(self.size - 2) + 1
# noinspection PyAttributeOutsideInit
self.is_wall_horizontal = self._rng.rand() > 0.5
if self.is_wall_horizontal:
self.door_position = self._rng.randint(self.size), self.wall_position
else:
self.door_position = self.wall_position, self._rng.randint(self.size)
self.is_goal_before = self._rng.rand() > 0.5
coords = MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0))
goal_positions = []
starting_positions = []
for i, j in coords.ravel():
if (
i < self.wall_position
if self.is_goal_before
else i > self.wall_position
):
goal_positions.append((j, i) if self.is_wall_horizontal else (i, j))
elif (
i > self.wall_position
if self.is_goal_before
else i < self.wall_position
):
starting_positions.append((j, i) if self.is_wall_horizontal else (i, j))
possible_starting_positions = deepcopy(starting_positions)
self._rng.shuffle(goal_positions)
self.goal_position = goal_positions[0]
self._rng.shuffle(starting_positions)
self.start_key_position = starting_positions.pop(0)
starting_positions = [
(x, y, dir)
for x, y in starting_positions
for dir in MiniGridDoorKeyDirection
]
assert self.n_starting_states < len(starting_positions)
self._possible_starting_nodes = [
MiniGridDoorKeyNode(
x,
y,
dir.value,
*self.start_key_position,
False,
)
for x, y, dir in starting_positions
]
return NextStateSampler(
next_states=self._possible_starting_nodes[: self.n_starting_states],
probs=[1 / self.n_starting_states for _ in range(self.n_starting_states)],
seed=self._next_seed(),
)
def calc_grid_repr(self, node: Any) -> np.array:
grid_size = self.size
door_position = self.door_position
wall_position = self.wall_position
is_wall_horizontal = self.is_wall_horizontal
grid = np.zeros((grid_size, grid_size), dtype=str)
grid[:, :] = " "
grid[self.goal_position[1], self.goal_position[0]] = "G"
if self.cur_node.XKey != -1:
grid[self.cur_node.YKey, self.cur_node.XKey] = "K"
for i in range(grid_size):
if not is_wall_horizontal:
grid[i, wall_position] = "W_en"
else:
grid[wall_position, i] = "W_en"
grid[door_position[1], door_position[0]] = (
"O" if self.cur_node.IsDoorOpened else "C"
)
if self.cur_node.Dir == MiniGridDoorKeyDirection.UP:
grid[self.cur_node.Y, self.cur_node.X] = "^"
elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT:
grid[self.cur_node.Y, self.cur_node.X] = ">"
elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN:
grid[self.cur_node.Y, self.cur_node.X] = "v"
elif self.cur_node.Dir == MiniGridDoorKeyDirection.LEFT:
grid[self.cur_node.Y, self.cur_node.X] = "<"
return grid[::-1, :]
| en | 0.790204 | | 2.187283 | 2 |
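A minimal instantiation sketch based on the constructor documented above; the parameter values are illustrative, and the base `MDP` class may expect additional keyword arguments not visible in this file:

```python
mdp = MiniGridDoorKeyMDP(
    seed=42,                      # seed for reward and next-state sampling
    size=5,                       # 5x5 grid split by a random wall with a door
    make_reward_stochastic=True,  # Beta-distributed rewards instead of 0/1
    n_starting_states=2,
)
print(mdp.num_actions)  # 6, one per MiniGridDoorKeyAction
print(mdp.parameters)   # includes size, n_starting_states and both reward distributions
```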
tests/bugs/core_6266_test.py | reevespaul/firebird-qa | 0 | 503 |
#coding:utf-8
#
# id: bugs.core_6266
# title: Deleting records from MON$ATTACHMENTS using ORDER BY clause doesn't close the corresponding attachments
# decription:
# Old title: Don't close attach while deleting record from MON$ATTACHMENTS using ORDER BY clause.
# Confirmed bug on 3.0.6.33271.
# Checked on 3.0.6.33272 (SS/CS) - works fine.
# 22.04.2020. Checked separately on 4.0.0.1931 SS/CS: all OK. FB 4.0 can also be tested since this build.
#
# tracker_id: CORE-6266
# min_versions: ['3.0.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# import os
# import sys
# import time
# import fdb
#
# ATT_CNT=5
# ATT_DELAY=1
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = <PASSWORD>
#
# db_conn.close()
#
# con_list={}
# for i in range(0, ATT_CNT):
# if i > 0:
# time.sleep( ATT_DELAY )
#
# c = fdb.connect(dsn = dsn)
# a = c.attachment_id
# con_list[ i ] = (a, c)
# # print('created attachment ', (a,c) )
#
# con_admin = con_list[0][1]
#
# #print(con_admin.firebird_version)
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection order by mon$timestamp')
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection order by mon$timestamp')
#
# # This DOES NOT remove all attachments (only 'last' in order of timestamp), but
# # DELETE statement must NOT contain phrase 'mon$attachment_id != current_connection':
# con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 order by mon$timestamp')
#
# con_admin.commit()
#
# cur_admin = con_admin.cursor()
# cur_admin.execute('select mon$attachment_id,mon$user from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection' )
# i=0
# for r in cur_admin:
# print( '### ACHTUNG ### STILL ALIVE ATTACHMENT DETECTED: ', r[0], r[1].strip(), '###' )
# i += 1
# print('Number of attachments that remains alive: ',i)
#
# cur_admin.close()
#
# #print('Final cleanup before quit from Python.')
#
# for k,v in sorted( con_list.items() ):
# #print('attempt to close attachment ', v[0] )
# try:
# v[1].close()
# #print('done.')
# except Exception as e:
# pass
# #print('Got exception:', sys.exc_info()[0])
# #print(e[0])
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Number of attachments that remains alive: 0
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| en | 0.66344 | | 1.345458 | 1 |
scripts/senate_crawler.py | tompsh/tompsh.github.io | 0 | 504 | from bs4 import BeautifulSoup
import logging
import pandas as pd
import requests
logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
def get_html(url):
return requests.get(url).text
class SenateCrawler:
def __init__(self):
self.base_url = "https://www25.senado.leg.br/"
self.search_url = self.base_url + "web/senadores/em-exercicio/-/e/por-nome"
self.senate = []
def get_senate(self, url):
soup = BeautifulSoup(get_html(self.search_url), "html.parser")
trs = soup.find("table").find("tbody").find_all("tr")
for tr in trs:
cells = tr.find_all("td")
senateperson = {
"name": cells[0].get_text(),
"party": cells[1].get_text(),
"email": cells[5].get_text(),
}
if senateperson["email"]:
self.senate.append(senateperson)
def run(self):
try:
self.get_senate(self.search_url)
except Exception:
logging.exception("global failure")
finally:
df = pd.DataFrame(self.senate)
df.to_csv("senate.csv")
logging.info("program exited")
| none | 1 | 3.020066 | 3 |
|
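The crawler above is self-contained; a typical entry point (not present in the original file) would be:

```python
if __name__ == "__main__":
    SenateCrawler().run()  # scrapes the senator list and writes senate.csv
```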
backend-project/small_eod/autocomplete/tests/test_views.py | merito/small_eod | 64 | 505 | from test_plus.test import TestCase
from ...administrative_units.factories import AdministrativeUnitFactory
from ...cases.factories import CaseFactory
from ...channels.factories import ChannelFactory
from ...events.factories import EventFactory
from ...features.factories import FeatureFactory, FeatureOptionFactory
from ...generic.tests.test_views import ReadOnlyViewSetMixin
from ...institutions.factories import InstitutionFactory
from ...letters.factories import DocumentTypeFactory, ReferenceNumberFactory
from ...search.tests.mixins import SearchQueryMixin
from ...tags.factories import TagFactory
from ...users.factories import UserFactory
class AdministrativeUnitAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_administrative_unit"
factory_class = AdministrativeUnitFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_case"
factory_class = CaseFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class ChannelAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_channel"
factory_class = ChannelFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class DocumentTypeAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_document_type"
factory_class = DocumentTypeFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class ReferenceNumberAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_reference_number"
factory_class = ReferenceNumberFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class EventAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_event"
factory_class = EventFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class FeatureAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_feature"
factory_class = FeatureFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class FeatureOptionAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_feature_option"
factory_class = FeatureOptionFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class InstitutionAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_institution"
factory_class = InstitutionFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_tag"
factory_class = TagFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_user"
factory_class = UserFactory
initial_count = 1
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["username"], self.obj.username)
| from test_plus.test import TestCase
from ...administrative_units.factories import AdministrativeUnitFactory
from ...cases.factories import CaseFactory
from ...channels.factories import ChannelFactory
from ...events.factories import EventFactory
from ...features.factories import FeatureFactory, FeatureOptionFactory
from ...generic.tests.test_views import ReadOnlyViewSetMixin
from ...institutions.factories import InstitutionFactory
from ...letters.factories import DocumentTypeFactory, ReferenceNumberFactory
from ...search.tests.mixins import SearchQueryMixin
from ...tags.factories import TagFactory
from ...users.factories import UserFactory
class AdministrativeUnitAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_administrative_unit"
factory_class = AdministrativeUnitFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class CaseAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_case"
factory_class = CaseFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class ChannelAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_channel"
factory_class = ChannelFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class DocumentTypeAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_document_type"
factory_class = DocumentTypeFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class ReferenceNumberAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_reference_number"
factory_class = ReferenceNumberFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class EventAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_event"
factory_class = EventFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class FeatureAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_feature"
factory_class = FeatureFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class FeatureOptionAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_feature_option"
factory_class = FeatureOptionFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class InstitutionAutocompleteViewSetTestCase(
ReadOnlyViewSetMixin, SearchQueryMixin, TestCase
):
basename = "autocomplete_institution"
factory_class = InstitutionFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class TagAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_tag"
factory_class = TagFactory
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["name"], self.obj.name)
class UserAutocompleteViewSetTestCase(ReadOnlyViewSetMixin, SearchQueryMixin, TestCase):
basename = "autocomplete_user"
factory_class = UserFactory
initial_count = 1
def validate_item(self, item):
self.assertEqual(item["id"], self.obj.id)
self.assertEqual(item["username"], self.obj.username)
| none | 1 | 2.099076 | 2 |
|
src/lennybot/model/plan.py | raynigon/lenny-bot | 1 | 506 | <filename>src/lennybot/model/plan.py
from typing import Any, List
from ..actions.iaction import IAction
from ..model.state import LennyBotState
class LennyBotPlan:
def __init__(self, state: LennyBotState, actions: List[IAction]) -> None:
self._state = state
self._actions = actions
@property
def applications(self) -> List[str]:
result = []
for action in self._actions:
result.append(action.application)
return list(set(result))
@property
def actions(self) -> List[IAction]:
return self._actions
@property
def state(self) -> LennyBotState:
return self._state
def source_version(self, application: str) -> str:
for action in self._actions:
if action.application != application:
continue
return action.source_version
return None
def target_version(self, application: str) -> str:
for action in self._actions:
if action.application != application:
continue
return action.target_version
return None
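# --- Usage sketch (added for illustration) ---
# A minimal illustration of how a plan is queried. DummyAction and the version
# strings are hypothetical stand-ins for a real IAction implementation; they are
# not part of the lennybot codebase.
if __name__ == "__main__":
    class DummyAction:
        application = "grafana"
        source_version = "9.0.0"
        target_version = "9.1.0"

    plan = LennyBotPlan(state=None, actions=[DummyAction()])
    print(plan.applications)               # ['grafana']
    print(plan.source_version("grafana"))  # '9.0.0'
    print(plan.target_version("grafana"))  # '9.1.0'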
| <filename>src/lennybot/model/plan.py
from typing import Any, List
from ..actions.iaction import IAction
from ..model.state import LennyBotState
class LennyBotPlan:
def __init__(self, state: LennyBotState, actions: List[IAction]) -> None:
self._state = state
self._actions = actions
@property
def applications(self) -> List[str]:
result = []
for action in self._actions:
result.append(action.application)
return list(set(result))
@property
def actions(self) -> List[IAction]:
return self._actions
@property
def state(self) -> LennyBotState:
return self._state
def source_version(self, application: str) -> str:
for action in self._actions:
if action.application != application:
continue
return action.source_version
return None
def target_version(self, application: str) -> str:
for action in self._actions:
if action.application != application:
continue
return action.target_version
return None
| none | 1 | 2.312236 | 2 |
|
laceworksdk/api/container_registries.py | kiddinn/python-sdk | 10 | 507 | # -*- coding: utf-8 -*-
"""
Lacework Container Registries API wrapper.
"""
import logging
logger = logging.getLogger(__name__)
class ContainerRegistriesAPI(object):
"""
Lacework Container Registries API.
"""
def __init__(self, session):
"""
Initializes the ContainerRegistriesAPI object.
:param session: An instance of the HttpSession class
:return ContainerRegistriesAPI object.
"""
super(ContainerRegistriesAPI, self).__init__()
self._session = session
def create(self,
name,
type,
enabled,
data,
org=False):
"""
A method to create a new container registry.
:param name: A string representing the container registry name.
:param type: A string representing the container registry type.
:param enabled: A boolean/integer representing whether the container registry is enabled.
(0 or 1)
:param data: A JSON object matching the schema for the specified type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Creating container registry in Lacework...")
# Build the Container Registries request URI
api_uri = "/api/v2/ContainerRegistries"
data = {
"name": name,
"type": type,
"enabled": int(bool(enabled)),
"data": data
}
response = self._session.post(api_uri, org=org, data=data)
return response.json()
def get(self,
guid=None,
type=None,
org=False):
"""
A method to get all container registries.
:param guid: A string representing the container registry GUID.
:param type: A string representing the container registry type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Getting container registry info from Lacework...")
# Build the Container Registries request URI
if guid:
api_uri = f"/api/v2/ContainerRegistries/{guid}"
elif type:
api_uri = f"/api/v2/ContainerRegistries/{type}"
else:
api_uri = "/api/v2/ContainerRegistries"
response = self._session.get(api_uri, org=org)
return response.json()
def get_by_type(self,
type,
org=False):
"""
A method to get all container registries by type.
:param type: A string representing the container registry type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
return self.get(type=type, org=org)
def get_by_guid(self,
guid,
org=False):
"""
        A method to get a container registry by GUID.
:param guid: A string representing the container registry GUID.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
return self.get(guid=guid, org=org)
def search(self,
query_data=None,
org=False):
"""
A method to search container registries.
:param query_data: A dictionary containing the desired search parameters.
(filters, returns)
:return response json
"""
logger.info("Searching container registries from Lacework...")
# Build the Container Registries request URI
api_uri = "/api/v2/ContainerRegistries/search"
response = self._session.post(api_uri, data=query_data, org=org)
return response.json()
def update(self,
guid,
name=None,
type=None,
enabled=None,
data=None,
org=False):
"""
        A method to update a container registry.
:param guid: A string representing the container registry GUID.
:param name: A string representing the container registry name.
:param type: A string representing the container registry type.
:param enabled: A boolean/integer representing whether the container registry is enabled.
(0 or 1)
:param data: A JSON object matching the schema for the specified type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Updating container registry in Lacework...")
# Build the Container Registries request URI
api_uri = f"/api/v2/ContainerRegistries/{guid}"
tmp_data = {}
if name:
tmp_data["name"] = name
if type:
tmp_data["type"] = type
if enabled is not None:
tmp_data["enabled"] = int(bool(enabled))
if data:
tmp_data["data"] = data
response = self._session.patch(api_uri, org=org, data=tmp_data)
return response.json()
def delete(self,
guid,
org=False):
"""
        A method to delete a container registry.
:param guid: A string representing the container registry GUID.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Deleting container registry in Lacework...")
# Build the Container Registries request URI
api_uri = f"/api/v2/ContainerRegistries/{guid}"
response = self._session.delete(api_uri, org=org)
if response.status_code == 204:
return response
else:
return response.json()
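# --- Usage sketch (added for illustration) ---
# A minimal sketch of how this wrapper might be called. `session` stands in for an
# already-authenticated laceworksdk HttpSession; the registry type string and the
# empty search payload below are hypothetical examples, not values mandated by the API.
def _example_usage(session):
    registries = ContainerRegistriesAPI(session)
    all_registries = registries.get()                # GET /api/v2/ContainerRegistries
    by_type = registries.get_by_type("ContVulnCfg")  # hypothetical registry type
    hits = registries.search({"filters": [], "returns": []})
    return all_registries, by_type, hits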
| # -*- coding: utf-8 -*-
"""
Lacework Container Registries API wrapper.
"""
import logging
logger = logging.getLogger(__name__)
class ContainerRegistriesAPI(object):
"""
Lacework Container Registries API.
"""
def __init__(self, session):
"""
Initializes the ContainerRegistriesAPI object.
:param session: An instance of the HttpSession class
:return ContainerRegistriesAPI object.
"""
super(ContainerRegistriesAPI, self).__init__()
self._session = session
def create(self,
name,
type,
enabled,
data,
org=False):
"""
A method to create a new container registry.
:param name: A string representing the container registry name.
:param type: A string representing the container registry type.
:param enabled: A boolean/integer representing whether the container registry is enabled.
(0 or 1)
:param data: A JSON object matching the schema for the specified type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Creating container registry in Lacework...")
# Build the Container Registries request URI
api_uri = "/api/v2/ContainerRegistries"
data = {
"name": name,
"type": type,
"enabled": int(bool(enabled)),
"data": data
}
response = self._session.post(api_uri, org=org, data=data)
return response.json()
def get(self,
guid=None,
type=None,
org=False):
"""
A method to get all container registries.
:param guid: A string representing the container registry GUID.
:param type: A string representing the container registry type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Getting container registry info from Lacework...")
# Build the Container Registries request URI
if guid:
api_uri = f"/api/v2/ContainerRegistries/{guid}"
elif type:
api_uri = f"/api/v2/ContainerRegistries/{type}"
else:
api_uri = "/api/v2/ContainerRegistries"
response = self._session.get(api_uri, org=org)
return response.json()
def get_by_type(self,
type,
org=False):
"""
A method to get all container registries by type.
:param type: A string representing the container registry type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
return self.get(type=type, org=org)
def get_by_guid(self,
guid,
org=False):
"""
        A method to get a container registry by GUID.
:param guid: A string representing the container registry GUID.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
return self.get(guid=guid, org=org)
def search(self,
query_data=None,
org=False):
"""
A method to search container registries.
:param query_data: A dictionary containing the desired search parameters.
(filters, returns)
:return response json
"""
logger.info("Searching container registries from Lacework...")
# Build the Container Registries request URI
api_uri = "/api/v2/ContainerRegistries/search"
response = self._session.post(api_uri, data=query_data, org=org)
return response.json()
def update(self,
guid,
name=None,
type=None,
enabled=None,
data=None,
org=False):
"""
        A method to update a container registry.
:param guid: A string representing the container registry GUID.
:param name: A string representing the container registry name.
:param type: A string representing the container registry type.
:param enabled: A boolean/integer representing whether the container registry is enabled.
(0 or 1)
:param data: A JSON object matching the schema for the specified type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Updating container registry in Lacework...")
# Build the Container Registries request URI
api_uri = f"/api/v2/ContainerRegistries/{guid}"
tmp_data = {}
if name:
tmp_data["name"] = name
if type:
tmp_data["type"] = type
if enabled is not None:
tmp_data["enabled"] = int(bool(enabled))
if data:
tmp_data["data"] = data
response = self._session.patch(api_uri, org=org, data=tmp_data)
return response.json()
def delete(self,
guid,
org=False):
"""
        A method to delete a container registry.
:param guid: A string representing the container registry GUID.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Deleting container registry in Lacework...")
# Build the Container Registries request URI
api_uri = f"/api/v2/ContainerRegistries/{guid}"
response = self._session.delete(api_uri, org=org)
if response.status_code == 204:
return response
else:
return response.json()
| en | 0.67759 | # -*- coding: utf-8 -*- Lacework Container Registries API wrapper. Lacework Container Registries API. Initializes the ContainerRegistriesAPI object. :param session: An instance of the HttpSession class :return ContainerRegistriesAPI object. A method to create a new container registry. :param name: A string representing the container registry name. :param type: A string representing the container registry type. :param enabled: A boolean/integer representing whether the container registry is enabled. (0 or 1) :param data: A JSON object matching the schema for the specified type. :param org: A boolean representing whether the request should be performed at the Organization level :return response json # Build the Container Registries request URI A method to get all container registries. :param guid: A string representing the container registry GUID. :param type: A string representing the container registry type. :param org: A boolean representing whether the request should be performed at the Organization level :return response json # Build the Container Registries request URI A method to get all container registries by type. :param type: A string representing the container registry type. :param org: A boolean representing whether the request should be performed at the Organization level :return response json A method to get all container registries. :param guid: A string representing the container registry GUID. :param org: A boolean representing whether the request should be performed at the Organization level :return response json A method to search container registries. :param query_data: A dictionary containing the desired search parameters. (filters, returns) :return response json # Build the Container Registries request URI A method to update an container registry. :param guid: A string representing the container registry GUID. :param name: A string representing the container registry name. :param type: A string representing the container registry type. :param enabled: A boolean/integer representing whether the container registry is enabled. (0 or 1) :param data: A JSON object matching the schema for the specified type. :param org: A boolean representing whether the request should be performed at the Organization level :return response json # Build the Container Registries request URI A method to delete an container registry. :param guid: A string representing the container registry GUID. :param org: A boolean representing whether the request should be performed at the Organization level :return response json # Build the Container Registries request URI | 2.374622 | 2 |
mllib/nlp/seq2seq.py | pmaxit/dlnotebooks | 0 | 508 | <reponame>pmaxit/dlnotebooks<filename>mllib/nlp/seq2seq.py<gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_seq2seq.ipynb (unless otherwise specified).
__all__ = ['Encoder', 'NewDecoder', 'Seq2Seq']
# Cell
from torch import nn
from torch import optim
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# Cell
class Encoder(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers=2, p=0.1):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = nn.Dropout(p)
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False)
def forward(self, x, x_len):
# x shape (seq_length, N)
embedding = self.dropout(self.embedding(x))
# embedding shape : (seq_length, N, embedding_size)
x_packed = pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False)
output_packed, (hidden,cell) = self.rnn(x_packed)
# irrelevant because we are interested only in hidden state
#output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True)
# output is irrelevant, context vector is important
return hidden,cell
# Cell
class NewDecoder(nn.Module):
def __init__(self, hidden_size, embedding_size, output_size, n_layers=1, dropout_p=0.1):
super(NewDecoder, self).__init__()
# Define parameters
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers =n_layers
self.dropout_p = dropout_p
# Define layers
self.embedding = nn.Embedding(output_size, embedding_size)
self.dropout=nn.Dropout(dropout_p)
self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p, batch_first=False)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, word_input, last_hidden, encoder_outputs):
# Note that we will only be running forward for a single decoder time step, but will
# use all encoder outputs
word_input = word_input.unsqueeze(0)
# we are not using encoder_outputs here
        word_embedded = self.embedding(word_input)  # 1 X B X emb_length
word_embedded = self.dropout(word_embedded) # 1 X B X emb_length
# Combine embedded input word and hidden vector, run through RNN
output, hidden = self.rnn(word_embedded, last_hidden) # 1 X B X hidden
predictions = self.out(output) # 1, B, out
#output = F.log_softmax(predictions)
return predictions, hidden
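# Cell
# --- Shape-check sketch (added for illustration) ---
# A minimal, self-contained check of how tensors flow through Encoder and NewDecoder
# above. The vocabulary sizes, sequence length and batch size are arbitrary
# illustrative values, not values used elsewhere in this notebook.
if __name__ == "__main__":
    enc = Encoder(input_size=50, embedding_size=8, hidden_size=16, num_layers=2)
    dec = NewDecoder(hidden_size=16, embedding_size=8, output_size=60, n_layers=2)
    src = torch.randint(1, 50, (7, 3))   # (seq_len, batch)
    src_len = torch.tensor([7, 5, 4])
    hidden = enc(src, src_len)           # (h, c), each (n_layers, batch, hidden_size)
    out, hidden = dec(torch.ones(3).long(), hidden, None)
    print(out.shape)                     # torch.Size([1, 3, 60])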
# Cell
from argparse import ArgumentParser
import random
import pytorch_lightning as pl
import pytorch_lightning.metrics.functional as plfunc
from pytorch_lightning.loggers import TensorBoardLogger
# Cell
class Seq2Seq(pl.LightningModule):
""" Encoder decoder pytorch lightning module for training seq2seq model with teacher forcing
    The module tries to learn a mapping from one sequence to another.
"""
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--emb_dim", type=int, default=32)
parser.add_argument('--hidden_dim', type=int, default=64)
parser.add_argument('--dropout', type=float, default=0.1)
return parser
def __init__(self,
input_vocab_size,
output_vocab_size,
padding_index = 0,
emb_dim = 8,
hidden_dim=32,
dropout=0.1,
max_length=20,
**kwargs):
super().__init__()
# dynamic, based on tokenizer vocab size defined in datamodule
self.input_dim = input_vocab_size
self.output_dim = output_vocab_size
self.enc_emb_dim = emb_dim
self.dec_emb_dim = emb_dim
self.enc_hid_dim = hidden_dim
self.dec_hid_dim = hidden_dim
self.enc_dropout = dropout
self.dec_dropout = dropout
self.pad_idx = padding_index
self.num_layers = 2
self.max_length =10
self.save_hyperparameters()
self.max_epochs= kwargs.get('max_epochs',5)
self.learning_rate = 0.0005
self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
self.encoder = Encoder(
self.input_dim,
self.enc_emb_dim,
self.enc_hid_dim,
self.num_layers,
self.enc_dropout
)
self.decoder = NewDecoder(
self.enc_hid_dim,
self.dec_emb_dim,
self.output_dim,
self.num_layers,
self.dec_dropout
)
self._init_weights()
def _init_weights(self):
for name, param in self.named_parameters():
if "weight" in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
def create_mask(self, src):
mask = (src != self.pad_idx).permute(1, 0)
return mask
def forward(self, src_seq, source_len, trg_seq, teacher_force_ratio=0.5):
"""
        teacher_force_ratio controls decoding: with this probability the ground-truth
        target token, rather than the model's previous prediction, is fed as the next
        decoder input.
"""
        source = src_seq.transpose(0, 1)
        target = None
        target_len = self.max_length
        if trg_seq is not None:
            target = trg_seq.transpose(0, 1)
            target_len = target.shape[0]
batch_size = source.shape[1]
target_vocab_size = self.output_dim
outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(self.device)
encoder_hidden = self.encoder(source, source_len)
# mask = [batch_size, src len]
# without sos token at the beginning and eos token at the end
#x = target[0,:]
decoder_input = torch.ones(batch_size).long().to(self.device)
decoder_hidden = encoder_hidden
encoder_outputs = None
for t in range(target_len):
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
            outputs[t] = decoder_output.squeeze(0)  # (1, N, vocab) -> (N, vocab)
#(N, english_vocab_size)
#best_guess = output.argmax(1)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach()
decoder_input = target[t] if random.random() < teacher_force_ratio and target is not None else decoder_input
return outputs
def loss(self, logits, target):
return self._loss(logits, target)
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
lr_scheduler = {
'scheduler': optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr = self.learning_rate,
steps_per_epoch = 3379,
epochs=self.max_epochs,
anneal_strategy='linear',
final_div_factor=1000,
pct_start = 0.01
),
"name": "learning_rate",
"interval":"step",
"frequency": 1
}
return [optimizer],[lr_scheduler]
def training_step(self, batch, batch_idx):
src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len']
output = self.forward(src_seq, src_lengths,trg_seq)
# do not know if this is a problem, loss will be computed with sos token
# without sos token at the beginning and eos token at the end
output = output.view(-1, self.output_dim)
trg_seq = trg_seq.transpose(0, 1)
trg = trg_seq.reshape(-1)
loss = self.loss(output, trg)
self.log('train_loss',loss.item(),
on_step = True,
on_epoch=True,
prog_bar = True,
logger=True)
return loss
def validation_step(self, batch,batch_idx):
""" validation is in eval model so we do not have to use placeholder input sensors"""
src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len']
outputs = self.forward(src_seq, src_lengths, trg_seq, 0)
logits = outputs[1:].view(-1, self.output_dim)
trg = trg_seq[1:].reshape(-1)
loss = self.loss(logits, trg)
pred_seq = outputs[1:].argmax(2) # seq_len*batch_size*vocab_size -> seq_len * batch_size
        # change layout: seq_len * batch_size -> batch_size * seq_len
pred_seq = pred_seq.T
# change layout: seq_len * batch_size -> batch_size * seq_len
trg_batch = trg_seq[1:].T
# compare list of predicted ids for all sequences in a batch to targets
acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))
# need to cast to list of predicted sequences ( as list of token ids ) [ seq_tok1, seqtok2]
        predicted_ids = pred_seq.tolist()
        # need to add additional dim to each target reference sequence in order to
        # convert to the format needed by bleu_score
        # [seq1=[[reference1],[reference2]], seq2=[reference1]]
        target_ids = torch.unsqueeze(trg_batch, 1).tolist()
        bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device)
self.log(
'val_loss',
loss,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True)
self.log(
"val_acc",
acc,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True
)
self.log(
"val_bleu_idx",
bleu_score,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True
)
return loss, acc, bleu_score
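# Cell
# --- Usage sketch (added for illustration) ---
# A minimal smoke test of the full Seq2Seq module on random data. The vocabulary
# sizes and tensor shapes are arbitrary illustrative values; real training would go
# through a pytorch_lightning Trainer and a proper DataModule.
if __name__ == "__main__":
    model = Seq2Seq(input_vocab_size=50, output_vocab_size=60)
    src = torch.randint(1, 50, (4, 9))    # (batch, src_len)
    trg = torch.randint(1, 60, (4, 11))   # (batch, trg_len)
    src_len = torch.tensor([9, 8, 6, 5])
    outputs = model(src, src_len, trg)    # (trg_len, batch, output_vocab_size)
    print(outputs.shape)                  # torch.Size([11, 4, 60])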
| # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_seq2seq.ipynb (unless otherwise specified).
__all__ = ['Encoder', 'NewDecoder', 'Seq2Seq']
# Cell
from torch import nn
from torch import optim
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# Cell
class Encoder(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers=2, p=0.1):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = nn.Dropout(p)
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p,batch_first=False)
def forward(self, x, x_len):
# x shape (seq_length, N)
embedding = self.dropout(self.embedding(x))
# embedding shape : (seq_length, N, embedding_size)
x_packed = pack_padded_sequence(embedding, x_len.cpu(), batch_first=False, enforce_sorted=False)
output_packed, (hidden,cell) = self.rnn(x_packed)
# irrelevant because we are interested only in hidden state
#output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True)
# output is irrelevant, context vector is important
return hidden,cell
# Cell
class NewDecoder(nn.Module):
def __init__(self, hidden_size, embedding_size, output_size, n_layers=1, dropout_p=0.1):
super(NewDecoder, self).__init__()
# Define parameters
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers =n_layers
self.dropout_p = dropout_p
# Define layers
self.embedding = nn.Embedding(output_size, embedding_size)
self.dropout=nn.Dropout(dropout_p)
self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers, dropout=dropout_p, batch_first=False)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, word_input, last_hidden, encoder_outputs):
# Note that we will only be running forward for a single decoder time step, but will
# use all encoder outputs
word_input = word_input.unsqueeze(0)
# we are not using encoder_outputs here
        word_embedded = self.embedding(word_input)  # 1 X B X emb_length
word_embedded = self.dropout(word_embedded) # 1 X B X emb_length
# Combine embedded input word and hidden vector, run through RNN
output, hidden = self.rnn(word_embedded, last_hidden) # 1 X B X hidden
predictions = self.out(output) # 1, B, out
#output = F.log_softmax(predictions)
return predictions, hidden
# Cell
from argparse import ArgumentParser
import random
import pytorch_lightning as pl
import pytorch_lightning.metrics.functional as plfunc
from pytorch_lightning.loggers import TensorBoardLogger
# Cell
class Seq2Seq(pl.LightningModule):
""" Encoder decoder pytorch lightning module for training seq2seq model with teacher forcing
    The module tries to learn a mapping from one sequence to another.
"""
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--emb_dim", type=int, default=32)
parser.add_argument('--hidden_dim', type=int, default=64)
parser.add_argument('--dropout', type=float, default=0.1)
return parser
def __init__(self,
input_vocab_size,
output_vocab_size,
padding_index = 0,
emb_dim = 8,
hidden_dim=32,
dropout=0.1,
max_length=20,
**kwargs):
super().__init__()
# dynamic, based on tokenizer vocab size defined in datamodule
self.input_dim = input_vocab_size
self.output_dim = output_vocab_size
self.enc_emb_dim = emb_dim
self.dec_emb_dim = emb_dim
self.enc_hid_dim = hidden_dim
self.dec_hid_dim = hidden_dim
self.enc_dropout = dropout
self.dec_dropout = dropout
self.pad_idx = padding_index
self.num_layers = 2
self.max_length =10
self.save_hyperparameters()
self.max_epochs= kwargs.get('max_epochs',5)
self.learning_rate = 0.0005
self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
self.encoder = Encoder(
self.input_dim,
self.enc_emb_dim,
self.enc_hid_dim,
self.num_layers,
self.enc_dropout
)
self.decoder = NewDecoder(
self.enc_hid_dim,
self.dec_emb_dim,
self.output_dim,
self.num_layers,
self.dec_dropout
)
self._init_weights()
def _init_weights(self):
for name, param in self.named_parameters():
if "weight" in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
def create_mask(self, src):
mask = (src != self.pad_idx).permute(1, 0)
return mask
def forward(self, src_seq, source_len, trg_seq, teacher_force_ratio=0.5):
"""
        teacher_force_ratio controls decoding: with this probability the ground-truth
        target token, rather than the model's previous prediction, is fed as the next
        decoder input.
"""
        source = src_seq.transpose(0, 1)
        target = None
        target_len = self.max_length
        if trg_seq is not None:
            target = trg_seq.transpose(0, 1)
            target_len = target.shape[0]
batch_size = source.shape[1]
target_vocab_size = self.output_dim
outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(self.device)
encoder_hidden = self.encoder(source, source_len)
# mask = [batch_size, src len]
# without sos token at the beginning and eos token at the end
#x = target[0,:]
decoder_input = torch.ones(batch_size).long().to(self.device)
decoder_hidden = encoder_hidden
encoder_outputs = None
for t in range(target_len):
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
            outputs[t] = decoder_output.squeeze(0)  # (1, N, vocab) -> (N, vocab)
#(N, english_vocab_size)
#best_guess = output.argmax(1)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach()
decoder_input = target[t] if random.random() < teacher_force_ratio and target is not None else decoder_input
return outputs
def loss(self, logits, target):
return self._loss(logits, target)
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
lr_scheduler = {
'scheduler': optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr = self.learning_rate,
steps_per_epoch = 3379,
epochs=self.max_epochs,
anneal_strategy='linear',
final_div_factor=1000,
pct_start = 0.01
),
"name": "learning_rate",
"interval":"step",
"frequency": 1
}
return [optimizer],[lr_scheduler]
def training_step(self, batch, batch_idx):
src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len']
output = self.forward(src_seq, src_lengths,trg_seq)
# do not know if this is a problem, loss will be computed with sos token
# without sos token at the beginning and eos token at the end
output = output.view(-1, self.output_dim)
trg_seq = trg_seq.transpose(0, 1)
trg = trg_seq.reshape(-1)
loss = self.loss(output, trg)
self.log('train_loss',loss.item(),
on_step = True,
on_epoch=True,
prog_bar = True,
logger=True)
return loss
def validation_step(self, batch,batch_idx):
""" validation is in eval model so we do not have to use placeholder input sensors"""
src_seq, trg_seq, src_lengths = batch['src'],batch['trg'], batch['src_len']
outputs = self.forward(src_seq, src_lengths, trg_seq, 0)
logits = outputs[1:].view(-1, self.output_dim)
trg = trg_seq[1:].reshape(-1)
loss = self.loss(logits, trg)
pred_seq = outputs[1:].argmax(2) # seq_len*batch_size*vocab_size -> seq_len * batch_size
        # change layout: seq_len * batch_size -> batch_size * seq_len
pred_seq = pred_seq.T
# change layout: seq_len * batch_size -> batch_size * seq_len
trg_batch = trg_seq[1:].T
# compare list of predicted ids for all sequences in a batch to targets
acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))
# need to cast to list of predicted sequences ( as list of token ids ) [ seq_tok1, seqtok2]
        predicted_ids = pred_seq.tolist()
        # need to add additional dim to each target reference sequence in order to
        # convert to the format needed by bleu_score
        # [seq1=[[reference1],[reference2]], seq2=[reference1]]
        target_ids = torch.unsqueeze(trg_batch, 1).tolist()
        bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(self.device)
self.log(
'val_loss',
loss,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True)
self.log(
"val_acc",
acc,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True
)
self.log(
"val_bleu_idx",
bleu_score,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
sync_dist=True
)
return loss, acc, bleu_score | en | 0.729894 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_seq2seq.ipynb (unless otherwise specified). # Cell # Cell # x shape (seq_length, N) # embedding shape : (seq_length, N, embedding_size) # irrelevant because we are interested only in hidden state #output_padded, output_lengths = pad_packed_sequence(output_packed, batch_first=True) # output is irrelevant, context vector is important # Cell # Define parameters # Define layers # Note that we will only be running forward for a single decoder time step, but will # use all encoder outputs # we are not using encoder_outputs here # 1 X B # 1 X B X emb_length # Combine embedded input word and hidden vector, run through RNN # 1 X B X hidden # 1, B, out #output = F.log_softmax(predictions) # Cell # Cell Encoder decoder pytorch lightning module for training seq2seq model with teacher forcing Module try to learn mapping from one sequence to another # dynamic, based on tokenizer vocab size defined in datamodule teacher_force_ratio is used to help in decoding. In starting, original input token will be sent as input token # mask = [batch_size, src len] # without sos token at the beginning and eos token at the end #x = target[0,:] #(N, english_vocab_size) #best_guess = output.argmax(1) # do not know if this is a problem, loss will be computed with sos token # without sos token at the beginning and eos token at the end validation is in eval model so we do not have to use placeholder input sensors # seq_len*batch_size*vocab_size -> seq_len * batch_size # change layout: sesq_len * batch_size -> batch_size * seq_len # change layout: seq_len * batch_size -> batch_size * seq_len # compare list of predicted ids for all sequences in a batch to targets # need to cast to list of predicted sequences ( as list of token ids ) [ seq_tok1, seqtok2] # need to add additional dim to each target reference sequence in order to # conver to format needed by blue_score_func # [seq1=[[reference1],[reference2]], seq2=[reference1]] | 2.458362 | 2 |
tests/flows/test_consent.py | mrkday/SATOSA | 92 | 509 | import json
import re
import responses
from werkzeug.test import Client
from werkzeug.wrappers import Response
from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig
class TestConsent:
def test_full_flow(self, satosa_config_dict, consent_module_config):
api_url = "https://consent.example.com/api"
redirect_url = "https://consent.example.com/redirect"
consent_module_config["config"]["api_url"] = api_url
consent_module_config["config"]["redirect_url"] = redirect_url
satosa_config_dict["MICRO_SERVICES"].append(consent_module_config)
# application
test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response)
# incoming auth req
http_resp = test_client.get("/{}/{}/request".format(satosa_config_dict["BACKEND_MODULES"][0]["name"],
satosa_config_dict["FRONTEND_MODULES"][0]["name"]))
assert http_resp.status_code == 200
verify_url_re = re.compile(r"{}/verify/\w+".format(api_url))
with responses.RequestsMock() as rsps:
# fake no previous consent
consent_request_url_re = re.compile(r"{}/creq/\w+".format(api_url))
rsps.add(responses.GET, verify_url_re, status=401)
rsps.add(responses.GET, consent_request_url_re, "test_ticket", status=200)
# incoming auth resp
http_resp = test_client.get("/{}/response".format(satosa_config_dict["BACKEND_MODULES"][0]["name"]))
assert http_resp.status_code == 302
assert http_resp.headers["Location"].startswith(redirect_url)
with responses.RequestsMock() as rsps:
# fake consent
rsps.add(responses.GET, verify_url_re, json.dumps({"foo": "bar"}), status=200)
# incoming consent response
http_resp = test_client.get("/consent/handle_consent")
assert http_resp.status_code == 200
| import json
import re
import responses
from werkzeug.test import Client
from werkzeug.wrappers import Response
from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig
class TestConsent:
def test_full_flow(self, satosa_config_dict, consent_module_config):
api_url = "https://consent.example.com/api"
redirect_url = "https://consent.example.com/redirect"
consent_module_config["config"]["api_url"] = api_url
consent_module_config["config"]["redirect_url"] = redirect_url
satosa_config_dict["MICRO_SERVICES"].append(consent_module_config)
# application
test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response)
# incoming auth req
http_resp = test_client.get("/{}/{}/request".format(satosa_config_dict["BACKEND_MODULES"][0]["name"],
satosa_config_dict["FRONTEND_MODULES"][0]["name"]))
assert http_resp.status_code == 200
verify_url_re = re.compile(r"{}/verify/\w+".format(api_url))
with responses.RequestsMock() as rsps:
# fake no previous consent
consent_request_url_re = re.compile(r"{}/creq/\w+".format(api_url))
rsps.add(responses.GET, verify_url_re, status=401)
rsps.add(responses.GET, consent_request_url_re, "test_ticket", status=200)
# incoming auth resp
http_resp = test_client.get("/{}/response".format(satosa_config_dict["BACKEND_MODULES"][0]["name"]))
assert http_resp.status_code == 302
assert http_resp.headers["Location"].startswith(redirect_url)
with responses.RequestsMock() as rsps:
# fake consent
rsps.add(responses.GET, verify_url_re, json.dumps({"foo": "bar"}), status=200)
# incoming consent response
http_resp = test_client.get("/consent/handle_consent")
assert http_resp.status_code == 200
| en | 0.805195 | # application # incoming auth req # fake no previous consent # incoming auth resp # fake consent # incoming consent response | 2.104803 | 2 |
qnarre/models/transfo_xl.py | quantapix/qnarre.com | 0 | 510 | <gh_stars>0
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# https://arxiv.org/abs/1901.02860
# https://github.com/kimiyoung/transformer-xl
import torch
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core.embed import Adaptive, Positional
from ..core.ffnet import Positionwise
from ..prep.config.transfo_xl import PreTrained
log = logging.get_logger(__name__)
class Model(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.tok_emb = Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw)
self.pos_emb = Positional(cfg.d_model, **kw)
if cfg.untie_r:
q_bias = None
r_bias = None
else:
q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))
r_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))
self.lays = qc.Stack()
for _ in range(cfg.n_lays):
self.lays.append(Layer(q_bias=q_bias, r_bias=r_bias, **kw))
self.drop = qc.Dropout(cfg.drop, **kw)
def init_mems(self, b):
cfg = self.cfg
if cfg.mem_len > 0:
p = next(self.parameters())
kw = dict(dtype=p.dtype, device=p.device)
return [torch.zeros(cfg.mem_len, b, cfg.d_model, **kw) for _ in range(cfg.n_lays)]
return None
def update_mems(self, xs, ys, mlen, qlen):
assert len(xs) == len(ys)
e = mlen + max(0, qlen)
b = max(0, e - self.cfg.mem_len)
with torch.no_grad():
return [torch.cat([ys[i], xs[i]], dim=0)[b:e].detach() for i in range(len(xs))]
def forward(self, x, mems=None, head_m=None, x_emb=None, **kw):
cfg = self.cfg
yo = self.get_y_opts(**kw)
if x is None:
x_emb = x_emb.transpose(0, 1).contiguous()
s = x_emb.size()[:-1]
else:
assert x_emb is None
x = x.transpose(0, 1).contiguous()
s = x.size()
y = self.tok_emb(x) if x_emb is None else x_emb
n, b = s
if mems is None:
mems = self.init_mems(b)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + n
pos = torch.arange(klen - 1, -1, -1.0, device=y.device, dtype=y.dtype)
if cfg.clamp_len > 0:
pos.clamp_(max=cfg.clamp_len)
pos = self.drop(self.pos_emb(pos))
ones = y.new_ones((n, klen), dtype=torch.uint8)
if cfg.same_length:
d = klen - cfg.mem_len
shift = n - d if d > 0 else n
dec_m = (torch.triu(ones, 1 + mlen) + torch.tril(ones, -shift))[:, :, None]
else:
dec_m = torch.triu(ones, diagonal=1 + mlen)[:, :, None]
y = self.drop(y)
attns = () if yo.attn else None
hiddens = () if yo.hidden else None
head_m = self.get_head_m2(head_m, cfg.n_lays)
for i, lay in enumerate(self.lays):
if yo.hidden:
hiddens += (y,)
m = None if mems is None else mems[i]
ys = lay(y, pos, **kw, dec_m=dec_m, head_m=head_m[i], mems=m, yo=yo)
y = ys[0]
if yo.attn:
attns += (ys[1],)
y = self.drop(y)
mems = None if mems is None else self.update_mems(hiddens, mems, mlen, n)
if yo.attn:
attns = tuple(x.permute(2, 3, 0, 1).contiguous() for x in attns)
if yo.hidden:
hiddens += (y,)
hiddens = tuple(x.transpose(0, 1).contiguous() for x in hiddens)
y = y.transpose(0, 1).contiguous()
ys = (y, attns, hiddens, mems)
return qo.WithMems(*ys) if yo.kw else ys
class ForSeqClassifier(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.model = Model(**kw)
self.proj = qc.Linear(cfg.d_embed, cfg.n_labels, bias=False, **kw)
forward = qf.forward_seq
def post_proj(self, x):
cfg = self.cfg
b = (x.shape[:2] if x is not None else x_emb.shape[:2])[0]
if cfg.PAD is None:
n = -1
else:
assert b == 1
n = -1 if x is None else torch.ne(x, cfg.PAD).sum(-1) - 1
return x[torch.arange(b, device=self.device), n]
class LLMHead(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.model = Model(**kw)
assert cfg.sample_softmax <= 0
self.proj = Projector(
cfg.s_vocab, cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw
)
def tie_weights(self):
cfg = self.cfg
if cfg.tie_word_embeds:
for i in range(len(self.proj.out_layers)):
self._tie_or_clone_weights(self.proj.out_layers[i], self.model.tok_emb.lays[i])
if cfg.tie_projs:
for i, tie_proj in enumerate(cfg.tie_projs):
if tie_proj and cfg.div_val == 1 and cfg.d_model != cfg.d_embed:
if cfg.torchscript:
self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[0].clone())
else:
self.proj.out_projs[i] = self.model.tok_emb.projs[0]
elif tie_proj and cfg.div_val != 1:
if cfg.torchscript:
self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[i].clone())
else:
self.proj.out_projs[i] = self.model.tok_emb.projs[i]
def init_mems(self, bsz):
return self.model.init_mems(bsz)
def forward(self, x, x_emb=None, labels=None, **kw):
yo = self.get_y_opts(**kw)
if x is None:
assert x_emb is not None
b, tgt = x_emb.size(0), x_emb.size(1)
else:
b, tgt = x.size(0), x.size(1)
ys = self.model(x, x_emb=x_emb, **kw, yo=yo)
xs = self.proj(ys[0][:, -tgt:], labels)
y = xs.view(b, tgt, -1) if labels is None else ()
loss = xs.view(b, tgt - 1) if labels is not None else None
ys = (y,) + ys[1:] + (loss,)
return qo.LossMems(*ys) if yo.kw else ys
class Projector(qc.Module):
def __init__(self, s_vocab, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
super().__init__()
self.s_vocab = s_vocab
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [s_vocab]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = qc.Stack()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
else:
self.out_projs.append(None)
self.out_layers.append(qc.Linear(d_embed, s_vocab))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
self.out_layers.append(qc.Linear(d_emb_i, r_idx - l_idx))
self.keep_order = keep_order
def _compute_logit(self, x, weight, bias, proj):
if proj is None:
y = F.linear(x, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
x = F.linear(x, proj.t().contiguous())
y = F.linear(x, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return y
def forward(self, x, labels=None, keep_order=False):
if labels is not None:
x = x[..., :-1, :].contiguous()
labels = labels[..., 1:].contiguous()
x = x.view(-1, x.size(-1))
labels = labels.view(-1)
assert x.size(0) == labels.size(0)
else:
x = x.view(-1, x.size(-1))
if self.n_clusters == 0:
y = self._compute_logit(
x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]
)
if labels is not None:
y = -F.log_softmax(y, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1)
else:
y = F.log_softmax(y, dim=-1)
else:
ws, bs = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
ws.append(weight_i)
bs.append(bias_i)
head_weight, head_bias, head_proj = ws[0], bs[0], self.out_projs[0]
head_logit = self._compute_logit(x, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
if labels is None:
y = x.new_empty((head_logit.size(0), self.s_vocab))
else:
y = torch.zeros_like(labels, dtype=x.dtype, device=x.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
mask_i = (labels >= l_idx) & (labels < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = labels.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
hidden_i = x.index_select(0, indices_i)
else:
hidden_i = x
if i == 0:
if labels is not None:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
cluster_prob_idx = self.cutoffs[0] + i - 1
if labels is not None:
logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1, target_i[:, None]
).squeeze(1)
else:
logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
y[:, l_idx:r_idx] = logprob_i
if labels is not None:
if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
y.index_copy_(0, indices_i, -logprob_i)
else:
y[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return y
def log_prob(self, x):
if self.n_clusters == 0:
y = self._compute_logit(
x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]
)
return F.log_softmax(y, dim=-1)
else:
ws, bs = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
ws.append(weight_i)
bs.append(bias_i)
head_weight, head_bias, head_proj = ws[0], bs[0], self.out_projs[0]
head_logit = self._compute_logit(x, head_weight, head_bias, head_proj)
y = x.new_empty((head_logit.size(0), self.s_vocab))
head_logprob = F.log_softmax(head_logit, dim=1)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
beg_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]
tail_logit_i = self._compute_logit(x, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
                    y[:, beg_idx:stop_idx] = logprob_i
return y
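# --- Adaptive-softmax cutoff sketch (added for illustration) ---
# The Projector above splits the vocabulary into a frequent "head" plus tail
# clusters. The helper below only illustrates how `cutoffs` partition token ids;
# the numbers in its comment are arbitrary examples, not checkpoint defaults.
def _cutoff_ranges(cutoffs, s_vocab):
    # _cutoff_ranges([2000, 10000], 50000) -> [(0, 2000), (2000, 10000), (10000, 50000)]
    ends = [0] + list(cutoffs) + [s_vocab]
    return [(ends[i], ends[i + 1]) for i in range(len(ends) - 1)]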
class Layer(qc.Module):
def __init__(self, **kw):
super().__init__()
self.attn = Attention(**kw)
self.ff = Positionwise(**kw)
def forward(self, x, r, dec_m=None, **kw):
ys = self.attn(x, r, mask=dec_m, **kw)
return (self.ff(ys[0]),) + ys[1:]
class Attention(qc.Module):
hs = qc.Hypers(
{"d_head", "d_model", "drop", "n_heads"},
{"drop_attn": 0.0, "eps": 1e-5, "pre_norm": False},
)
def __init__(self, r_bias=None, q_bias=None, ps={}, hs=[], **kw):
super().__init__(ps, [self.hs] + hs, **kw)
cfg = self.get_cfg(kw)
m, n, h = cfg.d_model, cfg.n_heads, cfg.d_head
cfg.scale = 1 / (h**0.5)
self.qkv = qc.Linear(m, 3 * n * h, bias=False)
self.r_net = qc.Linear(m, n * h, bias=False)
if r_bias is None or q_bias is None:
self.q_bias = nn.Parameter(torch.FloatTensor(n, h))
self.r_bias = nn.Parameter(torch.FloatTensor(n, h))
else:
self.q_bias = q_bias
self.r_bias = r_bias
self.drop = qc.Dropout(cfg.drop, **kw)
self.drop_attn = qc.Dropout(cfg.drop_attn, **kw)
self.proj = qc.Linear(n * h, m, bias=False, **kw)
self.norm = qc.LayerNorm(m, **kw)
def rel_shift(self, x, zero_triu=False):
s = (x.size(0), 1) + x.size()[2:]
y = torch.zeros(s, device=x.device, dtype=x.dtype)
y = torch.cat([y, x], dim=1)
s = (x.size(1) + 1, x.size(0)) + x.size()[2:]
y = y.view(*s)
y = y[1:].view_as(x)
if zero_triu:
ones = torch.ones((y.size(0), y.size(1)))
y = y * torch.tril(ones, y.size(1) - y.size(0))[:, :, None, None]
return y
def forward(self, x, r, mask=None, mems=None, head_m=None, **kw):
cfg = self.cfg
yo = self.get_y_opts(**kw)
y = x if mems is None else torch.cat([mems, x], 0)
y = self.qkv(self.norm(y) if cfg.pre_norm else y)
r = self.r_net(r)
        q, k, v = torch.chunk(y, 3, dim=-1)
qlen, klen, rlen = x.size(0), k.size(0), r.size(0)
q = q if mems is None else q[-qlen:]
b, n, h = x.size(1), cfg.n_heads, cfg.d_head
q = q.view(qlen, b, n, h)
k = k.view(klen, b, n, h)
v = v.view(klen, b, n, h)
r = r.view(rlen, n, h)
AC = torch.einsum("ibnd,jbnd->ijbn", (q + self.q_bias, k))
BD = self.rel_shift(torch.einsum("ibnd,jnd->ijbn", (q + self.r_bias, r)))
a = AC + BD
a.mul_(cfg.scale)
if mask is not None and torch.sum(mask).item():
mask = mask == 1
i = self.get_minus_inf()
if mask.dim() == 2:
a = a.float().masked_fill(mask[None, :, :, None], i).type_as(a)
elif mask.dim() == 3:
a = a.float().masked_fill(mask[:, :, :, None], i).type_as(a)
a = self.drop_attn(F.softmax(a, dim=1))
if head_m is not None:
a = a * head_m
y = torch.einsum("ijbn,jbnd->ibnd", (a, v))
y = y.contiguous().view(y.size(0), y.size(1), n * h)
y = x + self.drop(self.proj(y))
ys = (y,) if cfg.pre_norm else (self.norm(y),)
if yo.attn:
ys += (a,)
return ys
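# --- rel_shift sketch (added for illustration) ---
# A standalone rerun of the zero-pad / reshape trick used in Attention.rel_shift
# above, which realigns the relative-position scores per query row. The tensor
# sizes are arbitrary illustrative values.
if __name__ == "__main__":
    x = torch.arange(12.0).view(3, 4, 1, 1)           # (qlen, klen, batch, n_heads)
    pad = torch.zeros((x.size(0), 1) + x.size()[2:])   # the zero column prepended in rel_shift
    y = torch.cat([pad, x], dim=1)                      # (qlen, klen + 1, 1, 1)
    y = y.view((x.size(1) + 1, x.size(0)) + x.size()[2:])[1:].view_as(x)
    print(y[:, :, 0, 0])                                # shifted 3x4 score matrix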
| # Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# https://arxiv.org/abs/1901.02860
# https://github.com/kimiyoung/transformer-xl
import torch
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core.embed import Adaptive, Positional
from ..core.ffnet import Positionwise
from ..prep.config.transfo_xl import PreTrained
log = logging.get_logger(__name__)
class Model(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.tok_emb = Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw)
self.pos_emb = Positional(cfg.d_model, **kw)
if cfg.untie_r:
q_bias = None
r_bias = None
else:
q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))
r_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))
self.lays = qc.Stack()
for _ in range(cfg.n_lays):
self.lays.append(Layer(q_bias=q_bias, r_bias=r_bias, **kw))
self.drop = qc.Dropout(cfg.drop, **kw)
def init_mems(self, b):
cfg = self.cfg
if cfg.mem_len > 0:
p = next(self.parameters())
kw = dict(dtype=p.dtype, device=p.device)
return [torch.zeros(cfg.mem_len, b, cfg.d_model, **kw) for _ in range(cfg.n_lays)]
return None
def update_mems(self, xs, ys, mlen, qlen):
assert len(xs) == len(ys)
e = mlen + max(0, qlen)
b = max(0, e - self.cfg.mem_len)
with torch.no_grad():
return [torch.cat([ys[i], xs[i]], dim=0)[b:e].detach() for i in range(len(xs))]
def forward(self, x, mems=None, head_m=None, x_emb=None, **kw):
cfg = self.cfg
yo = self.get_y_opts(**kw)
if x is None:
x_emb = x_emb.transpose(0, 1).contiguous()
s = x_emb.size()[:-1]
else:
assert x_emb is None
x = x.transpose(0, 1).contiguous()
s = x.size()
y = self.tok_emb(x) if x_emb is None else x_emb
n, b = s
if mems is None:
mems = self.init_mems(b)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + n
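        # relative position ids run from klen-1 (furthest back, including memory) down to 0 (the current token)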
pos = torch.arange(klen - 1, -1, -1.0, device=y.device, dtype=y.dtype)
if cfg.clamp_len > 0:
pos.clamp_(max=cfg.clamp_len)
pos = self.drop(self.pos_emb(pos))
ones = y.new_ones((n, klen), dtype=torch.uint8)
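        # causal mask over current tokens plus memory; same_length additionally caps how far back each position may attend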
if cfg.same_length:
d = klen - cfg.mem_len
shift = n - d if d > 0 else n
dec_m = (torch.triu(ones, 1 + mlen) + torch.tril(ones, -shift))[:, :, None]
else:
dec_m = torch.triu(ones, diagonal=1 + mlen)[:, :, None]
y = self.drop(y)
attns = () if yo.attn else None
hiddens = () if yo.hidden else None
head_m = self.get_head_m2(head_m, cfg.n_lays)
for i, lay in enumerate(self.lays):
if yo.hidden:
hiddens += (y,)
m = None if mems is None else mems[i]
ys = lay(y, pos, **kw, dec_m=dec_m, head_m=head_m[i], mems=m, yo=yo)
y = ys[0]
if yo.attn:
attns += (ys[1],)
y = self.drop(y)
mems = None if mems is None else self.update_mems(hiddens, mems, mlen, n)
if yo.attn:
attns = tuple(x.permute(2, 3, 0, 1).contiguous() for x in attns)
if yo.hidden:
hiddens += (y,)
hiddens = tuple(x.transpose(0, 1).contiguous() for x in hiddens)
y = y.transpose(0, 1).contiguous()
ys = (y, attns, hiddens, mems)
return qo.WithMems(*ys) if yo.kw else ys
class ForSeqClassifier(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.model = Model(**kw)
self.proj = qc.Linear(cfg.d_embed, cfg.n_labels, bias=False, **kw)
forward = qf.forward_seq
def post_proj(self, x):
cfg = self.cfg
b = (x.shape[:2] if x is not None else x_emb.shape[:2])[0]
if cfg.PAD is None:
n = -1
else:
assert b == 1
n = -1 if x is None else torch.ne(x, cfg.PAD).sum(-1) - 1
return x[torch.arange(b, device=self.device), n]
class LLMHead(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.model = Model(**kw)
assert cfg.sample_softmax <= 0
self.proj = Projector(
cfg.s_vocab, cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw
)
def tie_weights(self):
cfg = self.cfg
if cfg.tie_word_embeds:
for i in range(len(self.proj.out_layers)):
self._tie_or_clone_weights(self.proj.out_layers[i], self.model.tok_emb.lays[i])
if cfg.tie_projs:
for i, tie_proj in enumerate(cfg.tie_projs):
if tie_proj and cfg.div_val == 1 and cfg.d_model != cfg.d_embed:
if cfg.torchscript:
self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[0].clone())
else:
self.proj.out_projs[i] = self.model.tok_emb.projs[0]
elif tie_proj and cfg.div_val != 1:
if cfg.torchscript:
self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[i].clone())
else:
self.proj.out_projs[i] = self.model.tok_emb.projs[i]
def init_mems(self, bsz):
return self.model.init_mems(bsz)
def forward(self, x, x_emb=None, labels=None, **kw):
yo = self.get_y_opts(**kw)
if x is None:
assert x_emb is not None
b, tgt = x_emb.size(0), x_emb.size(1)
else:
b, tgt = x.size(0), x.size(1)
ys = self.model(x, x_emb=x_emb, **kw, yo=yo)
xs = self.proj(ys[0][:, -tgt:], labels)
y = xs.view(b, tgt, -1) if labels is None else ()
loss = xs.view(b, tgt - 1) if labels is not None else None
ys = (y,) + ys[1:] + (loss,)
return qo.LossMems(*ys) if yo.kw else ys
class Projector(qc.Module):
def __init__(self, s_vocab, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
super().__init__()
self.s_vocab = s_vocab
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [s_vocab]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
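        # adaptive softmax: the head predicts the shortlist tokens plus one logit per tail cluster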
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = qc.Stack()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
else:
self.out_projs.append(None)
self.out_layers.append(qc.Linear(d_embed, s_vocab))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
self.out_layers.append(qc.Linear(d_emb_i, r_idx - l_idx))
self.keep_order = keep_order
def _compute_logit(self, x, weight, bias, proj):
if proj is None:
y = F.linear(x, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
x = F.linear(x, proj.t().contiguous())
y = F.linear(x, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return y
def forward(self, x, labels=None, keep_order=False):
if labels is not None:
x = x[..., :-1, :].contiguous()
labels = labels[..., 1:].contiguous()
x = x.view(-1, x.size(-1))
labels = labels.view(-1)
assert x.size(0) == labels.size(0)
else:
x = x.view(-1, x.size(-1))
if self.n_clusters == 0:
y = self._compute_logit(
x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]
)
if labels is not None:
y = -F.log_softmax(y, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1)
else:
y = F.log_softmax(y, dim=-1)
else:
ws, bs = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
ws.append(weight_i)
bs.append(bias_i)
head_weight, head_bias, head_proj = ws[0], bs[0], self.out_projs[0]
head_logit = self._compute_logit(x, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
if labels is None:
y = x.new_empty((head_logit.size(0), self.s_vocab))
else:
y = torch.zeros_like(labels, dtype=x.dtype, device=x.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
mask_i = (labels >= l_idx) & (labels < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = labels.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
hidden_i = x.index_select(0, indices_i)
else:
hidden_i = x
if i == 0:
if labels is not None:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
cluster_prob_idx = self.cutoffs[0] + i - 1
if labels is not None:
logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1, target_i[:, None]
).squeeze(1)
else:
logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
y[:, l_idx:r_idx] = logprob_i
if labels is not None:
if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
y.index_copy_(0, indices_i, -logprob_i)
else:
y[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return y
def log_prob(self, x):
if self.n_clusters == 0:
y = self._compute_logit(
x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]
)
return F.log_softmax(y, dim=-1)
else:
ws, bs = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
ws.append(weight_i)
bs.append(bias_i)
head_weight, head_bias, head_proj = ws[0], bs[0], self.out_projs[0]
head_logit = self._compute_logit(x, head_weight, head_bias, head_proj)
y = x.new_empty((head_logit.size(0), self.s_vocab))
head_logprob = F.log_softmax(head_logit, dim=1)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
beg_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]
tail_logit_i = self._compute_logit(x, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
                    y[:, beg_idx:stop_idx] = logprob_i
return y
class Layer(qc.Module):
def __init__(self, **kw):
super().__init__()
self.attn = Attention(**kw)
self.ff = Positionwise(**kw)
def forward(self, x, r, dec_m=None, **kw):
ys = self.attn(x, r, mask=dec_m, **kw)
return (self.ff(ys[0]),) + ys[1:]
class Attention(qc.Module):
hs = qc.Hypers(
{"d_head", "d_model", "drop", "n_heads"},
{"drop_attn": 0.0, "eps": 1e-5, "pre_norm": False},
)
def __init__(self, r_bias=None, q_bias=None, ps={}, hs=[], **kw):
super().__init__(ps, [self.hs] + hs, **kw)
cfg = self.get_cfg(kw)
m, n, h = cfg.d_model, cfg.n_heads, cfg.d_head
cfg.scale = 1 / (h**0.5)
self.qkv = qc.Linear(m, 3 * n * h, bias=False)
self.r_net = qc.Linear(m, n * h, bias=False)
if r_bias is None or q_bias is None:
self.q_bias = nn.Parameter(torch.FloatTensor(n, h))
self.r_bias = nn.Parameter(torch.FloatTensor(n, h))
else:
self.q_bias = q_bias
self.r_bias = r_bias
self.drop = qc.Dropout(cfg.drop, **kw)
self.drop_attn = qc.Dropout(cfg.drop_attn, **kw)
self.proj = qc.Linear(n * h, m, bias=False, **kw)
self.norm = qc.LayerNorm(m, **kw)
def rel_shift(self, x, zero_triu=False):
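        # Transformer-XL relative shift: prepend a zero column, reshape so rows are offset by one, then drop the first row to align scores by relative distance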
s = (x.size(0), 1) + x.size()[2:]
y = torch.zeros(s, device=x.device, dtype=x.dtype)
y = torch.cat([y, x], dim=1)
s = (x.size(1) + 1, x.size(0)) + x.size()[2:]
y = y.view(*s)
y = y[1:].view_as(x)
if zero_triu:
ones = torch.ones((y.size(0), y.size(1)))
y = y * torch.tril(ones, y.size(1) - y.size(0))[:, :, None, None]
return y
def forward(self, x, r, mask=None, mems=None, head_m=None, **kw):
cfg = self.cfg
yo = self.get_y_opts(**kw)
y = x if mems is None else torch.cat([mems, x], 0)
y = self.qkv(self.norm(y) if cfg.pre_norm else y)
r = self.r_net(r)
        q, k, v = torch.chunk(y, 3, dim=-1)
qlen, klen, rlen = x.size(0), k.size(0), r.size(0)
q = q if mems is None else q[-qlen:]
b, n, h = x.size(1), cfg.n_heads, cfg.d_head
q = q.view(qlen, b, n, h)
k = k.view(klen, b, n, h)
v = v.view(klen, b, n, h)
r = r.view(rlen, n, h)
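        # AC: content-based attention scores (query + q_bias against keys); BD: position-based scores (query + r_bias against relative embeddings), realigned by rel_shift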
AC = torch.einsum("ibnd,jbnd->ijbn", (q + self.q_bias, k))
BD = self.rel_shift(torch.einsum("ibnd,jnd->ijbn", (q + self.r_bias, r)))
a = AC + BD
a.mul_(cfg.scale)
if mask is not None and torch.sum(mask).item():
mask = mask == 1
i = self.get_minus_inf()
if mask.dim() == 2:
a = a.float().masked_fill(mask[None, :, :, None], i).type_as(a)
elif mask.dim() == 3:
a = a.float().masked_fill(mask[:, :, :, None], i).type_as(a)
a = self.drop_attn(F.softmax(a, dim=1))
if head_m is not None:
a = a * head_m
y = torch.einsum("ijbn,jbnd->ibnd", (a, v))
y = y.contiguous().view(y.size(0), y.size(1), n * h)
y = x + self.drop(self.proj(y))
ys = (y,) if cfg.pre_norm else (self.norm(y),)
if yo.attn:
ys += (a,)
return ys | en | 0.751763 | # Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # https://arxiv.org/abs/1901.02860 # https://github.com/kimiyoung/transformer-xl # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias | 1.327191 | 1 |
src/align/face_align_celeba.py | Dou-Yu-xuan/pykinship | 12 | 511 | import argparse
import glob
import os
import pickle
from pathlib import Path
import numpy as np
from PIL import Image
from tqdm import tqdm
from src.align.align_trans import get_reference_facial_points, warp_and_crop_face
# sys.path.append("../../")
from src.align.detector import detect_faces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="face alignment")
parser.add_argument(
"-source_root",
"--source_root",
help="specify your source dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-dest_root",
"--dest_root",
help="specify your destination dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-crop_size",
"--crop_size",
help="specify size of aligned faces, align and crop with padding",
default=112,
type=int,
)
args = parser.parse_args()
source_root = args.source_root # specify your source dir
dest_root = args.dest_root # specify your destination dir
crop_size = (
args.crop_size
) # specify size of aligned faces, align and crop with padding
scale = crop_size / 112.0
reference = get_reference_facial_points(default_square=True) * scale
cwd = os.getcwd() # delete '.DS_Store' existed in the source_root
os.chdir(source_root)
os.system("find . -name '*.DS_Store' -type f -delete")
os.chdir(cwd)
imfiles = [
f
for f in glob.glob(f"{source_root}F????/MID*/faces/msceleb*")
if Path(f).is_file()
]
# images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles}
meta = {}
# for subfolder in tqdm(os.listdir(source_root)):
for imfile in tqdm(imfiles):
ref = imfile.replace(source_root, "")
print("Processing\t{}".format(imfile))
img = Image.open(imfile)
try: # Handle exception
bbs, landmarks = detect_faces(img)
except Exception:
print("{} is discarded due to exception!".format(imfile))
continue
ref = imfile.replace(source_root, "")
ndetections = len(landmarks)
if (
ndetections == 0
): # If the landmarks cannot be detected, the img will be discarded
print("{} is discarded due to non-detected landmarks!".format(imfile))
meta[ref] = []
continue
li_meta = []
for i in range(ndetections):
im_meta = {}
im_meta["face"] = i
im_meta["landmarks"] = landmarks[i]
im_meta["bb"] = bbs[i]
facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(
np.array(img),
facial5points,
reference,
crop_size=(crop_size, crop_size),
)
img_warped = Image.fromarray(warped_face)
image_name = imfile.replace("images", "cropped").replace(
".jpg", "-{:02d}.jpg".format(i)
)
# im_meta['ref'] = "/".join(image_name.split('/')[-5:])
img_warped.save(image_name)
li_meta.append(im_meta)
meta[ref] = li_meta
with open(source_root + "cropped-meta.pkl", "wb") as f:
pickle.dump(meta, f)
| import argparse
import glob
import os
import pickle
from pathlib import Path
import numpy as np
from PIL import Image
from tqdm import tqdm
from src.align.align_trans import get_reference_facial_points, warp_and_crop_face
# sys.path.append("../../")
from src.align.detector import detect_faces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="face alignment")
parser.add_argument(
"-source_root",
"--source_root",
help="specify your source dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-dest_root",
"--dest_root",
help="specify your destination dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-crop_size",
"--crop_size",
help="specify size of aligned faces, align and crop with padding",
default=112,
type=int,
)
args = parser.parse_args()
source_root = args.source_root # specify your source dir
dest_root = args.dest_root # specify your destination dir
crop_size = (
args.crop_size
) # specify size of aligned faces, align and crop with padding
scale = crop_size / 112.0
reference = get_reference_facial_points(default_square=True) * scale
cwd = os.getcwd() # delete '.DS_Store' existed in the source_root
os.chdir(source_root)
os.system("find . -name '*.DS_Store' -type f -delete")
os.chdir(cwd)
imfiles = [
f
for f in glob.glob(f"{source_root}F????/MID*/faces/msceleb*")
if Path(f).is_file()
]
# images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles}
meta = {}
# for subfolder in tqdm(os.listdir(source_root)):
for imfile in tqdm(imfiles):
ref = imfile.replace(source_root, "")
print("Processing\t{}".format(imfile))
img = Image.open(imfile)
try: # Handle exception
bbs, landmarks = detect_faces(img)
except Exception:
print("{} is discarded due to exception!".format(imfile))
continue
ref = imfile.replace(source_root, "")
ndetections = len(landmarks)
if (
ndetections == 0
): # If the landmarks cannot be detected, the img will be discarded
print("{} is discarded due to non-detected landmarks!".format(imfile))
meta[ref] = []
continue
li_meta = []
for i in range(ndetections):
im_meta = {}
im_meta["face"] = i
im_meta["landmarks"] = landmarks[i]
im_meta["bb"] = bbs[i]
facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(
np.array(img),
facial5points,
reference,
crop_size=(crop_size, crop_size),
)
img_warped = Image.fromarray(warped_face)
image_name = imfile.replace("images", "cropped").replace(
".jpg", "-{:02d}.jpg".format(i)
)
# im_meta['ref'] = "/".join(image_name.split('/')[-5:])
img_warped.save(image_name)
li_meta.append(im_meta)
meta[ref] = li_meta
with open(source_root + "cropped-meta.pkl", "wb") as f:
pickle.dump(meta, f)
| en | 0.348681 | # sys.path.append("../../") # specify your source dir # specify your destination dir # specify size of aligned faces, align and crop with padding # delete '.DS_Store' existed in the source_root # images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles} # for subfolder in tqdm(os.listdir(source_root)): # Handle exception # If the landmarks cannot be detected, the img will be discarded # im_meta['ref'] = "/".join(image_name.split('/')[-5:]) | 2.299347 | 2 |
extract.py | rmalav15/voice-data-extract | 0 | 512 | from srtvoiceext import extract
if __name__ == '__main__':
ext = extract('video.mkv', 'subtitles.srt', 'outdir') | from srtvoiceext import extract
if __name__ == '__main__':
ext = extract('video.mkv', 'subtitles.srt', 'outdir') | none | 1 | 1.509413 | 2 |
|
bacon/readonly_collections.py | aholkner/bacon | 37 | 513 | import collections.abc
class ReadOnlyDict(collections.abc.MutableMapping):
def __init__(self, store):
self.store = store
def __getitem__(self, key):
return self.store[key]
def __setitem__(self, key, value):
raise TypeError('Cannot modify ReadOnlyDict')
def __delitem__(self, key):
raise TypeError('Cannot modify ReadOnlyDict')
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __str__(self):
return 'ReadOnlyDict(%s)' % self.store
def __repr__(self):
        return 'ReadOnlyDict(%r)' % self.store | import collections.abc
class ReadOnlyDict(collections.abc.MutableMapping):
def __init__(self, store):
self.store = store
def __getitem__(self, key):
return self.store[key]
def __setitem__(self, key, value):
raise TypeError('Cannot modify ReadOnlyDict')
def __delitem__(self, key):
raise TypeError('Cannot modify ReadOnlyDict')
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __str__(self):
return 'ReadOnlyDict(%s)' % self.store
def __repr__(self):
return 'ReadOnlyDict(%r)' % self.store | none | 1 | 3.294346 | 3 |
|
rpg_game/gui.py | ricott1/twissh | 0 | 514 | # encoding: utf-8
import urwid
import time, os, copy
from rpg_game.utils import log, mod, distance
from rpg_game.constants import *
from urwid import raw_display
SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows()
MIN_HEADER_HEIGHT = 3
MAX_MENU_WIDTH = 48
FOOTER_HEIGHT = 4
PALETTE = [
("line", 'black', 'white', "standout"),
("top","white","black"),
("frame","white","white"),
("player", "light green", "black"),
("other", "light blue", "black"),
("monster", "dark red", "black"),
("fatigued", "dark red", "white", "standout"),
("reversed", "standout", ""),
("common","white","black"),
("common_line","black","white","standout"),
("uncommon","dark cyan","black"),
("uncommon_line","dark cyan","white","standout"),
("rare","yellow","black"),
("rare_line","yellow","white","standout"),
("unique","light magenta","black"),
("unique_line","light magenta","white","standout"),
("set","light green","black"),
("set_line","light green","white","standout"),
("normal","white","black"),
("positive","light green","black"),
("negative","dark red","black"),
("white","white","black"),
("disabled","dark red","black"),
("red","dark red","black"),
("green","light green","black"),
("yellow","yellow","black"),
("brown","brown","black"),
("white_line","black","white", "standout"),
("red_line","dark red","white", "standout"),
("green_line","light green","white", "standout"),
("yellow_line","yellow","white", "standout"),
("cyan","light cyan","black"),
("cyan_line","light cyan","white", "standout"),
("name","white","black"),
]
class UiFrame(urwid.Frame):
def __init__(self, parent, mind, *args, **kargs):
self.parent = parent
self.mind = mind
urwid.AttrMap(self,"frame")
super().__init__(*args, **kargs)
@property
def player(self):
if self.mind.avatar.uuid in self.mind.master.players:
return self.mind.master.players[self.mind.avatar.uuid]
else:
return None
@property
def connection(self):
if self.mind.avatar.uuid in self.mind.connections:
return self.mind.connections[self.mind.avatar.uuid]
else:
return None
def handle_input(self, _input):
pass
def on_update(self):
pass
def dispatch_event(self, event_type, *args):
self.mind.get_GUI_event(event_type, *args)
def register_event(self, event_type, callback):
self.mind.register_GUI_event(event_type, callback)
def disconnect(self):
pass
def restart(self):
pass
def focus_next(self):
pass
def focus_previous(self):
pass
def update_body(self, title, no_title=False, boxed=False):
self.active_body = self.bodies[title]
if boxed:
if no_title:
self.contents["body"] = (urwid.LineBox(self.active_body), None)
else:
self.contents["body"] = (urwid.LineBox(self.active_body, title=title), None)
else:
self.contents["body"] = (self.active_body, None)
class GUI(UiFrame):
def __init__(self, parent, mind):
self.bodies = {"Intro" : IntroFrame(self, mind)}
self.active_body = self.bodies["Intro"]
super().__init__(parent, mind, self.active_body)
def on_update(self):
self.active_body.on_update()
def handle_input(self, _input):
# print("HANDLING", _input)
self.active_body.handle_input(_input)
# def exit(self):
# self.disconnect()
# self.mind.disconnect()#should use dispatch event
def restart(self):
self.update_body("Intro", no_title=True)
def start_game_frame(self):
self.bodies["Game"] = GameFrame(self, self.mind)
self.update_body("Game", no_title=True)
class IntroFrame(UiFrame):
def __init__(self, parent, mind):
# urwid.Padding(urwid.BigText(('top', "Hack\'n\'SSH"), urwid.HalfBlock5x4Font())),
self.choices = ("Warrior", "Dwarf", "Wizard", "Thief", "Bard")
self.descriptions = {"Warrior": "The mighty warrior\n\nStrength +1, Hit points +4\nCharge and parry",
"Dwarf": "The short dwarf\n\nStrength +1, Constitution +1, Hit points +6\nDemolish and parry",
"Wizard": "The opportune wizard\n\nIntelligence +1\n Fireball, teleport and ice wall",
"Thief": "The sneaky thief\n\nDexterity +1, Intelligence +1, Hit points +2\nSneak attack, hide and trap",
"Bard": "The noisy bard\n\nCharisma +1, Dexterity +1, Intelligence +1, Hit points +2\nSing and summon"}
line = []
for c in self.choices:
btn = attr_button(c, self.select_class)
line.append(btn)
walker = urwid.SimpleFocusListWalker(line)
urwid.connect_signal(walker, "modified", self.update_description)
self.listbox = SelectableListBox(walker)
header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1))
super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions["Warrior"])])), header=header, focus_part="header")
def select_class(self, button):
index = min(self.listbox.focus_position, len(self.choices)-1)
choice = self.choices[index]
self.mind.master.new_player(self.mind.avatar.uuid, choice)
self.parent.start_game_frame()
def update_description(self):
index = min(self.listbox.focus_position, len(self.choices)-1)
choice = self.choices[index]
self.contents["body"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None)
class GameFrame(UiFrame):
def __init__(self, parent, mind):
self.mind = mind
_header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text("")])), self.header_height))
self._menu_view = True
self.map = MapFrame(self, mind)
self.menu = MenuFrame(self, mind)
super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1), header=_header, footer=None, focus_part="body")
self.menu_view = True
self.update_footer()
self.header_widget = self.header.original_widget.box_widget
self.footer_content_size = 0
@property
def header_height(self):
return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8)
@property
def menu_width(self):
if self.menu_view:
return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7)
return 0
@property
def map_width(self):
if self.menu_view:
return self.mind.screen_size[0] - self.menu_width
return self.mind.screen_size[0]
@property
def body_width(self):
return self.mind.screen_size[0]
@property
def body_height(self):
return self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT - 2
@property
def menu_view(self):
return self._menu_view
@menu_view.setter
def menu_view(self, value):
self._menu_view = value
_columns = [(self.map_width, self.map), (self.menu_width, self.menu)]
self.contents["body"] = (urwid.Columns(_columns, focus_column=1), None)
@property
def header_list(self):
return sorted([ent for k, ent in self.player.location.entities.items() if distance(self.player.position, ent.position) <= 3 and ent.status], key=lambda ent: distance(self.player.position, ent.position))
def update_footer(self):
_size = 0
inv_btns = []
for i, obj in self.player.inventory.content.items():
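            # marker style encodes item state: [x] equipped gear, ]x[ unequipped gear, (x) consumable, plain otherwise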
if obj:
_size += 1
if obj.is_equipment and obj.is_equipped:
_marker = ["[", (obj.color, f"{obj.marker[0]}"), "]"]
elif obj.is_equipment and not obj.is_equipped:
_marker = ["]", (obj.color, f"{obj.marker[0]}"), "["]
elif obj.is_consumable:
_marker = ["(", (obj.color, f"{obj.marker[0]}"), ")"]
else:
_marker = [f" {obj.marker[0]} "]
else:
_marker = [f" "]
if i < 9:
_num = f"\n {i+1} "
elif i == 9:
_num = "\n 0 "
elif i == 10:
_num = "\n - "
elif i == 11:
_num = "\n = "
if obj and obj is self.player.inventory.selection:
_marker += [("line", _num)]
else:
_marker += [("top", _num)]
btn = urwid.Text(_marker, align="center")
inv_btns.append((5, urwid.LineBox(btn)))
if self.mind.screen_size != (80, 24):
inv_btns.append(urwid.Text("\nSET TERMINAL\nTO 80X24", align="center"))
self.contents["footer"] = (SelectableColumns(inv_btns, dividechars=0), None)
self.footer_content_size = _size
def on_update(self):
self.update_header()
if self.footer_content_size != len(self.player.inventory.all):
self.update_footer()
if self.mind.screen_size != (80, 24):
self.update_footer()
self.map.on_update()
if self.menu_view:
self.menu.on_update()
def handle_input(self, _input):
if _input == "tab":
self.menu_view = not self.menu_view
elif _input == "enter" and self.player.inventory.selection:
self.player.use_quick_item(self.player.inventory.selection)
self.update_footer()
elif _input == "Q" and self.player.inventory.selection:
self.player.actions["drop"].use(self.player, obj=self.player.inventory.selection)
self.update_footer()
elif _input.isnumeric() or _input in ("-", "="):
self.select_item(_input)
self.update_footer()
elif _input == self.mind.key_map["status-menu"] and self.menu_view:
self.menu.update_body("Status")
elif _input == self.mind.key_map["help-menu"] and self.menu_view:
self.menu.update_body("Help")
elif _input == self.mind.key_map["equipment-menu"] and self.menu_view:
self.menu.update_body("Equipment")
elif _input == self.mind.key_map["inventory-menu"] and self.menu_view:
self.menu.update_body("Inventory")
else:
self.map.handle_input(_input)
def select_item(self, _input):
if _input.isnumeric() and int(_input) > 0:
_input = int(_input)-1
elif _input == "0":
            _input = 9
elif _input == "-":
_input = 10
elif _input == "=":
_input = 11
self.player.inventory.selection = self.player.inventory.get(_input)
def update_header(self):
widgets = []
for p in self.header_list:
widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap="clip"), {self.player.id:"player"}), {p.id:"other" for i, p in self.mind.master.players.items()}))
if widgets:
self.header_widget.body[:] = widgets
class MapFrame(UiFrame):
def __init__(self, parent, mind):
map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.map_box = map_box.body
self.layer_view = -1
self.debug_view = False
super().__init__(parent, mind, map_box)
self.on_update()
@property
def visible_range(self):
header_height = self.parent.header_height + 2
tot_rows = self.mind.screen_size[1]
return (tot_rows - header_height - FOOTER_HEIGHT)
def on_update(self):
if self.layer_view == -1:
_map = copy.deepcopy(self.player.location.map)
else:
_map = self.player.location.layer_from_entities(self.layer_view, self.debug_view)
x, y, z = self.player.position
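        # scroll the visible window with the player: roughly a third of the width to the left, half of the height above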
w = max(0, y - self.parent.body_width//3)
visible_map = [line[w:w+self.parent.body_width] for line in _map]
h = max(0, x - self.parent.body_height//2)
if h+self.parent.body_height >= len(visible_map):
visible_map = visible_map[len(visible_map)-self.parent.body_height:]
else:
visible_map = visible_map[h:h+self.parent.body_height]
map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap="clip"), {self.player.id:"player"}), {p.id:"other" for i, p in self.mind.master.players.items()}) for line in visible_map]
self.map_box[:] = map_with_attr
def handle_input(self, _input):
if _input == "ctrl f":
self.debug_view = not self.debug_view
elif _input == "ctrl v":
self.layer_view = self.layer_view + 1
if self.layer_view > 2:
self.layer_view = -1
elif _input in self.mind.key_map:
_action = self.mind.key_map[_input]
self.player.handle_input(_action)
class MenuFrame(UiFrame):
def __init__(self, parent, mind):
_frames = ("Inventory", "Status", "Equipment", "Help")
self.bodies = {b : globals()[f"{b}Frame"](self, mind) for b in _frames}
idx = -1
_title = _frames[idx]
self.active_body = self.bodies[_title]
super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title))
def on_update(self):
self.active_body.on_update()
def selectable(self):
return False
def update_body(self, _title):
self.active_body = self.bodies[_title]
self.contents["body"] = (urwid.LineBox(self.active_body, title=_title), None)
class InventoryFrame(UiFrame):
def __init__(self, parent, mind):
columns = urwid.Columns([urwid.Text("")])
box = urwid.ListBox(urwid.SimpleListWalker([columns]))
self.box = box.body
self.default_header = urwid.Text("0/9-= to select\n\n", align="center")
self.default_footer = urwid.Text([("green", f"{'Enter:use/eqp':<14s}"), ("yellow", "Q:drop")], align="center")
super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer)
@property
def selection_data(self):
if not self.player.inventory.selection:
return urwid.Text("")
i = self.player.inventory.selection
_text = []
_text += [i.eq_description, f"\nEncumbrance:{i.encumbrance}\n"]
return urwid.Text(_text)
def update_header(self):
if not self.player.inventory.selection:
self.contents["header"] = (self.default_header, None)
else:
i = self.player.inventory.selection
self.contents["header"] = (urwid.Text([(i.color, f"{i.name}\n"), f"{i.description}\n"], align="center"), None)
def update_footer(self):
if not self.player.inventory.selection:
self.contents["footer"] = (self.default_footer, None)
else:
i = self.player.inventory.selection
_text = []
if not i.requisites(self.player):
_text += [("red", f"{'Cannot equip':<14s}")]
elif not i.is_equipped:
_text += [("green", f"{'Enter:equip':<14s}")]
elif i.is_equipped:
_text += [("green", f"{'Enter:unequip':<14s}")]
elif i.is_consumable:
_text += [("green", f"{'Enter:use':<14s}")]
_text += [("yellow", "Q:drop")]
self.contents["footer"] = (urwid.Text(_text, align="center"), None)
def update_body(self):
side = urwid.Text("║")
width = 8
height = 6
_marker_box = ["╔" +"═"*width+"╗\n"]
for x in range(height):
_marker_box += ["║"]
for y in range(width):
_marker_box += ["."]
_marker_box += ["║\n"]
_marker_box += ["╚" +"═"*width+"╝"]
if self.player.inventory.selection:
i = self.player.inventory.selection
X_OFFSET = 2
Y_OFFSET = 4
for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions):
x, y = pos
_marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m)
self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)]
def on_update(self):
self.update_header()
self.update_body()
self.update_footer()
class StatusFrame(UiFrame):
def __init__(self, parent, mind):
box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.box = box.body
super().__init__(parent, mind, box)
def on_update(self):
player = self.player
x, y, z = player.position
_top = f"{player.name:<12s} {player.game_class.name:<10s}\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\n"
_left = []
for s in CHARACTERISTICS:
c = getattr(player, s)
state = ["normal", "positive", "negative"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)]
if self.parent.parent.menu_width > 40:
_name = c.name[0].upper() + c.name[1:]
_left += [f"{_name:<12} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
elif self.parent.parent.menu_width > 36:
_name = c.name[0].upper() + c.name[1:6]
_left += [f"{_name:<6} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
else:
_left += [f"{s:<3} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
_right = []
base = player.STR.mod
weapon = player.equipment["main_hand"]
if not weapon:
min_dmg, max_dmg = (1, 4)
else:
number, value = weapon.dmg
min_dmg, max_dmg = (number * 1, number * value)
min_dmg = max(1, base + min_dmg)
max_dmg = max(1, base + max_dmg)
_right.append(f"Damage {min_dmg:>3d}-{max_dmg:<3d}\n")
_right.append(f"Reduction {player.dmg_reduction:<3d}\n")
_right.append(f"Encumb ")
if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance:
_right.append(("red", f"{player.inventory.encumbrance:>2d}"))
elif player.inventory.encumbrance > player.encumbrance:
_right.append(("yellow", f"{player.inventory.encumbrance:>2d}"))
else:
_right.append(("white", f"{player.inventory.encumbrance:>2d}"))
_right.append(f"/{player.encumbrance:<2d}\n")
_right.append(f"Speed {player.movement_speed}\n")
_right.append(f"Monsterized {player.MP:<2d}\n")
self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ]
class EquipmentFrame(UiFrame):
def __init__(self, parent, mind):
box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.box = box.body
super().__init__(parent, mind, box)
def on_update(self):
player = self.player
_equipment = []
for t, obj in player.equipment.items():
_name = t.replace("_", " ")
_name = _name[0].upper() + _name[1:]
if obj:
_equipment += [urwid.Text([f"{_name}: ", (obj.color, f"{obj.name}")])]
else:
_equipment += [urwid.Text([f"{_name}: "])]
_bonus = {}
for eqp in player.equipment_set:
for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())):
val = player.full_eqp_bonus(eqp, b)
if b not in _bonus:
_bonus[b] = val
else:
_bonus[b] += val
_top = ""
for b, val in _bonus.items():
if b == "dmg_reduction":
_top += f"Reduction:{val} "
else:
_top += f"{b}:{val} "
_top += "\n"
self.box[:] = [urwid.Text(_top)] + _equipment
class HelpFrame(UiFrame):
def __init__(self, parent, mind):
self.mind = mind
map_commands = ["Map commands\n\n", f"←→↑↓:move\n", f"shift+←→↑↓:dash\n", f"a:attack\n", f"q:pickup\n"]
class_action_keys = [k for k, act in self.mind.key_map.items() if act.startswith("class_ability")]
for i, act in enumerate(self.player.class_actions):
k = class_action_keys[i]
map_commands.append(f"{k}:{self.player.class_actions[act].description.lower()}\n")
menu_commands = ["Menu commands\n\n", f"tab:open/close\n",f"0/9-=:select item\n", f"ctrl+p:respawn\n", f"ctrl+a:inventory\n", f"ctrl+s:status\n", f"ctrl+d:help\n", f"ctrl+e:equipment\n"]
columns = urwid.Columns([urwid.Text(map_commands, wrap="clip"), urwid.Text(menu_commands, wrap="clip")], dividechars = 1)
super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns])))
class SelectableListBox(urwid.ListBox):
def __init__(self, body):
super(SelectableListBox, self).__init__(body)
def focus_next(self):
try:
self.focus_position += 1
except IndexError:
pass
def focus_previous(self):
try:
self.focus_position -= 1
except IndexError:
pass
class SelectableColumns(urwid.Columns):
def __init__(self, widget_list, focus_column=None, dividechars=0):
super().__init__(widget_list, dividechars, focus_column)
def focus_next(self):
try:
self.focus_position += 1
except:
pass
def focus_previous(self):
try:
self.focus_position -= 1
except:
pass
class FrameColumns(urwid.Columns):
def __init__(self, parent, widget_list, dividechars=0):
self.widget_size = len(widget_list)
super(FrameColumns, self).__init__(widget_list, dividechars)
self.parent = parent
def focus_next(self):
try:
self.focus_position += 1
if self.focus_position >= self.widget_size:
self.focus_position -= self.widget_size
new_body = [b for b in self.parent.bodies][self.focus_position]
self.parent.update_body(new_body)
except:
pass
def focus_previous(self):
try:
self.focus_position -= 1
if self.focus_position < 0:
self.focus_position += self.widget_size
new_body = [b for b in self.parent.bodies][self.focus_position]
self.parent.update_body(new_body)
except:
pass
class ButtonLabel(urwid.SelectableIcon):
def set_text(self, label):
'''
set_text is invoked by Button.set_label
'''
self.__super.set_text(label)
self._cursor_position = len(label) + 1
class MyButton(urwid.Button):
'''
- override __init__ to use our ButtonLabel instead of urwid.SelectableIcon
- make button_left and button_right plain strings and variable width -
any string, including an empty string, can be set and displayed
- otherwise, we leave Button behaviour unchanged
'''
button_left = "["
button_right = "]"
def __init__(self, label, on_press=None, user_data=None, borders=True, disabled=False):
self._label = ButtonLabel("")
if borders:
cols = urwid.Columns([
('fixed', len(self.button_left), urwid.Text(self.button_left)),
self._label,
('fixed', len(self.button_right), urwid.Text(self.button_right))],
dividechars=1)
else:
cols = urwid.Columns([self._label],
dividechars=0)
super(urwid.Button, self).__init__(cols)
self.disabled = disabled
if on_press:
urwid.connect_signal(self, 'click', on_press, user_data)
self.set_label(label)
self.lllavel = label
# @property
# def disabled(self):
# return self._disabled
# @disabled.setter
# def disabled(self, value):
# if self._disabled == value:
# return
# if self.disabled:
# urwid.AttrMap(self, "disabled")
# else:
# urwid.AttrMap(self, None, "line")
def selectable(self):
return not self.disabled
def attr_button(label, cmd=None, attr_map=None, focus_map = "line", align = "center", user_args = None, borders=True, disabled=False):
btn = create_button(label, cmd=cmd, align = align, user_args = user_args, borders=borders, disabled=disabled)
return urwid.AttrMap(btn, attr_map, focus_map=focus_map)
def create_button(label, cmd=None, align = "center", user_args = None, borders=True, disabled=False):
btn = MyButton(label, borders=borders, disabled=disabled)
btn._label.align = align
if cmd:
if user_args:
urwid.connect_signal(btn, "click", cmd, user_args = user_args)
else:
urwid.connect_signal(btn, "click", cmd)
return btn
| # encoding: utf-8
import urwid
import time, os, copy
from rpg_game.utils import log, mod, distance
from rpg_game.constants import *
from urwid import raw_display
SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows()
MIN_HEADER_HEIGHT = 3
MAX_MENU_WIDTH = 48
FOOTER_HEIGHT = 4
PALETTE = [
("line", 'black', 'white', "standout"),
("top","white","black"),
("frame","white","white"),
("player", "light green", "black"),
("other", "light blue", "black"),
("monster", "dark red", "black"),
("fatigued", "dark red", "white", "standout"),
("reversed", "standout", ""),
("common","white","black"),
("common_line","black","white","standout"),
("uncommon","dark cyan","black"),
("uncommon_line","dark cyan","white","standout"),
("rare","yellow","black"),
("rare_line","yellow","white","standout"),
("unique","light magenta","black"),
("unique_line","light magenta","white","standout"),
("set","light green","black"),
("set_line","light green","white","standout"),
("normal","white","black"),
("positive","light green","black"),
("negative","dark red","black"),
("white","white","black"),
("disabled","dark red","black"),
("red","dark red","black"),
("green","light green","black"),
("yellow","yellow","black"),
("brown","brown","black"),
("white_line","black","white", "standout"),
("red_line","dark red","white", "standout"),
("green_line","light green","white", "standout"),
("yellow_line","yellow","white", "standout"),
("cyan","light cyan","black"),
("cyan_line","light cyan","white", "standout"),
("name","white","black"),
]
class UiFrame(urwid.Frame):
def __init__(self, parent, mind, *args, **kargs):
self.parent = parent
self.mind = mind
urwid.AttrMap(self,"frame")
super().__init__(*args, **kargs)
@property
def player(self):
if self.mind.avatar.uuid in self.mind.master.players:
return self.mind.master.players[self.mind.avatar.uuid]
else:
return None
@property
def connection(self):
if self.mind.avatar.uuid in self.mind.connections:
return self.mind.connections[self.mind.avatar.uuid]
else:
return None
def handle_input(self, _input):
pass
def on_update(self):
pass
def dispatch_event(self, event_type, *args):
self.mind.get_GUI_event(event_type, *args)
def register_event(self, event_type, callback):
self.mind.register_GUI_event(event_type, callback)
def disconnect(self):
pass
def restart(self):
pass
def focus_next(self):
pass
def focus_previous(self):
pass
def update_body(self, title, no_title=False, boxed=False):
self.active_body = self.bodies[title]
if boxed:
if no_title:
self.contents["body"] = (urwid.LineBox(self.active_body), None)
else:
self.contents["body"] = (urwid.LineBox(self.active_body, title=title), None)
else:
self.contents["body"] = (self.active_body, None)
class GUI(UiFrame):
def __init__(self, parent, mind):
self.bodies = {"Intro" : IntroFrame(self, mind)}
self.active_body = self.bodies["Intro"]
super().__init__(parent, mind, self.active_body)
def on_update(self):
self.active_body.on_update()
def handle_input(self, _input):
# print("HANDLING", _input)
self.active_body.handle_input(_input)
# def exit(self):
# self.disconnect()
# self.mind.disconnect()#should use dispatch event
def restart(self):
self.update_body("Intro", no_title=True)
def start_game_frame(self):
self.bodies["Game"] = GameFrame(self, self.mind)
self.update_body("Game", no_title=True)
class IntroFrame(UiFrame):
def __init__(self, parent, mind):
# urwid.Padding(urwid.BigText(('top', "Hack\'n\'SSH"), urwid.HalfBlock5x4Font())),
self.choices = ("Warrior", "Dwarf", "Wizard", "Thief", "Bard")
self.descriptions = {"Warrior": "The mighty warrior\n\nStrength +1, Hit points +4\nCharge and parry",
"Dwarf": "The short dwarf\n\nStrength +1, Constitution +1, Hit points +6\nDemolish and parry",
"Wizard": "The opportune wizard\n\nIntelligence +1\n Fireball, teleport and ice wall",
"Thief": "The sneaky thief\n\nDexterity +1, Intelligence +1, Hit points +2\nSneak attack, hide and trap",
"Bard": "The noisy bard\n\nCharisma +1, Dexterity +1, Intelligence +1, Hit points +2\nSing and summon"}
line = []
for c in self.choices:
btn = attr_button(c, self.select_class)
line.append(btn)
walker = urwid.SimpleFocusListWalker(line)
urwid.connect_signal(walker, "modified", self.update_description)
self.listbox = SelectableListBox(walker)
header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1))
super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions["Warrior"])])), header=header, focus_part="header")
def select_class(self, button):
index = min(self.listbox.focus_position, len(self.choices)-1)
choice = self.choices[index]
self.mind.master.new_player(self.mind.avatar.uuid, choice)
self.parent.start_game_frame()
def update_description(self):
index = min(self.listbox.focus_position, len(self.choices)-1)
choice = self.choices[index]
self.contents["body"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None)
class GameFrame(UiFrame):
def __init__(self, parent, mind):
self.mind = mind
_header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text("")])), self.header_height))
self._menu_view = True
self.map = MapFrame(self, mind)
self.menu = MenuFrame(self, mind)
super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1), header=_header, footer=None, focus_part="body")
self.menu_view = True
self.update_footer()
self.header_widget = self.header.original_widget.box_widget
self.footer_content_size = 0
@property
def header_height(self):
return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8)
@property
def menu_width(self):
if self.menu_view:
return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7)
return 0
@property
def map_width(self):
if self.menu_view:
return self.mind.screen_size[0] - self.menu_width
return self.mind.screen_size[0]
@property
def body_width(self):
return self.mind.screen_size[0]
@property
def body_height(self):
return self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT - 2
@property
def menu_view(self):
return self._menu_view
@menu_view.setter
def menu_view(self, value):
self._menu_view = value
_columns = [(self.map_width, self.map), (self.menu_width, self.menu)]
self.contents["body"] = (urwid.Columns(_columns, focus_column=1), None)
@property
def header_list(self):
return sorted([ent for k, ent in self.player.location.entities.items() if distance(self.player.position, ent.position) <= 3 and ent.status], key=lambda ent: distance(self.player.position, ent.position))
def update_footer(self):
_size = 0
inv_btns = []
for i, obj in self.player.inventory.content.items():
if obj:
_size += 1
if obj.is_equipment and obj.is_equipped:
_marker = ["[", (obj.color, f"{obj.marker[0]}"), "]"]
elif obj.is_equipment and not obj.is_equipped:
_marker = ["]", (obj.color, f"{obj.marker[0]}"), "["]
elif obj.is_consumable:
_marker = ["(", (obj.color, f"{obj.marker[0]}"), ")"]
else:
_marker = [f" {obj.marker[0]} "]
else:
_marker = [f" "]
if i < 9:
_num = f"\n {i+1} "
elif i == 9:
_num = "\n 0 "
elif i == 10:
_num = "\n - "
elif i == 11:
_num = "\n = "
if obj and obj is self.player.inventory.selection:
_marker += [("line", _num)]
else:
_marker += [("top", _num)]
btn = urwid.Text(_marker, align="center")
inv_btns.append((5, urwid.LineBox(btn)))
if self.mind.screen_size != (80, 24):
inv_btns.append(urwid.Text("\nSET TERMINAL\nTO 80X24", align="center"))
self.contents["footer"] = (SelectableColumns(inv_btns, dividechars=0), None)
self.footer_content_size = _size
def on_update(self):
self.update_header()
if self.footer_content_size != len(self.player.inventory.all):
self.update_footer()
if self.mind.screen_size != (80, 24):
self.update_footer()
self.map.on_update()
if self.menu_view:
self.menu.on_update()
def handle_input(self, _input):
if _input == "tab":
self.menu_view = not self.menu_view
elif _input == "enter" and self.player.inventory.selection:
self.player.use_quick_item(self.player.inventory.selection)
self.update_footer()
elif _input == "Q" and self.player.inventory.selection:
self.player.actions["drop"].use(self.player, obj=self.player.inventory.selection)
self.update_footer()
elif _input.isnumeric() or _input in ("-", "="):
self.select_item(_input)
self.update_footer()
elif _input == self.mind.key_map["status-menu"] and self.menu_view:
self.menu.update_body("Status")
elif _input == self.mind.key_map["help-menu"] and self.menu_view:
self.menu.update_body("Help")
elif _input == self.mind.key_map["equipment-menu"] and self.menu_view:
self.menu.update_body("Equipment")
elif _input == self.mind.key_map["inventory-menu"] and self.menu_view:
self.menu.update_body("Inventory")
else:
self.map.handle_input(_input)
def select_item(self, _input):
if _input.isnumeric() and int(_input) > 0:
_input = int(_input)-1
elif _input == "0":
            _input = 9
elif _input == "-":
_input = 10
elif _input == "=":
_input = 11
self.player.inventory.selection = self.player.inventory.get(_input)
def update_header(self):
widgets = []
for p in self.header_list:
widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap="clip"), {self.player.id:"player"}), {p.id:"other" for i, p in self.mind.master.players.items()}))
if widgets:
self.header_widget.body[:] = widgets
class MapFrame(UiFrame):
def __init__(self, parent, mind):
map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.map_box = map_box.body
self.layer_view = -1
self.debug_view = False
super().__init__(parent, mind, map_box)
self.on_update()
@property
def visible_range(self):
header_height = self.parent.header_height + 2
tot_rows = self.mind.screen_size[1]
return (tot_rows - header_height - FOOTER_HEIGHT)
def on_update(self):
if self.layer_view == -1:
_map = copy.deepcopy(self.player.location.map)
else:
_map = self.player.location.layer_from_entities(self.layer_view, self.debug_view)
x, y, z = self.player.position
w = max(0, y - self.parent.body_width//3)
visible_map = [line[w:w+self.parent.body_width] for line in _map]
h = max(0, x - self.parent.body_height//2)
if h+self.parent.body_height >= len(visible_map):
visible_map = visible_map[len(visible_map)-self.parent.body_height:]
else:
visible_map = visible_map[h:h+self.parent.body_height]
map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap="clip"), {self.player.id:"player"}), {p.id:"other" for i, p in self.mind.master.players.items()}) for line in visible_map]
self.map_box[:] = map_with_attr
def handle_input(self, _input):
if _input == "ctrl f":
self.debug_view = not self.debug_view
elif _input == "ctrl v":
self.layer_view = self.layer_view + 1
if self.layer_view > 2:
self.layer_view = -1
elif _input in self.mind.key_map:
_action = self.mind.key_map[_input]
self.player.handle_input(_action)
class MenuFrame(UiFrame):
def __init__(self, parent, mind):
_frames = ("Inventory", "Status", "Equipment", "Help")
self.bodies = {b : globals()[f"{b}Frame"](self, mind) for b in _frames}
idx = -1
_title = _frames[idx]
self.active_body = self.bodies[_title]
super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title))
def on_update(self):
self.active_body.on_update()
def selectable(self):
return False
def update_body(self, _title):
self.active_body = self.bodies[_title]
self.contents["body"] = (urwid.LineBox(self.active_body, title=_title), None)
class InventoryFrame(UiFrame):
def __init__(self, parent, mind):
columns = urwid.Columns([urwid.Text("")])
box = urwid.ListBox(urwid.SimpleListWalker([columns]))
self.box = box.body
self.default_header = urwid.Text("0/9-= to select\n\n", align="center")
self.default_footer = urwid.Text([("green", f"{'Enter:use/eqp':<14s}"), ("yellow", "Q:drop")], align="center")
super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer)
@property
def selection_data(self):
if not self.player.inventory.selection:
return urwid.Text("")
i = self.player.inventory.selection
_text = []
_text += [i.eq_description, f"\nEncumbrance:{i.encumbrance}\n"]
return urwid.Text(_text)
def update_header(self):
if not self.player.inventory.selection:
self.contents["header"] = (self.default_header, None)
else:
i = self.player.inventory.selection
self.contents["header"] = (urwid.Text([(i.color, f"{i.name}\n"), f"{i.description}\n"], align="center"), None)
def update_footer(self):
if not self.player.inventory.selection:
self.contents["footer"] = (self.default_footer, None)
else:
i = self.player.inventory.selection
_text = []
if not i.requisites(self.player):
_text += [("red", f"{'Cannot equip':<14s}")]
elif not i.is_equipped:
_text += [("green", f"{'Enter:equip':<14s}")]
elif i.is_equipped:
_text += [("green", f"{'Enter:unequip':<14s}")]
elif i.is_consumable:
_text += [("green", f"{'Enter:use':<14s}")]
_text += [("yellow", "Q:drop")]
self.contents["footer"] = (urwid.Text(_text, align="center"), None)
def update_body(self):
side = urwid.Text("║")
width = 8
height = 6
_marker_box = ["╔" +"═"*width+"╗\n"]
for x in range(height):
_marker_box += ["║"]
for y in range(width):
_marker_box += ["."]
_marker_box += ["║\n"]
_marker_box += ["╚" +"═"*width+"╝"]
if self.player.inventory.selection:
i = self.player.inventory.selection
X_OFFSET = 2
Y_OFFSET = 4
for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions):
x, y = pos
_marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m)
self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)]
def on_update(self):
self.update_header()
self.update_body()
self.update_footer()
class StatusFrame(UiFrame):
def __init__(self, parent, mind):
box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.box = box.body
super().__init__(parent, mind, box)
def on_update(self):
player = self.player
x, y, z = player.position
_top = f"{player.name:<12s} {player.game_class.name:<10s}\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\n"
_left = []
for s in CHARACTERISTICS:
c = getattr(player, s)
state = ["normal", "positive", "negative"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)]
if self.parent.parent.menu_width > 40:
_name = c.name[0].upper() + c.name[1:]
_left += [f"{_name:<12} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
elif self.parent.parent.menu_width > 36:
_name = c.name[0].upper() + c.name[1:6]
_left += [f"{_name:<6} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
else:
_left += [f"{s:<3} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
_right = []
base = player.STR.mod
weapon = player.equipment["main_hand"]
if not weapon:
min_dmg, max_dmg = (1, 4)
else:
number, value = weapon.dmg
min_dmg, max_dmg = (number * 1, number * value)
min_dmg = max(1, base + min_dmg)
max_dmg = max(1, base + max_dmg)
_right.append(f"Damage {min_dmg:>3d}-{max_dmg:<3d}\n")
_right.append(f"Reduction {player.dmg_reduction:<3d}\n")
_right.append(f"Encumb ")
if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance:
_right.append(("red", f"{player.inventory.encumbrance:>2d}"))
elif player.inventory.encumbrance > player.encumbrance:
_right.append(("yellow", f"{player.inventory.encumbrance:>2d}"))
else:
_right.append(("white", f"{player.inventory.encumbrance:>2d}"))
_right.append(f"/{player.encumbrance:<2d}\n")
_right.append(f"Speed {player.movement_speed}\n")
_right.append(f"Monsterized {player.MP:<2d}\n")
self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ]
class EquipmentFrame(UiFrame):
def __init__(self, parent, mind):
box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.box = box.body
super().__init__(parent, mind, box)
def on_update(self):
player = self.player
_equipment = []
for t, obj in player.equipment.items():
_name = t.replace("_", " ")
_name = _name[0].upper() + _name[1:]
if obj:
_equipment += [urwid.Text([f"{_name}: ", (obj.color, f"{obj.name}")])]
else:
_equipment += [urwid.Text([f"{_name}: "])]
_bonus = {}
for eqp in player.equipment_set:
for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())):
val = player.full_eqp_bonus(eqp, b)
if b not in _bonus:
_bonus[b] = val
else:
_bonus[b] += val
_top = ""
for b, val in _bonus.items():
if b == "dmg_reduction":
_top += f"Reduction:{val} "
else:
_top += f"{b}:{val} "
_top += "\n"
self.box[:] = [urwid.Text(_top)] + _equipment
class HelpFrame(UiFrame):
def __init__(self, parent, mind):
self.mind = mind
map_commands = ["Map commands\n\n", f"←→↑↓:move\n", f"shift+←→↑↓:dash\n", f"a:attack\n", f"q:pickup\n"]
class_action_keys = [k for k, act in self.mind.key_map.items() if act.startswith("class_ability")]
for i, act in enumerate(self.player.class_actions):
k = class_action_keys[i]
map_commands.append(f"{k}:{self.player.class_actions[act].description.lower()}\n")
menu_commands = ["Menu commands\n\n", f"tab:open/close\n",f"0/9-=:select item\n", f"ctrl+p:respawn\n", f"ctrl+a:inventory\n", f"ctrl+s:status\n", f"ctrl+d:help\n", f"ctrl+e:equipment\n"]
columns = urwid.Columns([urwid.Text(map_commands, wrap="clip"), urwid.Text(menu_commands, wrap="clip")], dividechars = 1)
super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns])))
class SelectableListBox(urwid.ListBox):
def __init__(self, body):
super(SelectableListBox, self).__init__(body)
def focus_next(self):
try:
self.focus_position += 1
except IndexError:
pass
def focus_previous(self):
try:
self.focus_position -= 1
except IndexError:
pass
class SelectableColumns(urwid.Columns):
def __init__(self, widget_list, focus_column=None, dividechars=0):
super().__init__(widget_list, dividechars, focus_column)
def focus_next(self):
try:
self.focus_position += 1
        except IndexError:
pass
def focus_previous(self):
try:
self.focus_position -= 1
        except IndexError:
pass
class FrameColumns(urwid.Columns):
def __init__(self, parent, widget_list, dividechars=0):
self.widget_size = len(widget_list)
super(FrameColumns, self).__init__(widget_list, dividechars)
self.parent = parent
def focus_next(self):
try:
self.focus_position += 1
if self.focus_position >= self.widget_size:
self.focus_position -= self.widget_size
new_body = [b for b in self.parent.bodies][self.focus_position]
self.parent.update_body(new_body)
except:
pass
def focus_previous(self):
try:
self.focus_position -= 1
if self.focus_position < 0:
self.focus_position += self.widget_size
new_body = [b for b in self.parent.bodies][self.focus_position]
self.parent.update_body(new_body)
except:
pass
class ButtonLabel(urwid.SelectableIcon):
def set_text(self, label):
'''
set_text is invoked by Button.set_label
'''
self.__super.set_text(label)
self._cursor_position = len(label) + 1
class MyButton(urwid.Button):
'''
- override __init__ to use our ButtonLabel instead of urwid.SelectableIcon
- make button_left and button_right plain strings and variable width -
any string, including an empty string, can be set and displayed
- otherwise, we leave Button behaviour unchanged
'''
button_left = "["
button_right = "]"
def __init__(self, label, on_press=None, user_data=None, borders=True, disabled=False):
self._label = ButtonLabel("")
if borders:
cols = urwid.Columns([
('fixed', len(self.button_left), urwid.Text(self.button_left)),
self._label,
('fixed', len(self.button_right), urwid.Text(self.button_right))],
dividechars=1)
else:
cols = urwid.Columns([self._label],
dividechars=0)
super(urwid.Button, self).__init__(cols)
self.disabled = disabled
if on_press:
urwid.connect_signal(self, 'click', on_press, user_data)
self.set_label(label)
        self.label_text = label
# @property
# def disabled(self):
# return self._disabled
# @disabled.setter
# def disabled(self, value):
# if self._disabled == value:
# return
# if self.disabled:
# urwid.AttrMap(self, "disabled")
# else:
# urwid.AttrMap(self, None, "line")
def selectable(self):
return not self.disabled
def attr_button(label, cmd=None, attr_map=None, focus_map = "line", align = "center", user_args = None, borders=True, disabled=False):
btn = create_button(label, cmd=cmd, align = align, user_args = user_args, borders=borders, disabled=disabled)
return urwid.AttrMap(btn, attr_map, focus_map=focus_map)
def create_button(label, cmd=None, align = "center", user_args = None, borders=True, disabled=False):
btn = MyButton(label, borders=borders, disabled=disabled)
btn._label.align = align
if cmd:
if user_args:
urwid.connect_signal(btn, "click", cmd, user_args = user_args)
else:
urwid.connect_signal(btn, "click", cmd)
return btn
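# Illustrative sketch (not part of the original module): how the button helpers
# above can be wired into an urwid layout. The callback and labels are made up
# for demonstration, and this helper is never called by the rest of the code.
def _demo_buttons():
    def on_press(button):
        pass  # hypothetical click handler
    plain = create_button("Attack", cmd=on_press, borders=True)
    styled = attr_button("Inventory", cmd=on_press, focus_map="line")
    return urwid.Pile([plain, styled])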
| en | 0.424037 | # encoding: utf-8 # print("HANDLING", _input) # def exit(self): # self.disconnect() # self.mind.disconnect()#should use dispatch event # urwid.Padding(urwid.BigText(('top', "Hack\'n\'SSH"), urwid.HalfBlock5x4Font())), #max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8) set_text is invoked by Button.set_label - override __init__ to use our ButtonLabel instead of urwid.SelectableIcon - make button_left and button_right plain strings and variable width - any string, including an empty string, can be set and displayed - otherwise, we leave Button behaviour unchanged # @property # def disabled(self): # return self._disabled # @disabled.setter # def disabled(self, value): # if self._disabled == value: # return # if self.disabled: # urwid.AttrMap(self, "disabled") # else: # urwid.AttrMap(self, None, "line") | 1.886018 | 2 |
lale/lib/autogen/linear_regression.py | gbdrt/lale | 0 | 515 | from numpy import inf, nan
from sklearn.linear_model import LinearRegression as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class LinearRegressionImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LinearRegression Ordinary least squares Linear Regression.",
"allOf": [
{
"type": "object",
"required": ["fit_intercept", "normalize", "copy_X", "n_jobs"],
"relevantToOptimizer": ["fit_intercept", "normalize", "copy_X"],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use for the computation",
},
},
},
{
"XXX TODO XXX": "Parameter: n_jobs > only provide speedup for n_targets > 1 and sufficient large problems"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target values",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Individual weights for each sample ",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
set_docstrings(LinearRegressionImpl, _combined_schemas)
LinearRegression = make_operator(LinearRegressionImpl, _combined_schemas)
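# Illustrative sketch (not part of the original module): exercising the wrapper
# directly on a tiny made-up dataset. A scikit-learn installation is assumed.
if __name__ == "__main__":
    import numpy as np

    X = np.array([[1.0], [2.0], [3.0], [4.0]])
    y = np.array([2.0, 4.0, 6.0, 8.0])
    model = LinearRegressionImpl(fit_intercept=True, normalize=False, copy_X=True, n_jobs=1)
    print(model.fit(X, y).predict(X))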
| from numpy import inf, nan
from sklearn.linear_model import LinearRegression as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class LinearRegressionImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LinearRegression Ordinary least squares Linear Regression.",
"allOf": [
{
"type": "object",
"required": ["fit_intercept", "normalize", "copy_X", "n_jobs"],
"relevantToOptimizer": ["fit_intercept", "normalize", "copy_X"],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use for the computation",
},
},
},
{
"XXX TODO XXX": "Parameter: n_jobs > only provide speedup for n_targets > 1 and sufficient large problems"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target values",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Individual weights for each sample ",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
set_docstrings(LinearRegressionImpl, _combined_schemas)
LinearRegression = make_operator(LinearRegressionImpl, _combined_schemas)
| el | 0.195432 | #", #", #", #", #", #sklearn-linear_model-linearregression", | 2.44858 | 2 |
Models.py | jmj23/Kaggle-Pneumothorax | 0 | 516 | import numpy as np
from keras.applications.inception_v3 import InceptionV3
from keras.initializers import RandomNormal
from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D,
Cropping2D, Dense, Flatten, GlobalAveragePooling2D,
Input, Lambda, MaxPooling2D, Reshape, UpSampling2D,
ZeroPadding2D, ZeroPadding3D, add, concatenate)
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.models import Model
# Parameterized 2D Block Model
def BlockModel2D(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block CED model for segmentation problems
Args:
input shape: a list or tuple of [rows,cols,channels] of input images
filt_num: the number of filters in the first and last layers
        This number is linearly increased and decreased throughout the model
numBlocks: number of processing blocks. The larger the number the deeper the model
output_chan: number of output channels. Set if doing multi-class segmentation
regression: Whether to have a continuous output with linear activation
Returns:
        An uninitialized Keras model
    Example usage: SegModel = BlockModel2D([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_clean_{}'.format(rr))(x)
skip_list.append(x)
# expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
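# Illustrative sketch (not part of the original module): building and compiling
# the segmentation model. Input shape, optimizer and loss are assumptions for
# demonstration only; this helper is never called by the rest of the code.
def _demo_block_model_2d():
    seg_model = BlockModel2D(input_shape=(256, 256, 1), filt_num=8, numBlocks=3)
    seg_model.compile(optimizer='adam', loss='binary_crossentropy')
    seg_model.summary()
    return seg_model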
# Parameterized 2D Block Model
def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block model for pretraining on classification task
Args:
input shape: a list or tuple of [rows,cols,channels] of input images
filt_num: the number of filters in the first and last layers
        This number is linearly increased and decreased throughout the model
numBlocks: number of processing blocks. The larger the number the deeper the model
output_chan: number of output channels. Set if doing multi-class segmentation
regression: Whether to have a continuous output with linear activation
Returns:
        An uninitialized Keras model
    Example usage: ClassModel = BlockModel_Classifier([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_skip_{}'.format(rr))(x)
# average pooling
x = GlobalAveragePooling2D()(x)
# classifier
lay_out = Dense(1, activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
def ConvertEncoderToCED(model):
# Returns a model with frozen encoder layers
    # and complementary, unfrozen decoder layers
# get input layer
# model must be compiled again after using this function
lay_input = model.input
# get skip connection layer outputs
skip_list = [l.output for l in model.layers if 'skip' in l.name]
numBlocks = len(skip_list)
filt_num = int(skip_list[0].shape[-1])
x = model.layers[-3].output
# freeze encoder layers
for layer in model.layers:
layer.trainable = False
use_bn = True
# make expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
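# Illustrative sketch (not part of the original module): the pretrain-then-convert
# workflow suggested by the comments above. Hyperparameters are assumptions for
# demonstration only; this helper is never called by the rest of the code.
def _demo_pretrain_then_convert():
    clf = BlockModel_Classifier(input_shape=(256, 256, 1), filt_num=8, numBlocks=3)
    clf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    # ...fit clf on the classification task here...
    seg_model = ConvertEncoderToCED(clf)
    # the converted model must be compiled again before training
    seg_model.compile(optimizer='adam', loss='binary_crossentropy')
    return seg_model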
def Inception_model(input_shape=(299, 299, 3)):
incep_model = InceptionV3(
include_top=False, weights=None, input_shape=input_shape, pooling='avg')
input_layer = incep_model.input
incep_output = incep_model.output
# x = Conv2D(16, (3, 3), activation='relu')(incep_output)
# x = Flatten()(x)
x = Dense(1, activation='sigmoid')(incep_output)
return Model(inputs=input_layer, outputs=x)
| import numpy as np
from keras.applications.inception_v3 import InceptionV3
from keras.initializers import RandomNormal
from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D,
Cropping2D, Dense, Flatten, GlobalAveragePooling2D,
Input, Lambda, MaxPooling2D, Reshape, UpSampling2D,
ZeroPadding2D, ZeroPadding3D, add, concatenate)
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.models import Model
# Parameterized 2D Block Model
def BlockModel2D(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block CED model for segmentation problems
Args:
input shape: a list or tuple of [rows,cols,channels] of input images
filt_num: the number of filters in the first and last layers
        This number is linearly increased and decreased throughout the model
numBlocks: number of processing blocks. The larger the number the deeper the model
output_chan: number of output channels. Set if doing multi-class segmentation
regression: Whether to have a continuous output with linear activation
Returns:
        An uninitialized Keras model
    Example usage: SegModel = BlockModel2D([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_clean_{}'.format(rr))(x)
skip_list.append(x)
# expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
# Parameterized 2D Block Model
def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block model for pretraining on classification task
Args:
input shape: a list or tuple of [rows,cols,channels] of input images
filt_num: the number of filters in the first and last layers
        This number is linearly increased and decreased throughout the model
numBlocks: number of processing blocks. The larger the number the deeper the model
output_chan: number of output channels. Set if doing multi-class segmentation
regression: Whether to have a continuous output with linear activation
Returns:
        An uninitialized Keras model
    Example usage: ClassModel = BlockModel_Classifier([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_skip_{}'.format(rr))(x)
# average pooling
x = GlobalAveragePooling2D()(x)
# classifier
lay_out = Dense(1, activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
def ConvertEncoderToCED(model):
# Returns a model with frozen encoder layers
    # and complementary, unfrozen decoder layers
# get input layer
# model must be compiled again after using this function
lay_input = model.input
# get skip connection layer outputs
skip_list = [l.output for l in model.layers if 'skip' in l.name]
numBlocks = len(skip_list)
filt_num = int(skip_list[0].shape[-1])
x = model.layers[-3].output
# freeze encoder layers
for layer in model.layers:
layer.trainable = False
use_bn = True
# make expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
def Inception_model(input_shape=(299, 299, 3)):
incep_model = InceptionV3(
include_top=False, weights=None, input_shape=input_shape, pooling='avg')
input_layer = incep_model.input
incep_output = incep_model.output
# x = Conv2D(16, (3, 3), activation='relu')(incep_output)
# x = Flatten()(x)
x = Dense(1, activation='sigmoid')(incep_output)
return Model(inputs=input_layer, outputs=x)
| en | 0.774302 | # Parameterized 2D Block Model Creates a Block CED model for segmentation problems Args: input shape: a list or tuple of [rows,cols,channels] of input images filt_num: the number of filters in the first and last layers This number is multipled linearly increased and decreased throughout the model numBlocks: number of processing blocks. The larger the number the deeper the model output_chan: number of output channels. Set if doing multi-class segmentation regression: Whether to have a continuous output with linear activation Returns: An unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is recommended. Otherwise, the rows/cols must be divisible by 2^numBlocks for skip connections to match up properly # check for input shape compatibility # calculate size reduction # input layer # contracting blocks # expanding blocks # classifier # Parameterized 2D Block Model Creates a Block model for pretraining on classification task Args: input shape: a list or tuple of [rows,cols,channels] of input images filt_num: the number of filters in the first and last layers This number is multipled linearly increased and decreased throughout the model numBlocks: number of processing blocks. The larger the number the deeper the model output_chan: number of output channels. Set if doing multi-class segmentation regression: Whether to have a continuous output with linear activation Returns: An unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is recommended. Otherwise, the rows/cols must be divisible by 2^numBlocks for skip connections to match up properly # check for input shape compatibility # calculate size reduction # input layer # contracting blocks # average pooling # classifier # Returns a model with frozen encoder layers # and complimentary, unfrozen decoder layers # get input layer # model must be compiled again after using this function # get skip connection layer outputs # freeze encoder layers # make expanding blocks # classifier # x = Conv2D(16, (3, 3), activation='relu')(incep_output) # x = Flatten()(x) | 2.916053 | 3 |
initdb.py | dasmerlon/flunky-bot | 0 | 517 | #!/bin/env python
"""Drop and create a new database with schema."""
from sqlalchemy_utils.functions import database_exists, create_database, drop_database
from flunkybot.db import engine, base
from flunkybot.models import * # noqa
db_url = engine.url
if database_exists(db_url):
drop_database(db_url)
create_database(db_url)
base.metadata.drop_all()
base.metadata.create_all()
| #!/bin/env python
"""Drop and create a new database with schema."""
from sqlalchemy_utils.functions import database_exists, create_database, drop_database
from flunkybot.db import engine, base
from flunkybot.models import * # noqa
db_url = engine.url
if database_exists(db_url):
drop_database(db_url)
create_database(db_url)
base.metadata.drop_all()
base.metadata.create_all()
| en | 0.344226 | #!/bin/env python Drop and create a new database with schema. # noqa | 2.354138 | 2 |
setup.py | awesome-archive/webspider | 0 | 518 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages, setup
from app import __version__
# get the dependencies and installs
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
all_requirements = f.read().split('\n')
setup(
name='webspider',
version=__version__,
license='MIT',
author='heguozhu',
author_email='<EMAIL>',
description='lagou.com spider',
url='<EMAIL>:GuozhuHe/webspider.git',
packages=find_packages(exclude=['tests']),
package_data={'webspider': ['README.md']},
zip_safe=False,
install_requires=all_requirements,
entry_points={
'console_scripts': [
'web = app.web_app:main',
'production_web = app.quickly_cmd:run_web_app_by_gunicorn',
'crawl_lagou_data = app.tasks:crawl_lagou_data',
'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count',
'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker',
'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker',
'celery_beat = app.quickly_cmd:run_celery_beat',
            'celery_flower = app.quickly_cmd:run_celery_flower',
],
}
)
| <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages, setup
from app import __version__
# get the dependencies and installs
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
all_requirements = f.read().split('\n')
setup(
name='webspider',
version=__version__,
license='MIT',
author='heguozhu',
author_email='<EMAIL>',
description='lagou.com spider',
url='<EMAIL>:GuozhuHe/webspider.git',
packages=find_packages(exclude=['tests']),
package_data={'webspider': ['README.md']},
zip_safe=False,
install_requires=all_requirements,
entry_points={
'console_scripts': [
'web = app.web_app:main',
'production_web = app.quickly_cmd:run_web_app_by_gunicorn',
'crawl_lagou_data = app.tasks:crawl_lagou_data',
'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count',
'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker',
'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker',
'celery_beat = app.quickly_cmd:run_celery_beat',
            'celery_flower = app.quickly_cmd:run_celery_flower',
],
}
)
| en | 0.534031 | #!/usr/bin/env python # -*- coding: utf-8 -*- # get the dependencies and installs | 1.424059 | 1 |
Doc/conf.py | python-doc-tw/cpython-tw | 0 | 519 | <reponame>python-doc-tw/cpython-tw
#
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
sys.path.append(os.path.abspath('tools/extensions'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest',
'pyspecific', 'c_annotations']
# General substitutions.
project = 'Python'
copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# By default, highlight as Python 3.
highlight_language = 'python3'
# Require Sphinx 1.2 for build.
needs_sphinx = '1.2'
# Ignore any .rst files in the venv/ directory.
exclude_patterns = ['venv/*']
# Options for HTML output
# -----------------------
# Use our custom theme.
html_theme = 'pydoctheme'
html_theme_path = ['tools']
html_theme_options = {'collapsiblesidebar': True}
# Short title used e.g. for <title> HTML tags.
html_short_title = '%s Documentation' % release
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Path to find HTML templates.
templates_path = ['tools/templates']
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'https://docs.python.org/' + version
# Additional static files.
html_static_path = ['tools/static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'python' + release.replace('.', '')
# Split the index
html_split_index = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = r'<NAME>\\and the Python development team'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distributing/index', 'distributing.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('installing/index', 'installing.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Python Setup and Usage', _stdauthor, 'manual'),
('faq/index', 'faq.tex',
'Python Frequently Asked Questions', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', '<NAME>', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{<EMAIL>}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Get LaTeX to handle Unicode correctly
latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''}
# Options for Epub output
# -----------------------
epub_author = 'Python Documentation Authors'
epub_publisher = 'Python Software Foundation'
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
r'[T|t][k|K]',
r'Tix',
r'distutils.*',
]
coverage_ignore_functions = [
'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
'../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'),
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
# 'cfunction': [...]
}
# Options for the link checker
# ----------------------------
# Ignore certain URLs.
linkcheck_ignore = [r'https://bugs.python.org/(issue)?\d+',
# Ignore PEPs for now, they all have permanent redirects.
r'http://www.python.org/dev/peps/pep-\d+']
# Options for extensions
# ----------------------
# Relative filename of the reference count data file.
refcount_file = 'data/refcounts.dat'
# Translation
# -----------
gettext_compact = False
locale_dirs = ["locale"]
| #
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
sys.path.append(os.path.abspath('tools/extensions'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest',
'pyspecific', 'c_annotations']
# General substitutions.
project = 'Python'
copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# By default, highlight as Python 3.
highlight_language = 'python3'
# Require Sphinx 1.2 for build.
needs_sphinx = '1.2'
# Ignore any .rst files in the venv/ directory.
exclude_patterns = ['venv/*']
# Options for HTML output
# -----------------------
# Use our custom theme.
html_theme = 'pydoctheme'
html_theme_path = ['tools']
html_theme_options = {'collapsiblesidebar': True}
# Short title used e.g. for <title> HTML tags.
html_short_title = '%s Documentation' % release
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Path to find HTML templates.
templates_path = ['tools/templates']
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'https://docs.python.org/' + version
# Additional static files.
html_static_path = ['tools/static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'python' + release.replace('.', '')
# Split the index
html_split_index = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = r'<NAME>\\and the Python development team'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distributing/index', 'distributing.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('installing/index', 'installing.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Python Setup and Usage', _stdauthor, 'manual'),
('faq/index', 'faq.tex',
'Python Frequently Asked Questions', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', '<NAME>', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{<EMAIL>}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Get LaTeX to handle Unicode correctly
latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''}
# Options for Epub output
# -----------------------
epub_author = 'Python Documentation Authors'
epub_publisher = 'Python Software Foundation'
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
r'[T|t][k|K]',
r'Tix',
r'distutils.*',
]
coverage_ignore_functions = [
'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
'../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'),
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
# 'cfunction': [...]
}
# Options for the link checker
# ----------------------------
# Ignore certain URLs.
linkcheck_ignore = [r'https://bugs.python.org/(issue)?\d+',
# Ignore PEPs for now, they all have permanent redirects.
r'http://www.python.org/dev/peps/pep-\d+']
# Options for extensions
# ----------------------
# Relative filename of the reference count data file.
refcount_file = 'data/refcounts.dat'
# Translation
# -----------
gettext_compact = False
locale_dirs = ["locale"] | en | 0.668794 | # # Python documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # General configuration # --------------------- # General substitutions. # We look for the Include/patchlevel.h file in the current Python source tree # and replace the values accordingly. # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # Else, today_fmt is used as the format for a strftime call. # By default, highlight as Python 3. # Require Sphinx 1.2 for build. # Ignore any .rst files in the venv/ directory. # Options for HTML output # ----------------------- # Use our custom theme. # Short title used e.g. for <title> HTML tags. # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # Path to find HTML templates. # Custom sidebar templates, filenames relative to this file. # Additional templates that should be rendered to pages. # Output an OpenSearch description file. # Additional static files. # Output file base name for HTML help builder. # Split the index # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). # The font size ('10pt', '11pt' or '12pt'). # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). # Collect all HOWTOs individually # Additional stuff for the LaTeX preamble. \authoraddress{ \strong{Python Software Foundation}\\ Email: \email{<EMAIL>} } \let\Verbatim=\OriginalVerbatim \let\endVerbatim=\endOriginalVerbatim # Documents to append as an appendix to all manuals. # Get LaTeX to handle Unicode correctly # Options for Epub output # ----------------------- # Options for the coverage checker # -------------------------------- # The coverage checker will ignore all modules/functions/classes whose names # match any of the following regexes (using re.match). # Glob patterns for C source files for C API coverage, relative to this directory. # Regexes to find C items in the source files. #define ([^_][\w_]+)\(.*\)[\s|\\]'), # The coverage checker will ignore all C items whose names match these regexes # (using re.match) -- the keys must be the same as in coverage_c_regexes. # 'cfunction': [...] # Options for the link checker # ---------------------------- # Ignore certain URLs. # Ignore PEPs for now, they all have permanent redirects. # Options for extensions # ---------------------- # Relative filename of the reference count data file. # Translation # ----------- | 2.011656 | 2 |
basic_stats.py/basic_stats.py | RahmB/basic_stats | 0 | 520 | <filename>basic_stats.py/basic_stats.py
# Import the matplotlib module here. No other modules should be used.
# Import plotting library
import matplotlib.pyplot as plt
#import....
from os import *
# Import Numpy
import numpy as np
def mean(my_list): # This is the definition in the head.
i = 0
my_sum = 0
for number in my_list:
my_sum = my_sum + my_list[i]
i+=1
mu = my_sum / i
print('mean = ' + str(mu))
return mu
def sd(my_list):
j = 0
sigma = 0
my_sumsd = 0
mu = mean(my_list)
for number in my_list:
my_sumsd = my_sumsd + (my_list[j] - mu)**2
j +=1
sigma = (my_sumsd/j)**(.5)
print('standard deviation = ' + str(sigma))
return sigma
def norm(my_list):
k = 0
l = 0
mu = mean(my_list)
sigma = sd(my_list)
for number in my_list:
if abs(my_list[l] - mu) < sigma:
k += 1
l += 1
else:
l += 1
dist = k / l
return dist
def is_norm(my_list):
dist = norm(my_list)
if 0.66 < dist < 0.70:
print('Data is normally distributed')
return True
else:
print('Data is not normally distributed')
return False
def is_skew(my_list):
m = 0
skew = 0
sumsk = 0
mu = mean(my_list)
sigma = sd(my_list)
for numbers in my_list:
sumsk = (my_list[m] - mu)**3 + sumsk
m +=1
skew = sumsk /(len(my_list)*sigma**3)
print('skewness = ' + str(skew))
if skew == 0:
print('skewness = 0, therefore sample is normally distributed')
else:
print('skewness =/= 0, therefore sample is not normally distributed')
def graph(my_list):
plt.hist(my_list,density=True, facecolor='b')
sigma = sd(my_list) #stores standard deviation
mu = mean(my_list) #stores mean
plt.title('my_list Histogram')
plt.xlabel('Number')
plt.ylabel('Probability')
plt.xlim(mu - 4*sigma, mu + 4*sigma)
plt.grid(True)
plt.show()
def stats(my_list):
mu = mean(my_list)
std = sd(my_list)
dist = norm(my_list)
graph(my_list)
is_norm(my_list)
is_skew(my_list)
return (mu, std, dist)
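# Illustrative sketch (not part of the original module): running the full pipeline
# on a made-up sample. Note that graph() opens a matplotlib window.
if __name__ == "__main__":
    sample = [2, 4, 4, 4, 5, 5, 7, 9]
    mu, sigma, within_one_sd = stats(sample)
    print('fraction within one standard deviation = ' + str(within_one_sd))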
| <filename>basic_stats.py/basic_stats.py
# Import the matplotlib module here. No other modules should be used.
# Import plotting library
import matplotlib.pyplot as plt
#import....
from os import *
# Import Numpy
import numpy as np
def mean(my_list): # This is the definition in the head.
i = 0
my_sum = 0
for number in my_list:
my_sum = my_sum + my_list[i]
i+=1
mu = my_sum / i
print('mean = ' + str(mu))
return mu
def sd(my_list):
j = 0
sigma = 0
my_sumsd = 0
mu = mean(my_list)
for number in my_list:
my_sumsd = my_sumsd + (my_list[j] - mu)**2
j +=1
sigma = (my_sumsd/j)**(.5)
print('standard deviation = ' + str(sigma))
return sigma
def norm(my_list):
k = 0
l = 0
mu = mean(my_list)
sigma = sd(my_list)
for number in my_list:
if abs(my_list[l] - mu) < sigma:
k += 1
l += 1
else:
l += 1
dist = k / l
return dist
def is_norm(my_list):
dist = norm(my_list)
if 0.66 < dist < 0.70:
print('Data is normally distributed')
return True
else:
print('Data is not normally distributed')
return False
def is_skew(my_list):
m = 0
skew = 0
sumsk = 0
mu = mean(my_list)
sigma = sd(my_list)
for numbers in my_list:
sumsk = (my_list[m] - mu)**3 + sumsk
m +=1
skew = sumsk /(len(my_list)*sigma**3)
print('skewness = ' + str(skew))
if skew == 0:
print('skewness = 0, therefore sample is normally distributed')
else:
print('skewness =/= 0, therefore sample is not normally distributed')
def graph(my_list):
plt.hist(my_list,density=True, facecolor='b')
sigma = sd(my_list) #stores standard deviation
mu = mean(my_list) #stores mean
plt.title('my_list Histogram')
plt.xlabel('Number')
plt.ylabel('Probability')
plt.xlim(mu - 4*sigma, mu + 4*sigma)
plt.grid(True)
plt.show()
def stats(my_list):
mu = mean(my_list)
std = sd(my_list)
dist = norm(my_list)
graph(my_list)
is_norm(my_list)
is_skew(my_list)
return (mu, std, dist)
| en | 0.617113 | # Import the matplotlib module here. No other modules should be used. # Import plotting library #import.... # Import Numpy # This is the defintion in the head. #stores standard deviation #stores mean | 3.898444 | 4 |
src/catkin_pkg/cli/tag_changelog.py | delftrobotics-forks/catkin_pkg | 2 | 521 | """This script renames the forthcoming section in changelog files with the upcoming version and the current date"""
from __future__ import print_function
import argparse
import datetime
import docutils.core
import os
import re
import sys
from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path
from catkin_pkg.changelog_generator import FORTHCOMING_LABEL
from catkin_pkg.package_version import bump_version
from catkin_pkg.packages import find_packages, verify_equal_package_versions
def get_forthcoming_label(rst):
document = docutils.core.publish_doctree(rst)
forthcoming_label = None
for child in document.children:
title = None
if isinstance(child, docutils.nodes.subtitle):
title = child
elif isinstance(child, docutils.nodes.section):
section = child
if len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title):
title = section.children[0]
if title and len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text):
title_text = title.children[0].rawsource
if FORTHCOMING_LABEL.lower() in title_text.lower():
if forthcoming_label:
raise RuntimeError('Found multiple forthcoming sections')
forthcoming_label = title_text
return forthcoming_label
def rename_section(data, old_label, new_label):
valid_section_characters = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
def replace_section(match):
section_char = match.group(2)[0]
return new_label + '\n' + section_char * len(new_label)
pattern = '^(' + re.escape(old_label) + ')\n([' + re.escape(valid_section_characters) + ']+)$'
data, count = re.subn(pattern, replace_section, data, flags=re.MULTILINE)
if count == 0:
raise RuntimeError('Could not find section')
if count > 1:
raise RuntimeError('Found multiple matching sections')
return data
def main(sysargs=None):
parser = argparse.ArgumentParser(description='Tag the forthcoming section in the changelog files with an upcoming version number')
parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which part of the version number to bump? (default: %(default)s)')
args = parser.parse_args(sysargs)
base_path = '.'
# find packages
packages = find_packages(base_path)
if not packages:
raise RuntimeError('No packages found')
print('Found packages: %s' % ', '.join([p.name for p in packages.values()]))
# fetch current version and verify that all packages have same version number
old_version = verify_equal_package_versions(packages.values())
new_version = bump_version(old_version, args.bump)
print('Tag version %s' % new_version)
# check for changelog entries
changelogs = []
missing_forthcoming = []
already_tagged = []
for pkg_path, package in packages.items():
changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME)
if not os.path.exists(changelog_path):
missing_forthcoming.append(package.name)
continue
changelog = get_changelog_from_path(changelog_path, package.name)
if not changelog:
missing_forthcoming.append(package.name)
continue
# check that forthcoming section exists
forthcoming_label = get_forthcoming_label(changelog.rst)
if not forthcoming_label:
missing_forthcoming.append(package.name)
continue
# check that new_version section does not exist yet
try:
changelog.get_content_of_version(new_version)
already_tagged.append(package.name)
continue
except KeyError:
pass
changelogs.append((package.name, changelog_path, changelog, forthcoming_label))
if missing_forthcoming:
print('The following packages do not have a forthcoming section in their changelog file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr)
if already_tagged:
print("The following packages do already have a section '%s' in their changelog file: %s" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr)
# rename forthcoming sections to new_version including current date
new_changelog_data = []
new_label = '%s (%s)' % (new_version, datetime.date.today().isoformat())
for (pkg_name, changelog_path, changelog, forthcoming_label) in changelogs:
print("Renaming section '%s' to '%s' in package '%s'..." % (forthcoming_label, new_label, pkg_name))
data = rename_section(changelog.rst, forthcoming_label, new_label)
new_changelog_data.append((changelog_path, data))
print('Writing updated changelog files...')
for (changelog_path, data) in new_changelog_data:
with open(changelog_path, 'wb') as f:
f.write(data.encode('utf-8'))
| """This script renames the forthcoming section in changelog files with the upcoming version and the current date"""
from __future__ import print_function
import argparse
import datetime
import docutils.core
import os
import re
import sys
from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path
from catkin_pkg.changelog_generator import FORTHCOMING_LABEL
from catkin_pkg.package_version import bump_version
from catkin_pkg.packages import find_packages, verify_equal_package_versions
def get_forthcoming_label(rst):
document = docutils.core.publish_doctree(rst)
forthcoming_label = None
for child in document.children:
title = None
if isinstance(child, docutils.nodes.subtitle):
title = child
elif isinstance(child, docutils.nodes.section):
section = child
if len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title):
title = section.children[0]
if title and len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text):
title_text = title.children[0].rawsource
if FORTHCOMING_LABEL.lower() in title_text.lower():
if forthcoming_label:
raise RuntimeError('Found multiple forthcoming sections')
forthcoming_label = title_text
return forthcoming_label
def rename_section(data, old_label, new_label):
valid_section_characters = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
def replace_section(match):
section_char = match.group(2)[0]
return new_label + '\n' + section_char * len(new_label)
pattern = '^(' + re.escape(old_label) + ')\n([' + re.escape(valid_section_characters) + ']+)$'
data, count = re.subn(pattern, replace_section, data, flags=re.MULTILINE)
if count == 0:
raise RuntimeError('Could not find section')
if count > 1:
raise RuntimeError('Found multiple matching sections')
return data
def main(sysargs=None):
parser = argparse.ArgumentParser(description='Tag the forthcoming section in the changelog files with an upcoming version number')
parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which part of the version number to bump? (default: %(default)s)')
args = parser.parse_args(sysargs)
base_path = '.'
# find packages
packages = find_packages(base_path)
if not packages:
raise RuntimeError('No packages found')
print('Found packages: %s' % ', '.join([p.name for p in packages.values()]))
# fetch current version and verify that all packages have same version number
old_version = verify_equal_package_versions(packages.values())
new_version = bump_version(old_version, args.bump)
print('Tag version %s' % new_version)
# check for changelog entries
changelogs = []
missing_forthcoming = []
already_tagged = []
for pkg_path, package in packages.items():
changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME)
if not os.path.exists(changelog_path):
missing_forthcoming.append(package.name)
continue
changelog = get_changelog_from_path(changelog_path, package.name)
if not changelog:
missing_forthcoming.append(package.name)
continue
# check that forthcoming section exists
forthcoming_label = get_forthcoming_label(changelog.rst)
if not forthcoming_label:
missing_forthcoming.append(package.name)
continue
# check that new_version section does not exist yet
try:
changelog.get_content_of_version(new_version)
already_tagged.append(package.name)
continue
except KeyError:
pass
changelogs.append((package.name, changelog_path, changelog, forthcoming_label))
if missing_forthcoming:
print('The following packages do not have a forthcoming section in their changelog file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr)
if already_tagged:
print("The following packages do already have a section '%s' in their changelog file: %s" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr)
# rename forthcoming sections to new_version including current date
new_changelog_data = []
new_label = '%s (%s)' % (new_version, datetime.date.today().isoformat())
for (pkg_name, changelog_path, changelog, forthcoming_label) in changelogs:
print("Renaming section '%s' to '%s' in package '%s'..." % (forthcoming_label, new_label, pkg_name))
data = rename_section(changelog.rst, forthcoming_label, new_label)
new_changelog_data.append((changelog_path, data))
print('Writing updated changelog files...')
for (changelog_path, data) in new_changelog_data:
with open(changelog_path, 'wb') as f:
f.write(data.encode('utf-8'))
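# Usage sketch (added for illustration, not part of the original module): main() is normally
# exposed as a console script by catkin_pkg, but it can also be invoked directly. The
# argument below is just an example; run it from a workspace whose packages already have a
# forthcoming section in their CHANGELOG.rst.
if __name__ == '__main__':
    main(['--bump', 'minor'])  # tag forthcoming sections with a minor version bump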
| en | 0.831684 | This script renames the forthcoming section in changelog files with the upcoming version and the current date # find packages # fetch current version and verify that all packages have same version number # check for changelog entries # check that forthcoming section exists # check that new_version section does not exist yet # rename forthcoming sections to new_version including current date | 2.545939 | 3 |
tests/optims/distributed_adamw_test.py | AswinRetnakumar/Machina | 302 | 522 | import os
import unittest
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn
from machina.optims import DistributedAdamW
def init_processes(rank, world_size,
function, backend='tcp'):
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank,
world_size=world_size)
function(rank, world_size)
class TestDistributedAdamW(unittest.TestCase):
def test_step(self):
def _run(rank, world_size):
model = nn.Linear(10, 1)
optimizer = DistributedAdamW(
model.parameters())
optimizer.zero_grad()
loss = model(torch.ones(10).float())
loss.backward()
optimizer.step()
processes = []
world_size = 4
for rank in range(world_size):
p = Process(target=init_processes,
args=(rank,
world_size,
_run))
p.start()
processes.append(p)
for p in processes:
p.join()
| import os
import unittest
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn
from machina.optims import DistributedAdamW
def init_processes(rank, world_size,
function, backend='tcp'):
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank,
world_size=world_size)
function(rank, world_size)
class TestDistributedAdamW(unittest.TestCase):
def test_step(self):
def _run(rank, world_size):
model = nn.Linear(10, 1)
optimizer = DistributedAdamW(
model.parameters())
optimizer.zero_grad()
loss = model(torch.ones(10).float())
loss.backward()
optimizer.step()
processes = []
world_size = 4
for rank in range(world_size):
p = Process(target=init_processes,
args=(rank,
world_size,
_run))
p.start()
processes.append(p)
for p in processes:
p.join()
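# Added for illustration (not in the original file): the module defines the test case but no
# entry point, so the standard unittest guard below is one way to run it directly with
# `python distributed_adamw_test.py`.
if __name__ == '__main__':
    unittest.main()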
| none | 1 | 2.52126 | 3 |
|
rsqueakvm/model/__init__.py | shiplift/RSqueakOnABoat | 44 | 523 | <reponame>shiplift/RSqueakOnABoat<filename>rsqueakvm/model/__init__.py
"""
Squeak model.
W_Object
W_SmallInteger
W_MutableSmallInteger
W_AbstractObjectWithIdentityHash
W_AbstractFloat
W_Float
W_MutableFloat
W_Character
W_PointersObject
W_AbstractObjectWithClassReference
W_LargeInteger
W_LargeIntegerWord
W_LargeIntegerBig
W_BytesObject
W_WordsObject
W_CompiledMethod
W_SpurCompiledMethod
W_PreSpurCompiledMethod
"""
from rsqueakvm.model.base import *
from rsqueakvm.model.character import *
from rsqueakvm.model.compiled_methods import *
# from rsqueakvm.model.display import *
from rsqueakvm.model.numeric import *
from rsqueakvm.model.pointers import *
from rsqueakvm.model.variable import *
| """
Squeak model.
W_Object
W_SmallInteger
W_MutableSmallInteger
W_AbstractObjectWithIdentityHash
W_AbstractFloat
W_Float
W_MutableFloat
W_Character
W_PointersObject
W_AbstractObjectWithClassReference
W_LargeInteger
W_LargeIntegerWord
W_LargeIntegerBig
W_BytesObject
W_WordsObject
W_CompiledMethod
W_SpurCompiledMethod
W_PreSpurCompiledMethod
"""
from rsqueakvm.model.base import *
from rsqueakvm.model.character import *
from rsqueakvm.model.compiled_methods import *
# from rsqueakvm.model.display import *
from rsqueakvm.model.numeric import *
from rsqueakvm.model.pointers import *
from rsqueakvm.model.variable import * | en | 0.302719 | Squeak model. W_Object W_SmallInteger W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod # from rsqueakvm.model.display import * | 1.377636 | 1 |
Multi-Task-Learning-PyTorch-master/losses/loss_functions.py | nikola3794/edge-evaluation-PASCAL-MT-tmp | 0 | 524 | <filename>Multi-Task-Learning-PyTorch-master/losses/loss_functions.py
# This code is referenced from
# https://github.com/facebookresearch/astmt/
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# License: Attribution-NonCommercial 4.0 International
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
import numpy as np
class SoftMaxwithLoss(Module):
"""
This function returns cross entropy loss for semantic segmentation
"""
def __init__(self):
super(SoftMaxwithLoss, self).__init__()
self.softmax = nn.LogSoftmax(dim=1)
self.criterion = nn.NLLLoss(ignore_index=255)
def forward(self, out, label):
assert not label.requires_grad
# out shape batch_size x channels x h x w
# label shape batch_size x 1 x h x w
label = label[:, 0, :, :].long()
loss = self.criterion(self.softmax(out), label)
return loss
class BalancedCrossEntropyLoss(Module):
"""
Balanced Cross Entropy Loss with optional ignore regions
"""
def __init__(self, size_average=True, batch_average=True, pos_weight=None):
super(BalancedCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
self.pos_weight = pos_weight
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
# Weighting of the loss, default is HED-style
if self.pos_weight is None:
num_labels_pos = torch.sum(labels)
num_labels_neg = torch.sum(1.0 - labels)
num_total = num_labels_pos + num_labels_neg
w = num_labels_neg / num_total
else:
w = self.pos_weight
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None and not self.pos_weight:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()
w = num_labels_neg / num_total
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = w * loss_pos + (1 - w) * loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
class BinaryCrossEntropyLoss(Module):
"""
Binary Cross Entropy with ignore regions, not balanced.
"""
def __init__(self, size_average=True, batch_average=True):
super(BinaryCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = loss_pos + loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
class DepthLoss(nn.Module):
"""
Loss for depth prediction. By default L1 loss is used.
"""
def __init__(self, loss='l1'):
super(DepthLoss, self).__init__()
if loss == 'l1':
self.loss = nn.L1Loss()
else:
raise NotImplementedError('Loss {} currently not supported in DepthLoss'.format(loss))
def forward(self, out, label):
mask = (label != 255)
return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask))
class Normalize(nn.Module):
def __init__(self):
super(Normalize, self).__init__()
def forward(self, bottom):
qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12
top = bottom.div(qn)
return top
class NormalsLoss(Module):
"""
L1 loss with ignore labels
normalize: normalization for surface normals
"""
def __init__(self, size_average=True, normalize=False, norm=1):
super(NormalsLoss, self).__init__()
self.size_average = size_average
if normalize:
self.normalize = Normalize()
else:
self.normalize = None
if norm == 1:
print('Using L1 loss for surface normals')
self.loss_func = F.l1_loss
elif norm == 2:
print('Using L2 loss for surface normals')
self.loss_func = F.mse_loss
else:
raise NotImplementedError
def forward(self, out, label, ignore_label=255):
assert not label.requires_grad
mask = (label != ignore_label)
n_valid = torch.sum(mask).item()
if self.normalize is not None:
out_norm = self.normalize(out)
loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum')
else:
loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum')
if self.size_average:
if ignore_label:
ret_loss = torch.div(loss, max(n_valid, 1e-6))
return ret_loss
else:
ret_loss = torch.div(loss, float(np.prod(label.size())))
return ret_loss
return loss
| <filename>Multi-Task-Learning-PyTorch-master/losses/loss_functions.py
# This code is referenced from
# https://github.com/facebookresearch/astmt/
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# License: Attribution-NonCommercial 4.0 International
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
import numpy as np
class SoftMaxwithLoss(Module):
"""
This function returns cross entropy loss for semantic segmentation
"""
def __init__(self):
super(SoftMaxwithLoss, self).__init__()
self.softmax = nn.LogSoftmax(dim=1)
self.criterion = nn.NLLLoss(ignore_index=255)
def forward(self, out, label):
assert not label.requires_grad
# out shape batch_size x channels x h x w
# label shape batch_size x 1 x h x w
label = label[:, 0, :, :].long()
loss = self.criterion(self.softmax(out), label)
return loss
class BalancedCrossEntropyLoss(Module):
"""
Balanced Cross Entropy Loss with optional ignore regions
"""
def __init__(self, size_average=True, batch_average=True, pos_weight=None):
super(BalancedCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
self.pos_weight = pos_weight
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
# Weighting of the loss, default is HED-style
if self.pos_weight is None:
num_labels_pos = torch.sum(labels)
num_labels_neg = torch.sum(1.0 - labels)
num_total = num_labels_pos + num_labels_neg
w = num_labels_neg / num_total
else:
w = self.pos_weight
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None and not self.pos_weight:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()
w = num_labels_neg / num_total
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = w * loss_pos + (1 - w) * loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
class BinaryCrossEntropyLoss(Module):
"""
Binary Cross Entropy with ignore regions, not balanced.
"""
def __init__(self, size_average=True, batch_average=True):
super(BinaryCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = loss_pos + loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
class DepthLoss(nn.Module):
"""
Loss for depth prediction. By default L1 loss is used.
"""
def __init__(self, loss='l1'):
super(DepthLoss, self).__init__()
if loss == 'l1':
self.loss = nn.L1Loss()
else:
raise NotImplementedError('Loss {} currently not supported in DepthLoss'.format(loss))
def forward(self, out, label):
mask = (label != 255)
return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask))
class Normalize(nn.Module):
def __init__(self):
super(Normalize, self).__init__()
def forward(self, bottom):
qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12
top = bottom.div(qn)
return top
class NormalsLoss(Module):
"""
L1 loss with ignore labels
normalize: normalization for surface normals
"""
def __init__(self, size_average=True, normalize=False, norm=1):
super(NormalsLoss, self).__init__()
self.size_average = size_average
if normalize:
self.normalize = Normalize()
else:
self.normalize = None
if norm == 1:
print('Using L1 loss for surface normals')
self.loss_func = F.l1_loss
elif norm == 2:
print('Using L2 loss for surface normals')
self.loss_func = F.mse_loss
else:
raise NotImplementedError
def forward(self, out, label, ignore_label=255):
assert not label.requires_grad
mask = (label != ignore_label)
n_valid = torch.sum(mask).item()
if self.normalize is not None:
out_norm = self.normalize(out)
loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum')
else:
loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum')
if self.size_average:
if ignore_label:
ret_loss = torch.div(loss, max(n_valid, 1e-6))
return ret_loss
else:
ret_loss = torch.div(loss, float(np.prod(label.size())))
return ret_loss
return loss
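# Usage sketch (added for illustration, not part of the original module): the edge losses take
# logits and a binary ground-truth map of the same shape; DepthLoss ignores pixels labelled 255.
# Shapes and values below are arbitrary assumptions chosen only to show the call pattern.
if __name__ == '__main__':
    logits = torch.randn(2, 1, 64, 64)                   # raw network output
    target = (torch.rand(2, 1, 64, 64) > 0.95).float()   # sparse binary edge map
    criterion = BalancedCrossEntropyLoss(size_average=True)
    print('balanced BCE:', criterion(logits, target).item())
    depth_pred = torch.rand(2, 1, 64, 64)
    depth_gt = torch.rand(2, 1, 64, 64)
    depth_gt[:, :, :8, :] = 255                          # masked-out ignore region
    print('depth L1:', DepthLoss()(depth_pred, depth_gt).item())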
| en | 0.786692 | # This code is referenced from # https://github.com/facebookresearch/astmt/ # # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # License: Attribution-NonCommercial 4.0 International This function returns cross entropy loss for semantic segmentation # out shape batch_size x channels x h x w # label shape batch_size x 1 x h x w Balanced Cross Entropy Loss with optional ignore regions # Weighting of the loss, default is HED-style Binary Cross Entropy with ignore regions, not balanced. Loss for depth prediction. By default L1 loss is used. L1 loss with ignore labels normalize: normalization for surface normals | 2.515586 | 3 |
trabalho-numerico/tridimensional.py | heissonwillen/tcm | 0 | 525 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os
import contorno
from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X
z_temp = contorno.p_3
TAMANHO_BARRA = 2
x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1)
y = np.linspace(0.0, DELTA_T, PASSOS+1)
z = []
for k in range(PASSOS+1):
z_k = np.copy(z_temp)
z.append(z_k)
for i in range(1, INTERVALOS):
z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1])
z = np.asarray(z)
x, y = np.meshgrid(x, y)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('T(x,t)')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
| from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os
import contorno
from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X
z_temp = contorno.p_3
TAMANHO_BARRA = 2
x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1)
y = np.linspace(0.0, DELTA_T, PASSOS+1)
z = []
for k in range(PASSOS+1):
z_k = np.copy(z_temp)
z.append(z_k)
for i in range(1, INTERVALOS):
z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1])
z = np.asarray(z)
x, y = np.meshgrid(x, y)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('T(x,t)')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
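# Note (added for illustration): the update above is the explicit FTCS scheme for the 1-D heat
# equation, T_i^(k+1) = T_i^k + r * (T_(i+1)^k - 2*T_i^k + T_(i-1)^k) with r = DELTA_T / DELTA_X**2.
# The scheme is only stable for r <= 1/2, so a sanity check of this form (placed before the time
# loop) would catch an unstable choice of steps:
r = DELTA_T / (DELTA_X ** 2)
assert r <= 0.5, 'FTCS unstable: DELTA_T/DELTA_X**2 = %.3f > 0.5' % r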
| none | 1 | 2.275699 | 2 |
|
leetcode/0006_ZigZag_Conversion/zigzag_conversion.py | zyeak/leetcode | 0 | 526 | # solution 1:
class Solution1:
def convert(self, s: str, numRows: int) -> str:
if numRows == 1 or numRows >= len(s):
return s
L = [''] * numRows
index, step = 0, 1
for x in s:
L[index] += x
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
return ''.join(L)
# Solution 2
class Solution:
def convert(self, s: str, numRows: int) -> str:
# If we have only one row then we can return the string as it is
if numRows < 2:
return s
# We will create an empty string for each row and then fill each element in each row
# from row = 0 to row = numRows-1, if we reach bottom (i.e. row = numRows-1)
# then we move up. Similarly if we reach top, we change direction and move down
# Finally after filling up all the four rows we join them row0 + row1 +.. numRows
row = 0
result = [""]*numRows
for character in s:
if row == 0:
move_down = True
elif row == numRows-1:
move_down = False
result[row] += character
row = (row+1) if move_down else row-1
return "".join(result)
if __name__ == '__main__':
# begin
s = Solution()
print(s.convert("PAYPALISHIRING", 3)) | # solution 1:
class Solution1:
def convert(self, s: str, numRows: int) -> str:
if numRows == 1 or numRows >= len(s):
return s
L = [''] * numRows
index, step = 0, 1
for x in s:
L[index] += x
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
return ''.join(L)
# Solution 2
class Solution:
def convert(self, s: str, numRows: int) -> str:
# If we have only one row then we can return the string as it is
if numRows < 2:
return s
# We will create an empty string for each row and then fill each element in each row
# from row = 0 to row = numRows-1, if we reach bottom (i.e. row = numRows-1)
# then we move up. Similarly if we reach top, we change direction and move down
# Finally after filling up all the four rows we join them row0 + row1 +.. numRows
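        # Worked example (added for illustration): with s = "PAYPALISHIRING" and numRows = 3
        # the rows fill up as
        #   row 0: P     A     H     N
        #   row 1: A  P  L  S  I  I  G
        #   row 2: Y     I     R
        # and joining them gives "PAHNAPLSIIGYIR".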
row = 0
result = [""]*numRows
for character in s:
if row == 0:
move_down = True
elif row == numRows-1:
move_down = False
result[row] += character
row = (row+1) if move_down else row-1
return "".join(result)
if __name__ == '__main__':
# begin
s = Solution()
print(s.convert("PAYPALISHIRING", 3)) | en | 0.782857 | # solution 1: # Solution 2 # If we have only one row then we can return the string as it is # We will create an empty string for each row and then fill each element in each row # from row = 0 to row = numRows-1, if we reach bottom (i.e. row = numRows-1) # then we move up. Similarly if we reach top, we change direction and move down # Finally after filling up all the four rows we join them row0 + row1 +.. numRows # begin | 3.646331 | 4 |
FakeNewsClassifierWithLSTM.py | pratikasarkar/nlp | 0 | 527 | <reponame>pratikasarkar/nlp
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 13:42:45 2021
@author: ASUS
"""
import pandas as pd
df = pd.read_csv(r'D:\nlp\fake-news-data\train.csv')
df = df.dropna()
X = df.drop('label',axis = 1)
y = df['label']
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import one_hot
# Vocabulary size
voc_size = 5000
# One Hot Representation
messages = X.copy()
messages.reset_index(inplace = True)
import nltk
import re
from nltk.corpus import stopwords
# Dataset Preprocessing
from nltk.stem import PorterStemmer
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
print(i)
review = re.sub('[^a-zA-Z]',' ',messages['title'][i])
review = review.lower()
review = review.split()
review = [ps.stem(word) for word in review if word not in stopwords.words('english')]
review = " ".join(review)
corpus.append(review)
onehot_repr = [one_hot(words,voc_size) for words in corpus]
sent_len = 20
embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre')
# Creating the model
embedding_vector_features = 40
model = Sequential()
model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len))
model.add(LSTM(100))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy'])
model.summary()
import numpy as np
X_final = np.array(embedded_doc)
y_final = np.array(y)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42)
model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64)
y_pred = model.predict_classes(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test,y_pred)
acc = accuracy_score(y_test,y_pred)
| # -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 13:42:45 2021
@author: ASUS
"""
import pandas as pd
df = pd.read_csv(r'D:\nlp\fake-news-data\train.csv')
df = df.dropna()
X = df.drop('label',axis = 1)
y = df['label']
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import one_hot
# Vocabulary size
voc_size = 5000
# One Hot Representation
messages = X.copy()
messages.reset_index(inplace = True)
import nltk
import re
from nltk.corpus import stopwords
# Dataset Preprocessing
from nltk.stem import PorterStemmer
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
print(i)
review = re.sub('[^a-zA-Z]',' ',messages['title'][i])
review = review.lower()
review = review.split()
review = [ps.stem(word) for word in review if word not in stopwords.words('english')]
review = " ".join(review)
corpus.append(review)
onehot_repr = [one_hot(words,voc_size) for words in corpus]
sent_len = 20
embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre')
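# Illustration (added; index values are hypothetical): one_hot uses a hashing trick, so a cleaned
# title such as "presid sign new bill" becomes word indices in [1, voc_size), e.g. [2731, 441, 3870, 905]
# (actual values depend on the hash), and pad_sequences(..., maxlen=20, padding='pre') then left-pads
# each sequence with zeros to the fixed length of 20.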
# Creating the model
embedding_vector_features = 40
model = Sequential()
model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len))
model.add(LSTM(100))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy'])
model.summary()
import numpy as np
X_final = np.array(embedded_doc)
y_final = np.array(y)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42)
model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64)
y_pred = model.predict_classes(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test,y_pred)
acc = accuracy_score(y_test,y_pred) | en | 0.708594 | # -*- coding: utf-8 -*- Created on Thu Feb 11 13:42:45 2021 @author: ASUS # Vocabulary size # One Hot Representation # Dataset Preprocessing # Creating the model | 2.72361 | 3 |
SVM/SVM_12_Quiz.py | rohit517/Intro-to-machine-learning-Udacity | 0 | 528 | import sys
from class_vis import prettyPicture
from prep_terrain_data import makeTerrainData
import matplotlib.pyplot as plt
import copy
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
########################## SVM #################################
### we handle the import statement and SVC creation for you here
from sklearn.svm import SVC
clf = SVC(kernel="linear")
#### now your job is to fit the classifier
#### using the training features/labels, and to
#### make a set of predictions on the test data
clf.fit(features_train,labels_train)
pred = clf.predict(features_test)
#### store your predictions in a list named pred
from sklearn.metrics import accuracy_score
acc = accuracy_score(pred, labels_test)
def submitAccuracy():
return acc
| import sys
from class_vis import prettyPicture
from prep_terrain_data import makeTerrainData
import matplotlib.pyplot as plt
import copy
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
########################## SVM #################################
### we handle the import statement and SVC creation for you here
from sklearn.svm import SVC
clf = SVC(kernel="linear")
#### now your job is to fit the classifier
#### using the training features/labels, and to
#### make a set of predictions on the test data
clf.fit(features_train,labels_train)
pred = clf.predict(features_test)
#### store your predictions in a list named pred
from sklearn.metrics import accuracy_score
acc = accuracy_score(pred, labels_test)
def submitAccuracy():
return acc
| en | 0.572647 | ########################## SVM ################################# ### we handle the import statement and SVC creation for you here #### now your job is to fit the classifier #### using the training features/labels, and to #### make a set of predictions on the test data #### store your predictions in a list named pred | 2.745435 | 3 |
tests/test_auto_scan_logsigmoid.py | yeliang2258/Paddle2ONNX | 0 | 529 | <gh_stars>0
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import OPConvertAutoScanTest, BaseNet
from hypothesis import reproduce_failure
import hypothesis.strategies as st
import numpy as np
import unittest
import paddle
class Net(BaseNet):
"""
simple Net
"""
def forward(self, inputs):
"""
forward
"""
x = paddle.nn.functional.log_sigmoid(inputs)
return x
class TestLogsigmoidConvert(OPConvertAutoScanTest):
"""
api: paddle.nn.functional.log_sigmoid
OPset version: 7, 9, 15
"""
def sample_convert_config(self, draw):
input_shape = draw(
st.lists(
st.integers(
min_value=20, max_value=100),
min_size=4,
max_size=4))
input_spec = [-1] * len(input_shape)
dtype = draw(st.sampled_from(["float32", "float64"]))
config = {
"op_names": ["logsigmoid"],
"test_data_shapes": [input_shape],
"test_data_types": [[dtype]],
"opset_version": [7, 9, 15],
"input_spec_shape": [input_spec],
}
models = Net(config)
return (config, models)
def test(self):
self.run_and_statis(max_examples=30)
if __name__ == "__main__":
unittest.main()
| # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import OPConvertAutoScanTest, BaseNet
from hypothesis import reproduce_failure
import hypothesis.strategies as st
import numpy as np
import unittest
import paddle
class Net(BaseNet):
"""
simple Net
"""
def forward(self, inputs):
"""
forward
"""
x = paddle.nn.functional.log_sigmoid(inputs)
return x
class TestLogsigmoidConvert(OPConvertAutoScanTest):
"""
api: paddle.nn.functional.log_sigmoid
OPset version: 7, 9, 15
"""
def sample_convert_config(self, draw):
input_shape = draw(
st.lists(
st.integers(
min_value=20, max_value=100),
min_size=4,
max_size=4))
input_spec = [-1] * len(input_shape)
dtype = draw(st.sampled_from(["float32", "float64"]))
config = {
"op_names": ["logsigmoid"],
"test_data_shapes": [input_shape],
"test_data_types": [[dtype]],
"opset_version": [7, 9, 15],
"input_spec_shape": [input_spec],
}
models = Net(config)
return (config, models)
def test(self):
self.run_and_statis(max_examples=30)
if __name__ == "__main__":
unittest.main() | en | 0.817711 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. simple Net forward api: paddle.nn.functional.log_sigmoid OPset version: 7, 9, 15 | 2.034696 | 2 |
oasislmf/utils/concurrency.py | bbetov-corelogic/OasisLMF | 0 | 530 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
import sys
import types
import billiard
from signal import (
signal,
SIGINT,
)
from threading import (
Event,
Thread,
)
__all__ = [
'multiprocess',
'multithread',
'SignalHandler',
'Task'
]
class SignalHandler(object):
def __init__(self, stopper, threads):
self.stopper = stopper
self.threads = threads
def __call__(self, signum, frame):
self.stopper.set()
for task in self.threads:
task.join()
sys.exit(0)
class Task(object):
def __init__(self, func, args=(), key=None):
self._func = func
self._args = args
self._key = key if key is not None else func.__name__
self._result = None
self._is_done = False
@property
def func(self):
"""
Task function/method property - getter only.
:getter: Gets the task function/method object
"""
return self._func
@property
def args(self):
"""
Task function/method arguments property - getter only.
:getter: Gets the task function/method arguments
"""
return self._args
@property
def key(self):
"""
Task function/method key - getter only.
:getter: Gets the task function/method key
"""
return self._key
@property
def result(self):
"""
Task function/method result property.
:getter: Gets the task function/method result (produced by calling
the function on the defined arguments)
:setter: Sets the task function/method result
"""
return self._result
@result.setter
def result(self, r):
self._result = r
self._is_done = True
@property
def is_done(self):
"""
Task function/method status property - getter only.
:getter: Gets the task function/method status
"""
return self._is_done
def multithread(tasks, pool_size=10):
"""
Executes several tasks concurrently via ``threading`` threads, puts the
results into a queue, and generates these back to the caller.
"""
task_q = Queue()
num_tasks = 0
for task in tasks:
task_q.put(task)
num_tasks += 1
def run(i, task_q, result_q, stopper):
while not stopper.is_set():
try:
task = task_q.get_nowait()
except Empty:
break
else:
task.result = task.func(*task.args) if task.args else task.func()
if type(task.result) in (types.GeneratorType, list, tuple, set):
for r in task.result:
result_q.put((task.key, r,))
else:
result_q.put((task.key, task.result,))
task_q.task_done()
result_q = Queue()
stopper = Event()
threads = tuple(Thread(target=run, args=(i, task_q, result_q, stopper,)) for i in range(pool_size))
handler = SignalHandler(stopper, threads)
signal(SIGINT, handler)
for thread in threads:
thread.start()
task_q.join()
while not result_q.empty():
key, result = result_q.get_nowait()
yield key, result
def multiprocess(tasks, pool_size=10):
"""
Executes several tasks concurrently via Python ``multiprocessing``
processes, puts the results into a queue, and generates these back to the
caller.
"""
pool = billiard.Pool(pool_size)
result_q = Queue()
def build_results(result):
if type(result) in (types.GeneratorType, list, tuple, set):
for r in result:
result_q.put(r)
else:
result_q.put(result)
for task in tasks:
run = pool.apply_async(task.func, args=task.args, callback=build_results)
        run.get()  # .get() blocks until this task completes, so the tasks are effectively executed one after another
pool.close()
pool.join()
while not result_q.empty():
result = result_q.get_nowait()
yield result
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
import sys
import types
import billiard
from signal import (
signal,
SIGINT,
)
from threading import (
Event,
Thread,
)
__all__ = [
'multiprocess',
'multithread',
'SignalHandler',
'Task'
]
class SignalHandler(object):
def __init__(self, stopper, threads):
self.stopper = stopper
self.threads = threads
def __call__(self, signum, frame):
self.stopper.set()
for task in self.threads:
task.join()
sys.exit(0)
class Task(object):
def __init__(self, func, args=(), key=None):
self._func = func
self._args = args
self._key = key if key is not None else func.__name__
self._result = None
self._is_done = False
@property
def func(self):
"""
Task function/method property - getter only.
:getter: Gets the task function/method object
"""
return self._func
@property
def args(self):
"""
Task function/method arguments property - getter only.
:getter: Gets the task function/method arguments
"""
return self._args
@property
def key(self):
"""
Task function/method key - getter only.
:getter: Gets the task function/method key
"""
return self._key
@property
def result(self):
"""
Task function/method result property.
:getter: Gets the task function/method result (produced by calling
the function on the defined arguments)
:setter: Sets the task function/method result
"""
return self._result
@result.setter
def result(self, r):
self._result = r
self._is_done = True
@property
def is_done(self):
"""
Task function/method status property - getter only.
:getter: Gets the task function/method status
"""
return self._is_done
def multithread(tasks, pool_size=10):
"""
Executes several tasks concurrently via ``threading`` threads, puts the
results into a queue, and generates these back to the caller.
"""
task_q = Queue()
num_tasks = 0
for task in tasks:
task_q.put(task)
num_tasks += 1
def run(i, task_q, result_q, stopper):
while not stopper.is_set():
try:
task = task_q.get_nowait()
except Empty:
break
else:
task.result = task.func(*task.args) if task.args else task.func()
if type(task.result) in (types.GeneratorType, list, tuple, set):
for r in task.result:
result_q.put((task.key, r,))
else:
result_q.put((task.key, task.result,))
task_q.task_done()
result_q = Queue()
stopper = Event()
threads = tuple(Thread(target=run, args=(i, task_q, result_q, stopper,)) for i in range(pool_size))
handler = SignalHandler(stopper, threads)
signal(SIGINT, handler)
for thread in threads:
thread.start()
task_q.join()
while not result_q.empty():
key, result = result_q.get_nowait()
yield key, result
def multiprocess(tasks, pool_size=10):
"""
Executes several tasks concurrently via Python ``multiprocessing``
processes, puts the results into a queue, and generates these back to the
caller.
"""
pool = billiard.Pool(pool_size)
result_q = Queue()
def build_results(result):
if type(result) in (types.GeneratorType, list, tuple, set):
for r in result:
result_q.put(r)
else:
result_q.put(result)
for task in tasks:
run = pool.apply_async(task.func, args=task.args, callback=build_results)
        run.get()  # .get() blocks until this task completes, so the tasks are effectively executed one after another
pool.close()
pool.join()
while not result_q.empty():
result = result_q.get_nowait()
yield result
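# Usage sketch (added for illustration, not part of the original module): wrap callables in Task
# objects and hand them to multithread, which runs them in a thread pool and then yields
# (key, result) pairs. The worker function below is an assumption made up for the example.
if __name__ == '__main__':
    def square(n):
        return n * n
    tasks = [Task(square, args=(n,), key='square-{}'.format(n)) for n in range(5)]
    for key, result in multithread(tasks, pool_size=3):
        print(key, result)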
| en | 0.75313 | # -*- coding: utf-8 -*- Task function/method property - getter only. :getter: Gets the task function/method object Task function/method arguments property - getter only. :getter: Gets the task function/method arguments Task function/method key - getter only. :getter: Gets the task function/method key Task function/method result property. :getter: Gets the task function/method result (produced by calling the function on the defined arguments) :setter: Sets the task function/method result Task function/method status property - getter only. :getter: Gets the task function/method status Executes several tasks concurrently via ``threading`` threads, puts the results into a queue, and generates these back to the caller. Executes several tasks concurrently via Python ``multiprocessing`` processes, puts the results into a queue, and generates these back to the caller. | 2.367001 | 2 |
runtime/python/Lib/site-packages/isort/output.py | hwaipy/InteractionFreeNode | 4 | 531 | <reponame>hwaipy/InteractionFreeNode<filename>runtime/python/Lib/site-packages/isort/output.py
import copy
import itertools
from functools import partial
from typing import Any, Iterable, List, Optional, Set, Tuple, Type
from isort.format import format_simplified
from . import parse, sorting, wrap
from .comments import add_to_line as with_comments
from .identify import STATEMENT_DECLARATIONS
from .settings import DEFAULT_CONFIG, Config
def sorted_imports(
parsed: parse.ParsedContent,
config: Config = DEFAULT_CONFIG,
extension: str = "py",
import_type: str = "import",
) -> str:
"""Adds the imports back to the file.
(at the index of the first import) sorted alphabetically and split between groups
"""
if parsed.import_index == -1:
return _output_as_string(parsed.lines_without_imports, parsed.line_separator)
formatted_output: List[str] = parsed.lines_without_imports.copy()
remove_imports = [format_simplified(removal) for removal in config.remove_imports]
sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate)
if config.no_sections:
parsed.imports["no_sections"] = {"straight": {}, "from": {}}
base_sections: Tuple[str, ...] = ()
for section in sections:
if section == "FUTURE":
base_sections = ("FUTURE",)
continue
parsed.imports["no_sections"]["straight"].update(
parsed.imports[section].get("straight", {})
)
parsed.imports["no_sections"]["from"].update(parsed.imports[section].get("from", {}))
sections = base_sections + ("no_sections",)
output: List[str] = []
seen_headings: Set[str] = set()
pending_lines_before = False
for section in sections:
straight_modules = parsed.imports[section]["straight"]
if not config.only_sections:
straight_modules = sorting.sort(
config,
straight_modules,
key=lambda key: sorting.module_key(
key, config, section_name=section, straight_import=True
),
reverse=config.reverse_sort,
)
from_modules = parsed.imports[section]["from"]
if not config.only_sections:
from_modules = sorting.sort(
config,
from_modules,
key=lambda key: sorting.module_key(key, config, section_name=section),
reverse=config.reverse_sort,
)
if config.star_first:
star_modules = []
other_modules = []
for module in from_modules:
if "*" in parsed.imports[section]["from"][module]:
star_modules.append(module)
else:
other_modules.append(module)
from_modules = star_modules + other_modules
straight_imports = _with_straight_imports(
parsed, config, straight_modules, section, remove_imports, import_type
)
from_imports = _with_from_imports(
parsed, config, from_modules, section, remove_imports, import_type
)
lines_between = [""] * (
config.lines_between_types if from_modules and straight_modules else 0
)
if config.from_first:
section_output = from_imports + lines_between + straight_imports
else:
section_output = straight_imports + lines_between + from_imports
if config.force_sort_within_sections:
# collapse comments
comments_above = []
new_section_output: List[str] = []
for line in section_output:
if not line:
continue
if line.startswith("#"):
comments_above.append(line)
elif comments_above:
new_section_output.append(_LineWithComments(line, comments_above))
comments_above = []
else:
new_section_output.append(line)
# only_sections options is not imposed if force_sort_within_sections is True
new_section_output = sorting.sort(
config,
new_section_output,
key=partial(sorting.section_key, config=config),
reverse=config.reverse_sort,
)
# uncollapse comments
section_output = []
for line in new_section_output:
comments = getattr(line, "comments", ())
if comments:
section_output.extend(comments)
section_output.append(str(line))
section_name = section
no_lines_before = section_name in config.no_lines_before
if section_output:
if section_name in parsed.place_imports:
parsed.place_imports[section_name] = section_output
continue
section_title = config.import_headings.get(section_name.lower(), "")
if section_title and section_title not in seen_headings:
if config.dedup_headings:
seen_headings.add(section_title)
section_comment = f"# {section_title}"
if section_comment not in parsed.lines_without_imports[0:1]: # pragma: no branch
section_output.insert(0, section_comment)
if pending_lines_before or not no_lines_before:
output += [""] * config.lines_between_sections
output += section_output
pending_lines_before = False
else:
pending_lines_before = pending_lines_before or not no_lines_before
if config.ensure_newline_before_comments:
output = _ensure_newline_before_comment(output)
while output and output[-1].strip() == "":
output.pop() # pragma: no cover
while output and output[0].strip() == "":
output.pop(0)
if config.formatting_function:
output = config.formatting_function(
parsed.line_separator.join(output), extension, config
).splitlines()
output_at = 0
if parsed.import_index < parsed.original_line_count:
output_at = parsed.import_index
formatted_output[output_at:0] = output
if output:
imports_tail = output_at + len(output)
while [
character.strip() for character in formatted_output[imports_tail : imports_tail + 1]
] == [""]:
formatted_output.pop(imports_tail)
if len(formatted_output) > imports_tail:
next_construct = ""
tail = formatted_output[imports_tail:]
for index, line in enumerate(tail): # pragma: no branch
should_skip, in_quote, *_ = parse.skip_line(
line,
in_quote="",
index=len(formatted_output),
section_comments=config.section_comments,
needs_import=False,
)
if not should_skip and line.strip():
if (
line.strip().startswith("#")
and len(tail) > (index + 1)
and tail[index + 1].strip()
):
continue
next_construct = line
break
if in_quote: # pragma: no branch
next_construct = line
break
if config.lines_after_imports != -1:
formatted_output[imports_tail:0] = [
"" for line in range(config.lines_after_imports)
]
elif extension != "pyi" and next_construct.startswith(STATEMENT_DECLARATIONS):
formatted_output[imports_tail:0] = ["", ""]
else:
formatted_output[imports_tail:0] = [""]
if parsed.place_imports:
new_out_lines = []
for index, line in enumerate(formatted_output):
new_out_lines.append(line)
if line in parsed.import_placements:
new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]])
if (
len(formatted_output) <= (index + 1)
or formatted_output[index + 1].strip() != ""
):
new_out_lines.append("")
formatted_output = new_out_lines
return _output_as_string(formatted_output, parsed.line_separator)
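# Usage note (added for illustration, not part of the original module): sorted_imports is driven
# internally from isort's public entry points; callers would normally go through the top-level
# API instead, e.g.
#
#     import isort
#     print(isort.code("import os\nimport collections\nimport abc\n"))
#
# which parses the source, rebuilds the import block via this function, and returns the sorted
# code as a string.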
def _with_from_imports(
parsed: parse.ParsedContent,
config: Config,
from_modules: Iterable[str],
section: str,
remove_imports: List[str],
import_type: str,
) -> List[str]:
output: List[str] = []
for module in from_modules:
if module in remove_imports:
continue
import_start = f"from {module} {import_type} "
from_imports = list(parsed.imports[section]["from"][module])
if (
not config.no_inline_sort
or (config.force_single_line and module not in config.single_line_exclusions)
) and not config.only_sections:
from_imports = sorting.sort(
config,
from_imports,
key=lambda key: sorting.module_key(
key,
config,
True,
config.force_alphabetical_sort_within_sections,
section_name=section,
),
reverse=config.reverse_sort,
)
if remove_imports:
from_imports = [
line for line in from_imports if f"{module}.{line}" not in remove_imports
]
sub_modules = [f"{module}.{from_import}" for from_import in from_imports]
as_imports = {
from_import: [
f"{from_import} as {as_module}" for as_module in parsed.as_map["from"][sub_module]
]
for from_import, sub_module in zip(from_imports, sub_modules)
if sub_module in parsed.as_map["from"]
}
if config.combine_as_imports and not ("*" in from_imports and config.combine_star):
if not config.no_inline_sort:
for as_import in as_imports:
if not config.only_sections:
as_imports[as_import] = sorting.sort(config, as_imports[as_import])
for from_import in copy.copy(from_imports):
if from_import in as_imports:
idx = from_imports.index(from_import)
if parsed.imports[section]["from"][module][from_import]:
from_imports[(idx + 1) : (idx + 1)] = as_imports.pop(from_import)
else:
from_imports[idx : (idx + 1)] = as_imports.pop(from_import)
only_show_as_imports = False
comments = parsed.categorized_comments["from"].pop(module, ())
above_comments = parsed.categorized_comments["above"]["from"].pop(module, None)
while from_imports:
if above_comments:
output.extend(above_comments)
above_comments = None
if "*" in from_imports and config.combine_star:
import_statement = wrap.line(
with_comments(
_with_star_comments(parsed, module, list(comments or ())),
f"{import_start}*",
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
),
parsed.line_separator,
config,
)
from_imports = [
from_import for from_import in from_imports if from_import in as_imports
]
only_show_as_imports = True
elif config.force_single_line and module not in config.single_line_exclusions:
import_statement = ""
while from_imports:
from_import = from_imports.pop(0)
single_import_line = with_comments(
comments,
import_start + from_import,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
comment = (
parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None)
)
if comment:
single_import_line += (
f"{comments and ';' or config.comment_prefix} " f"{comment}"
)
if from_import in as_imports:
if (
parsed.imports[section]["from"][module][from_import]
and not only_show_as_imports
):
output.append(
wrap.line(single_import_line, parsed.line_separator, config)
)
from_comments = parsed.categorized_comments["straight"].get(
f"{module}.{from_import}"
)
if not config.only_sections:
output.extend(
with_comments(
from_comments,
wrap.line(
import_start + as_import, parsed.line_separator, config
),
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
for as_import in sorting.sort(config, as_imports[from_import])
)
else:
output.extend(
with_comments(
from_comments,
wrap.line(
import_start + as_import, parsed.line_separator, config
),
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
for as_import in as_imports[from_import]
)
else:
output.append(wrap.line(single_import_line, parsed.line_separator, config))
comments = None
else:
while from_imports and from_imports[0] in as_imports:
from_import = from_imports.pop(0)
if not config.only_sections:
as_imports[from_import] = sorting.sort(config, as_imports[from_import])
from_comments = (
parsed.categorized_comments["straight"].get(f"{module}.{from_import}") or []
)
if (
parsed.imports[section]["from"][module][from_import]
and not only_show_as_imports
):
specific_comment = (
parsed.categorized_comments["nested"]
.get(module, {})
.pop(from_import, None)
)
if specific_comment:
from_comments.append(specific_comment)
output.append(
wrap.line(
with_comments(
from_comments,
import_start + from_import,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
),
parsed.line_separator,
config,
)
)
from_comments = []
for as_import in as_imports[from_import]:
specific_comment = (
parsed.categorized_comments["nested"]
.get(module, {})
.pop(as_import, None)
)
if specific_comment:
from_comments.append(specific_comment)
output.append(
wrap.line(
with_comments(
from_comments,
import_start + as_import,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
),
parsed.line_separator,
config,
)
)
from_comments = []
if "*" in from_imports:
output.append(
with_comments(
_with_star_comments(parsed, module, []),
f"{import_start}*",
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
)
from_imports.remove("*")
for from_import in copy.copy(from_imports):
comment = (
parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None)
)
if comment:
from_imports.remove(from_import)
if from_imports:
use_comments = []
else:
use_comments = comments
comments = None
single_import_line = with_comments(
use_comments,
import_start + from_import,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
single_import_line += (
f"{use_comments and ';' or config.comment_prefix} " f"{comment}"
)
output.append(wrap.line(single_import_line, parsed.line_separator, config))
from_import_section = []
while from_imports and (
from_imports[0] not in as_imports
or (
config.combine_as_imports
and parsed.imports[section]["from"][module][from_import]
)
):
from_import_section.append(from_imports.pop(0))
if config.combine_as_imports:
comments = (comments or []) + list(
parsed.categorized_comments["from"].pop(f"{module}.__combined_as__", ())
)
import_statement = with_comments(
comments,
import_start + (", ").join(from_import_section),
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
if not from_import_section:
import_statement = ""
do_multiline_reformat = False
force_grid_wrap = config.force_grid_wrap
if force_grid_wrap and len(from_import_section) >= force_grid_wrap:
do_multiline_reformat = True
if len(import_statement) > config.line_length and len(from_import_section) > 1:
do_multiline_reformat = True
# If line too long AND have imports AND we are
# NOT using GRID or VERTICAL wrap modes
if (
len(import_statement) > config.line_length
and len(from_import_section) > 0
and config.multi_line_output
not in (wrap.Modes.GRID, wrap.Modes.VERTICAL) # type: ignore
):
do_multiline_reformat = True
if do_multiline_reformat:
import_statement = wrap.import_statement(
import_start=import_start,
from_imports=from_import_section,
comments=comments,
line_separator=parsed.line_separator,
config=config,
)
if config.multi_line_output == wrap.Modes.GRID: # type: ignore
other_import_statement = wrap.import_statement(
import_start=import_start,
from_imports=from_import_section,
comments=comments,
line_separator=parsed.line_separator,
config=config,
multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore
)
if (
max(
len(import_line)
for import_line in import_statement.split(parsed.line_separator)
)
> config.line_length
):
import_statement = other_import_statement
if not do_multiline_reformat and len(import_statement) > config.line_length:
import_statement = wrap.line(import_statement, parsed.line_separator, config)
if import_statement:
output.append(import_statement)
return output
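# Illustrative note (added comment, not part of the original isort source):
# for a module "os.path" whose from-imports are ["join", "exists"] under a
# default Config, this helper emits a single statement such as
#     from os.path import exists, join
# and only switches to the configured multi_line_output wrapping mode when the
# statement exceeds config.line_length or force_grid_wrap applies.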
def _with_straight_imports(
parsed: parse.ParsedContent,
config: Config,
straight_modules: Iterable[str],
section: str,
remove_imports: List[str],
import_type: str,
) -> List[str]:
output: List[str] = []
as_imports = any((module in parsed.as_map["straight"] for module in straight_modules))
# combine_straight_imports only works for bare imports, 'as' imports not included
if config.combine_straight_imports and not as_imports:
if not straight_modules:
return []
above_comments: List[str] = []
inline_comments: List[str] = []
for module in straight_modules:
if module in parsed.categorized_comments["above"]["straight"]:
above_comments.extend(parsed.categorized_comments["above"]["straight"].pop(module))
if module in parsed.categorized_comments["straight"]:
inline_comments.extend(parsed.categorized_comments["straight"][module])
combined_straight_imports = ", ".join(straight_modules)
if inline_comments:
combined_inline_comments = " ".join(inline_comments)
else:
combined_inline_comments = ""
output.extend(above_comments)
if combined_inline_comments:
output.append(
f"{import_type} {combined_straight_imports} # {combined_inline_comments}"
)
else:
output.append(f"{import_type} {combined_straight_imports}")
return output
for module in straight_modules:
if module in remove_imports:
continue
import_definition = []
if module in parsed.as_map["straight"]:
if parsed.imports[section]["straight"][module]:
import_definition.append((f"{import_type} {module}", module))
import_definition.extend(
(f"{import_type} {module} as {as_import}", f"{module} as {as_import}")
for as_import in parsed.as_map["straight"][module]
)
else:
import_definition.append((f"{import_type} {module}", module))
comments_above = parsed.categorized_comments["above"]["straight"].pop(module, None)
if comments_above:
output.extend(comments_above)
output.extend(
with_comments(
parsed.categorized_comments["straight"].get(imodule),
idef,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
for idef, imodule in import_definition
)
return output
def _output_as_string(lines: List[str], line_separator: str) -> str:
return line_separator.join(_normalize_empty_lines(lines))
def _normalize_empty_lines(lines: List[str]) -> List[str]:
while lines and lines[-1].strip() == "":
lines.pop(-1)
lines.append("")
return lines
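# Example (illustrative): ["import os", "", ""] -> ["import os", ""].
# Trailing blank lines are stripped and a single empty string is appended, so
# the joined output always ends with exactly one trailing newline.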
class _LineWithComments(str):
comments: List[str]
def __new__(
cls: Type["_LineWithComments"], value: Any, comments: List[str]
) -> "_LineWithComments":
instance = super().__new__(cls, value)
instance.comments = comments
return instance
def _ensure_newline_before_comment(output: List[str]) -> List[str]:
new_output: List[str] = []
def is_comment(line: Optional[str]) -> bool:
return line.startswith("#") if line else False
for line, prev_line in zip(output, [None] + output): # type: ignore
if is_comment(line) and prev_line != "" and not is_comment(prev_line):
new_output.append("")
new_output.append(line)
return new_output
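# Example (illustrative): ["import a", "# note", "import b"] becomes
# ["import a", "", "# note", "import b"]; a blank line is inserted only when a
# comment directly follows a non-empty, non-comment line.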
def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str]) -> List[str]:
star_comment = parsed.categorized_comments["nested"].get(module, {}).pop("*", None)
if star_comment:
return comments + [star_comment]
return comments
return comments | en | 0.721043 | Adds the imports back to the file. (at the index of the first import) sorted alphabetically and split between groups # collapse comments # only_sections options is not imposed if force_sort_within_sections is True # uncollapse comments # pragma: no branch # pragma: no cover # pragma: no branch # pragma: no branch # If line too long AND have imports AND we are # NOT using GRID or VERTICAL wrap modes # type: ignore # type: ignore # type: ignore # combine_straight_imports only works for bare imports, 'as' imports not included # {combined_inline_comments}" # type: ignore | 2.279167 | 2 |
vixen/project.py | amoeba/vixen | 10 | 532 | import datetime
import io
import json_tricks
import logging
import os
from os.path import (abspath, basename, dirname, exists, expanduser,
join, realpath, relpath, splitext)
import re
import shutil
import sys
from traits.api import (Any, Dict, Enum, HasTraits, Instance, List, Long,
Str)
from whoosh import fields, qparser, query
from whoosh.util.times import datetime_to_long, long_to_datetime
from .common import get_project_dir
from .media import Media, MediaData, get_media_data
from .directory import Directory
from . import processor
logger = logging.getLogger(__name__)
if sys.version_info[0] > 2:
unicode = str
string_types = (str,)
import csv
else:
string_types = (basestring,)
import backports.csv as csv
INT = fields.NUMERIC(numtype=int)
FLOAT = fields.NUMERIC(numtype=float)
def get_file_saved_time(path):
dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime)
return dt.ctime()
def _get_sample(fname):
sample = ''
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
sample += fp.readline() + fp.readline()
return sample
def _get_csv_headers(fname):
sample = _get_sample(fname)
sniffer = csv.Sniffer()
has_header = sniffer.has_header(sample)
dialect = sniffer.sniff(sample)
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
header = next(reader)
return has_header, header, dialect
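# Illustrative example (hypothetical file contents, added comment): for a CSV
# beginning with
#     path,rating
#     images/a.png,5
# this would typically return (True, ["path", "rating"], <sniffed dialect>),
# since the Sniffer sees a header row followed by a data row in the sample.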
class TagInfo(HasTraits):
name = Str
type = Enum("string", "text", "int", "float", "bool")
default = Any
def __repr__(self):
return 'TagInfo(%r, %r)' % (self.name, self.type)
def _default_default(self):
map = {"string": "", "text": "", "int": 0, "float": 0.0,
"bool": False}
return map[self.type]
def open_file(fname_or_file, mode='rb'):
if hasattr(fname_or_file, 'read'):
return fname_or_file
else:
return open(fname_or_file, mode)
def sanitize_name(name):
name = name.lower()
name = re.sub(r'\s+', '_', name)
return re.sub(r'\W+', '', name)
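# Example: sanitize_name("My Project 2021!") -> "my_project_2021"
# (lower-cased, runs of whitespace become "_", remaining symbols are dropped).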
def get_non_existing_filename(fname):
if exists(fname):
base, ext = splitext(basename(fname))
return join(dirname(fname), base + '_a' + ext)
else:
return fname
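# Example (hypothetical path): if "/tmp/photos.vxn" already exists, this
# returns "/tmp/photos_a.vxn"; otherwise the name is returned unchanged.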
COMMON_TAGS = dict(
file_name='string', path='string', relpath='string',
ctime='string', mtime='string', size='int', type='string'
)
def _cleanup_query(q, tag_types):
type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
for term in q.leaves():
if isinstance(term, query.Term):
if isinstance(term.text, (str, unicode, bytes)):
fieldtype = tag_types[term.fieldname]
if fieldtype in type_map:
term.text = type_map[fieldtype](term.text)
else:
term.text = term.text.lower()
elif isinstance(term, query.Phrase):
term.words = [x.lower() for x in term.words]
def _check_value(value, expr):
if isinstance(expr, string_types):
return expr in value.lower()
else:
return expr == value
def _check_range(x, term):
result = True
if term.start is not None:
if term.startexcl:
result &= x > term.start
else:
result &= x >= term.start
if term.end is not None and result:
if term.endexcl:
result &= x < term.end
else:
result &= x <= term.end
return result
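# Example (illustrative): for a NumericRange with start=100, end=500,
# startexcl=False and endexcl=True, this returns True exactly when
# 100 <= x < 500; a missing bound leaves that side of the range unconstrained.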
def _check_date_range(x, term):
result = True
if term.startdate is not None:
result &= x >= term.start
if term.enddate is not None and result:
result &= x <= term.end
return result
def _search_media(expr, m_key, get_tag):
"""Given search expression, index to media, and a getter to get the attribute
check if the media matches expression.
"""
if expr.is_leaf():
if isinstance(expr, query.Term):
attr = expr.fieldname
return _check_value(get_tag(m_key, attr), expr.text)
elif isinstance(expr, query.Phrase):
attr = expr.fieldname
text = " ".join(expr.words)
return _check_value(get_tag(m_key, attr), text)
elif isinstance(expr, query.DateRange):
if expr.fieldname == 'ctime':
value = get_tag(m_key, 'ctime_')
elif expr.fieldname == 'mtime':
value = get_tag(m_key, 'mtime_')
return _check_date_range(value, expr)
elif isinstance(expr, query.NumericRange):
attr = expr.fieldname
return _check_range(get_tag(m_key, attr), expr)
else:
print("Unsupported term: %r" % expr)
return False
else:
if isinstance(expr, query.And):
result = True
for child in expr.children():
result &= _search_media(child, m_key, get_tag)
if not result:
break
return result
elif isinstance(expr, query.Or):
result = False
for child in expr.children():
result |= _search_media(child, m_key, get_tag)
if result:
break
return result
elif isinstance(expr, query.Not):
subquery = list(expr.children())[0]
return not _search_media(subquery, m_key, get_tag)
else:
print("Unsupported term: %r" % expr)
return False
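# Illustrative sketch (hypothetical query, added comment): a parsed expression
# such as
#     And([Term("type", "image"), NumericRange("size", 1024, None)])
# is evaluated recursively: each leaf fetches the attribute or tag value via
# get_tag(m_key, fieldname), and the boolean results are combined with
# short-circuit evaluation for And/Or and negation for Not.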
class Project(HasTraits):
name = Str
description = Str
path = Str
root = Instance(Directory)
tags = List(TagInfo)
_media = Dict(Str, Media)
extensions = List(Str)
processors = List(processor.FactoryBase)
number_of_files = Long
# Path where the project data is saved.
save_file = Str
last_save_time = Str
_data = Dict
_tag_data = Dict
_relpath2index = Dict()
_query_parser = Instance(qparser.QueryParser)
def add_tags(self, tags):
tags = list(self.tags) + tags
self.update_tags(tags)
def update_tags(self, new_tags):
old_tags = self.tags
new_tag_names = set(tag.name for tag in new_tags)
tag_info = dict((tag.name, tag.type) for tag in old_tags)
removed = []
added = []
for tag in new_tags:
if tag.name not in tag_info:
added.append(tag)
elif tag_info[tag.name] != tag.type:
removed.append(tag)
added.append(tag)
for tag in old_tags:
if tag.name not in new_tag_names:
removed.append(tag)
for tag in removed:
del self._tag_data[tag.name]
n_entries = len(self._relpath2index)
for tag in added:
self._tag_data[tag.name] = [tag.default]*n_entries
# The above can be the first time when self._tag_data is accessed, when
# creating a new project for example. In this case,
# self.__tag_data_default is called, so if self.tags is set then the
# removed tags will not exist in _tag_data causing an error. So we only
# set self.tags below.
self.tags = new_tags
# Update the cached media
for m in self._media.values():
for tag in removed:
del m.tags[tag.name]
for tag in added:
m.tags[tag.name] = tag.default
self._query_parser = self._make_query_parser()
def copy(self):
"""Make a copy of this project. This does not copy the data but only
the tags, extensions and the other settings of the project.
This will not copy any of the processor states but only their settings.
"""
name = self.name + ' copy'
p = Project(name=name)
traits = ['description', 'extensions', 'path', 'processors', 'tags']
p.copy_traits(self, traits, copy='deep')
# Clear out the _done information from the processors
for proc in p.processors:
proc._done.clear()
return p
# #### CRUD interface to the data ####
def update(self, media_data, tags=None):
"""Create/update the internal data given the media data and tags.
Parameters
----------
f: vixen.directory.File instance
tags: dict
"""
relpath = media_data.relpath
if not self.has_media(relpath):
index = len(self._relpath2index)
self._relpath2index[relpath] = index
for key in MediaData._fields:
self._data[key].append(None)
for tag in self.tags:
self._tag_data[tag.name].append(tag.default)
index = self._relpath2index[relpath]
for i, key in enumerate(MediaData._fields):
self._data[key][index] = media_data[i]
if tags:
for key, value in tags.items():
self._tag_data[key][index] = value
media = self._media.get(relpath)
if media is not None:
media.update(media_data, tags)
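    # Note (descriptive comment added for clarity): media records are stored
    # column-wise. self._data maps each MediaData field to a list of values,
    # self._tag_data does the same for tag values, and self._relpath2index
    # maps a relative path to the row index shared by all of those lists.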
def get(self, relpath):
"""Given the relative path of some media, return a Media instance.
"""
if relpath in self._media:
return self._media[relpath]
else:
data = {}
index = self._relpath2index[relpath]
for key in MediaData._fields:
data[key] = self._data[key][index]
tags = {}
for key in self._tag_data:
tags[key] = self._tag_data[key][index]
media = Media.from_data(MediaData(**data), tags)
media.on_trait_change(self._media_tag_handler, 'tags_items')
self._media[relpath] = media
return media
def remove(self, relpaths):
"""Given a list of relative path of some media, remove them from the
database.
"""
relpath2index = self._relpath2index
indices = [(x, relpath2index[x]) for x in relpaths]
for relpath, index in sorted(indices, reverse=True):
last = len(relpath2index) - 1
if index == last:
self._delete_record(last, relpath)
else:
self._replace_with_last_record(index, last)
self._delete_record(last, relpath)
def has_media(self, relpath):
"""Returns True if the media data is available.
"""
return relpath in self._relpath2index
def keys(self):
"""Return all the keys for the media relative paths."""
return self._relpath2index.keys()
def _get_media_attr(self, index, attr):
"""Given an index to the media, return its value.
"""
if attr in self._data:
return self._data[attr][index]
elif attr in self._tag_data:
return self._tag_data[attr][index]
# #### End of CRUD interface to the data ####
def clean(self):
"""Scan the project and remove any dead entries.
This is useful when you remove or rename files. This does not refresh
the directory tree or set the number of files. It simply cleans up the
db of files that no longer exist.
"""
logger.info('Cleaning project: %s', self.name)
root_path = self.path
to_remove = []
relpath2index = self._relpath2index
for rpath in list(relpath2index.keys()):
fname = os.path.join(root_path, rpath)
if not os.path.exists(fname):
to_remove.append(rpath)
self.remove(to_remove)
def export_csv(self, fname, cols=None):
"""Export metadata to a csv file. If `cols` are not specified,
it writes out all the useful metadata.
Parameters
-----------
fname: str: a path to the csv file to dump.
cols: sequence: a sequence of columns to write.
"""
logger.info('Exporting CSV: %s', fname)
all_keys = ((set(MediaData._fields) | set(self._tag_data.keys()))
- set(('ctime_', 'mtime_')))
if cols is None:
cols = all_keys
cols = list(sorted(cols))
data_cols = set([x for x in cols if x in self._data])
with io.open(fname, 'w', newline='', encoding='utf-8') as of:
# Write the header.
writer = csv.writer(of)
writer.writerow(cols)
for i in range(len(self._relpath2index)):
line = []
for col in cols:
if col in data_cols:
elem = self._data[col][i]
else:
elem = self._tag_data[col][i]
line.append(elem)
writer.writerow(line)
def import_csv(self, fname):
"""Read tag information from given CSV filename.
        Returns the success status and an error message, if any. Note that tags
        are applied only for column headers that match known tag names; unknown
        columns are ignored.
Parameters
----------
        fname : str
            Input filename.
"""
logger.info('Importing tags from: %s', fname)
has_header, header, dialect = _get_csv_headers(fname)
if not has_header:
return False, "The CSV file does not appear to have a header."
if 'path' not in header:
msg = "The CSV file does not have a 'path' column."
return False, msg
tags = {x: header.index(x.name) for x in self.tags if x.name in header}
path_idx = header.index('path')
TRUE = ('1', 't', 'true', 'y', 'yes')
type_map = {
'bool': lambda x: x.lower() in TRUE,
'string': lambda x: x,
'text': lambda x: x,
'int': int,
'float': float
}
count = 0
total = 0
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
next(reader) # Skip header
for record in reader:
total += 1
path = record[path_idx]
rpath = relpath(path, self.path)
index = self._relpath2index.get(rpath, None)
media = self._media.get(rpath)
if index is not None:
count += 1
for tag, header_index in tags.items():
data = record[header_index]
try:
value = type_map[tag.type](data)
if media is not None:
media.tags[tag.name] = value
else:
self._tag_data[tag.name][index] = value
except ValueError:
pass
msg = "Read tags for %d paths out of %d entries." % (count, total)
if count == 0 and total > 0:
msg += ("\nPlease check that your path column matches "
"the media paths.")
return False, msg
else:
msg += ("\nPlease check the imported tags and make sure you "
"save the project.")
return True, msg
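    # Illustrative usage (hypothetical file name, added comment):
    #     ok, msg = project.import_csv("tags.csv")
    # The CSV must contain a "path" column whose values resolve to files under
    # the project root; only columns matching known tag names are applied.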
def load(self, fp=None):
"""Load media info from opened file object.
"""
if fp is None:
if not exists(self.save_file):
return
fp = open_file(self.save_file, 'rb')
else:
fp = open_file(fp, 'rb')
data = json_tricks.load(
fp, preserve_order=False, ignore_comments=False
)
fp.close()
self.name = data.get('name', '')
self.description = data.get('description', '')
self.path = data.get('path')
self.tags = [TagInfo(name=x[0], type=x[1]) for x in data['tags']]
self.processors = [processor.load(x)
for x in data.get('processors', [])]
version = data.get('version')
if version == 1:
self._read_version1_media(data['media'])
else:
self._data = data['media_data']
self._tag_data = data['tag_data']
self._relpath2index = data['relpath2index']
root = Directory()
root.__setstate__(data.get('root'))
self.extensions = root.extensions
self.root = root
self.number_of_files = len(self._relpath2index)
def save(self):
"""Save current media info to a file object
"""
if len(self.save_file) > 0:
self.save_as(self.save_file)
self._update_last_save_time()
else:
raise IOError("No valid save file set.")
def save_as(self, fp):
"""Save copy to specified path.
"""
fp = open_file(fp, 'wb')
tags = [(t.name, t.type) for t in self.tags]
root = self.root.__getstate__()
processors = [processor.dump(x) for x in self.processors]
data = dict(
version=2, path=self.path, name=self.name,
description=self.description, tags=tags,
media_data=self._data, tag_data=self._tag_data,
relpath2index=self._relpath2index,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
def scan(self, refresh=False):
"""Find all the media recursively inside the root directory.
This will not clobber existing records but will add any new ones.
"""
self._setup_root()
def _scan(dir):
for f in dir.files:
if not self.has_media(f.relpath) or refresh:
data = get_media_data(f.path, f.relpath)
self.update(data)
for d in dir.directories:
if refresh:
d.refresh()
_scan(d)
if refresh:
self.root.refresh()
_scan(self.root)
self.number_of_files = len(self._relpath2index)
def search(self, q):
"""A generator which yields the (filename, relpath) for each file
satisfying the search query.
"""
logger.info('Searching for %s', q)
try:
parsed_q = self._query_parser.parse(q)
except Exception:
logger.warn("Invalid search expression: %s", q)
print("Invalid search expression: %s" % q)
return
tag_types = self._get_tag_types()
_cleanup_query(parsed_q, tag_types)
for key, index in self._relpath2index.items():
if _search_media(parsed_q, index, self._get_media_attr):
yield basename(key), key
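    # Illustrative usage (hypothetical query, added comment):
    #     for fname, rpath in project.search("type:image AND size:>1024"):
    #         print(rpath)
    # The query syntax is whoosh's; GtLtPlugin enables ">"/"<" comparisons and
    # DateParserPlugin enables date expressions against ctime/mtime.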
def refresh(self):
logger.info('Refreshing project: %s', self.name)
self.clean()
self.scan(refresh=True)
# #### Private protocol ################################################
def _setup_root(self):
path = abspath(expanduser(self.path))
root = self.root
if root is None or realpath(root.path) != realpath(path):
self.root = Directory(path=path, extensions=self.extensions)
def _tags_default(self):
return [TagInfo(name='completed', type='bool')]
def _save_file_default(self):
if len(self.name) > 0:
fname = sanitize_name(self.name) + '.vxn'
d = get_project_dir()
return get_non_existing_filename(join(d, fname))
else:
return ''
def _update_last_save_time(self):
self.last_save_time = get_file_saved_time(self.save_file)
def _last_save_time_default(self):
if exists(self.save_file):
return get_file_saved_time(self.save_file)
else:
return ''
def _name_changed(self, name):
if len(name) > 0:
old_save_file = self.save_file
old_dir = dirname(old_save_file)
new_save_file = join(old_dir, sanitize_name(name) + '.vxn')
if new_save_file != old_save_file:
self.save_file = new_save_file
if exists(old_save_file):
shutil.move(old_save_file, self.save_file)
def _extensions_changed(self, ext):
if self.root is not None:
self.root.extensions = ext
def _extensions_items_changed(self):
if self.root is not None:
self.root.extensions = self.extensions
def _get_tag_types(self):
result = dict(COMMON_TAGS)
result.update(dict((t.name, t.type) for t in self.tags))
return result
def _make_schema(self):
from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema
kw = dict(
type=TEXT, file_name=TEXT, path=TEXT,
mtime=DATETIME, ctime=DATETIME, size=INT
)
type_to_field = dict(
string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN
)
for tag in self.tags:
kw[tag.name] = type_to_field[tag.type]
return Schema(**kw)
def _make_query_parser(self):
schema = self._make_schema()
qp = qparser.QueryParser('path', schema=schema)
qp.add_plugin(qparser.GtLtPlugin())
from whoosh.qparser.dateparse import DateParserPlugin
qp.add_plugin(DateParserPlugin())
return qp
def __query_parser_default(self):
return self._make_query_parser()
def __data_default(self):
data = {}
for key in MediaData._fields:
data[key] = []
return data
def __tag_data_default(self):
tags = {}
for key in self.tags:
tags[key.name] = []
return tags
def _media_tag_handler(self, obj, tname, old, new):
index = self._relpath2index[obj.relpath]
for tag in new.changed:
self._tag_data[tag][index] = obj.tags[tag]
def _read_version1_media(self, media):
data = self.__data_default()
tag_data = self.__tag_data_default()
relpath2index = {}
keymap = dict.fromkeys(MediaData._fields)
for k in keymap:
keymap[k] = k
keymap['_ctime'] = 'ctime_'
keymap['_mtime'] = 'mtime_'
for index, (key, m) in enumerate(media):
relpath2index[key] = index
tags = m.pop('tags')
for tname, v in tags.items():
tag_data[tname].append(v)
for k, v in m.items():
data[keymap[k]].append(v)
if 'file_name' not in m:
data['file_name'].append(basename(key))
data['mtime_'] = [datetime_to_long(x) for x in data['mtime_']]
data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']]
self._data = data
self._tag_data = tag_data
self._relpath2index = relpath2index
def _delete_record(self, index, relpath):
for key in MediaData._fields:
del self._data[key][index]
for key in self._tag_data:
del self._tag_data[key][index]
if relpath in self._media:
del self._media[relpath]
del self._relpath2index[relpath]
def _replace_with_last_record(self, index, last):
_data = self._data
_tag_data = self._tag_data
for key in MediaData._fields:
_data[key][index] = _data[key][last]
for key in self._tag_data:
_tag_data[key][index] = _tag_data[key][last]
last_relpath = _data['relpath'][last]
self._relpath2index[last_relpath] = index
def _save_as_v1(self, fp):
"""Save copy to specified path.
This mainly exists for testing and making sure we still read the old
saved files.
"""
def _rewrite_dir(state):
"Rewrite directories in the old format."
state['files'] = [x[0] for x in state['files']]
state['directories'] = [_rewrite_dir(d)
for d in state['directories']]
state.pop('relpath')
state.pop('name')
return state
fp = open_file(fp, 'wb')
media = [(key, self.get(key).to_dict()) for key in self._relpath2index]
tags = [(t.name, t.type) for t in self.tags]
root = _rewrite_dir(self.root.__getstate__())
processors = [processor.dump(x) for x in self.processors]
for k, m in media:
m['_ctime'] = long_to_datetime(m['_ctime'])
m['_mtime'] = long_to_datetime(m['_mtime'])
data = dict(
version=1, path=self.path, name=self.name,
description=self.description, tags=tags, media=media,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
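# Illustrative end-to-end usage (hypothetical paths, not part of the original
# module):
#     p = Project(name="Photos", path="/data/photos",
#                 extensions=[".jpg", ".png"])
#     p.scan()  # index all media found under /data/photos
#     p.add_tags([TagInfo(name="rating", type="int")])
#     p.save()  # writes the project to <project_dir>/photos.vxn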
data['file_name'].append(basename(key))
data['mtime_'] = [datetime_to_long(x) for x in data['mtime_']]
data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']]
self._data = data
self._tag_data = tag_data
self._relpath2index = relpath2index
def _delete_record(self, index, relpath):
for key in MediaData._fields:
del self._data[key][index]
for key in self._tag_data:
del self._tag_data[key][index]
if relpath in self._media:
del self._media[relpath]
del self._relpath2index[relpath]
def _replace_with_last_record(self, index, last):
_data = self._data
_tag_data = self._tag_data
for key in MediaData._fields:
_data[key][index] = _data[key][last]
for key in self._tag_data:
_tag_data[key][index] = _tag_data[key][last]
last_relpath = _data['relpath'][last]
self._relpath2index[last_relpath] = index
def _save_as_v1(self, fp):
"""Save copy to specified path.
This mainly exists for testing and making sure we still read the old
saved files.
"""
def _rewrite_dir(state):
"Rewrite directories in the old format."
state['files'] = [x[0] for x in state['files']]
state['directories'] = [_rewrite_dir(d)
for d in state['directories']]
state.pop('relpath')
state.pop('name')
return state
fp = open_file(fp, 'wb')
media = [(key, self.get(key).to_dict()) for key in self._relpath2index]
tags = [(t.name, t.type) for t in self.tags]
root = _rewrite_dir(self.root.__getstate__())
processors = [processor.dump(x) for x in self.processors]
for k, m in media:
m['_ctime'] = long_to_datetime(m['_ctime'])
m['_mtime'] = long_to_datetime(m['_mtime'])
data = dict(
version=1, path=self.path, name=self.name,
description=self.description, tags=tags, media=media,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
| en | 0.713932 | Given search expression, index to media, and a getter to get the attribute check if the media matches expression. # Path where the project data is saved. # The above can be the first time when self._tag_data is accessed, when # creating a new project for example. In this case, # self.__tag_data_default is called, so if self.tags is set then the # removed tags will not exist in _tag_data causing an error. So we only # set self.tags below. # Update the cached media Make a copy of this project. This does not copy the data but only the tags, extensions and the other settings of the project. This will not copy any of the processor states but only their settings. # Clear out the _done information from the processors # #### CRUD interface to the data #### Create/update the internal data given the media data and tags. Parameters ---------- f: vixen.directory.File instance tags: dict Given the relative path of some media, return a Media instance. Given a list of relative path of some media, remove them from the database. Returns True if the media data is available. Return all the keys for the media relative paths. Given an index to the media, return its value. # #### End of CRUD interface to the data #### Scan the project and remove any dead entries. This is useful when you remove or rename files. This does not refresh the directory tree or set the number of files. It simply cleans up the db of files that no longer exist. Export metadata to a csv file. If `cols` are not specified, it writes out all the useful metadata. Parameters ----------- fname: str: a path to the csv file to dump. cols: sequence: a sequence of columns to write. # Write the header. Read tag information from given CSV filename. Returns the success status and the error message if any. Note that this only applies tags for column headers with known tags. Unknown tags are not added. Parameters ---------- fname : str Input filename. # Skip header Load media info from opened file object. Save current media info to a file object Save copy to specified path. Find all the media recursively inside the root directory. This will not clobber existing records but will add any new ones. A generator which yields the (filename, relpath) for each file satisfying the search query. # #### Private protocol ################################################ Save copy to specified path. This mainly exists for testing and making sure we still read the old saved files. | 2.035162 | 2 |
prance/util/translator.py | elemental-lf/prance | 0 | 533 | """This submodule contains a JSON reference translator."""
__author__ = '<NAME>'
__copyright__ = 'Copyright © 2021 <NAME>'
__license__ = 'MIT'
__all__ = ()
import prance.util.url as _url
def _reference_key(ref_url, item_path):
"""
Return a portion of the dereferenced URL.
format - ref-url_obj-path
"""
return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:])
def _local_ref(path):
url = '#/' + '/'.join(path)
return {'$ref': url}
# Underscored to allow some time for the public API to be stabilized.
class _RefTranslator:
"""
Resolve JSON pointers/references in a spec by translation.
References to objects in other files are copied to the /components/schemas
object of the root document, while being translated to point to the the new
object locations.
"""
def __init__(self, specs, url):
"""
Construct a JSON reference translator.
The translated specs are in the `specs` member after a call to
`translate_references` has been made.
If a URL is given, it is used as a base for calculating the absolute
URL of relative file references.
:param dict specs: The parsed specs in which to translate any references.
:param str url: [optional] The URL to base relative references on.
"""
import copy
self.specs = copy.deepcopy(specs)
self.__strict = True
self.__reference_cache = {}
self.__collected_references = {}
if url:
self.url = _url.absurl(url)
url_key = (_url.urlresource(self.url), self.__strict)
# If we have a url, we want to add ourselves to the reference cache
# - that creates a reference loop, but prevents child resolvers from
# creating a new resolver for this url.
self.__reference_cache[url_key] = self.specs
else:
self.url = None
def translate_references(self):
"""
Iterate over the specification document, performing the translation.
Traverses over the whole document, adding the referenced object from
external files to the /components/schemas object in the root document
and translating the references to the new location.
"""
self.specs = self._translate_partial(self.url, self.specs)
# Add collected references to the root document.
if self.__collected_references:
if 'components' not in self.specs:
self.specs['components'] = {}
if 'schemas' not in self.specs['components']:
self.specs['components'].update({'schemas': {}})
self.specs['components']['schemas'].update(self.__collected_references)
def _dereference(self, ref_url, obj_path):
"""
Dereference the URL and object path.
Returns the dereferenced object.
:param mixed ref_url: The URL at which the reference is located.
:param list obj_path: The object path within the URL resource.
:param tuple recursions: A recursion stack for resolving references.
:return: A copy of the dereferenced value, with all internal references
resolved.
"""
# In order to start dereferencing anything in the referenced URL, we have
# to read and parse it, of course.
contents = _url.fetch_url(ref_url, self.__reference_cache, strict=self.__strict)
# In this inner parser's specification, we can now look for the referenced
# object.
value = contents
if len(obj_path) != 0:
from prance.util.path import path_get
try:
value = path_get(value, obj_path)
except (KeyError, IndexError, TypeError) as ex:
raise _url.ResolutionError('Cannot resolve reference "%s": %s'
% (ref_url.geturl(), str(ex)))
# Deep copy value; we don't want to create recursive structures
import copy
value = copy.deepcopy(value)
# Now resolve partial specs
value = self._translate_partial(ref_url, value)
# That's it!
return value
def _translate_partial(self, base_url, partial):
changes = dict(tuple(self._translating_iterator(base_url, partial, ())))
paths = sorted(changes.keys(), key = len)
from prance.util.path import path_set
for path in paths:
value = changes[path]
if len(path) == 0:
partial = value
else:
path_set(partial, list(path), value, create = True)
return partial
def _translating_iterator(self, base_url, partial, path):
from prance.util.iterators import reference_iterator
for _, ref_string, item_path in reference_iterator(partial):
ref_url, obj_path = _url.split_url_reference(base_url, ref_string)
full_path = path + item_path
if ref_url.path == self.url.path:
# Reference to the root document.
ref_path = obj_path
else:
# Reference to a non-root document.
ref_key = _reference_key(ref_url, obj_path)
if ref_key not in self.__collected_references:
self.__collected_references[ref_key] = None
ref_value = self._dereference(ref_url, obj_path)
self.__collected_references[ref_key] = ref_value
ref_path = ['components', 'schemas', ref_key]
ref_obj = _local_ref(ref_path)
yield full_path, ref_obj
| """This submodule contains a JSON reference translator."""
__author__ = '<NAME>'
__copyright__ = 'Copyright © 2021 <NAME>'
__license__ = 'MIT'
__all__ = ()
import prance.util.url as _url
def _reference_key(ref_url, item_path):
"""
Return a portion of the dereferenced URL.
format - ref-url_obj-path
"""
return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:])
def _local_ref(path):
url = '#/' + '/'.join(path)
return {'$ref': url}
# Underscored to allow some time for the public API to be stabilized.
class _RefTranslator:
"""
Resolve JSON pointers/references in a spec by translation.
References to objects in other files are copied to the /components/schemas
object of the root document, while being translated to point to the the new
object locations.
"""
def __init__(self, specs, url):
"""
Construct a JSON reference translator.
The translated specs are in the `specs` member after a call to
`translate_references` has been made.
If a URL is given, it is used as a base for calculating the absolute
URL of relative file references.
:param dict specs: The parsed specs in which to translate any references.
:param str url: [optional] The URL to base relative references on.
"""
import copy
self.specs = copy.deepcopy(specs)
self.__strict = True
self.__reference_cache = {}
self.__collected_references = {}
if url:
self.url = _url.absurl(url)
url_key = (_url.urlresource(self.url), self.__strict)
# If we have a url, we want to add ourselves to the reference cache
# - that creates a reference loop, but prevents child resolvers from
# creating a new resolver for this url.
self.__reference_cache[url_key] = self.specs
else:
self.url = None
def translate_references(self):
"""
Iterate over the specification document, performing the translation.
Traverses over the whole document, adding the referenced object from
external files to the /components/schemas object in the root document
and translating the references to the new location.
"""
self.specs = self._translate_partial(self.url, self.specs)
# Add collected references to the root document.
if self.__collected_references:
if 'components' not in self.specs:
self.specs['components'] = {}
if 'schemas' not in self.specs['components']:
self.specs['components'].update({'schemas': {}})
self.specs['components']['schemas'].update(self.__collected_references)
def _dereference(self, ref_url, obj_path):
"""
Dereference the URL and object path.
Returns the dereferenced object.
:param mixed ref_url: The URL at which the reference is located.
:param list obj_path: The object path within the URL resource.
:param tuple recursions: A recursion stack for resolving references.
:return: A copy of the dereferenced value, with all internal references
resolved.
"""
# In order to start dereferencing anything in the referenced URL, we have
# to read and parse it, of course.
contents = _url.fetch_url(ref_url, self.__reference_cache, strict=self.__strict)
# In this inner parser's specification, we can now look for the referenced
# object.
value = contents
if len(obj_path) != 0:
from prance.util.path import path_get
try:
value = path_get(value, obj_path)
except (KeyError, IndexError, TypeError) as ex:
raise _url.ResolutionError('Cannot resolve reference "%s": %s'
% (ref_url.geturl(), str(ex)))
# Deep copy value; we don't want to create recursive structures
import copy
value = copy.deepcopy(value)
# Now resolve partial specs
value = self._translate_partial(ref_url, value)
# That's it!
return value
def _translate_partial(self, base_url, partial):
changes = dict(tuple(self._translating_iterator(base_url, partial, ())))
paths = sorted(changes.keys(), key = len)
from prance.util.path import path_set
for path in paths:
value = changes[path]
if len(path) == 0:
partial = value
else:
path_set(partial, list(path), value, create = True)
return partial
def _translating_iterator(self, base_url, partial, path):
from prance.util.iterators import reference_iterator
for _, ref_string, item_path in reference_iterator(partial):
ref_url, obj_path = _url.split_url_reference(base_url, ref_string)
full_path = path + item_path
if ref_url.path == self.url.path:
# Reference to the root document.
ref_path = obj_path
else:
# Reference to a non-root document.
ref_key = _reference_key(ref_url, obj_path)
if ref_key not in self.__collected_references:
self.__collected_references[ref_key] = None
ref_value = self._dereference(ref_url, obj_path)
self.__collected_references[ref_key] = ref_value
ref_path = ['components', 'schemas', ref_key]
ref_obj = _local_ref(ref_path)
yield full_path, ref_obj
| en | 0.830531 | This submodule contains a JSON reference translator. Return a portion of the dereferenced URL. format - ref-url_obj-path # Underscored to allow some time for the public API to be stabilized. Resolve JSON pointers/references in a spec by translation. References to objects in other files are copied to the /components/schemas object of the root document, while being translated to point to the the new object locations. Construct a JSON reference translator. The translated specs are in the `specs` member after a call to `translate_references` has been made. If a URL is given, it is used as a base for calculating the absolute URL of relative file references. :param dict specs: The parsed specs in which to translate any references. :param str url: [optional] The URL to base relative references on. # If we have a url, we want to add ourselves to the reference cache # - that creates a reference loop, but prevents child resolvers from # creating a new resolver for this url. Iterate over the specification document, performing the translation. Traverses over the whole document, adding the referenced object from external files to the /components/schemas object in the root document and translating the references to the new location. # Add collected references to the root document. Dereference the URL and object path. Returns the dereferenced object. :param mixed ref_url: The URL at which the reference is located. :param list obj_path: The object path within the URL resource. :param tuple recursions: A recursion stack for resolving references. :return: A copy of the dereferenced value, with all internal references resolved. # In order to start dereferencing anything in the referenced URL, we have # to read and parse it, of course. # In this inner parser's specification, we can now look for the referenced # object. # Deep copy value; we don't want to create recursive structures # Now resolve partial specs # That's it! # Reference to the root document. # Reference to a non-root document. | 2.516565 | 3 |
src/tests/stream.py | LakshyaaSoni/dropSQL | 35 | 534 | from io import StringIO
from unittest import TestCase
from dropSQL.parser.streams import *
class StreamTestCase(TestCase):
def test(self):
s = '12'
cs = Characters(StringIO(s))
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '2')
r = cs.next()
self.assertFalse(r)
self.assertTrue(r.err())
r = cs.next()
self.assertFalse(r)
cs.back()
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '2')
cs.back(2)
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '1')
| from io import StringIO
from unittest import TestCase
from dropSQL.parser.streams import *
class StreamTestCase(TestCase):
def test(self):
s = '12'
cs = Characters(StringIO(s))
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.peek().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '1')
ch = cs.next().ok()
self.assertEqual(ch, '2')
r = cs.next()
self.assertFalse(r)
self.assertTrue(r.err())
r = cs.next()
self.assertFalse(r)
cs.back()
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '2')
cs.back(2)
r = cs.next()
self.assertTrue(r)
self.assertEqual(r.ok(), '1')
| none | 1 | 2.678547 | 3 |
|
tests/test_bugs.py | mmibrah2/OpenQL | 0 | 535 | <reponame>mmibrah2/OpenQL
import os
import filecmp
import unittest
import numpy as np
from openql import openql as ql
from utils import file_compare
curdir = os.path.dirname(os.path.realpath(__file__))
output_dir = os.path.join(curdir, 'test_output')
class Test_bugs(unittest.TestCase):
@classmethod
def setUp(self):
ql.initialize()
ql.set_option('output_dir', output_dir)
ql.set_option('use_default_gates', 'yes')
ql.set_option('log_level', 'LOG_WARNING')
# @unittest.expectedFailure
# @unittest.skip
def test_typecast(self):
sweep_points = [1,2]
num_circuits = 1
num_qubits = 2
platf = ql.Platform("starmon", 'cc_light')
p = ql.Program('test_bug', platf, num_qubits)
p.set_sweep_points(sweep_points)
k = ql.Kernel('kernel1', platf, num_qubits)
qubit = 1
k.identity(np.int(qubit))
k.identity(np.int32(qubit))
k.identity(np.int64(qubit))
k.identity(np.uint(qubit))
k.identity(np.uint32(qubit))
k.identity(np.uint64(qubit))
# add the kernel to the program
p.add_kernel(k)
# relates to https://github.com/QE-Lab/OpenQL/issues/171
# various runs of compiles were generating different results or in the best
# case strange errors. So multiple (NCOMPILES) runs of compile are executed
# to make sure there is no error and output generated in all these runs is same
# JvS: more likely, it also had to do with the classical register allocator
# depending on stuff like Python's garbage collection to free a register.
# The register numbers have to be hardcoded now for that reason.
def test_stateful_behavior(self):
ql.set_option('optimize', 'no')
ql.set_option('scheduler', 'ALAP')
platform = ql.Platform("myPlatform", 'cc_light')
sweep_points = [1]
nqubits = 3
nregs = 3
p = ql.Program("statelessProgram", platform, nqubits, nregs)
p.set_sweep_points(sweep_points)
k = ql.Kernel("aKernel", platform, nqubits, nregs)
k.prepz(0)
k.gate('rx180', [0])
k.measure(0)
rd = ql.CReg(0)
rs1 = ql.CReg(1)
rs2 = ql.CReg(2)
k.classical(rs1, ql.Operation(3))
k.classical(rs1, ql.Operation(4))
k.classical(rd, ql.Operation(rs1, '+', rs2))
p.add_kernel(k)
NCOMPILES=50
QISA_fn = os.path.join(output_dir, p.name+'_last.qasm')
for i in range(NCOMPILES):
p.compile()
self.setUpClass()
QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm')
os.rename(QISA_fn,QISA_fn_i)
for i in range(NCOMPILES-1):
QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm')
QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm')
self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2))
# Unclear how this test works.
# When clear, enable it again.
# Now it fails, not clear how to repair, so it is disabled.
# def test_empty_infinite_loop(self):
# name = 'empty_infinite_loop'
# in_fn = 'test_' + name + '.cq'
# out_fn = 'test_output/' + name + '_out.cq'
# gold_fn = 'golden/' + name + '_out.cq'
# ql.initialize()
# #ql.set_option('log_level', 'LOG_DEBUG')
# ql.compile(in_fn)
# self.assertTrue(file_compare(out_fn, gold_fn))
if __name__ == '__main__':
unittest.main()
| import os
import filecmp
import unittest
import numpy as np
from openql import openql as ql
from utils import file_compare
curdir = os.path.dirname(os.path.realpath(__file__))
output_dir = os.path.join(curdir, 'test_output')
class Test_bugs(unittest.TestCase):
@classmethod
def setUp(self):
ql.initialize()
ql.set_option('output_dir', output_dir)
ql.set_option('use_default_gates', 'yes')
ql.set_option('log_level', 'LOG_WARNING')
# @unittest.expectedFailure
# @unittest.skip
def test_typecast(self):
sweep_points = [1,2]
num_circuits = 1
num_qubits = 2
platf = ql.Platform("starmon", 'cc_light')
p = ql.Program('test_bug', platf, num_qubits)
p.set_sweep_points(sweep_points)
k = ql.Kernel('kernel1', platf, num_qubits)
qubit = 1
k.identity(np.int(qubit))
k.identity(np.int32(qubit))
k.identity(np.int64(qubit))
k.identity(np.uint(qubit))
k.identity(np.uint32(qubit))
k.identity(np.uint64(qubit))
# add the kernel to the program
p.add_kernel(k)
# relates to https://github.com/QE-Lab/OpenQL/issues/171
# various runs of compiles were generating different results or in the best
# case strange errors. So multiple (NCOMPILES) runs of compile are executed
# to make sure there is no error and output generated in all these runs is same
# JvS: more likely, it also had to do with the classical register allocator
# depending on stuff like Python's garbage collection to free a register.
# The register numbers have to be hardcoded now for that reason.
def test_stateful_behavior(self):
ql.set_option('optimize', 'no')
ql.set_option('scheduler', 'ALAP')
platform = ql.Platform("myPlatform", 'cc_light')
sweep_points = [1]
nqubits = 3
nregs = 3
p = ql.Program("statelessProgram", platform, nqubits, nregs)
p.set_sweep_points(sweep_points)
k = ql.Kernel("aKernel", platform, nqubits, nregs)
k.prepz(0)
k.gate('rx180', [0])
k.measure(0)
rd = ql.CReg(0)
rs1 = ql.CReg(1)
rs2 = ql.CReg(2)
k.classical(rs1, ql.Operation(3))
k.classical(rs1, ql.Operation(4))
k.classical(rd, ql.Operation(rs1, '+', rs2))
p.add_kernel(k)
NCOMPILES=50
QISA_fn = os.path.join(output_dir, p.name+'_last.qasm')
for i in range(NCOMPILES):
p.compile()
self.setUpClass()
QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm')
os.rename(QISA_fn,QISA_fn_i)
for i in range(NCOMPILES-1):
QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm')
QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm')
self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2))
# Unclear how this test works.
# When clear, enable it again.
# Now it fails, not clear how to repair, so it is disabled.
# def test_empty_infinite_loop(self):
# name = 'empty_infinite_loop'
# in_fn = 'test_' + name + '.cq'
# out_fn = 'test_output/' + name + '_out.cq'
# gold_fn = 'golden/' + name + '_out.cq'
# ql.initialize()
# #ql.set_option('log_level', 'LOG_DEBUG')
# ql.compile(in_fn)
# self.assertTrue(file_compare(out_fn, gold_fn))
if __name__ == '__main__':
unittest.main() | en | 0.805141 | # @unittest.expectedFailure # @unittest.skip # add the kernel to the program # relates to https://github.com/QE-Lab/OpenQL/issues/171 # various runs of compiles were generating different results or in the best # case strange errors. So multiple (NCOMPILES) runs of compile are executed # to make sure there is no error and output generated in all these runs is same # JvS: more likely, it also had to do with the classical register allocator # depending on stuff like Python's garbage collection to free a register. # The register numbers have to be hardcoded now for that reason. # Unclear how this test works. # When clear, enable it again. # Now it fails, not clear how to repair, so it is disabled. # def test_empty_infinite_loop(self): # name = 'empty_infinite_loop' # in_fn = 'test_' + name + '.cq' # out_fn = 'test_output/' + name + '_out.cq' # gold_fn = 'golden/' + name + '_out.cq' # ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn)) | 2.335216 | 2 |
utils/setAddress.py | wedvjin/rs485-moist-sensor | 1 | 536 | #!/usr/bin/python
"""Looks for sensor on the bus and changes it's address to the one specified on command line"""
import argparse
import minimalmodbus
import serial
from time import sleep
parser = argparse.ArgumentParser()
parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248), help='An address to set')
args = parser.parse_args()
ADDRESS1 = 1
ADDRESS2 = args.address
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
minimalmodbus.PARITY=serial.PARITY_NONE
minimalmodbus.STOPBITS = 2
minimalmodbus.BAUDRATE=19200
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
def scanModbus():
for i in range(1, 248):
try:
print('Trying address: ' + str(i))
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)
addressRead = sensor.read_register(0, functioncode=3)
if(i == addressRead):
print('FOUND!')
return (True, i)
except (IOError):
print("nope...")
pass
return (False, 0)
# sensor.debug=True
(found, i) = scanModbus()
if found:
print('Found sensor at address: ' + str(i))
try:
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)
print("writing new address: " + str(ADDRESS2))
sensor.write_register(0, value=ADDRESS2, functioncode=6)
sleep(0.2)
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2)
print("reading address from holding register: ")
print(sensor.read_register(0, functioncode=3))
except:
print "Could not change the address. Check your connections"
else:
print('No sensor on the bus found') | #!/usr/bin/python
"""Looks for sensor on the bus and changes it's address to the one specified on command line"""
import argparse
import minimalmodbus
import serial
from time import sleep
parser = argparse.ArgumentParser()
parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248), help='An address to set')
args = parser.parse_args()
ADDRESS1 = 1
ADDRESS2 = args.address
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
minimalmodbus.PARITY=serial.PARITY_NONE
minimalmodbus.STOPBITS = 2
minimalmodbus.BAUDRATE=19200
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
def scanModbus():
for i in range(1, 248):
try:
print('Trying address: ' + str(i))
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)
addressRead = sensor.read_register(0, functioncode=3)
if(i == addressRead):
print('FOUND!')
return (True, i)
except (IOError):
print("nope...")
pass
return (False, 0)
# sensor.debug=True
(found, i) = scanModbus()
if found:
print('Found sensor at address: ' + str(i))
try:
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)
print("writing new address: " + str(ADDRESS2))
sensor.write_register(0, value=ADDRESS2, functioncode=6)
sleep(0.2)
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2)
print("reading address from holding register: ")
print(sensor.read_register(0, functioncode=3))
except:
print "Could not change the address. Check your connections"
else:
print('No sensor on the bus found') | en | 0.795104 | #!/usr/bin/python Looks for sensor on the bus and changes it's address to the one specified on command line # sensor.debug=True | 3.121598 | 3 |
python/ray/tune/tests/test_tune_save_restore.py | mgelbart/ray | 22 | 537 | # coding: utf-8
import os
import pickle
import shutil
import tempfile
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.utils import validate_save_restore
class SerialTuneRelativeLocalDirTest(unittest.TestCase):
local_mode = True
prefix = "Serial"
class MockTrainable(Trainable):
_name = "MockTrainable"
def setup(self, config):
self.state = {"hi": 1}
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def save_checkpoint(self, checkpoint_dir):
checkpoint_path = os.path.join(
checkpoint_dir, "checkpoint-{}".format(self._iteration)
)
with open(checkpoint_path, "wb") as f:
pickle.dump(self.state, f)
return checkpoint_path
def load_checkpoint(self, checkpoint_path):
with open(checkpoint_path, "rb") as f:
extra_data = pickle.load(f)
self.state.update(extra_data)
def setUp(self):
self.absolute_local_dir = None
ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode)
def tearDown(self):
if self.absolute_local_dir is not None:
shutil.rmtree(self.absolute_local_dir, ignore_errors=True)
self.absolute_local_dir = None
ray.shutdown()
# Without this line, test_tune_server.testAddTrial would fail.
_register_all()
def _get_trial_dir(self, absoulte_exp_dir):
print("looking for", self.MockTrainable._name)
print("in", os.listdir(absoulte_exp_dir))
trial_dirname = next(
(
child_dir
for child_dir in os.listdir(absoulte_exp_dir)
if (
os.path.isdir(os.path.join(absoulte_exp_dir, child_dir))
and child_dir.startswith(self.MockTrainable._name)
)
)
)
trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname)
return trial_dirname, trial_absolute_dir
def _train(self, exp_name, local_dir, absolute_local_dir):
(trial,) = tune.run(
self.MockTrainable,
name=exp_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=local_dir,
config={"env": "CartPole-v0", "log_level": "DEBUG"},
).trials
exp_dir = os.path.join(absolute_local_dir, exp_name)
_, abs_trial_dir = self._get_trial_dir(exp_dir)
self.assertIsNone(trial.error_file)
self.assertEqual(trial.local_dir, exp_dir)
self.assertEqual(trial.logdir, abs_trial_dir)
self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir)
self.assertTrue(os.path.isdir(exp_dir))
self.assertTrue(os.path.isdir(abs_trial_dir))
self.assertTrue(
os.path.isfile(
os.path.join(abs_trial_dir, "checkpoint_000001/checkpoint-1")
)
)
def _restore(self, exp_name, local_dir, absolute_local_dir):
trial_name, abs_trial_dir = self._get_trial_dir(
os.path.join(absolute_local_dir, exp_name)
)
checkpoint_path = os.path.join(
local_dir, exp_name, trial_name, "checkpoint_000001/checkpoint-1"
) # Relative checkpoint path
# The file tune would find. The absolute checkpoint path.
tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path))
self.assertTrue(
os.path.isfile(tune_find_file), "{} is not exist!".format(tune_find_file)
)
(trial,) = tune.run(
self.MockTrainable,
name=exp_name,
stop={"training_iteration": 2}, # train one more iteration.
restore=checkpoint_path, # Restore the checkpoint
config={"env": "CartPole-v0", "log_level": "DEBUG"},
).trials
self.assertIsNone(trial.error_file)
def testDottedRelativePath(self):
local_dir = "./test_dotted_relative_local_dir"
exp_name = self.prefix + "DottedRelativeLocalDir"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testRelativePath(self):
local_dir = "test_relative_local_dir"
exp_name = self.prefix + "RelativePath"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTildeAbsolutePath(self):
local_dir = "~/test_tilde_absolute_local_dir"
exp_name = self.prefix + "TildeAbsolutePath"
absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir))
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTempfile(self):
local_dir = tempfile.mkdtemp()
exp_name = self.prefix + "Tempfile"
self.absolute_local_dir = local_dir
self._train(exp_name, local_dir, local_dir)
self._restore(exp_name, local_dir, local_dir)
def testCheckpointWithNoop(self):
"""Tests that passing the checkpoint_dir right back works."""
class MockTrainable(Trainable):
def setup(self, config):
pass
def step(self):
return {"score": 1}
def save_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "wb") as f:
pickle.dump("test", f)
return checkpoint_dir
def load_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "rb") as f:
x = pickle.load(f)
assert x == "test"
return checkpoint_dir
validate_save_restore(MockTrainable)
validate_save_restore(MockTrainable, use_object_store=True)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| # coding: utf-8
import os
import pickle
import shutil
import tempfile
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.utils import validate_save_restore
class SerialTuneRelativeLocalDirTest(unittest.TestCase):
local_mode = True
prefix = "Serial"
class MockTrainable(Trainable):
_name = "MockTrainable"
def setup(self, config):
self.state = {"hi": 1}
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def save_checkpoint(self, checkpoint_dir):
checkpoint_path = os.path.join(
checkpoint_dir, "checkpoint-{}".format(self._iteration)
)
with open(checkpoint_path, "wb") as f:
pickle.dump(self.state, f)
return checkpoint_path
def load_checkpoint(self, checkpoint_path):
with open(checkpoint_path, "rb") as f:
extra_data = pickle.load(f)
self.state.update(extra_data)
def setUp(self):
self.absolute_local_dir = None
ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode)
def tearDown(self):
if self.absolute_local_dir is not None:
shutil.rmtree(self.absolute_local_dir, ignore_errors=True)
self.absolute_local_dir = None
ray.shutdown()
# Without this line, test_tune_server.testAddTrial would fail.
_register_all()
def _get_trial_dir(self, absoulte_exp_dir):
print("looking for", self.MockTrainable._name)
print("in", os.listdir(absoulte_exp_dir))
trial_dirname = next(
(
child_dir
for child_dir in os.listdir(absoulte_exp_dir)
if (
os.path.isdir(os.path.join(absoulte_exp_dir, child_dir))
and child_dir.startswith(self.MockTrainable._name)
)
)
)
trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname)
return trial_dirname, trial_absolute_dir
def _train(self, exp_name, local_dir, absolute_local_dir):
(trial,) = tune.run(
self.MockTrainable,
name=exp_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=local_dir,
config={"env": "CartPole-v0", "log_level": "DEBUG"},
).trials
exp_dir = os.path.join(absolute_local_dir, exp_name)
_, abs_trial_dir = self._get_trial_dir(exp_dir)
self.assertIsNone(trial.error_file)
self.assertEqual(trial.local_dir, exp_dir)
self.assertEqual(trial.logdir, abs_trial_dir)
self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir)
self.assertTrue(os.path.isdir(exp_dir))
self.assertTrue(os.path.isdir(abs_trial_dir))
self.assertTrue(
os.path.isfile(
os.path.join(abs_trial_dir, "checkpoint_000001/checkpoint-1")
)
)
def _restore(self, exp_name, local_dir, absolute_local_dir):
trial_name, abs_trial_dir = self._get_trial_dir(
os.path.join(absolute_local_dir, exp_name)
)
checkpoint_path = os.path.join(
local_dir, exp_name, trial_name, "checkpoint_000001/checkpoint-1"
) # Relative checkpoint path
# The file tune would find. The absolute checkpoint path.
tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path))
self.assertTrue(
os.path.isfile(tune_find_file), "{} is not exist!".format(tune_find_file)
)
(trial,) = tune.run(
self.MockTrainable,
name=exp_name,
stop={"training_iteration": 2}, # train one more iteration.
restore=checkpoint_path, # Restore the checkpoint
config={"env": "CartPole-v0", "log_level": "DEBUG"},
).trials
self.assertIsNone(trial.error_file)
def testDottedRelativePath(self):
local_dir = "./test_dotted_relative_local_dir"
exp_name = self.prefix + "DottedRelativeLocalDir"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testRelativePath(self):
local_dir = "test_relative_local_dir"
exp_name = self.prefix + "RelativePath"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTildeAbsolutePath(self):
local_dir = "~/test_tilde_absolute_local_dir"
exp_name = self.prefix + "TildeAbsolutePath"
absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir))
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTempfile(self):
local_dir = tempfile.mkdtemp()
exp_name = self.prefix + "Tempfile"
self.absolute_local_dir = local_dir
self._train(exp_name, local_dir, local_dir)
self._restore(exp_name, local_dir, local_dir)
def testCheckpointWithNoop(self):
"""Tests that passing the checkpoint_dir right back works."""
class MockTrainable(Trainable):
def setup(self, config):
pass
def step(self):
return {"score": 1}
def save_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "wb") as f:
pickle.dump("test", f)
return checkpoint_dir
def load_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "rb") as f:
x = pickle.load(f)
assert x == "test"
return checkpoint_dir
validate_save_restore(MockTrainable)
validate_save_restore(MockTrainable, use_object_store=True)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| en | 0.875749 | # coding: utf-8 # Without this line, test_tune_server.testAddTrial would fail. # Relative checkpoint path # The file tune would find. The absolute checkpoint path. # train one more iteration. # Restore the checkpoint Tests that passing the checkpoint_dir right back works. | 2.271999 | 2 |
src/manifest.py | silent1mezzo/lightsaber | 13 | 538 | MANIFEST = {
"hilt": {
"h1": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (110, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (141, 141, 141), # 8d8d8d
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal/Salvaged materials",
},
"h2": {
"offsets": {"blade": 20, "button": {"x": (8, 8), "y": (100, 105)}},
"colours": {
"primary": (112, 112, 112), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (212, 175, 55), # 000000
},
"length": 24,
"materials": "Alloy metal and carbon composite",
},
"h3": {
"offsets": {"blade": 0, "button": {"x": (10, 10), "y": (100, 118)}},
"colours": {
"primary": (157, 157, 157), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h4": {
"offsets": {"blade": 7, "button": {"x": (8, 9), "y": (92, 100)}},
"colours": {
"primary": (0, 0, 0), # 000000
"secondary": (157, 157, 157), # 9d9d9d
"tertiary": (180, 97, 19), # b46113
},
"length": 13,
"materials": "Alloy metal",
},
"h5": {
"offsets": {"blade": 0, "button": {"x": (8, 8), "y": (92, 105)}},
"colours": {
"primary": (111, 111, 111), # 6f6f6f
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h6": {
"offsets": {"blade": 2, "button": {"x": (8, 9), "y": (112, 113)}},
"colours": {
"primary": (120, 120, 120), # 787878
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 22,
"materials": "Alloy metal/Salvaged materials",
},
"h7": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (105, 113)}},
"colours": {
"primary": (192, 192, 192), # c0c0c0
"secondary": (255, 215, 0), # ffd700
"tertiary": (0, 0, 0), # 000000
},
"length": 22,
"materials": "Alloy metal and Gold",
},
"h8": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (100, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (180, 97, 19), # b46113
"tertiary": (0, 0, 0), # 000000
},
"length": 24,
"materials": "Alloy metal/Copper",
},
},
"blade": {
"b1": {"colour": "Red", "crystal": "Adegan crystal", "type": "Sith"},
"b2": {"colour": "Blue", "crystal": "Zophis crystal", "type": "Jedi"},
"b3": {"colour": "Green", "crystal": "Nishalorite stone", "type": "Jedi"},
"b4": {"colour": "Yellow", "crystal": "Kimber stone", "type": "Jedi"},
"b5": {"colour": "White", "crystal": "Dragite gem", "type": "Jedi"},
"b6": {"colour": "Purple", "crystal": "Krayt dragon pearl", "type": "Jedi"},
"b7": {"colour": "Blue/Green", "crystal": "Dantari crystal", "type": "Jedi"},
"b8": {
"colour": "Orange",
"crystal": ["Ilum crystal", "Ultima Pearl"],
"type": "Sith",
},
"b9": {
"colour": "Black",
"crystal": "Obsidian",
"type": ["Jedi", "Mandalorian"],
},
},
"pommel": {
"p1": {"length": 5,},
"p2": {"length": 14,},
"p3": {"length": 3,},
"p4": {"length": 8,},
"p5": {"length": 5,},
"p6": {"length": 5,},
"p7": {"length": 8,},
},
# These are lightsabers for a specific Jedi or Sith. Should use their name instead of
"unique_urls": {""},
}
| MANIFEST = {
"hilt": {
"h1": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (110, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (141, 141, 141), # 8d8d8d
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal/Salvaged materials",
},
"h2": {
"offsets": {"blade": 20, "button": {"x": (8, 8), "y": (100, 105)}},
"colours": {
"primary": (112, 112, 112), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (212, 175, 55), # 000000
},
"length": 24,
"materials": "Alloy metal and carbon composite",
},
"h3": {
"offsets": {"blade": 0, "button": {"x": (10, 10), "y": (100, 118)}},
"colours": {
"primary": (157, 157, 157), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h4": {
"offsets": {"blade": 7, "button": {"x": (8, 9), "y": (92, 100)}},
"colours": {
"primary": (0, 0, 0), # 000000
"secondary": (157, 157, 157), # 9d9d9d
"tertiary": (180, 97, 19), # b46113
},
"length": 13,
"materials": "Alloy metal",
},
"h5": {
"offsets": {"blade": 0, "button": {"x": (8, 8), "y": (92, 105)}},
"colours": {
"primary": (111, 111, 111), # 6f6f6f
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h6": {
"offsets": {"blade": 2, "button": {"x": (8, 9), "y": (112, 113)}},
"colours": {
"primary": (120, 120, 120), # 787878
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 22,
"materials": "Alloy metal/Salvaged materials",
},
"h7": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (105, 113)}},
"colours": {
"primary": (192, 192, 192), # c0c0c0
"secondary": (255, 215, 0), # ffd700
"tertiary": (0, 0, 0), # 000000
},
"length": 22,
"materials": "Alloy metal and Gold",
},
"h8": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (100, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (180, 97, 19), # b46113
"tertiary": (0, 0, 0), # 000000
},
"length": 24,
"materials": "Alloy metal/Copper",
},
},
"blade": {
"b1": {"colour": "Red", "crystal": "Adegan crystal", "type": "Sith"},
"b2": {"colour": "Blue", "crystal": "Zophis crystal", "type": "Jedi"},
"b3": {"colour": "Green", "crystal": "Nishalorite stone", "type": "Jedi"},
"b4": {"colour": "Yellow", "crystal": "Kimber stone", "type": "Jedi"},
"b5": {"colour": "White", "crystal": "Dragite gem", "type": "Jedi"},
"b6": {"colour": "Purple", "crystal": "Krayt dragon pearl", "type": "Jedi"},
"b7": {"colour": "Blue/Green", "crystal": "Dantari crystal", "type": "Jedi"},
"b8": {
"colour": "Orange",
"crystal": ["Ilum crystal", "Ultima Pearl"],
"type": "Sith",
},
"b9": {
"colour": "Black",
"crystal": "Obsidian",
"type": ["Jedi", "Mandalorian"],
},
},
"pommel": {
"p1": {"length": 5,},
"p2": {"length": 14,},
"p3": {"length": 3,},
"p4": {"length": 8,},
"p5": {"length": 5,},
"p6": {"length": 5,},
"p7": {"length": 8,},
},
# These are lightsabers for a specific Jedi or Sith. Should use their name instead of
"unique_urls": {""},
}
| en | 0.316315 | # d8d8d8 # 8d8d8d # b46113 # 707070 # 000000 # 000000 # 707070 # 000000 # b46113 # 000000 # 9d9d9d # b46113 # 6f6f6f # 000000 # b46113 # 787878 # 000000 # b46113 # c0c0c0 # ffd700 # 000000 # d8d8d8 # b46113 # 000000 # These are lightsabers for a specific Jedi or Sith. Should use their name instead of | 1.261851 | 1 |
tests/gpuarray/test_basic_ops.py | canyon289/Theano-PyMC | 1 | 539 | import numpy as np
import pytest
import theano
import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
TestAlloc,
TestComparison,
TestJoinAndSplit,
TestReshape,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
GpuAlloc,
GpuAllocEmpty,
GpuContiguous,
GpuEye,
GpuFromHost,
GpuJoin,
GpuReshape,
GpuSplit,
GpuToGpu,
GpuTri,
HostFromGpu,
gpu_contiguous,
gpu_join,
host_from_gpu,
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())
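# Helper: compile a function in the GPU-enabled mode by default, allowing
# in-place operation on its inputs (accept_inplace=True).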
def inplace_func(
inputs,
outputs,
mode=None,
allow_input_downcast=False,
on_unused_input="raise",
name=None,
):
if mode is None:
mode = mode_with_gpu
return theano.function(
inputs,
outputs,
mode=mode,
allow_input_downcast=allow_input_downcast,
accept_inplace=True,
on_unused_input=on_unused_input,
name=name,
)
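# Helper: create a shared variable, preferring the GPU shared constructor and
# falling back to the regular tensor/scalar constructors when the value is not
# accepted by the GPU one.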
def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
from theano.tensor.sharedvar import scalar_constructor, tensor_constructor
for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor):
try:
return c(
value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs
)
except TypeError:
continue
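# Helper: build a random pygpu GpuArray with values in [-1, 1) of the given
# shape, dtype (defaults to floatX) and array class, allocated on the test
# context.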
def rand_gpuarray(*shape, **kwargs):
r = rng.rand(*shape) * 2 - 1
dtype = kwargs.pop("dtype", theano.config.floatX)
cls = kwargs.pop("cls", None)
if len(kwargs) != 0:
        raise TypeError("Unexpected argument %s" % list(kwargs.keys())[0])
return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name))
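# Factory that builds a test-case class: every entry of `cases` is compiled
# once without and once with the GPU mode, the GPU function must contain
# `gpu_op`, and the outputs (or the type of a raised exception) must match the
# reference run.  Optional `checks` callbacks can assert extra properties of
# the inputs/outputs.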
def makeTester(
name,
op,
gpu_op,
cases,
checks=None,
mode_gpu=mode_with_gpu,
mode_nogpu=mode_without_gpu,
skip=False,
eps=1e-10,
):
if checks is None:
checks = {}
_op = op
_gpu_op = gpu_op
_cases = cases
_skip = skip
_checks = checks
class Checker(utt.OptimizationTestMixin):
op = staticmethod(_op)
gpu_op = staticmethod(_gpu_op)
cases = _cases
skip = _skip
checks = _checks
def setup_method(self):
eval(self.__class__.__module__ + "." + self.__class__.__name__)
def test_all(self):
if skip:
pytest.skip(skip)
for testname, inputs in cases.items():
for _ in range(len(inputs)):
if type(inputs[_]) is float:
inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX)
self.run_case(testname, inputs)
def run_case(self, testname, inputs):
inputs_ref = [theano.shared(inp) for inp in inputs]
inputs_tst = [theano.shared(inp) for inp in inputs]
try:
node_ref = safe_make_node(self.op, *inputs_ref)
node_tst = safe_make_node(self.op, *inputs_tst)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while making " "a node with inputs %s"
) % (self.gpu_op, testname, inputs)
exc.args += (err_msg,)
raise
try:
f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while trying to " "make a Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
self.assertFunctionContains1(f_tst, self.gpu_op)
ref_e = None
try:
expecteds = f_ref()
except Exception as exc:
ref_e = exc
try:
variables = f_tst()
except Exception as exc:
if ref_e is None:
err_msg = (
"Test %s::%s: exception when calling the " "Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
else:
# if we raised an exception of the same type we're good.
if isinstance(exc, type(ref_e)):
return
else:
err_msg = (
"Test %s::%s: exception raised during test "
"call was not the same as the reference "
"call (got: %s, expected %s)"
% (self.gpu_op, testname, type(exc), type(ref_e))
)
exc.args += (err_msg,)
raise
for i, (variable, expected) in enumerate(zip(variables, expecteds)):
condition = (
variable.dtype != expected.dtype
or variable.shape != expected.shape
or not TensorType.values_eq_approx(variable, expected)
)
assert not condition, (
"Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)."
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
)
)
for description, check in self.checks.items():
assert check(inputs, variables), (
"Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)"
) % (self.op, testname, description, inputs, variables)
Checker.__name__ = name
if hasattr(Checker, "__qualname__"):
Checker.__qualname__ = name
return Checker
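# Round-trip transfer tests: values moved host->GPU (GpuFromHost) and
# GPU->host (host_from_gpu) must be preserved exactly, including for
# GPU-to-GPU copies and strided views.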
def test_transfer_cpu_gpu():
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def test_transfer_gpu_gpu():
g = GpuArrayType(
dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
)()
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
mode = mode_with_gpu.excluding(
"cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
)
f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuToGpu)
fv = f(gv)
assert GpuArrayType.values_eq(fv, gv)
def test_transfer_strided():
    # This is just to ensure that the transfers work in Theano;
    # libgpuarray has a much more comprehensive suite of tests to
    # ensure correctness.
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 8), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
av = av[:, ::2]
gv = gv[:, ::2]
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
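# Reference implementation for GpuAlloc: an empty GpuArray of the requested
# shape filled with the scalar value x.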
def gpu_alloc_expected(x, *shp):
g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
g[:] = x
return g
TestGpuAlloc = makeTester(
name="GpuAllocTester",
# The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args) + 1,
gpu_op=GpuAlloc(test_ctx_name),
cases=dict(
correct01=(rand(), np.int32(7)),
# just gives a DeepCopyOp with possibly wrong results on the CPU
# correct01_bcast=(rand(1), np.int32(7)),
correct02=(rand(), np.int32(4), np.int32(7)),
correct12=(rand(7), np.int32(4), np.int32(7)),
correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
bad_shape12=(rand(7), np.int32(7), np.int32(5)),
),
)
class TestGPUAlloc(TestAlloc):
dtype = "float32"
mode = mode_with_gpu
shared = staticmethod(gpuarray_shared_constructor)
allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]
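# GpuAllocEmpty returns uninitialized memory, so only the shape and dtype of
# the result can be checked; identical alloc nodes should also be merged into
# a single apply node.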
def test_alloc_empty():
for dt in ["float32", "int8"]:
f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
assert len(f.maker.fgraph.apply_nodes) == 1
out = f()
assert out.shape == (2, 3)
assert out.dtype == dt
f = theano.function(
[],
[
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
],
)
out = f()
assert out[0].shape == (3, 2)
assert out[0].dtype == "uint64"
assert out[1].shape == (3, 2)
assert out[1].dtype == "uint64"
assert (
len(
[
node
for node in f.maker.fgraph.apply_nodes
if isinstance(node.op, GpuAllocEmpty)
]
)
== 1
)
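# With the default optimizations the shape of a GPU variable is read through
# Shape_i/MakeVector nodes; disabling local_shape_to_shape_i leaves a single
# Shape node in the graph.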
def test_shape():
x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])()
v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
if theano.config.mode != "FAST_COMPILE":
assert len(topo) == 4
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.Shape_i)
assert isinstance(topo[3].op, tt.opt.MakeVector)
mode = mode_with_gpu.excluding("local_shape_to_shape_i")
f = theano.function([x], x.shape, mode=mode)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
assert len(topo) == 1
assert isinstance(topo[0].op, tt.Shape)
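# gpu_contiguous must return a C-contiguous result even when fed a strided
# view of a GPU array.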
def test_gpu_contiguous():
a = tt.fmatrix("a")
i = tt.iscalar("i")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
# The reshape is needed otherwise we make the subtensor on the CPU
# to transfer less data.
f = theano.function(
[a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
)
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
assert any([isinstance(node.op, GpuContiguous) for node in topo])
assert f(a_val, 1).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
class TestGPUReshape(TestReshape):
def setup_method(self):
self.shared = gpuarray_shared_constructor
self.op = GpuReshape
self.mode = mode_with_gpu
self.ignore_topo = (
HostFromGpu,
GpuFromHost,
theano.compile.DeepCopyOp,
GpuDimShuffle,
GpuElemwise,
tt.opt.Shape_i,
tt.opt.MakeVector,
)
assert self.op == GpuReshape
class TestGPUComparison(TestComparison):
def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor
self.dtypes = ["float64", "float32"]
class TestGPUJoinAndSplit(TestJoinAndSplit):
def setup_method(self):
self.mode = mode_with_gpu.excluding("constant_folding")
self.join_op = GpuJoin()
self.split_op_class = GpuSplit
# Use join instead of MakeVector since there is no MakeVector on GPU
self.make_vector_op = GpuJoin()
# this is to avoid errors with limited devices
self.floatX = "float32"
self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"]
def shared(x, **kwargs):
return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs)
self.shared = shared
def test_gpusplit_opt(self):
# Test that we move the node to the GPU
# Also test float16 computation at the same time.
rng = np.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype("float16"))
o = tt.Split(2)(m, 0, [2, 2])
assert o[0].dtype == "float16"
f = theano.function([], o, mode=self.mode)
assert any(
[
isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()
]
)
o1, o2 = f()
assert np.allclose(o1, m.get_value(borrow=True)[:2])
assert np.allclose(o2, m.get_value(borrow=True)[2:])
def test_gpujoin_gpualloc():
a = tt.fmatrix("a")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
b = tt.fmatrix("b")
b_val = np.asarray(np.random.rand(3, 5), dtype="float32")
f = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu
)
f_gpu = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
)
f_gpu2 = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
)
assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpueye():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])
for dtype in ["float32", "int32", "float16"]:
check(dtype, 3)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
def test_hostfromgpu_shape_i():
# Test that the shape is lifted over hostfromgpu
m = mode_with_gpu.including(
"local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize"
)
a = tt.fmatrix("a")
ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
av = np.asarray(np.random.rand(5, 4), dtype="float32")
cv = gpuarray.asarray(
np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
)
f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())
f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(av)) == (5, 4)
f = theano.function([ca], host_from_gpu(ca), mode=m)
assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()]
f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, theano.compile.Shape_i)
assert isinstance(topo[1].op, theano.compile.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(cv)) == (5, 4)
def test_Gpujoin_inplace():
# Test Gpujoin to work inplace.
#
# This function tests the case when several elements are passed to the
# Gpujoin function but all except one of them are empty. In this case
# Gpujoin should work inplace and the output should be the view of the
# non-empty element.
s = tt.lscalar()
data = np.array([3, 4, 5], dtype=theano.config.floatX)
x = gpuarray_shared_constructor(data, borrow=True)
z = tt.zeros((s,))
join = GpuJoin(view=0)
c = join(0, x, z)
f = theano.function([s], theano.Out(c, borrow=True))
if not isinstance(mode_with_gpu, theano.compile.DebugMode):
assert x.get_value(borrow=True, return_internal_type=True) is f(0)
assert np.allclose(f(0), [3, 4, 5])
def test_gpu_tril_triu():
def check_l(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.tril(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
def check_u(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.triu(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
utt.seed_rng()
test_rng = np.random.RandomState(seed=utt.fetch_seed())
for dtype in ["float64", "float32", "float16"]:
# try a big one
m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
def test_gputri():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
for dtype in ["float64", "float32", "int32", "float16"]:
# try a big one
check(dtype, 1000, 1000, 0)
check(dtype, 1000, 1000, -400)
check(dtype, 1000, 1000, 400)
check(dtype, 5)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
| import numpy as np
import pytest
import theano
import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
TestAlloc,
TestComparison,
TestJoinAndSplit,
TestReshape,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
GpuAlloc,
GpuAllocEmpty,
GpuContiguous,
GpuEye,
GpuFromHost,
GpuJoin,
GpuReshape,
GpuSplit,
GpuToGpu,
GpuTri,
HostFromGpu,
gpu_contiguous,
gpu_join,
host_from_gpu,
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())
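# Helper that compiles a theano function in the GPU test mode by default and
# accepts graphs containing inplace operations.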
def inplace_func(
inputs,
outputs,
mode=None,
allow_input_downcast=False,
on_unused_input="raise",
name=None,
):
if mode is None:
mode = mode_with_gpu
return theano.function(
inputs,
outputs,
mode=mode,
allow_input_downcast=allow_input_downcast,
accept_inplace=True,
on_unused_input=on_unused_input,
name=name,
)
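# Shared-variable constructor that tries the GPU constructor first and falls back
# to the regular tensor/scalar constructors if the value is not supported.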
def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
from theano.tensor.sharedvar import scalar_constructor, tensor_constructor
for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor):
try:
return c(
value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs
)
except TypeError:
continue
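# Random gpuarray with values in [-1, 1) of the given shape and dtype, allocated
# on the test context.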
def rand_gpuarray(*shape, **kwargs):
r = rng.rand(*shape) * 2 - 1
dtype = kwargs.pop("dtype", theano.config.floatX)
cls = kwargs.pop("cls", None)
if len(kwargs) != 0:
raise TypeError("Unexpected argument %s", list(kwargs.keys())[0])
return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name))
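# Factory for a test-case class: each case is compiled both without the GPU mode
# (op) and with it (gpu_op is expected in the optimized graph), and the outputs of
# the two functions are compared.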
def makeTester(
name,
op,
gpu_op,
cases,
checks=None,
mode_gpu=mode_with_gpu,
mode_nogpu=mode_without_gpu,
skip=False,
eps=1e-10,
):
if checks is None:
checks = {}
_op = op
_gpu_op = gpu_op
_cases = cases
_skip = skip
_checks = checks
class Checker(utt.OptimizationTestMixin):
op = staticmethod(_op)
gpu_op = staticmethod(_gpu_op)
cases = _cases
skip = _skip
checks = _checks
def setup_method(self):
eval(self.__class__.__module__ + "." + self.__class__.__name__)
def test_all(self):
if skip:
pytest.skip(skip)
for testname, inputs in cases.items():
for _ in range(len(inputs)):
if type(inputs[_]) is float:
inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX)
self.run_case(testname, inputs)
def run_case(self, testname, inputs):
inputs_ref = [theano.shared(inp) for inp in inputs]
inputs_tst = [theano.shared(inp) for inp in inputs]
try:
node_ref = safe_make_node(self.op, *inputs_ref)
node_tst = safe_make_node(self.op, *inputs_tst)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while making " "a node with inputs %s"
) % (self.gpu_op, testname, inputs)
exc.args += (err_msg,)
raise
try:
f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while trying to " "make a Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
self.assertFunctionContains1(f_tst, self.gpu_op)
ref_e = None
try:
expecteds = f_ref()
except Exception as exc:
ref_e = exc
try:
variables = f_tst()
except Exception as exc:
if ref_e is None:
err_msg = (
"Test %s::%s: exception when calling the " "Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
else:
# if we raised an exception of the same type we're good.
if isinstance(exc, type(ref_e)):
return
else:
err_msg = (
"Test %s::%s: exception raised during test "
"call was not the same as the reference "
"call (got: %s, expected %s)"
% (self.gpu_op, testname, type(exc), type(ref_e))
)
exc.args += (err_msg,)
raise
for i, (variable, expected) in enumerate(zip(variables, expecteds)):
condition = (
variable.dtype != expected.dtype
or variable.shape != expected.shape
or not TensorType.values_eq_approx(variable, expected)
)
assert not condition, (
"Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)."
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
)
)
for description, check in self.checks.items():
assert check(inputs, variables), (
"Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)"
) % (self.op, testname, description, inputs, variables)
Checker.__name__ = name
if hasattr(Checker, "__qualname__"):
Checker.__qualname__ = name
return Checker
def test_transfer_cpu_gpu():
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def test_transfer_gpu_gpu():
g = GpuArrayType(
dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
)()
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
mode = mode_with_gpu.excluding(
"cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
)
f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuToGpu)
fv = f(gv)
assert GpuArrayType.values_eq(fv, gv)
def test_transfer_strided():
# This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suite of tests to
# ensure correctness
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 8), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
av = av[:, ::2]
gv = gv[:, ::2]
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
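# Expected result of a GPU alloc: an empty gpuarray of the requested shape on the
# test context, filled with the value x.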
def gpu_alloc_expected(x, *shp):
g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
g[:] = x
return g
TestGpuAlloc = makeTester(
name="GpuAllocTester",
# The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args) + 1,
gpu_op=GpuAlloc(test_ctx_name),
cases=dict(
correct01=(rand(), np.int32(7)),
# just gives a DeepCopyOp with possibly wrong results on the CPU
# correct01_bcast=(rand(1), np.int32(7)),
correct02=(rand(), np.int32(4), np.int32(7)),
correct12=(rand(7), np.int32(4), np.int32(7)),
correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
bad_shape12=(rand(7), np.int32(7), np.int32(5)),
),
)
class TestGPUAlloc(TestAlloc):
dtype = "float32"
mode = mode_with_gpu
shared = staticmethod(gpuarray_shared_constructor)
allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]
def test_alloc_empty():
for dt in ["float32", "int8"]:
f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
assert len(f.maker.fgraph.apply_nodes) == 1
out = f()
assert out.shape == (2, 3)
assert out.dtype == dt
f = theano.function(
[],
[
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
],
)
out = f()
assert out[0].shape == (3, 2)
assert out[0].dtype == "uint64"
assert out[1].shape == (3, 2)
assert out[1].dtype == "uint64"
assert (
len(
[
node
for node in f.maker.fgraph.apply_nodes
if isinstance(node.op, GpuAllocEmpty)
]
)
== 1
)
def test_shape():
x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])()
v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
if theano.config.mode != "FAST_COMPILE":
assert len(topo) == 4
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.Shape_i)
assert isinstance(topo[3].op, tt.opt.MakeVector)
mode = mode_with_gpu.excluding("local_shape_to_shape_i")
f = theano.function([x], x.shape, mode=mode)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
assert len(topo) == 1
assert isinstance(topo[0].op, tt.Shape)
def test_gpu_contiguous():
a = tt.fmatrix("a")
i = tt.iscalar("i")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
# The reshape is needed otherwise we make the subtensor on the CPU
# to transfer less data.
f = theano.function(
[a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
)
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
assert any([isinstance(node.op, GpuContiguous) for node in topo])
assert f(a_val, 1).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
class TestGPUReshape(TestReshape):
def setup_method(self):
self.shared = gpuarray_shared_constructor
self.op = GpuReshape
self.mode = mode_with_gpu
self.ignore_topo = (
HostFromGpu,
GpuFromHost,
theano.compile.DeepCopyOp,
GpuDimShuffle,
GpuElemwise,
tt.opt.Shape_i,
tt.opt.MakeVector,
)
assert self.op == GpuReshape
class TestGPUComparison(TestComparison):
def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor
self.dtypes = ["float64", "float32"]
class TestGPUJoinAndSplit(TestJoinAndSplit):
def setup_method(self):
self.mode = mode_with_gpu.excluding("constant_folding")
self.join_op = GpuJoin()
self.split_op_class = GpuSplit
# Use join instead of MakeVector since there is no MakeVector on GPU
self.make_vector_op = GpuJoin()
# this is to avoid errors with limited devices
self.floatX = "float32"
self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"]
def shared(x, **kwargs):
return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs)
self.shared = shared
def test_gpusplit_opt(self):
# Test that we move the node to the GPU
# Also test float16 computation at the same time.
rng = np.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype("float16"))
o = tt.Split(2)(m, 0, [2, 2])
assert o[0].dtype == "float16"
f = theano.function([], o, mode=self.mode)
assert any(
[
isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()
]
)
o1, o2 = f()
assert np.allclose(o1, m.get_value(borrow=True)[:2])
assert np.allclose(o2, m.get_value(borrow=True)[2:])
def test_gpujoin_gpualloc():
a = tt.fmatrix("a")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
b = tt.fmatrix("b")
b_val = np.asarray(np.random.rand(3, 5), dtype="float32")
f = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu
)
f_gpu = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
)
f_gpu2 = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
)
assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpueye():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])
for dtype in ["float32", "int32", "float16"]:
check(dtype, 3)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
def test_hostfromgpu_shape_i():
# Test that the shape is lifted over hostfromgpu
m = mode_with_gpu.including(
"local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize"
)
a = tt.fmatrix("a")
ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
av = np.asarray(np.random.rand(5, 4), dtype="float32")
cv = gpuarray.asarray(
np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
)
f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())
f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(av)) == (5, 4)
f = theano.function([ca], host_from_gpu(ca), mode=m)
assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()]
f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, theano.compile.Shape_i)
assert isinstance(topo[1].op, theano.compile.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(cv)) == (5, 4)
def test_Gpujoin_inplace():
# Test Gpujoin to work inplace.
#
# This function tests the case when several elements are passed to the
# Gpujoin function but all except one of them are empty. In this case
# Gpujoin should work inplace and the output should be the view of the
# non-empty element.
s = tt.lscalar()
data = np.array([3, 4, 5], dtype=theano.config.floatX)
x = gpuarray_shared_constructor(data, borrow=True)
z = tt.zeros((s,))
join = GpuJoin(view=0)
c = join(0, x, z)
f = theano.function([s], theano.Out(c, borrow=True))
if not isinstance(mode_with_gpu, theano.compile.DebugMode):
assert x.get_value(borrow=True, return_internal_type=True) is f(0)
assert np.allclose(f(0), [3, 4, 5])
def test_gpu_tril_triu():
def check_l(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.tril(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
def check_u(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.triu(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
utt.seed_rng()
test_rng = np.random.RandomState(seed=utt.fetch_seed())
for dtype in ["float64", "float32", "float16"]:
# try a big one
m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
def test_gputri():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
for dtype in ["float64", "float32", "int32", "float16"]:
# try a big one
check(dtype, 1000, 1000, 0)
check(dtype, 1000, 1000, -400)
check(dtype, 1000, 1000, 400)
check(dtype, 5)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
| en | 0.916843 | # Don't import test classes otherwise they get tested as part of the file # if we raised an exception of the same type we're good. # This is just to ensure that it works in theano # libgpuarray has a much more comprehensive suit of tests to # ensure correctness # The +1 is there to allow the lift to the GPU. # just gives a DeepCopyOp with possibly wrong results on the CPU # correct01_bcast=(rand(1), np.int32(7)), # The reshape is needed otherwise we make the subtensor on the CPU # to transfer less data. # Use join instead of MakeVector since there is no MakeVector on GPU # this is to avoid errors with limited devices # Test that we move the node to the GPU # Also test float16 computation at the same time. # Theano does not accept None as a tensor. # So we must use a real value. # Currently DebugMode does not support None as inputs even if this is # allowed. # M != N, k = 0 # N == M, k != 0 # N < M, k != 0 # N > M, k != 0 # k > M, -k > N, k > M, k > N # Test that the shape is lifted over hostfromgpu # Test Gpujoin to work inplace. # # This function tests the case when several elements are passed to the # Gpujoin function but all except one of them are empty. In this case # Gpujoin should work inplace and the output should be the view of the # non-empty element. # try a big one # Theano does not accept None as a tensor. # So we must use a real value. # Currently DebugMode does not support None as inputs even if this is # allowed. # try a big one # M != N, k = 0 # N == M, k != 0 # N < M, k != 0 # N > M, k != 0 # k > M, -k > N, k > M, k > N | 2.143452 | 2 |
apps/interactor/tests/commander/commands/test_animations.py | Djelibeybi/photons | 51 | 540 | <gh_stars>10-100
# coding: spec
from interactor.commander.store import store, load_commands
from photons_app.mimic.event import Events
from photons_app import helpers as hp
from photons_canvas.points.simple_messages import Set64
from unittest import mock
import pytest
@pytest.fixture()
def store_clone():
load_commands()
return store.clone()
@pytest.fixture()
def final_future():
fut = hp.create_future()
try:
yield fut
finally:
fut.cancel()
@pytest.fixture()
async def sender(devices, final_future):
async with devices.for_test(final_future) as sender:
yield sender
@pytest.fixture()
async def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender, final_future):
with FakeTime() as t:
async with MockedCallLater(t) as m:
async with server_wrapper(store_clone, sender, final_future) as server:
yield server, m
@pytest.fixture()
def server(make_server):
return make_server[0]
@pytest.fixture()
def m(make_server):
return make_server[1]
@pytest.fixture(autouse=True)
def set_async_timeout(request):
request.applymarker(pytest.mark.async_timeout(15))
describe "Animation Commands":
async it "can get info and help", server, m:
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/help"},
)
assert b"Available animations include" in got
assert b"* dice" in got
assert b"To see options for a particular animation, run this again" in got
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/help", "args": {"animation_name": "dice"}},
)
assert b"dice animation" in got
assert b"This animation has the following options:" in got
assert b"colour range options" in got
async it "can control an animation", server, m:
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
identity = "first"
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"identity": identity}},
)
assert "animations" in got
assert got["animations"] == [identity]
assert got["started"] == identity
identity2 = "second"
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"identity": identity2}},
)
assert "animations" in got
identities = [identity, identity2]
assert got["animations"] == identities
assert got["started"] == identity2
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info == {"animations": {identity: mock.ANY, identity2: mock.ANY}, "paused": []}
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity}},
json_output={"animations": identities, "paused": [identity], "pausing": [identity]},
)
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity2}},
json_output={
"animations": identities,
"paused": identities,
"pausing": [identity2],
},
)
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": identity2}},
json_output={
"animations": identities,
"paused": [identity],
"resuming": [identity2],
},
)
# pause multiple
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identities}},
json_output={"animations": identities, "paused": identities, "pausing": identities},
)
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": identities}},
json_output={
"animations": identities,
"paused": [],
"resuming": identities,
},
)
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity}},
json_output={"animations": identities, "paused": [identity], "pausing": [identity]},
)
# info
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info["animations"] == {identity: mock.ANY, identity2: mock.ANY}
assert info["paused"] == [identity]
# stop
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/stop", "args": {"stop": identity}},
json_output={
"animations": [identity, identity2],
"paused": [identity],
"stopping": [identity],
},
)
await m.add(0.5)
# info
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info["animations"] == {identity2: mock.ANY}
assert info["paused"] == []
async it "pausing an animation actually pauses the animation", devices, server, m:
tile = devices["tile"]
io = tile.io["MEMORY"]
store = devices.store(tile)
store.clear()
first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64)
# start
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"animations": [["balls", {"every": 3}]]}},
)
identity = got["started"]
await first_set_64
now = store.count(Events.INCOMING(tile, io, pkt=Set64))
assert now > 0
await m.add(5)
now2 = store.count(Events.INCOMING(tile, io, pkt=Set64))
assert now2 > now
identity = got["started"]
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > now
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": [identity]}},
)
await m.add(5)
store.clear()
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": [identity]}},
)
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0
# stop
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/stop", "args": {"stop": [identity]}},
)
store.clear()
await m.add(5)
store.clear()
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0
# info
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
async it "can get information", server, m:
# start
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"animations": [["balls", {"every": 0.3}]]}},
)
identity = got["started"]
info = await server.assertCommand("/v1/lifx/command", {"command": "animation/info"})
assert info["paused"] == []
assert identity in info["animations"]
assert info["animations"][identity]["animations_ran"] == 1
assert info["animations"][identity]["current_animation"] == {
"name": "balls",
"options": {
"ball_colors": "<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>",
"fade_amount": 0.02,
"num_balls": 5,
"rate": "<Rate 0.9 -> 1>",
},
"started": mock.ANY,
}
assert info["animations"][identity]["options"]["combined"]
assert "unlocked" in info["animations"][identity]["options"]["pauser"]
assert info["animations"][identity]["options"]["noisy_network"] == 0
specific = await server.assertCommand(
"/v1/lifx/command", {"command": "animation/info", "args": {"identity": identity}}
)
info["animations"][identity]["current_animation"]["started"] = mock.ANY
assert info["animations"][identity] == specific
| # coding: spec
from interactor.commander.store import store, load_commands
from photons_app.mimic.event import Events
from photons_app import helpers as hp
from photons_canvas.points.simple_messages import Set64
from unittest import mock
import pytest
@pytest.fixture()
def store_clone():
load_commands()
return store.clone()
@pytest.fixture()
def final_future():
fut = hp.create_future()
try:
yield fut
finally:
fut.cancel()
@pytest.fixture()
async def sender(devices, final_future):
async with devices.for_test(final_future) as sender:
yield sender
@pytest.fixture()
async def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender, final_future):
with FakeTime() as t:
async with MockedCallLater(t) as m:
async with server_wrapper(store_clone, sender, final_future) as server:
yield server, m
@pytest.fixture()
def server(make_server):
return make_server[0]
@pytest.fixture()
def m(make_server):
return make_server[1]
@pytest.fixture(autouse=True)
def set_async_timeout(request):
request.applymarker(pytest.mark.async_timeout(15))
describe "Animation Commands":
async it "can get info and help", server, m:
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/help"},
)
assert b"Available animations include" in got
assert b"* dice" in got
assert b"To see options for a particular animation, run this again" in got
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/help", "args": {"animation_name": "dice"}},
)
assert b"dice animation" in got
assert b"This animation has the following options:" in got
assert b"colour range options" in got
async it "can control an animation", server, m:
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
identity = "first"
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"identity": identity}},
)
assert "animations" in got
assert got["animations"] == [identity]
assert got["started"] == identity
identity2 = "second"
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"identity": identity2}},
)
assert "animations" in got
identities = [identity, identity2]
assert got["animations"] == identities
assert got["started"] == identity2
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info == {"animations": {identity: mock.ANY, identity2: mock.ANY}, "paused": []}
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity}},
json_output={"animations": identities, "paused": [identity], "pausing": [identity]},
)
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity2}},
json_output={
"animations": identities,
"paused": identities,
"pausing": [identity2],
},
)
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": identity2}},
json_output={
"animations": identities,
"paused": [identity],
"resuming": [identity2],
},
)
# pause multiple
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identities}},
json_output={"animations": identities, "paused": identities, "pausing": identities},
)
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": identities}},
json_output={
"animations": identities,
"paused": [],
"resuming": identities,
},
)
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity}},
json_output={"animations": identities, "paused": [identity], "pausing": [identity]},
)
# info
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info["animations"] == {identity: mock.ANY, identity2: mock.ANY}
assert info["paused"] == [identity]
# stop
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/stop", "args": {"stop": identity}},
json_output={
"animations": [identity, identity2],
"paused": [identity],
"stopping": [identity],
},
)
await m.add(0.5)
# info
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info["animations"] == {identity2: mock.ANY}
assert info["paused"] == []
async it "pausing an animation actually pauses the animation", devices, server, m:
tile = devices["tile"]
io = tile.io["MEMORY"]
store = devices.store(tile)
store.clear()
first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64)
# start
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"animations": [["balls", {"every": 3}]]}},
)
identity = got["started"]
await first_set_64
now = store.count(Events.INCOMING(tile, io, pkt=Set64))
assert now > 0
await m.add(5)
now2 = store.count(Events.INCOMING(tile, io, pkt=Set64))
assert now2 > now
identity = got["started"]
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > now
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": [identity]}},
)
await m.add(5)
store.clear()
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": [identity]}},
)
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0
# stop
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/stop", "args": {"stop": [identity]}},
)
store.clear()
await m.add(5)
store.clear()
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0
# info
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
async it "can get information", server, m:
# start
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"animations": [["balls", {"every": 0.3}]]}},
)
identity = got["started"]
info = await server.assertCommand("/v1/lifx/command", {"command": "animation/info"})
assert info["paused"] == []
assert identity in info["animations"]
assert info["animations"][identity]["animations_ran"] == 1
assert info["animations"][identity]["current_animation"] == {
"name": "balls",
"options": {
"ball_colors": "<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>",
"fade_amount": 0.02,
"num_balls": 5,
"rate": "<Rate 0.9 -> 1>",
},
"started": mock.ANY,
}
assert info["animations"][identity]["options"]["combined"]
assert "unlocked" in info["animations"][identity]["options"]["pauser"]
assert info["animations"][identity]["options"]["noisy_network"] == 0
specific = await server.assertCommand(
"/v1/lifx/command", {"command": "animation/info", "args": {"identity": identity}}
)
info["animations"][identity]["current_animation"]["started"] = mock.ANY
assert info["animations"][identity] == specific | en | 0.446794 | # coding: spec # pause # resume # pause multiple # resume # pause # info # stop # info # start # pause # resume # stop # info # start | 1.981948 | 2 |
.ipython/profile_pytube/startup/init.py | showa-yojyo/dotfiles | 0 | 541 | from pytube import YouTube
def download_video(watch_url):
yt = YouTube(watch_url)
(yt.streams
.filter(progressive=True, file_extension='mp4')
.order_by('resolution')
.desc()
.first()
.download())
| from pytube import YouTube
def download_video(watch_url):
yt = YouTube(watch_url)
(yt.streams
.filter(progressive=True, file_extension='mp4')
.order_by('resolution')
.desc()
.first()
.download())
| none | 1 | 2.810867 | 3 |
|
ament_tools/setup_arguments.py | richmattes/ament_tools | 1 | 542 | <filename>ament_tools/setup_arguments.py
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import distutils.core
import os
try:
import setuptools
except ImportError:
pass
import subprocess
import sys
from threading import Lock
from ament_tools.build_type import get_command_prefix
from ament_tools.helper import quote_shell_command
setup_lock = None
def get_setup_arguments_with_context(build_type, context):
"""
Capture the arguments of the setup() function in the setup.py file.
To provide a custom environment when introspecting the setup() function
a separate Python interpreter is being used which can have an extended
PYTHONPATH etc.
:param build_type: the build type
:param context: the context
:type context: :py:class:`ament_tools.context.Context`
:returns: a dictionary containing the arguments of the setup() function
"""
prefix = get_command_prefix(
'%s__setup' % build_type, context.build_space,
context.build_dependencies)
ament_tools_path = os.path.dirname(os.path.dirname(__file__))
setuppy = os.path.join(context.source_space, 'setup.py')
if os.name == 'nt':
ament_tools_path = ament_tools_path.replace(os.sep, os.altsep)
setuppy = setuppy.replace(os.sep, os.altsep)
code_lines = [
'import sys',
"sys.path.insert(0, '%s')" % ament_tools_path,
'from ament_tools.setup_arguments import get_setup_arguments',
"print(repr(get_setup_arguments('%s')))" % setuppy]
# invoke get_setup_arguments() in a separate interpreter
cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)]
cmd = quote_shell_command(cmd)
result = subprocess.run(
cmd, stdout=subprocess.PIPE, shell=True, check=True)
output = result.stdout.decode()
return ast.literal_eval(output)
def get_setup_arguments(setup_py_path):
"""
Capture the arguments of the setup() function in the setup.py file.
The function is being run within the current Python interpreter.
Therefore the processed setup.py file can not have any additional
dependencies not available in the current environment.
:param setup_py_path: the path to the setup.py file
:returns: a dictionary containing the arguments of the setup() function
"""
global setup_lock
if not setup_lock:
setup_lock = Lock()
assert os.path.basename(setup_py_path) == 'setup.py'
# prevent side effects in other threads
with setup_lock:
# change to the directory containing the setup.py file
old_cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(setup_py_path)))
try:
data = {}
mock_setup = create_mock_setup_function(data)
# replace setup() function of distutils and setuptools
# in order to capture its arguments
try:
distutils_setup = distutils.core.setup
distutils.core.setup = mock_setup
try:
setuptools_setup = setuptools.setup
setuptools.setup = mock_setup
except NameError:
pass
# evaluate the setup.py file
with open('setup.py', 'r') as h:
exec(h.read())
finally:
distutils.core.setup = distutils_setup
try:
setuptools.setup = setuptools_setup
except NameError:
pass
return data
finally:
os.chdir(old_cwd)
def create_mock_setup_function(data):
"""
Create a mock function to capture its arguments.
It can replace either distutils.core.setup or setuptools.setup.
:param data: a dictionary which is updated with the captured arguments
    :returns: a function to replace distutils.core.setup and setuptools.setup
"""
def setup(*args, **kwargs):
if args:
raise RuntimeError(
'setup() function invoked with positional arguments')
if 'name' not in kwargs:
raise RuntimeError(
"setup() function invoked without the keyword argument 'name'")
data.update(kwargs)
return setup
def get_data_files_mapping(data_files):
"""
Transform the data_files structure into a dictionary.
:param data_files: either a list of source files or
a list of tuples where the first element is the destination path and
the second element is a list of source files
:returns: a dictionary mapping the source file to a destination file
"""
mapping = {}
for data_file in data_files:
if isinstance(data_file, tuple):
assert len(data_file) == 2
dest = data_file[0]
assert not os.path.isabs(dest)
sources = data_file[1]
assert isinstance(sources, list)
for source in sources:
assert not os.path.isabs(source)
mapping[source] = os.path.join(dest, os.path.basename(source))
else:
assert not os.path.isabs(data_file)
mapping[data_file] = os.path.basename(data_file)
return mapping
| <filename>ament_tools/setup_arguments.py
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import distutils.core
import os
try:
import setuptools
except ImportError:
pass
import subprocess
import sys
from threading import Lock
from ament_tools.build_type import get_command_prefix
from ament_tools.helper import quote_shell_command
setup_lock = None
def get_setup_arguments_with_context(build_type, context):
"""
Capture the arguments of the setup() function in the setup.py file.
To provide a custom environment when introspecting the setup() function
a separate Python interpreter is being used which can have an extended
PYTHONPATH etc.
:param build_type: the build type
:param context: the context
:type context: :py:class:`ament_tools.context.Context`
:returns: a dictionary containing the arguments of the setup() function
"""
prefix = get_command_prefix(
'%s__setup' % build_type, context.build_space,
context.build_dependencies)
ament_tools_path = os.path.dirname(os.path.dirname(__file__))
setuppy = os.path.join(context.source_space, 'setup.py')
if os.name == 'nt':
ament_tools_path = ament_tools_path.replace(os.sep, os.altsep)
setuppy = setuppy.replace(os.sep, os.altsep)
code_lines = [
'import sys',
"sys.path.insert(0, '%s')" % ament_tools_path,
'from ament_tools.setup_arguments import get_setup_arguments',
"print(repr(get_setup_arguments('%s')))" % setuppy]
# invoke get_setup_arguments() in a separate interpreter
cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)]
cmd = quote_shell_command(cmd)
result = subprocess.run(
cmd, stdout=subprocess.PIPE, shell=True, check=True)
output = result.stdout.decode()
return ast.literal_eval(output)
def get_setup_arguments(setup_py_path):
"""
Capture the arguments of the setup() function in the setup.py file.
The function is being run within the current Python interpreter.
Therefore the processed setup.py file can not have any additional
dependencies not available in the current environment.
:param setup_py_path: the path to the setup.py file
:returns: a dictionary containing the arguments of the setup() function
"""
global setup_lock
if not setup_lock:
setup_lock = Lock()
assert os.path.basename(setup_py_path) == 'setup.py'
# prevent side effects in other threads
with setup_lock:
# change to the directory containing the setup.py file
old_cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(setup_py_path)))
try:
data = {}
mock_setup = create_mock_setup_function(data)
# replace setup() function of distutils and setuptools
# in order to capture its arguments
try:
distutils_setup = distutils.core.setup
distutils.core.setup = mock_setup
try:
setuptools_setup = setuptools.setup
setuptools.setup = mock_setup
except NameError:
pass
# evaluate the setup.py file
with open('setup.py', 'r') as h:
exec(h.read())
finally:
distutils.core.setup = distutils_setup
try:
setuptools.setup = setuptools_setup
except NameError:
pass
return data
finally:
os.chdir(old_cwd)
def create_mock_setup_function(data):
"""
Create a mock function to capture its arguments.
It can replace either distutils.core.setup or setuptools.setup.
:param data: a dictionary which is updated with the captured arguments
    :returns: a function to replace distutils.core.setup and setuptools.setup
"""
def setup(*args, **kwargs):
if args:
raise RuntimeError(
'setup() function invoked with positional arguments')
if 'name' not in kwargs:
raise RuntimeError(
"setup() function invoked without the keyword argument 'name'")
data.update(kwargs)
return setup
def get_data_files_mapping(data_files):
"""
Transform the data_files structure into a dictionary.
:param data_files: either a list of source files or
a list of tuples where the first element is the destination path and
the second element is a list of source files
:returns: a dictionary mapping the source file to a destination file
"""
mapping = {}
for data_file in data_files:
if isinstance(data_file, tuple):
assert len(data_file) == 2
dest = data_file[0]
assert not os.path.isabs(dest)
sources = data_file[1]
assert isinstance(sources, list)
for source in sources:
assert not os.path.isabs(source)
mapping[source] = os.path.join(dest, os.path.basename(source))
else:
assert not os.path.isabs(data_file)
mapping[data_file] = os.path.basename(data_file)
return mapping
| en | 0.731423 | # Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Capture the arguments of the setup() function in the setup.py file. To provide a custom environment when introspecting the setup() function a separate Python interpreter is being used which can have an extended PYTHONPATH etc. :param build_type: the build type :param context: the context :type context: :py:class:`ament_tools.context.Context` :returns: a dictionary containing the arguments of the setup() function # invoke get_setup_arguments() in a separate interpreter Capture the arguments of the setup() function in the setup.py file. The function is being run within the current Python interpreter. Therefore the processed setup.py file can not have any additional dependencies not available in the current environment. :param setup_py_path: the path to the setup.py file :returns: a dictionary containing the arguments of the setup() function # prevent side effects in other threads # change to the directory containing the setup.py file # replace setup() function of distutils and setuptools # in order to capture its arguments # evaluate the setup.py file Create a mock function to capture its arguments. It can replace either distutils.core.setup or setuptools.setup. :param data: a dictionary which is updated with the captured arguments :returns: a function to replace disutils.core.setup and setuptools.setup Transform the data_files structure into a dictionary. :param data_files: either a list of source files or a list of tuples where the first element is the destination path and the second element is a list of source files :returns: a dictionary mapping the source file to a destination file | 2.369 | 2 |
tests/functional/test_soft_round_inverse.py | tallamjr/NeuralCompression | 233 | 543 | <gh_stars>100-1000
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from neuralcompression.functional import soft_round, soft_round_inverse
def test_soft_round_inverse():
x = torch.linspace(-2.0, 2.0, 50)
torch.testing.assert_close(
x,
soft_round_inverse(x, alpha=1e-13),
)
x = torch.tensor([-1.25, -0.75, 0.75, 1.25])
torch.testing.assert_close(
x,
soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0),
)
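    # With a very large alpha, soft_round_inverse is expected to map every x strictly
    # inside a unit interval close to that interval's midpoint, i.e. ceil(x) - 0.5.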
for offset in range(-5, 5):
x = torch.linspace(offset + 0.001, offset + 0.999, 100)
torch.testing.assert_close(
torch.ceil(x) - 0.5,
soft_round_inverse(x, alpha=5000.0),
atol=0.001,
rtol=0.002,
)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from neuralcompression.functional import soft_round, soft_round_inverse
def test_soft_round_inverse():
x = torch.linspace(-2.0, 2.0, 50)
torch.testing.assert_close(
x,
soft_round_inverse(x, alpha=1e-13),
)
x = torch.tensor([-1.25, -0.75, 0.75, 1.25])
torch.testing.assert_close(
x,
soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0),
)
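    # With a very large alpha, soft_round_inverse is expected to map every x strictly
    # inside a unit interval close to that interval's midpoint, i.e. ceil(x) - 0.5.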
for offset in range(-5, 5):
x = torch.linspace(offset + 0.001, offset + 0.999, 100)
torch.testing.assert_close(
torch.ceil(x) - 0.5,
soft_round_inverse(x, alpha=5000.0),
atol=0.001,
rtol=0.002,
) | en | 0.914992 | # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. | 2.190652 | 2 |
dizoo/pybullet/config/hopper_ppo_default_config.py | konnase/DI-engine | 2 | 544 | from easydict import EasyDict
hopper_ppo_default_config = dict(
env=dict(
env_id='HopperMuJoCoEnv-v0',
norm_obs=dict(use_norm=False, ),
norm_reward=dict(use_norm=False, ),
collector_env_num=8,
evaluator_env_num=10,
use_act_scale=True,
n_evaluator_episode=10,
stop_value=3000,
),
policy=dict(
cuda=True,
on_policy=True,
recompute_adv=True,
model=dict(
obs_shape=11,
action_shape=3,
continuous=True,
),
continuous=True,
learn=dict(
epoch_per_collect=10,
batch_size=64,
learning_rate=3e-4,
value_weight=0.5,
entropy_weight=0.0,
clip_ratio=0.2,
adv_norm=True,
value_norm=True,
),
collect=dict(
n_sample=2048,
unroll_len=1,
discount_factor=0.99,
gae_lambda=0.97,
),
eval=dict(evaluator=dict(eval_freq=5000, )),
other=dict(replay_buffer=dict(
replay_buffer_size=10000,
replay_buffer_start_size=0,
), ),
),
)
hopper_ppo_default_config = EasyDict(hopper_ppo_default_config)
main_config = hopper_ppo_default_config
hopper_ppo_create_default_config = dict(
env=dict(
type='pybullet',
import_names=['dizoo.pybullet.envs.pybullet_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(
type='ppo',
import_names=['ding.policy.ppo'],
),
replay_buffer=dict(type='naive', ),
)
hopper_ppo_create_default_config = EasyDict(hopper_ppo_create_default_config)
create_config = hopper_ppo_create_default_config
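
The collect block above sets discount_factor=0.99 and gae_lambda=0.97. A minimal, framework-free sketch of how those two numbers enter generalized advantage estimation (GAE) is shown below; it is illustrative only, since DI-engine's PPO policy computes advantages internally.

import numpy as np

def gae_advantages(rewards, values, dones, gamma=0.99, lam=0.97):
    # values carries one extra bootstrap entry, so len(values) == len(rewards) + 1.
    advantages = np.zeros(len(rewards), dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(rewards))):
        nonterminal = 1.0 - float(dones[t])
        delta = rewards[t] + gamma * values[t + 1] * nonterminal - values[t]
        running = delta + gamma * lam * nonterminal * running
        advantages[t] = running
    return advantages

# advantages + values[:-1] then gives the return targets used by the value loss.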
cisco_sdwan_policy/List/Application.py | ljm625/cisco_sdwan_policy_python | 11 | 545 | import json
from cisco_sdwan_policy.BaseObject import BaseObject
class Application(BaseObject):
def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs):
self.type = "appList"
self.id = id
self.name = name
self.references = reference
self.app_family=is_app_family
self._entries = app_list
self.url = "template/policy/list/app"
super().__init__(**kwargs)
self.modified=False
def get_entries(self):
return self._entries
def set_entries(self,entries):
self.modified=True
self._entries=entries
@classmethod
def from_json(cls,jsonfile,**kwargs):
id = jsonfile["listId"]
name = jsonfile["name"]
references = jsonfile.get("references")
if len(jsonfile["entries"])>0 and jsonfile["entries"][0].get("app"):
appFamily=False
entries = [i["app"] for i in jsonfile["entries"]]
else:
if not jsonfile["entries"][0].get("appFamily"):
return None
else:
appFamily=True
entries = [i["appFamily"] for i in jsonfile["entries"]]
return cls(name,entries,appFamily,id,references,**kwargs)
def to_json(self):
return {
"name":self.name,
"description":"Desc Not Required",
"type":"app",
"entries":[
{"appFamily" if self.app_family else "app":i} for i in self._entries]
        }
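
As a standalone illustration of the payload shape from_json expects (listId, name, and entries keyed by either "app" or "appFamily"), the helper below applies the same rule while also tolerating an empty entries list. It is not part of cisco_sdwan_policy.

def parse_app_list(payload):
    entries = payload.get("entries", [])
    if entries and entries[0].get("app"):
        names, is_family = [e["app"] for e in entries], False
    elif entries and entries[0].get("appFamily"):
        names, is_family = [e["appFamily"] for e in entries], True
    else:
        names, is_family = [], False
    return {"id": payload.get("listId"), "name": payload.get("name"),
            "apps": names, "is_family": is_family}

parse_app_list({"listId": "1", "name": "demo", "entries": [{"app": "ssh"}, {"app": "dns"}]})
# -> {'id': '1', 'name': 'demo', 'apps': ['ssh', 'dns'], 'is_family': False}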
supervisor/docker/dns.py | zeehio/supervisor | 0 | 546 | <filename>supervisor/docker/dns.py
"""DNS docker object."""
import logging
from ..const import ENV_TIME
from ..coresys import CoreSysAttributes
from .interface import DockerInterface
_LOGGER: logging.Logger = logging.getLogger(__name__)
DNS_DOCKER_NAME: str = "hassio_dns"
class DockerDNS(DockerInterface, CoreSysAttributes):
"""Docker Supervisor wrapper for Supervisor DNS."""
@property
def image(self) -> str:
"""Return name of Supervisor DNS image."""
return self.sys_plugins.dns.image
@property
def name(self) -> str:
"""Return name of Docker container."""
return DNS_DOCKER_NAME
def _run(self) -> None:
"""Run Docker image.
        Needs to run inside an executor.
"""
if self._is_running():
return
# Cleanup
self._stop()
# Create & Run container
docker_container = self.sys_docker.run(
self.image,
tag=self.sys_plugins.dns.version.string,
init=False,
dns=False,
ipv4=self.sys_docker.network.dns,
name=self.name,
hostname=self.name.replace("_", "-"),
detach=True,
environment={ENV_TIME: self.sys_config.timezone},
volumes={
str(self.sys_config.path_extern_dns): {"bind": "/config", "mode": "rw"}
},
)
self._meta = docker_container.attrs
_LOGGER.info(
"Starting DNS %s with version %s - %s",
self.image,
self.version,
self.sys_docker.network.dns,
        )
nuitka/codegen/OperatorCodes.py | hclivess/Nuitka | 0 | 547 | <gh_stars>0
# Copyright 2019, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Operator code tables
These are mostly used to look up the Python C/API from operations or a wrapper used.
"""
from nuitka.PythonVersions import python_version
binary_operator_codes = {
# Those commented out in this section have fully specialized variants already.
# "Add" : "PyNumber_Add",
# "Sub" : "PyNumber_Subtract",
# "Div" : "PyNumber_Divide",
# "Mult" : "PyNumber_Multiply",
# "Mod" : "PyNumber_Remainder",
# "Div" : "PyNumber_Divide",
# "FloorDiv" : "PyNumber_FloorDivide",
# "TrueDiv" : "PyNumber_TrueDivide",
# These have their own variants only to make sure the generic code is in-lined
# but the CPython code is not in-lined.
# "Pow" : "PyNumber_Power",
# "IPow" : "PyNumber_InPlacePower",
# The others are generic code and would be faster if they had a specialized variant too.
"LShift": "PyNumber_Lshift",
"RShift": "PyNumber_Rshift",
"BitAnd": "PyNumber_And",
"BitOr": "PyNumber_Or",
"BitXor": "PyNumber_Xor",
"IAdd": "PyNumber_InPlaceAdd",
"ISub": "PyNumber_InPlaceSubtract",
"IMult": "PyNumber_InPlaceMultiply",
"IDiv": "PyNumber_InPlaceDivide",
"IFloorDiv": "PyNumber_InPlaceFloorDivide",
"ITrueDiv": "PyNumber_InPlaceTrueDivide",
"IMod": "PyNumber_InPlaceRemainder",
"ILShift": "PyNumber_InPlaceLshift",
"IRShift": "PyNumber_InPlaceRshift",
"IBitAnd": "PyNumber_InPlaceAnd",
"IBitOr": "PyNumber_InPlaceOr",
"IBitXor": "PyNumber_InPlaceXor",
}
# Python 3.5 only operator
if python_version >= 350:
binary_operator_codes["MatMult"] = "PyNumber_MatrixMultiply"
binary_operator_codes["IMatMult"] = "PyNumber_InPlaceMatrixMultiply"
unary_operator_codes = {
"UAdd": ("PyNumber_Positive", 1),
"USub": ("PyNumber_Negative", 1),
"Invert": ("PyNumber_Invert", 1),
"Repr": ("PyObject_Repr", 1),
"Not": ("UNARY_NOT", 0),
}
rich_comparison_codes = {
"Lt": "LT",
"LtE": "LE",
"Eq": "EQ",
"NotEq": "NE",
"Gt": "GT",
"GtE": "GE",
}
containing_comparison_codes = ("In", "NotIn")
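
These tables are plain lookup dictionaries from AST-style operator names to CPython C-API helpers. A toy illustration of how such a table is typically consumed when emitting C source is sketched below; it is not Nuitka's actual code generator.

def emit_binary_operation(op_name, left_expr, right_expr):
    # e.g. "LShift" -> "PyNumber_Lshift", rendered as a C call on two temporaries.
    c_api_helper = binary_operator_codes[op_name]
    return "%s(%s, %s)" % (c_api_helper, left_expr, right_expr)

print(emit_binary_operation("LShift", "tmp_left", "tmp_right"))
# -> PyNumber_Lshift(tmp_left, tmp_right)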
python_modules/dagster-graphql/dagster_graphql/implementation/external.py | rpatil524/dagster | 1 | 548 | <gh_stars>1-10
import sys
from dagster import check
from dagster.config.validate import validate_config_from_snap
from dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector
from dagster.core.workspace.context import BaseWorkspaceRequestContext
from dagster.utils.error import serializable_error_info_from_exc_info
from graphql.execution.base import ResolveInfo
from .utils import UserFacingGraphQLError, capture_error
def get_full_external_pipeline_or_raise(graphene_info, selector):
from ..schema.errors import GraphenePipelineNotFoundError
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
check.inst_param(selector, "selector", PipelineSelector)
if not graphene_info.context.has_external_pipeline(selector):
raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector))
return graphene_info.context.get_full_external_pipeline(selector)
def get_external_pipeline_or_raise(graphene_info, selector):
from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError
from ..schema.pipelines.pipeline import GraphenePipeline
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
check.inst_param(selector, "selector", PipelineSelector)
full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector)
if selector.solid_selection is None:
return full_pipeline
for solid_name in selector.solid_selection:
if not full_pipeline.has_solid_invocation(solid_name):
raise UserFacingGraphQLError(
GrapheneInvalidSubsetError(
message='Solid "{solid_name}" does not exist in "{pipeline_name}"'.format(
solid_name=solid_name, pipeline_name=selector.pipeline_name
),
pipeline=GraphenePipeline(full_pipeline),
)
)
return get_subset_external_pipeline(graphene_info.context, selector)
def get_subset_external_pipeline(context, selector):
from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError
from ..schema.pipelines.pipeline import GraphenePipeline
check.inst_param(selector, "selector", PipelineSelector)
repository_location = context.get_repository_location(selector.location_name)
try:
external_pipeline = repository_location.get_external_pipeline(selector)
except Exception:
error_info = serializable_error_info_from_exc_info(sys.exc_info())
raise UserFacingGraphQLError(
GrapheneInvalidSubsetError(
message="{message}{cause_message}".format(
message=error_info.message,
cause_message="\n{}".format(error_info.cause.message)
if error_info.cause
else "",
),
pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)),
)
)
return external_pipeline
def ensure_valid_config(external_pipeline, mode, run_config):
from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid
check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
check.opt_str_param(mode, "mode")
# do not type check run_config so that validate_config_from_snap throws
validated_config = validate_config_from_snap(
config_schema_snapshot=external_pipeline.config_schema_snapshot,
config_type_key=external_pipeline.root_config_key_for_mode(mode),
config_value=run_config,
)
if not validated_config.success:
raise UserFacingGraphQLError(
GrapheneRunConfigValidationInvalid.for_validation_errors(
external_pipeline, validated_config.errors
)
)
return validated_config
def get_external_execution_plan_or_raise(
graphene_info,
external_pipeline,
mode,
run_config,
step_keys_to_execute,
known_state,
):
return graphene_info.context.get_external_execution_plan(
external_pipeline=external_pipeline,
run_config=run_config,
mode=mode,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
)
@capture_error
def fetch_repositories(graphene_info):
from ..schema.external import GrapheneRepository, GrapheneRepositoryConnection
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
return GrapheneRepositoryConnection(
nodes=[
GrapheneRepository(repository=repository, repository_location=location)
for location in graphene_info.context.repository_locations
for repository in location.get_repositories().values()
]
)
@capture_error
def fetch_repository(graphene_info, repository_selector):
from ..schema.errors import GrapheneRepositoryNotFoundError
from ..schema.external import GrapheneRepository
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
check.inst_param(repository_selector, "repository_selector", RepositorySelector)
if graphene_info.context.has_repository_location(repository_selector.location_name):
repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name)
if repo_loc.has_repository(repository_selector.repository_name):
return GrapheneRepository(
repository=repo_loc.get_repository(repository_selector.repository_name),
repository_location=repo_loc,
)
return GrapheneRepositoryNotFoundError(
repository_selector.location_name, repository_selector.repository_name
)
@capture_error
def fetch_workspace(workspace_request_context):
from ..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry
check.inst_param(
workspace_request_context, "workspace_request_context", BaseWorkspaceRequestContext
)
nodes = [
GrapheneWorkspaceLocationEntry(entry)
for entry in workspace_request_context.get_workspace_snapshot().values()
]
    return GrapheneWorkspace(locationEntries=nodes)
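
Most functions here follow the same pattern: validate inputs with check.inst_param, raise UserFacingGraphQLError carrying a Graphene error object, and let the capture_error decorator turn that exception into a returned value. A generic, self-contained sketch of that decorator pattern (not Dagster's implementation) looks like this:

from functools import wraps

class UserFacingError(Exception):
    def __init__(self, error_object):
        super().__init__(str(error_object))
        self.error_object = error_object

def capture_error_sketch(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except UserFacingError as exc:
            return exc.error_object      # surfaced to the GraphQL client as data
    return wrapper

@capture_error_sketch
def fetch_thing(name):
    if not name:
        raise UserFacingError({"__typename": "ThingNotFoundError"})
    return {"__typename": "Thing", "name": name}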
tests/test_utils/test_pywriting_utils.py | heylohousing/quickbase-client | 0 | 549 | import os
from tempfile import TemporaryDirectory
from quickbase_client.utils.pywriting_utils import BasicPyFileWriter
from quickbase_client.utils.pywriting_utils import PyPackageWriter
class TestBasicFileWriter:
def test_outputs_lines(self):
w = BasicPyFileWriter()
w.add_line('import abc')
w.add_line('import os').space()
s = w.get_file_as_string()
assert s == 'import abc\nimport os\n'
def test_indent_dedent(self):
w = BasicPyFileWriter()
w.add_line('def foo():').indent().add_line('return 5').dedent().space()
s = w.get_file_as_string()
assert s == 'def foo():\n return 5\n'
def test_use_refs(self):
w = BasicPyFileWriter()
w.add_line('a = "A"')
ref = w.make_ref()
w.add_line('d = "D"')
ref.add_line('b = "B"').add_line('c = "C"')
s = w.get_file_as_string()
lns = s.split('\n')
assert 'a' in lns[0]
assert 'b' in lns[1]
assert 'c' in lns[2]
assert 'd' in lns[3]
class TestPyPackageWriter:
def test_includes_init(self):
with TemporaryDirectory() as d:
w = PyPackageWriter(pkg_name='foo', parent_dir=d)
assert '__init__' in w.modules
assert w.has_module_name('__init__')
assert w.pkg_path == os.path.join(d, 'foo')
w.write()
assert os.path.exists(d)
assert os.path.exists(os.path.join(d, 'foo'))
            assert os.path.exists(os.path.join(d, 'foo', '__init__.py'))
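
The tests above pin down the writer's contract: chainable add_line/indent/dedent/space calls, newline-joined output, and refs that keep inserting lines at a remembered position. One plausible minimal implementation that satisfies exactly these behaviors is sketched below; the real BasicPyFileWriter in quickbase_client may differ.

class MiniPyFileWriter:
    def __init__(self):
        self._lines = []
        self._indent = 0

    def add_line(self, text):
        self._lines.append('    ' * self._indent + text)
        return self

    def indent(self):
        self._indent += 1
        return self

    def dedent(self):
        self._indent = max(0, self._indent - 1)
        return self

    def space(self, n=1):
        self._lines.extend([''] * n)
        return self

    def make_ref(self):
        return _Ref(self, len(self._lines))

    def get_file_as_string(self):
        return '\n'.join(self._lines)

class _Ref:
    def __init__(self, writer, position):
        self._writer, self._position = writer, position

    def add_line(self, text):
        self._writer._lines.insert(self._position, text)
        self._position += 1
        return self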
model/resnet.py | DrMMZ/RetinaNet | 7 | 550 | <filename>model/resnet.py
"""
Residual Networks (ResNet)
"""
# adapted from
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
import tensorflow as tf
def identity_block(
input_tensor,
filters,
stage,
block,
train_bn=False
):
"""
Builds an identity shortcut in a bottleneck building block of a ResNet.
Parameters
----------
input_tensor : tf tensor, [batch_size, height, width, channels]
An input tensor.
filters : list, positive integers
The number of filters in 3 conv layers at the main path, where
last number is equal to input_tensor's channels.
stage : integer
A number in [2,5] used for generating layer names.
block : string
A lowercase letter, used for generating layer names.
train_bn : boolean, optional
Whether one should normalize the layer input by the mean and variance
over the current batch. The default is False, i.e., use the moving
average of mean and variance to normalize the layer input.
Returns
-------
output_tensor : tf tensor, [batch_size, height, width, channels]
The output tensor same shape as input_tensor.
"""
num_filters_1, num_filters_2, num_filters_3 = filters
conv_prefix = 'res' + str(stage) + block + '_branch'
bn_prefix = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(
num_filters_1, (1,1), name=conv_prefix + '2a')(input_tensor)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2a')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2b')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
num_filters_3, (1,1), name=conv_prefix + '2c')(x)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2c')(x, training=train_bn)
x = tf.keras.layers.Add()([input_tensor, x])
output_tensor = tf.keras.layers.Activation(
'relu', name='res' + str(stage) + block + '_out')(x)
return output_tensor
def conv_block(
input_tensor,
filters,
stage,
block,
strides=(2, 2),
train_bn=False
):
"""
Builds a projection shortcut in a bottleneck block of a ResNet.
Parameters
----------
input_tensor : tf tensor, [batch_size, height, width, channels]
An input tensor.
filters : list, positive integers
The number of filters in 3 conv layers at the main path.
stage : integer
A number in [2,5] used for generating layer names.
block : string
A lowercase letter, used for generating layer names.
strides : tuple, integers, optional
The conv layer strides. The default is (2, 2).
train_bn : boolean, optional
Whether one should normalize the layer input by the mean and variance
over the current batch. The default is False, i.e., use the moving
average of mean and variance to normalize the layer input.
Returns
-------
output_tensor : tf tensor
[batch_size, height//strides, width//strides, num_filters_3] where
num_filters_3 is the last number in filters, the output tensor.
"""
num_filters_1, num_filters_2, num_filters_3 = filters
conv_prefix = 'res' + str(stage) + block + '_branch'
bn_prefix = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(
num_filters_1, (1,1), strides, name=conv_prefix + '2a')(input_tensor)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2a')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2b')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
num_filters_3, (1,1), name=conv_prefix + '2c')(x)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2c')(x, training=train_bn)
shortcut = tf.keras.layers.Conv2D(
num_filters_3, (1,1), strides, name=conv_prefix + '1')(input_tensor)
shortcut = tf.keras.layers.BatchNormalization(
name=bn_prefix + '1')(shortcut, training=train_bn)
x = tf.keras.layers.Add()([shortcut, x])
output_tensor = tf.keras.layers.Activation(
'relu', name='res' + str(stage) + block + '_out')(x)
return output_tensor
def backbone_resnet(input_image, architecture, stage5=True, train_bn=False):
"""
Builds a backbone ResNet.
Parameters
----------
input_image : tf tensor, [batch_size, height, width, channels]
An input tensor.
architecture : string
The ResNet architecture in {'resnet50', 'resnet101'}.
stage5 : boolean, optional
        Whether to create stage 5 of the network. The default is True.
train_bn : boolean, optional
Whether one should normalize the layer input by the mean and variance
over the current batch. The default is False, i.e., use the moving
average of mean and variance to normalize the layer input.
Returns
-------
outputs : list
Feature maps at each stage.
"""
assert architecture in ['resnet50', 'resnet101'], \
        'Only supports ResNet50/101'
# stage 1
x = tf.keras.layers.ZeroPadding2D((3,3))(input_image)
x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x)
x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
C1 = x = tf.keras.layers.MaxPooling2D((3,3), (2,2), padding='same')(x)
# stage 2
x = conv_block(
x, [64,64,256], stage=2, block='a', strides=(1,1), train_bn=train_bn)
x = identity_block(x, [64,64,256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(
x, [64,64,256], stage=2, block='c', train_bn=train_bn)
# stage 3
x = conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, [128,128,512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, [128,128,512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(
x, [128,128,512], stage=3, block='d', train_bn=train_bn)
# stage 4
x = conv_block(x, [256,256,1024], stage=4, block='a', train_bn=train_bn)
num_blocks = {'resnet50':5, 'resnet101':22}[architecture]
for i in range(num_blocks):
x = identity_block(
x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn)
C4 = x
# stage 5
if stage5:
x = conv_block(x, [512,512,2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(
x, [512,512,2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(
x, [512,512,2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
    return [C1, C2, C3, C4, C5]
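
A small usage sketch: wiring backbone_resnet into a Keras model to inspect the stage outputs. The 512x512x3 input size is an arbitrary choice for illustration; the C1-C5 feature maps come out at strides 4, 4, 8, 16 and 32 relative to the input.

import tensorflow as tf

inputs = tf.keras.Input(shape=(512, 512, 3))
C1, C2, C3, C4, C5 = backbone_resnet(inputs, architecture='resnet50', stage5=True)
model = tf.keras.Model(inputs, [C1, C2, C3, C4, C5], name='resnet50_backbone')
for name, feature_map in zip(['C1', 'C2', 'C3', 'C4', 'C5'], model.outputs):
    print(name, feature_map.shape)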
src/main/python/rds_log_cat/parser/mysql57.py | Scout24/rds-log-cat | 1 | 551 | <reponame>Scout24/rds-log-cat
from rds_log_cat.parser.parser import Parser, LineParserException
class Mysql57(Parser):
def __init__(self):
Parser.__init__(self)
def compose_timestamp(self, datetime, timezone):
if len(datetime) != 27:
raise LineParserException('wrong length of datetime - wrong date is: ' + datetime)
if not timezone == 'UTC':
raise LineParserException('Only able to parse times in UTC. You gave {}'.format(timezone))
return datetime
def parse(self, line):
"""
parses the fields in line to generate json structure
"""
expected_min_no_fields = 5
if len(line) < expected_min_no_fields:
raise LineParserException('line too short')
pid = line[1]
log_level = line[2].lstrip("[").rstrip("]")
timezone = 'UTC'
return {
'@timestamp': self.compose_timestamp(line[0], timezone),
'log_level': log_level,
'process_id': int(pid),
'message': ' '.join(map(str, line[3:]))
        }
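
A usage sketch with a made-up (but format-correct) MySQL 5.7 error-log line, assuming the base Parser class needs no constructor arguments. The parser receives the line already split into whitespace-separated fields:

parser = Mysql57()
fields = "2021-03-01T12:34:56.123456Z 42 [Note] InnoDB: Buffer pool(s) load completed".split()
print(parser.parse(fields))
# {'@timestamp': '2021-03-01T12:34:56.123456Z', 'log_level': 'Note',
#  'process_id': 42, 'message': 'InnoDB: Buffer pool(s) load completed'}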
corpustools/neighdens/neighborhood_density.py | PhonologicalCorpusTools/CorpusTools | 97 | 552 | from functools import partial
from corpustools.corpus.classes import Word
from corpustools.symbolsim.edit_distance import edit_distance
from corpustools.symbolsim.khorsi import khorsi
from corpustools.symbolsim.phono_edit_distance import phono_edit_distance
from corpustools.symbolsim.phono_align import Aligner
from corpustools.multiproc import filter_mp, score_mp
def _is_edit_distance_neighbor(w, query, sequence_type, max_distance):
w_len = len(getattr(w, sequence_type))
query_len = len(getattr(query, sequence_type))
if w_len > query_len+max_distance:
return False
if w_len < query_len-max_distance:
return False
return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type),
sequence_type, max_distance) <= max_distance
def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance):
return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance
def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance):
return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance
def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None,
algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling',
num_cores = -1, settable_attr = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of all words in the corpus and
adds them as attributes of the words.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor.
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
settable_attr: string
Name of attribute that neighbourhood density results will be assigned to
"""
function = partial(neighborhood_density, corpus_context,
tierdict = tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = neighborhood_density(corpus_context, w, tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
results[str(w)] = [getattr(r, output_format) for r in res[1]]
setattr(w.original, settable_attr.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = [getattr(r, output_format) for r in res[1]]
# setattr(w.original, settable_attr.name, res[0]-1)
# #the -1 is to account for the fact that words are counted as their own neighbour, and this is incorrect
# #subtracting 1 here is easier than fixing the neighbourhood density algorithm
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])),
#corpus_context.attribute.name, n[1][0])
settable_attr.name, n[1][0])
return results
def neighborhood_density(corpus_context, query, tierdict,
algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False,
force_quadratic = False, file_type = None, tier_type=None, sequence_type = None,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of a particular word in the corpus.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose neighborhood density to calculate.
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor
force_quadratic : bool
Force use of the less efficient quadratic algorithm even when finding edit
distance of 1 neighborhoods
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
Returns
-------
tuple(int, set)
Tuple of the number of neighbors and the set of neighbor Words.
"""
matches = []
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors for {}...'.format(query))
call_back(0,len(corpus_context))
cur = 0
if algorithm == 'edit_distance' and max_distance == 1 and not force_quadratic:
return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict,
file_type=file_type, collapse_homophones=collapse_homophones)
if algorithm == 'edit_distance':
is_neighbor = partial(_is_edit_distance_neighbor,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'phono_edit_distance':
is_neighbor = partial(_is_phono_edit_distance_neighbor,
specifier = corpus_context.specifier,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'khorsi':
freq_base = corpus_context.get_frequency_base()
is_neighbor = partial(_is_khorsi_neighbor,
freq_base = freq_base,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
for w in corpus_context:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if not is_neighbor(w, query):
continue
matches.append(w)
neighbors = set(matches)-set([query])
return (len(neighbors), neighbors)
def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type,
tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False):
"""Generates all neighbors of edit distance <= 1 and searches
for them in corpus_context.
Will be faster than neighborhood_density when:
n > m * (1 + s), where
n: number of words in corpus
m: length of query
s: size of segment inventory
"""
neighbors = list()
query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type)
for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type):
if tier_type.att_type == 'tier':
cand_str = trans_delimiter.join(candidate)
else:
cand_str = ''.join(candidate)
if cand_str in tierdict:
for w in tierdict[cand_str]:
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in neighbors):
continue
else:
neighbors.append(w)
return (len(neighbors), neighbors)
def generate_neighbor_candidates(corpus_context, query, sequence_type):
sequence = getattr(query, sequence_type)
yield [str(c) for c in sequence]
for i in range(len(sequence)):
yield [str(c) for c in sequence[:i]] + [str(c) for c in sequence[i+1:]] # deletion
for char in corpus_context.inventory:
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i:]] # insertion
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i+1:]] # substitution
for char in corpus_context.inventory: # final pass to get insertion at len+1
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:]] + [str(char)] # insertion
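
# --- Illustrative aside, not part of the original module ---------------------
# A rough, self-contained check of the speed claim in fast_neighborhood_density's
# docstring: for a 5-segment query and a 26-letter inventory, the generator above
# yields only a few hundred candidate strings, versus scanning every word in the
# corpus. FakeWord and the SimpleNamespace context are stand-ins; the generator
# only touches corpus_context.inventory and the query's sequence attribute.
def _count_edit1_candidates_demo():
    from collections import namedtuple
    from types import SimpleNamespace
    FakeWord = namedtuple('FakeWord', ['transcription'])
    context = SimpleNamespace(inventory=['#'] + list('abcdefghijklmnopqrstuvwxyz'))
    query = FakeWord(transcription=list('kaeti'))
    return sum(1 for _ in generate_neighbor_candidates(context, query, 'transcription'))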
def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1, collapse_homophones = False,
stop_check = None, call_back = None):
function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = find_mutation_minpairs(corpus_context, w,
tier_type=tier_type, collapse_homophones = collapse_homophones)
results[str(w)] = res[1]
setattr(w.original, corpus_context.attribute.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = res[1]#[str(r) for r in res[1]]
# setattr(w.original, corpus_context.attribute.name, res[0])
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0])
return results
def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Find all minimal pairs of the query word based only on segment
mutations (not deletions/insertions)
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose minimal pairs to find
stop_check : callable or None
Optional function to check whether to gracefully terminate early
call_back : callable or None
Optional function to supply progress information during the function
Returns
-------
list
The found minimal pairs for the queried word
"""
matches = []
sequence_type = corpus_context.sequence_type
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors...')
call_back(0,len(corpus_context))
cur = 0
al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1)
for w in corpus_context:
w_sequence = getattr(w, sequence_type)
query_sequence = getattr(query, sequence_type)
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if (len(w_sequence) > len(query_sequence)+1 or
len(w_sequence) < len(query_sequence)-1):
continue
m = al.make_similarity_matrix(query_sequence, w_sequence)
if m[-1][-1]['f'] != 1:
continue
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m in matches):
continue
else:
#matches.append(str(w_sequence))
matches.append(w)
matches = [m.spelling for m in matches]
neighbors = list(set(matches)-set([str(query_sequence)]))
return (len(neighbors), neighbors)
def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None):
if isinstance(query, Word):
query_word = query
else:
if tier_type.att_type == 'spelling':
if file_type == sequence_type:
query_word = Word(**{sequence_type: list(query)})
else:
query_word = query.replace(trans_delimiter, '')
query_word = Word(**{sequence_type: list(query_word)})
elif tier_type.att_type == 'tier':
if file_type == sequence_type:
                query_with_td = '.'.join(query) if '.' not in query else query
for entry in corpus:
corpus_word_with_td = str(getattr(entry, sequence_type))
                    if query_with_td == corpus_word_with_td: # if a word in corpus has the same transcription
return entry # that word in the corpus is to be referred to.
# the following should be run if no word found in corpus with the transcription
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: new_query})
else: # if file contains spelling
try:
query_word = corpus.corpus.find(query)
except KeyError:
# if the word in the file can't be found in the corpus
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: list(new_query)})
return query_word
def parse(word, delimiter):
return word.split(delimiter) if delimiter in word else list(word) | from functools import partial
from corpustools.corpus.classes import Word
from corpustools.symbolsim.edit_distance import edit_distance
from corpustools.symbolsim.khorsi import khorsi
from corpustools.symbolsim.phono_edit_distance import phono_edit_distance
from corpustools.symbolsim.phono_align import Aligner
from corpustools.multiproc import filter_mp, score_mp
def _is_edit_distance_neighbor(w, query, sequence_type, max_distance):
w_len = len(getattr(w, sequence_type))
query_len = len(getattr(query, sequence_type))
if w_len > query_len+max_distance:
return False
if w_len < query_len-max_distance:
return False
return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type),
sequence_type, max_distance) <= max_distance
def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance):
return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance
def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance):
return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance
def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None,
algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling',
num_cores = -1, settable_attr = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of all words in the corpus and
adds them as attributes of the words.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor.
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
settable_attr: string
Name of attribute that neighbourhood density results will be assigned to
"""
function = partial(neighborhood_density, corpus_context,
tierdict = tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = neighborhood_density(corpus_context, w, tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
results[str(w)] = [getattr(r, output_format) for r in res[1]]
setattr(w.original, settable_attr.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = [getattr(r, output_format) for r in res[1]]
# setattr(w.original, settable_attr.name, res[0]-1)
# #the -1 is to account for the fact that words are counted as their own neighbour, and this is incorrect
# #subtracting 1 here is easier than fixing the neighbourhood density algorithm
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])),
#corpus_context.attribute.name, n[1][0])
settable_attr.name, n[1][0])
return results
def neighborhood_density(corpus_context, query, tierdict,
algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False,
force_quadratic = False, file_type = None, tier_type=None, sequence_type = None,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of a particular word in the corpus.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose neighborhood density to calculate.
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor
force_quadratic : bool
Force use of the less efficient quadratic algorithm even when finding edit
distance of 1 neighborhoods
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
Returns
-------
tuple(int, set)
Tuple of the number of neighbors and the set of neighbor Words.
"""
matches = []
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors for {}...'.format(query))
call_back(0,len(corpus_context))
cur = 0
if algorithm == 'edit_distance' and max_distance == 1 and not force_quadratic:
return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict,
file_type=file_type, collapse_homophones=collapse_homophones)
if algorithm == 'edit_distance':
is_neighbor = partial(_is_edit_distance_neighbor,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'phono_edit_distance':
is_neighbor = partial(_is_phono_edit_distance_neighbor,
specifier = corpus_context.specifier,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'khorsi':
freq_base = corpus_context.get_frequency_base()
is_neighbor = partial(_is_khorsi_neighbor,
freq_base = freq_base,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
for w in corpus_context:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if not is_neighbor(w, query):
continue
matches.append(w)
neighbors = set(matches)-set([query])
return (len(neighbors), neighbors)
def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type,
tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False):
"""Generates all neighbors of edit distance <= 1 and searches
for them in corpus_context.
Will be faster than neighborhood_density when:
n > m * (1 + s), where
n: number of words in corpus
m: length of query
s: size of segment inventory
"""
neighbors = list()
query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type)
for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type):
if tier_type.att_type == 'tier':
cand_str = trans_delimiter.join(candidate)
else:
cand_str = ''.join(candidate)
if cand_str in tierdict:
for w in tierdict[cand_str]:
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in neighbors):
continue
else:
neighbors.append(w)
return (len(neighbors), neighbors)
def generate_neighbor_candidates(corpus_context, query, sequence_type):
sequence = getattr(query, sequence_type)
yield [str(c) for c in sequence]
for i in range(len(sequence)):
yield [str(c) for c in sequence[:i]] + [str(c) for c in sequence[i+1:]] # deletion
for char in corpus_context.inventory:
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i:]] # insertion
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i+1:]] # substitution
for char in corpus_context.inventory: # final pass to get insertion at len+1
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:]] + [str(char)] # insertion
def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1, collapse_homophones = False,
stop_check = None, call_back = None):
function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = find_mutation_minpairs(corpus_context, w,
tier_type=tier_type, collapse_homophones = collapse_homophones)
results[str(w)] = res[1]
setattr(w.original, corpus_context.attribute.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = res[1]#[str(r) for r in res[1]]
# setattr(w.original, corpus_context.attribute.name, res[0])
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0])
return results
def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Find all minimal pairs of the query word based only on segment
mutations (not deletions/insertions)
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose minimal pairs to find
stop_check : callable or None
Optional function to check whether to gracefully terminate early
call_back : callable or None
Optional function to supply progress information during the function
Returns
-------
list
The found minimal pairs for the queried word
"""
matches = []
sequence_type = corpus_context.sequence_type
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors...')
call_back(0,len(corpus_context))
cur = 0
al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1)
for w in corpus_context:
w_sequence = getattr(w, sequence_type)
query_sequence = getattr(query, sequence_type)
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if (len(w_sequence) > len(query_sequence)+1 or
len(w_sequence) < len(query_sequence)-1):
continue
m = al.make_similarity_matrix(query_sequence, w_sequence)
if m[-1][-1]['f'] != 1:
continue
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m in matches):
continue
else:
#matches.append(str(w_sequence))
matches.append(w)
matches = [m.spelling for m in matches]
neighbors = list(set(matches)-set([str(query_sequence)]))
return (len(neighbors), neighbors)
def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None):
if isinstance(query, Word):
query_word = query
else:
if tier_type.att_type == 'spelling':
if file_type == sequence_type:
query_word = Word(**{sequence_type: list(query)})
else:
query_word = query.replace(trans_delimiter, '')
query_word = Word(**{sequence_type: list(query_word)})
elif tier_type.att_type == 'tier':
if file_type == sequence_type:
query_with_td = '.'.join(query) if '.' not in query else query
for entry in corpus:
corpus_word_with_td = str(getattr(entry, sequence_type))
if query_with_td == corpus_word_with_td: # if a word in corpus has the same transcription
return entry # that word in the corpus is to be referred to.
# the following should be run if no word found in corpus with the transcription
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: new_query})
else: # if file contains spelling
try:
query_word = corpus.corpus.find(query)
except KeyError:
# if the word in the file can't be found in the corpus
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: list(new_query)})
return query_word
def parse(word, delimiter):
return word.split(delimiter) if delimiter in word else list(word) | en | 0.743961 | Calculate the neighborhood density of all words in the corpus and adds them as attributes of the words. Parameters ---------- corpus_context : CorpusContext Context manager for a corpus algorithm : str The algorithm used to determine distance max_distance : float, optional Maximum edit distance from the queried word to consider a word a neighbor. stop_check : callable, optional Optional function to check whether to gracefully terminate early call_back : callable, optional Optional function to supply progress information during the function settable_attr: string Name of attribute that neighbourhood density results will be assigned to # for w in corpus_context: # if stop_check is not None and stop_check(): # return # cur += 1 # call_back(cur) # res = function(w) # results[str(w)] = [getattr(r, output_format) for r in res[1]] # setattr(w.original, settable_attr.name, res[0]-1) # #the -1 is to account for the fact that words are counted as their own neighbour, and this is incorrect # #subtracting 1 here is easier than fixing the neighbourhood density algorithm #Have to look up the key, then look up the object due to how #multiprocessing pickles objects #corpus_context.attribute.name, n[1][0]) Calculate the neighborhood density of a particular word in the corpus. Parameters ---------- corpus_context : CorpusContext Context manager for a corpus query : Word The word whose neighborhood density to calculate. algorithm : str The algorithm used to determine distance max_distance : float, optional Maximum edit distance from the queried word to consider a word a neighbor force_quadratic : bool Force use of the less efficient quadratic algorithm even when finding edit distance of 1 neighborhoods stop_check : callable, optional Optional function to check whether to gracefully terminate early call_back : callable, optional Optional function to supply progress information during the function Returns ------- tuple(int, set) Tuple of the number of neighbors and the set of neighbor Words. Generates all neighbors of edit distance <= 1 and searches for them in corpus_context. Will be faster than neighborhood_density when: n > m * (1 + s), where n: number of words in corpus m: length of query s: size of segment inventory # deletion # insertion # substitution # final pass to get insertion at len+1 # insertion # for w in corpus_context: # if stop_check is not None and stop_check(): # return # cur += 1 # call_back(cur) # res = function(w) # results[str(w)] = res[1]#[str(r) for r in res[1]] # setattr(w.original, corpus_context.attribute.name, res[0]) #Have to look up the key, then look up the object due to how #multiprocessing pickles objects Find all minimal pairs of the query word based only on segment mutations (not deletions/insertions) Parameters ---------- corpus_context : CorpusContext Context manager for a corpus query : Word The word whose minimal pairs to find stop_check : callable or None Optional function to check whether to gracefully terminate early call_back : callable or None Optional function to supply progress information during the function Returns ------- list The found minimal pairs for the queried word #matches.append(str(w_sequence)) # if a word in corpus has the same transcription # that word in the corpus is to be referred to. # the following should be run if no word found in corpus with the transcription # if file contains spelling # if the word in the file can't be found in the corpus | 2.659056 | 3 |
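A standalone sketch of the edit-distance-1 lookup idea that fast_neighborhood_density in the record above relies on: enumerate every deletion, insertion and substitution of the query and probe a dictionary keyed by transcription, instead of scanning the whole corpus. The inventory and mini-lexicon below are invented toy data, not drawn from the corpus tools shown.

def candidates(seq, inventory):
    seq = list(seq)
    yield tuple(seq)
    for i in range(len(seq)):
        yield tuple(seq[:i] + seq[i + 1:])                 # deletion
        for ch in inventory:
            if ch != seq[i]:
                yield tuple(seq[:i] + [ch] + seq[i:])      # insertion before position i
                yield tuple(seq[:i] + [ch] + seq[i + 1:])  # substitution
    for ch in inventory:
        yield tuple(seq + [ch])                            # insertion at the end

def fast_density(query, lexicon, inventory):
    # lexicon maps transcription tuples to spellings
    neighbors = set()
    for cand in candidates(query, inventory):
        if cand in lexicon and cand != tuple(query):
            neighbors.add(lexicon[cand])
    return len(neighbors), neighbors

inventory = ["k", "a", "t", "s", "m"]
lexicon = {("k", "a", "t"): "cat", ("m", "a", "t"): "mat",
           ("k", "a", "t", "s"): "cats", ("k", "a"): "ka"}
count, found = fast_density(["k", "a", "t"], lexicon, inventory)
print(count, sorted(found))  # 3 ['cats', 'ka', 'mat']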
brokenChains/migrations/0003_auto_20181106_1819.py | bunya017/brokenChains | 1 | 553 | <gh_stars>1-10
# Generated by Django 2.1.1 on 2018-11-06 17:19
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('brokenChains', '0002_auto_20181106_1723'),
]
operations = [
migrations.AlterUniqueTogether(
name='habit',
unique_together={('owner', 'name')},
),
]
| # Generated by Django 2.1.1 on 2018-11-06 17:19
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('brokenChains', '0002_auto_20181106_1723'),
]
operations = [
migrations.AlterUniqueTogether(
name='habit',
unique_together={('owner', 'name')},
),
] | en | 0.769757 | # Generated by Django 2.1.1 on 2018-11-06 17:19 | 1.649725 | 2 |
ex05-td/ex05-td.py | vijaykumarprabhu/rl-course | 0 | 554 | import gym
import numpy as np
from itertools import product
import matplotlib.pyplot as plt
def print_policy(Q, env):
""" This is a helper function to print a nice policy from the Q function"""
moves = [u'←', u'↓',u'→', u'↑']
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
policy = np.chararray(dims, unicode=True)
policy[:] = ' '
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
policy[idx] = moves[np.argmax(Q[s])]
if env.desc[idx] in ['H', 'G']:
policy[idx] = u'·'
print('\n'.join([''.join([u'{:2}'.format(item) for item in row])
for row in policy]))
def plot_V(Q, env):
""" This is a helper function to plot the state values from the Q function"""
fig = plt.figure()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
V = np.zeros(dims)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
V[idx] = np.max(Q[s])
if env.desc[idx] in ['H', 'G']:
V[idx] = 0.
plt.imshow(V, origin='upper',
extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,
cmap=plt.cm.RdYlGn, interpolation='none')
for x, y in product(range(dims[0]), range(dims[1])):
plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]),
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
def plot_Q(Q, env):
""" This is a helper function to plot the Q function """
from matplotlib import colors, patches
fig = plt.figure()
ax = fig.gca()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
up = np.array([[0, 1], [0.5, 0.5], [1,1]])
down = np.array([[0, 0], [0.5, 0.5], [1,0]])
left = np.array([[0, 0], [0.5, 0.5], [0,1]])
right = np.array([[1, 0], [0.5, 0.5], [1,1]])
tri = [left, down, right, up]
pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]
cmap = plt.cm.RdYlGn
norm = colors.Normalize(vmin=.0,vmax=.6)
ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap)
ax.grid(which='major', color='black', linestyle='-', linewidth=2)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
x, y = idx
if env.desc[idx] in ['H', 'G']:
ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0)))
plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0),
horizontalalignment='center',
verticalalignment='center')
continue
for a in range(len(tri)):
ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a])))
plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]),
horizontalalignment='center', verticalalignment='center',
fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal'))
plt.xticks([])
plt.yticks([])
def choose_abs_greedy_action(state, Q, epsilon):
action = None
if np.random.uniform(0, 1) < epsilon:
action = np.random.randint(env.action_space.n)
else:
action = np.argmax(Q[state,:])
return action
def max_action_state(state, Q):
action = np.argmax(Q[state,:])
return Q[state, action]
def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
# TODO: implement the sarsa algorithm
# This is some starting point performing random walks in the environment:
for i in range(num_ep):
s = env.reset()
done = False
a = choose_abs_greedy_action(s, Q, epsilon)
while not done:
s_, r, done, _ = env.step(a)
a_ = choose_abs_greedy_action(s_, Q, epsilon)
#update Q using sarsa
Q[s, a] = Q[s, a] + alpha * (r + (gamma * Q[s_,a_]) - Q[s,a])
s = s_
a = a_
return Q
def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
# TODO: implement the qlearning algorithm
for i in range(num_ep):
s = env.reset()
done = False
while not done:
a = choose_abs_greedy_action(s, Q, epsilon)
s_, r, done, _ = env.step(a)
#update Q using Q learning
Q[s, a] = Q[s, a] + alpha * (r+ ( gamma * max_action_state(s_, Q)) - Q[s,a] )
s = s_
return Q
env=gym.make('FrozenLake-v0')
#env=gym.make('FrozenLake-v0', is_slippery=False)
#env=gym.make('FrozenLake-v0', map_name="8x8")
print("Running sarsa...")
Q = sarsa(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
print("Running qlearning")
Q = qlearning(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
| import gym
import numpy as np
from itertools import product
import matplotlib.pyplot as plt
def print_policy(Q, env):
""" This is a helper function to print a nice policy from the Q function"""
moves = [u'←', u'↓',u'→', u'↑']
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
policy = np.chararray(dims, unicode=True)
policy[:] = ' '
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
policy[idx] = moves[np.argmax(Q[s])]
if env.desc[idx] in ['H', 'G']:
policy[idx] = u'·'
print('\n'.join([''.join([u'{:2}'.format(item) for item in row])
for row in policy]))
def plot_V(Q, env):
""" This is a helper function to plot the state values from the Q function"""
fig = plt.figure()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
V = np.zeros(dims)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
V[idx] = np.max(Q[s])
if env.desc[idx] in ['H', 'G']:
V[idx] = 0.
plt.imshow(V, origin='upper',
extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,
cmap=plt.cm.RdYlGn, interpolation='none')
for x, y in product(range(dims[0]), range(dims[1])):
plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]),
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
def plot_Q(Q, env):
""" This is a helper function to plot the Q function """
from matplotlib import colors, patches
fig = plt.figure()
ax = fig.gca()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
up = np.array([[0, 1], [0.5, 0.5], [1,1]])
down = np.array([[0, 0], [0.5, 0.5], [1,0]])
left = np.array([[0, 0], [0.5, 0.5], [0,1]])
right = np.array([[1, 0], [0.5, 0.5], [1,1]])
tri = [left, down, right, up]
pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]
cmap = plt.cm.RdYlGn
norm = colors.Normalize(vmin=.0,vmax=.6)
ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap)
ax.grid(which='major', color='black', linestyle='-', linewidth=2)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
x, y = idx
if env.desc[idx] in ['H', 'G']:
ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0)))
plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0),
horizontalalignment='center',
verticalalignment='center')
continue
for a in range(len(tri)):
ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a])))
plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]),
horizontalalignment='center', verticalalignment='center',
fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal'))
plt.xticks([])
plt.yticks([])
def choose_abs_greedy_action(state, Q, epsilon):
action = None
if np.random.uniform(0, 1) < epsilon:
action = np.random.randint(env.action_space.n)
else:
action = np.argmax(Q[state,:])
return action
def max_action_state(state, Q):
action = np.argmax(Q[state,:])
return Q[state, action]
def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
# TODO: implement the sarsa algorithm
# This is some starting point performing random walks in the environment:
for i in range(num_ep):
s = env.reset()
done = False
a = choose_abs_greedy_action(s, Q, epsilon)
while not done:
s_, r, done, _ = env.step(a)
a_ = choose_abs_greedy_action(s_, Q, epsilon)
#update Q using sarsa
Q[s, a] = Q[s, a] + alpha * (r + (gamma * Q[s_,a_]) - Q[s,a])
s = s_
a = a_
return Q
def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
# TODO: implement the qlearning algorithm
for i in range(num_ep):
s = env.reset()
done = False
while not done:
a = choose_abs_greedy_action(s, Q, epsilon)
s_, r, done, _ = env.step(a)
#update Q using Q learning
Q[s, a] = Q[s, a] + alpha * (r+ ( gamma * max_action_state(s_, Q)) - Q[s,a] )
s = s_
return Q
env=gym.make('FrozenLake-v0')
#env=gym.make('FrozenLake-v0', is_slippery=False)
#env=gym.make('FrozenLake-v0', map_name="8x8")
print("Running sarsa...")
Q = sarsa(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
print("Running qlearning")
Q = qlearning(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
| en | 0.5422 | This is a helper function to print a nice policy from the Q function This is a helper function to plot the state values from the Q function This is a helper function to plot the Q function #Q = np.zeros((env.observation_space.n, env.action_space.n)) # TODO: implement the sarsa algorithm # This is some starting point performing random walks in the environment: #update Q using sarsa #Q = np.zeros((env.observation_space.n, env.action_space.n)) # TODO: implement the qlearning algorithm #update Q using Q learning #env=gym.make('FrozenLake-v0', is_slippery=False) #env=gym.make('FrozenLake-v0', map_name="8x8") | 3.472321 | 3 |
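A gym-free sketch of the tabular update inside the qlearning() function in the record above, run on an invented four-state corridor (action 1 moves right, action 0 moves left, state 3 is terminal with reward 1). The environment, constants and seed are illustrative assumptions, not part of the original exercise.

import numpy as np

n_states, n_actions = 4, 2
alpha, gamma, epsilon = 0.1, 0.9, 0.2
Q = np.zeros((n_states, n_actions))
rng = np.random.default_rng(0)

def step(s, a):
    s_next = min(s + 1, 3) if a == 1 else max(s - 1, 0)
    return s_next, float(s_next == 3), s_next == 3

for episode in range(500):
    s = 0
    for t in range(200):  # cap episode length so early, mostly random episodes still end
        a = int(rng.integers(n_actions)) if rng.random() < epsilon else int(np.argmax(Q[s]))
        s_next, r, done = step(s, a)
        # Q-learning bootstraps from the greedy value of the next state
        Q[s, a] += alpha * (r + gamma * np.max(Q[s_next]) - Q[s, a])
        s = s_next
        if done:
            break

print(np.argmax(Q, axis=1))  # states 0-2 should end up preferring action 1 (move right)

Replacing np.max(Q[s_next]) with the value of the action actually taken next gives the SARSA variant shown in the same record.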
app/modules/ai_lab/migrations/0003_ailabcasestudy.py | nickmoreton/nhsx-website | 50 | 555 | # Generated by Django 3.0.4 on 2020-07-14 11:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("core", "0026_auto_20200713_1535"),
("ai_lab", "0002_ailabusecase"),
]
operations = [
migrations.CreateModel(
name="AiLabCaseStudy",
fields=[
(
"articlepage_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="core.ArticlePage",
),
),
(
"use_case",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="ai_lab.AiLabUseCase",
),
),
],
options={"abstract": False,},
bases=("core.articlepage", models.Model),
),
]
| # Generated by Django 3.0.4 on 2020-07-14 11:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("core", "0026_auto_20200713_1535"),
("ai_lab", "0002_ailabusecase"),
]
operations = [
migrations.CreateModel(
name="AiLabCaseStudy",
fields=[
(
"articlepage_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="core.ArticlePage",
),
),
(
"use_case",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="ai_lab.AiLabUseCase",
),
),
],
options={"abstract": False,},
bases=("core.articlepage", models.Model),
),
]
| en | 0.796531 | # Generated by Django 3.0.4 on 2020-07-14 11:00 | 1.503369 | 2 |
requests/UpdateWorkbookConnectionRequest.py | divinorum-webb/python-tableau-api | 1 | 556 | <filename>requests/UpdateWorkbookConnectionRequest.py<gh_stars>1-10
from .BaseRequest import BaseRequest
class UpdateWorkbookConnectionRequest(BaseRequest):
"""
Update workbook connection request for sending API requests to Tableau Server.
:param ts_connection: The Tableau Server connection object.
:type ts_connection: class
:param server_address: The new server for the connection.
:type server_address: string
:param port: The new port for the connection.
:type port: string
:param connection_username: The new username for the connection.
:type connection_username: string
:param connection_password: The new password for the connection.
:type connection_password: string
:param embed_password_flag: Boolean; True to embed the password in the connection, False otherwise.
:type embed_password_flag: boolean
"""
def __init__(self,
ts_connection,
server_address=None,
port=None,
connection_username=None,
connection_password=<PASSWORD>,
embed_password_flag=None):
super().__init__(ts_connection)
self._server_address = server_address
self._port = port
self._connection_username = connection_username
self._connection_password = <PASSWORD>
self._embed_password_flag = embed_password_flag
self.base_update_workbook_connection_request
@property
def optional_parameter_keys(self):
return [
'serverAddress',
'serverPort',
'userName',
'password',
'embedPassword'
]
@property
def optional_parameter_values_exist(self):
return [
self._server_address,
self._port,
self._connection_username,
self._connection_password,
True if self._embed_password_flag is not None else None
]
@property
def optional_parameter_values(self):
return [
self._server_address,
self._port,
self._connection_username,
self._connection_password,
self._embed_password_flag
]
@property
def base_update_workbook_connection_request(self):
self._request_body.update({'connection': {}})
return self._request_body
@property
def modified_update_workbook_connection_request(self):
if any(self.optional_parameter_values_exist):
self._request_body['connection'].update(
self._get_parameters_dict(self.optional_parameter_keys,
self.optional_parameter_values))
return self._request_body
@staticmethod
def _get_parameters_dict(param_keys, param_values):
"""Override the inherited _get_parameters_dict() method to allow passing boolean values directly"""
params_dict = {}
for i, key in enumerate(param_keys):
if param_values[i] is not None:
params_dict.update({key: param_values[i]})
return params_dict
def get_request(self):
return self.modified_update_workbook_connection_request
| <filename>requests/UpdateWorkbookConnectionRequest.py<gh_stars>1-10
from .BaseRequest import BaseRequest
class UpdateWorkbookConnectionRequest(BaseRequest):
"""
Update workbook connection request for sending API requests to Tableau Server.
:param ts_connection: The Tableau Server connection object.
:type ts_connection: class
:param server_address: The new server for the connection.
:type server_address: string
:param port: The new port for the connection.
:type port: string
:param connection_username: The new username for the connection.
:type connection_username: string
:param connection_password: The new password for the connection.
:type connection_password: string
:param embed_password_flag: Boolean; True to embed the password in the connection, False otherwise.
:type embed_password_flag: boolean
"""
def __init__(self,
ts_connection,
server_address=None,
port=None,
connection_username=None,
connection_password=<PASSWORD>,
embed_password_flag=None):
super().__init__(ts_connection)
self._server_address = server_address
self._port = port
self._connection_username = connection_username
self._connection_password = <PASSWORD>
self._embed_password_flag = embed_password_flag
self.base_update_workbook_connection_request
@property
def optional_parameter_keys(self):
return [
'serverAddress',
'serverPort',
'userName',
'password',
'embedPassword'
]
@property
def optional_parameter_values_exist(self):
return [
self._server_address,
self._port,
self._connection_username,
self._connection_password,
True if self._embed_password_flag is not None else None
]
@property
def optional_parameter_values(self):
return [
self._server_address,
self._port,
self._connection_username,
self._connection_password,
self._embed_password_flag
]
@property
def base_update_workbook_connection_request(self):
self._request_body.update({'connection': {}})
return self._request_body
@property
def modified_update_workbook_connection_request(self):
if any(self.optional_parameter_values_exist):
self._request_body['connection'].update(
self._get_parameters_dict(self.optional_parameter_keys,
self.optional_parameter_values))
return self._request_body
@staticmethod
def _get_parameters_dict(param_keys, param_values):
"""Override the inherited _get_parameters_dict() method to allow passing boolean values directly"""
params_dict = {}
for i, key in enumerate(param_keys):
if param_values[i] is not None:
params_dict.update({key: param_values[i]})
return params_dict
def get_request(self):
return self.modified_update_workbook_connection_request
| en | 0.64992 | Update workbook connection request for sending API requests to Tableau Server. :param ts_connection: The Tableau Server connection object. :type ts_connection: class :param server_address: The new server for the connection. :type server_address: string :param port: The new port for the connection. :type port: string :param connection_username: The new username for the connection. :type connection_username: string :param connection_password: The new password for the connection. :type connection_password: string :param embed_password_flag: Boolean; True to embed the password in the connection, False otherwise. :type embed_password_flag: boolean Override the inherited _get_parameters_dict() method to allow passing boolean values directly | 3.247556 | 3 |
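The request class above boils down to a "keep only the optional parameters that were supplied" pattern. A self-contained sketch of that pattern, using illustrative names rather than the wrapper's real API, could look like this:

def build_connection_body(server_address=None, server_port=None,
                          user_name=None, password=None, embed_password=None):
    keys = ['serverAddress', 'serverPort', 'userName', 'password', 'embedPassword']
    values = [server_address, server_port, user_name, password, embed_password]
    # None means "not supplied", so an explicit False (e.g. embed_password) is still sent
    return {'connection': {k: v for k, v in zip(keys, values) if v is not None}}

print(build_connection_body(server_address='db.example.com', embed_password=False))
# {'connection': {'serverAddress': 'db.example.com', 'embedPassword': False}}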
frappe/website/doctype/website_route_meta/test_website_route_meta.py | oryxsolutions/frappe | 0 | 557 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.utils import set_request
from frappe.website.serve import get_response
test_dependencies = ["Blog Post"]
class TestWebsiteRouteMeta(unittest.TestCase):
def test_meta_tag_generation(self):
blogs = frappe.get_all(
"Blog Post", fields=["name", "route"], filters={"published": 1, "route": ("!=", "")}, limit=1
)
blog = blogs[0]
# create meta tags for this route
doc = frappe.new_doc("Website Route Meta")
doc.append("meta_tags", {"key": "type", "value": "blog_post"})
doc.append("meta_tags", {"key": "og:title", "value": "My Blog"})
doc.name = blog.route
doc.insert()
# set request on this route
set_request(path=blog.route)
response = get_response()
self.assertTrue(response.status_code, 200)
html = response.get_data().decode()
self.assertTrue("""<meta name="type" content="blog_post">""" in html)
self.assertTrue("""<meta property="og:title" content="My Blog">""" in html)
def tearDown(self):
frappe.db.rollback()
| # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.utils import set_request
from frappe.website.serve import get_response
test_dependencies = ["Blog Post"]
class TestWebsiteRouteMeta(unittest.TestCase):
def test_meta_tag_generation(self):
blogs = frappe.get_all(
"Blog Post", fields=["name", "route"], filters={"published": 1, "route": ("!=", "")}, limit=1
)
blog = blogs[0]
# create meta tags for this route
doc = frappe.new_doc("Website Route Meta")
doc.append("meta_tags", {"key": "type", "value": "blog_post"})
doc.append("meta_tags", {"key": "og:title", "value": "My Blog"})
doc.name = blog.route
doc.insert()
# set request on this route
set_request(path=blog.route)
response = get_response()
self.assertTrue(response.status_code, 200)
html = response.get_data().decode()
self.assertTrue("""<meta name="type" content="blog_post">""" in html)
self.assertTrue("""<meta property="og:title" content="My Blog">""" in html)
def tearDown(self):
frappe.db.rollback()
| en | 0.543477 | # -*- coding: utf-8 -*- # Copyright (c) 2019, Frappe Technologies and Contributors # License: MIT. See LICENSE # create meta tags for this route # set request on this route <meta name="type" content="blog_post"> <meta property="og:title" content="My Blog"> | 2.194778 | 2 |
test/unittests/test_AgRunoff.py | rajadain/gwlf-e | 0 | 558 | <reponame>rajadain/gwlf-e<filename>test/unittests/test_AgRunoff.py
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import AgRunoff
class TestAgRunoff(VariableUnitTest):
# @skip("not ready")
def test_AgRunoff(self):
z = self.z
np.testing.assert_array_almost_equal(
AgRunoff.AgRunoff_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.CN, z.AntMoist_0, z.NUrb,
z.Grow_0, z.Landuse, z.Area),
AgRunoff.AgRunoff(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.CN, z.AntMoist_0, z.NUrb,
z.Grow_0, z.Landuse, z.Area), decimal=7)
| import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import AgRunoff
class TestAgRunoff(VariableUnitTest):
# @skip("not ready")
def test_AgRunoff(self):
z = self.z
np.testing.assert_array_almost_equal(
AgRunoff.AgRunoff_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.CN, z.AntMoist_0, z.NUrb,
z.Grow_0, z.Landuse, z.Area),
AgRunoff.AgRunoff(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.CN, z.AntMoist_0, z.NUrb,
z.Grow_0, z.Landuse, z.Area), decimal=7) | en | 0.213408 | # @skip("not ready") | 1.86825 | 2 |
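The test above checks two implementations of the same computation (AgRunoff_f and AgRunoff) against each other with assert_array_almost_equal. The same pattern, reduced to a self-contained example with an invented function pair, looks like:

import numpy as np

def runoff_loop(precip, coeff):
    out = np.empty_like(precip)
    for i, p in enumerate(precip):
        out[i] = coeff * p ** 2
    return out

def runoff_vectorized(precip, coeff):
    return coeff * precip ** 2

precip = np.linspace(0.0, 5.0, 10)
np.testing.assert_array_almost_equal(
    runoff_loop(precip, 0.3), runoff_vectorized(precip, 0.3), decimal=7)
print("implementations agree to 7 decimal places")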
lingvo/tasks/car/car_layers_test.py | Harshs27/lingvo | 2,611 | 559 | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for car_layers."""
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car import car_layers
class CarLayersTest(test_utils.TestCase):
def _testNestedOutShape(self, p, input_shape, expected_shape):
batch_size, num_points, _ = input_shape
g = tf.Graph()
with g.as_default():
net = p.Instantiate()
input_data = py_utils.NestedMap(
points=tf.random.uniform(input_shape[:-1] + (3,)),
features=tf.random.uniform(input_shape),
padding=tf.zeros((batch_size, num_points), dtype=tf.float32),
label=tf.random.uniform((batch_size,),
minval=0,
maxval=16,
dtype=tf.int32))
result = net.FPropDefaultTheta(input_data)
with self.session(graph=g):
self.evaluate(tf.global_variables_initializer())
np_result = self.evaluate(result)
grouped_points_result = np_result.grouped_points
self.assertEqual(grouped_points_result.features.shape,
expected_shape.grouped_points.features)
self.assertEqual(grouped_points_result.points.shape,
expected_shape.grouped_points.points)
self.assertEqual(grouped_points_result.padding.shape,
expected_shape.grouped_points.padding)
query_points_result = np_result.query_points
self.assertEqual(query_points_result.points.shape,
expected_shape.query_points.points)
self.assertEqual(query_points_result.padding.shape,
expected_shape.query_points.padding)
def testSamplingAndGrouping(self):
for num_points in [1024, 256]:
for input_dims in [3, 6, 9]:
for group_size in [32, 64]:
p = car_layers.SamplingAndGroupingLayer.Params().Set(
name='SampleGroupTest',
num_samples=256,
ball_radius=0.2,
group_size=group_size,
sample_neighbors_uniformly=True)
grouped_points_shape = py_utils.NestedMap(
features=(8, 256, group_size, input_dims),
points=(8, 256, group_size, 3),
padding=(8, 256, group_size))
query_points_shape = py_utils.NestedMap(
points=(8, 256, 3), padding=(8, 256))
expected_shape = py_utils.NestedMap({
'grouped_points': grouped_points_shape,
'query_points': query_points_shape
})
self._testNestedOutShape(p, (8, num_points, input_dims),
expected_shape)
if __name__ == '__main__':
tf.test.main()
| # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for car_layers."""
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car import car_layers
class CarLayersTest(test_utils.TestCase):
def _testNestedOutShape(self, p, input_shape, expected_shape):
batch_size, num_points, _ = input_shape
g = tf.Graph()
with g.as_default():
net = p.Instantiate()
input_data = py_utils.NestedMap(
points=tf.random.uniform(input_shape[:-1] + (3,)),
features=tf.random.uniform(input_shape),
padding=tf.zeros((batch_size, num_points), dtype=tf.float32),
label=tf.random.uniform((batch_size,),
minval=0,
maxval=16,
dtype=tf.int32))
result = net.FPropDefaultTheta(input_data)
with self.session(graph=g):
self.evaluate(tf.global_variables_initializer())
np_result = self.evaluate(result)
grouped_points_result = np_result.grouped_points
self.assertEqual(grouped_points_result.features.shape,
expected_shape.grouped_points.features)
self.assertEqual(grouped_points_result.points.shape,
expected_shape.grouped_points.points)
self.assertEqual(grouped_points_result.padding.shape,
expected_shape.grouped_points.padding)
query_points_result = np_result.query_points
self.assertEqual(query_points_result.points.shape,
expected_shape.query_points.points)
self.assertEqual(query_points_result.padding.shape,
expected_shape.query_points.padding)
def testSamplingAndGrouping(self):
for num_points in [1024, 256]:
for input_dims in [3, 6, 9]:
for group_size in [32, 64]:
p = car_layers.SamplingAndGroupingLayer.Params().Set(
name='SampleGroupTest',
num_samples=256,
ball_radius=0.2,
group_size=group_size,
sample_neighbors_uniformly=True)
grouped_points_shape = py_utils.NestedMap(
features=(8, 256, group_size, input_dims),
points=(8, 256, group_size, 3),
padding=(8, 256, group_size))
query_points_shape = py_utils.NestedMap(
points=(8, 256, 3), padding=(8, 256))
expected_shape = py_utils.NestedMap({
'grouped_points': grouped_points_shape,
'query_points': query_points_shape
})
self._testNestedOutShape(p, (8, num_points, input_dims),
expected_shape)
if __name__ == '__main__':
tf.test.main()
| en | 0.804322 | # Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests for car_layers. | 2.343105 | 2 |
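The shapes asserted in the sampling-and-grouping test above can be reproduced with a small framework-free stand-in: pick sample centers from a point cloud and gather up to group_size neighbors within a ball radius. This NumPy sketch only illustrates the expected tensor shapes; it is not the lingvo implementation.

import numpy as np

def sample_and_group(points, num_samples, group_size, radius, rng):
    batch, num_points, _ = points.shape
    centers_idx = rng.integers(num_points, size=(batch, num_samples))
    centers = np.take_along_axis(points, centers_idx[..., None], axis=1)  # (B, S, 3)
    grouped = np.zeros((batch, num_samples, group_size, 3))
    padding = np.ones((batch, num_samples, group_size))  # 1.0 marks an unfilled slot
    for b in range(batch):
        for s in range(num_samples):
            d = np.linalg.norm(points[b] - centers[b, s], axis=-1)
            idx = np.flatnonzero(d <= radius)[:group_size]
            grouped[b, s, :len(idx)] = points[b, idx]
            padding[b, s, :len(idx)] = 0.0
    return centers, grouped, padding

rng = np.random.default_rng(0)
pts = rng.uniform(size=(8, 1024, 3))
centers, grouped, padding = sample_and_group(pts, 256, 32, 0.2, rng)
print(centers.shape, grouped.shape, padding.shape)  # (8, 256, 3) (8, 256, 32, 3) (8, 256, 32)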
starry/_core/ops/lib/include/oblate/tests/test_derivs.py | rodluger/starry | 116 | 560 | <reponame>rodluger/starry<filename>starry/_core/ops/lib/include/oblate/tests/test_derivs.py
import oblate
import numpy as np
import pytest
# TODO!
| import oblate
import numpy as np
import pytest
# TODO! | none | 1 | 0.818186 | 1 |
|
take_snapshot.py | ITCave/sniff-for-changes-in-directory | 0 | 561 | # -*- coding: utf-8 -*-
# @Filename : take_snapshot.py
# @Date : 2019-07-15-13-44
# @Project: ITC-sniff-for-changes-in-directory
# @Author: <NAME>
# @Website: http://itcave.eu
# @Email: <EMAIL>
# @License: MIT
# @Copyright (C) 2019 ITGO <NAME>
# Generic imports
import os
import pickle
import re
import argparse
from datetime import datetime
def clear_path_string(s):
"""
Simple function that removes chars that are not allowed in file names
:param s: path_string
:return: cleaned_path_string
"""
return (re.sub('[^a-zA-Z]+', '#', s)).lower()
def sniff(sniff_path):
"""
Walks the path and stores information about directory content
:param sniff_path: relative or absolute path
:return: void
"""
sniff_path = str(sniff_path).lower()
# Variable in which information will be stored
dir_store = {}
# Recursive loop that walks through all of the subdirectories
for subdir, dirs, files in os.walk(sniff_path):
if subdir not in dir_store:
dir_store[subdir] = {}
dir_store[subdir]['subdirs'] = dirs
dir_store[subdir]['files'] = files
dir_store[subdir]['file_details'] = {}
for file in files:
f_path = os.path.join(subdir, file)
# The information that will be store for each of the files - in this case last file modification date
# Important: it's cross-platform relevant!
modified_date = os.path.getmtime(f_path)
dir_store[subdir]['file_details'][file] = (modified_date,)
# Name of a file in which data will be stored
dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S')
# Save pickled data
with open(dump_name + '.pkl', 'wb') as output:
pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL)
print("Directory Snapshot taken:", dump_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Directory Sniffer')
parser.add_argument('path', help='Path to the directory that you want to take a snapshot of')
args = parser.parse_args()
sniff(args.path)
| # -*- coding: utf-8 -*-
# @Filename : take_snapshot.py
# @Date : 2019-07-15-13-44
# @Project: ITC-sniff-for-changes-in-directory
# @Author: <NAME>
# @Website: http://itcave.eu
# @Email: <EMAIL>
# @License: MIT
# @Copyright (C) 2019 ITGO <NAME>
# Generic imports
import os
import pickle
import re
import argparse
from datetime import datetime
def clear_path_string(s):
"""
Simple function that removes chars that are not allowed in file names
:param s: path_string
:return: cleaned_path_string
"""
return (re.sub('[^a-zA-Z]+', '#', s)).lower()
def sniff(sniff_path):
"""
Walks the path and stores information about directory content
:param sniff_path: relative or absolute path
:return: void
"""
sniff_path = str(sniff_path).lower()
# Variable in which information will be stored
dir_store = {}
# Recursive loop that walks through all of the subdirectories
for subdir, dirs, files in os.walk(sniff_path):
if subdir not in dir_store:
dir_store[subdir] = {}
dir_store[subdir]['subdirs'] = dirs
dir_store[subdir]['files'] = files
dir_store[subdir]['file_details'] = {}
for file in files:
f_path = os.path.join(subdir, file)
# The information that will be store for each of the files - in this case last file modification date
# Important: it's cross-platform relevant!
modified_date = os.path.getmtime(f_path)
dir_store[subdir]['file_details'][file] = (modified_date,)
# Name of a file in which data will be stored
dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S')
# Save pickled data
with open(dump_name + '.pkl', 'wb') as output:
pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL)
print("Directory Snapshot taken:", dump_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Directory Sniffer')
parser.add_argument('path', help='Path to the directory that you want to take a snapshot of')
args = parser.parse_args()
sniff(args.path)
| en | 0.740184 | # -*- coding: utf-8 -*- # @Filename : take_snapshot.py # @Date : 2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory # @Author: <NAME> # @Website: http://itcave.eu # @Email: <EMAIL> # @License: MIT # @Copyright (C) 2019 ITGO <NAME> # Generic imports Simple function that removes chars that are not allowed in file names :param s: path_string :return: cleaned_path_string Walks the path and stores information about directory content :param sniff_path: relative or absolute path :return: void # Variable in which information will be stored # Recursive loop that walks through all of the subdirectories # The information that will be store for each of the files - in this case last file modification date # Important: it's cross-platform relevant! # Name of a file in which data will be stored # Save pickled data | 2.779933 | 3 |
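take_snapshot.py above only writes snapshots; a natural companion, sketched here under the assumption that both inputs were produced by that script, loads two of its pickles and reports files that were added, removed, or changed modification time. The example file names are hypothetical.

import pickle

def diff_snapshots(old_path, new_path):
    with open(old_path, 'rb') as f:
        old = pickle.load(f)
    with open(new_path, 'rb') as f:
        new = pickle.load(f)
    for subdir in sorted(set(old) | set(new)):
        old_files = old.get(subdir, {}).get('file_details', {})
        new_files = new.get(subdir, {}).get('file_details', {})
        for name in sorted(set(old_files) | set(new_files)):
            if name not in old_files:
                print('added   ', subdir, name)
            elif name not in new_files:
                print('removed ', subdir, name)
            elif old_files[name] != new_files[name]:
                print('modified', subdir, name)

# diff_snapshots('c#users#me#project_20190715120000.pkl',
#                'c#users#me#project_20190716120000.pkl')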
nuitka/nodes/GlobalsLocalsNodes.py | juanfra684/Nuitka | 1 | 562 | # Copyright 2020, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Globals/locals/single arg dir nodes
These nodes give access to variables, highly problematic, because using them,
the code may change or access anything about them, so nothing can be trusted
anymore, if we start to not know where their value goes.
The "dir()" call without arguments is reformulated to locals or globals calls.
"""
from .ConstantRefNodes import makeConstantRefNode
from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict
from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase
from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef
class ExpressionBuiltinGlobals(ExpressionBase):
kind = "EXPRESSION_BUILTIN_GLOBALS"
def __init__(self, source_ref):
ExpressionBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
def computeExpressionRaw(self, trace_collection):
return self, None, None
def mayHaveSideEffects(self):
return False
def mayRaiseException(self, exception_type):
return False
class ExpressionBuiltinLocalsBase(ExpressionBase):
# Base classes can be abstract, pylint: disable=abstract-method
__slots__ = ("variable_traces", "locals_scope")
def __init__(self, locals_scope, source_ref):
ExpressionBase.__init__(self, source_ref=source_ref)
self.variable_traces = None
self.locals_scope = locals_scope
def finalize(self):
del self.locals_scope
del self.variable_traces
def mayHaveSideEffects(self):
return False
def mayRaiseException(self, exception_type):
return False
def getVariableTraces(self):
return self.variable_traces
class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_UPDATED"
def __init__(self, locals_scope, source_ref):
ExpressionBuiltinLocalsBase.__init__(
self, locals_scope=locals_scope, source_ref=source_ref
)
assert locals_scope is not None
def getLocalsScope(self):
return self.locals_scope
def computeExpressionRaw(self, trace_collection):
# Just inform the collection that all escaped.
self.variable_traces = trace_collection.onLocalsUsage(
self.getParentVariableProvider()
)
trace_collection.onLocalsDictEscaped(self.locals_scope)
return self, None, None
class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_REF"
def __init__(self, locals_scope, source_ref):
ExpressionBuiltinLocalsBase.__init__(
self, locals_scope=locals_scope, source_ref=source_ref
)
def getLocalsScope(self):
return self.locals_scope
def computeExpressionRaw(self, trace_collection):
if self.locals_scope.isMarkedForPropagation():
result = ExpressionMakeDict(
pairs=(
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=variable_name, source_ref=self.source_ref
),
value=ExpressionTempVariableRef(
variable=variable, source_ref=self.source_ref
),
source_ref=self.source_ref,
)
for variable_name, variable in self.locals_scope.getPropagationVariables().items()
),
source_ref=self.source_ref,
)
new_result = result.computeExpressionRaw(trace_collection)
assert new_result[0] is result
self.finalize()
return result, "new_expression", "Propagated locals dictionary reference."
# Just inform the collection that all escaped unless it is abortative.
if not self.getParent().isStatementReturn():
trace_collection.onLocalsUsage(self.getParentVariableProvider())
return self, None, None
class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_COPY"
def computeExpressionRaw(self, trace_collection):
# Just inform the collection that all escaped.
self.variable_traces = trace_collection.onLocalsUsage(
self.getParentVariableProvider()
)
for variable, variable_trace in self.variable_traces:
if (
not variable_trace.mustHaveValue()
and not variable_trace.mustNotHaveValue()
):
return self, None, None
# Other locals elsewhere.
if variable_trace.getNameUsageCount() > 1:
return self, None, None
pairs = []
for variable, variable_trace in self.variable_traces:
if variable_trace.mustHaveValue():
pairs.append(
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=variable.getName(),
user_provided=True,
source_ref=self.source_ref,
),
value=ExpressionVariableRef(
variable=variable, source_ref=self.source_ref
),
source_ref=self.source_ref,
)
)
# Locals is sorted of course.
def _sorted(pairs):
names = self.getParentVariableProvider().getLocalVariableNames()
return sorted(
pairs,
key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()),
)
result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref)
return result, "new_expression", "Statically predicted locals dictionary."
class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase):
kind = "EXPRESSION_BUILTIN_DIR1"
def computeExpression(self, trace_collection):
# TODO: Quite some cases should be possible to predict and this
# should be using a slot, with "__dir__" being overloaded or not.
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# Any exception may be raised.
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
| # Copyright 2020, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Globals/locals/single arg dir nodes
These nodes give access to variables, highly problematic, because using them,
the code may change or access anything about them, so nothing can be trusted
anymore, if we start to not know where their value goes.
The "dir()" call without arguments is reformulated to locals or globals calls.
"""
from .ConstantRefNodes import makeConstantRefNode
from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict
from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase
from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef
class ExpressionBuiltinGlobals(ExpressionBase):
kind = "EXPRESSION_BUILTIN_GLOBALS"
def __init__(self, source_ref):
ExpressionBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
def computeExpressionRaw(self, trace_collection):
return self, None, None
def mayHaveSideEffects(self):
return False
def mayRaiseException(self, exception_type):
return False
class ExpressionBuiltinLocalsBase(ExpressionBase):
# Base classes can be abstract, pylint: disable=abstract-method
__slots__ = ("variable_traces", "locals_scope")
def __init__(self, locals_scope, source_ref):
ExpressionBase.__init__(self, source_ref=source_ref)
self.variable_traces = None
self.locals_scope = locals_scope
def finalize(self):
del self.locals_scope
del self.variable_traces
def mayHaveSideEffects(self):
return False
def mayRaiseException(self, exception_type):
return False
def getVariableTraces(self):
return self.variable_traces
class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_UPDATED"
def __init__(self, locals_scope, source_ref):
ExpressionBuiltinLocalsBase.__init__(
self, locals_scope=locals_scope, source_ref=source_ref
)
assert locals_scope is not None
def getLocalsScope(self):
return self.locals_scope
def computeExpressionRaw(self, trace_collection):
# Just inform the collection that all escaped.
self.variable_traces = trace_collection.onLocalsUsage(
self.getParentVariableProvider()
)
trace_collection.onLocalsDictEscaped(self.locals_scope)
return self, None, None
class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_REF"
def __init__(self, locals_scope, source_ref):
ExpressionBuiltinLocalsBase.__init__(
self, locals_scope=locals_scope, source_ref=source_ref
)
def getLocalsScope(self):
return self.locals_scope
def computeExpressionRaw(self, trace_collection):
if self.locals_scope.isMarkedForPropagation():
result = ExpressionMakeDict(
pairs=(
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=variable_name, source_ref=self.source_ref
),
value=ExpressionTempVariableRef(
variable=variable, source_ref=self.source_ref
),
source_ref=self.source_ref,
)
for variable_name, variable in self.locals_scope.getPropagationVariables().items()
),
source_ref=self.source_ref,
)
new_result = result.computeExpressionRaw(trace_collection)
assert new_result[0] is result
self.finalize()
return result, "new_expression", "Propagated locals dictionary reference."
# Just inform the collection that all escaped unless it is abortative.
if not self.getParent().isStatementReturn():
trace_collection.onLocalsUsage(self.getParentVariableProvider())
return self, None, None
class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_COPY"
def computeExpressionRaw(self, trace_collection):
# Just inform the collection that all escaped.
self.variable_traces = trace_collection.onLocalsUsage(
self.getParentVariableProvider()
)
for variable, variable_trace in self.variable_traces:
if (
not variable_trace.mustHaveValue()
and not variable_trace.mustNotHaveValue()
):
return self, None, None
# Other locals elsewhere.
if variable_trace.getNameUsageCount() > 1:
return self, None, None
pairs = []
for variable, variable_trace in self.variable_traces:
if variable_trace.mustHaveValue():
pairs.append(
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=variable.getName(),
user_provided=True,
source_ref=self.source_ref,
),
value=ExpressionVariableRef(
variable=variable, source_ref=self.source_ref
),
source_ref=self.source_ref,
)
)
# Locals is sorted of course.
def _sorted(pairs):
names = self.getParentVariableProvider().getLocalVariableNames()
return sorted(
pairs,
key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()),
)
result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref)
return result, "new_expression", "Statically predicted locals dictionary."
class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase):
kind = "EXPRESSION_BUILTIN_DIR1"
def computeExpression(self, trace_collection):
# TODO: Quite some cases should be possible to predict and this
# should be using a slot, with "__dir__" being overloaded or not.
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# Any exception may be raised.
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
| en | 0.884043 | # Copyright 2020, <NAME>, mailto:<EMAIL> # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Globals/locals/single arg dir nodes These nodes give access to variables, highly problematic, because using them, the code may change or access anything about them, so nothing can be trusted anymore, if we start to not know where their value goes. The "dir()" call without arguments is reformulated to locals or globals calls. # Base classes can be abstract, pylint: disable=abstract-method # Just inform the collection that all escaped. # Just inform the collection that all escaped unless it is abortative. # Just inform the collection that all escaped. # Other locals elsewhere. # Locals is sorted of course. # TODO: Quite some cases should be possible to predict and this # should be using a slot, with "__dir__" being overloaded or not. # Any code could be run, note that. # Any exception may be raised. | 1.857975 | 2 |
tests/chainerx_tests/unit_tests/test_scalar.py | yuhonghong66/chainer | 1 | 563 | <reponame>yuhonghong66/chainer
import math
import pytest
import chainerx
def _check_cast_scalar_equals_data(scalar, data):
assert bool(scalar) == bool(data)
assert int(scalar) == int(data)
assert float(scalar) == float(data)
all_scalar_values = [
-2, 1, -1.5, 2.3, True, False, float('inf'), float('nan')]
@pytest.mark.parametrize('value,dtype', [
(0, chainerx.int64),
(-1, chainerx.int64),
(0x7fffffffffffffff, chainerx.int64),
(-0x8000000000000000, chainerx.int64),
(0.0, chainerx.float64),
(float('inf'), chainerx.float64),
(float('nan'), chainerx.float64),
(True, chainerx.bool_),
(False, chainerx.bool_),
])
def test_init_without_dtype(value, dtype):
scalar = chainerx.Scalar(value)
assert scalar.dtype == dtype
if math.isnan(value):
assert math.isnan(scalar.tolist())
else:
assert scalar.tolist() == value
assert isinstance(scalar.tolist(), type(value))
@pytest.mark.parametrize('value,cast_dtype,expected_value', [
(0, chainerx.bool_, False),
(0, chainerx.int8, 0),
(0, chainerx.int16, 0),
(0, chainerx.int32, 0),
(0, chainerx.int64, 0),
(0, chainerx.uint8, 0),
(0, chainerx.float32, 0.0),
(0, chainerx.float64, 0.0),
(0.0, chainerx.bool_, False),
(0.0, chainerx.int8, 0),
(0.0, chainerx.int16, 0),
(0.0, chainerx.int32, 0),
(0.0, chainerx.int64, 0),
(0.0, chainerx.uint8, 0),
(0.0, chainerx.float32, 0.0),
(0.0, chainerx.float64, 0.0),
(1, chainerx.bool_, True),
(1, chainerx.int8, 1),
(1, chainerx.int16, 1),
(1, chainerx.int32, 1),
(1, chainerx.int64, 1),
(1, chainerx.uint8, 1),
(1, chainerx.float32, 1.0),
(1, chainerx.float64, 1.0),
(1.0, chainerx.bool_, True),
(1.0, chainerx.int8, 1),
(1.0, chainerx.int16, 1),
(1.0, chainerx.int32, 1),
(1.0, chainerx.int64, 1),
(1.0, chainerx.uint8, 1),
(1.0, chainerx.float32, 1.0),
(1.0, chainerx.float64, 1.0),
(-1, chainerx.bool_, True),
(-1, chainerx.int8, -1),
(-1, chainerx.int16, -1),
(-1, chainerx.int32, -1),
(-1, chainerx.int64, -1),
(-1, chainerx.uint8, 0xff),
(-1, chainerx.float32, -1.0),
(-1, chainerx.float64, -1.0),
(0x100, chainerx.bool_, True),
(0x100, chainerx.int8, 0),
(0x100, chainerx.int16, 0x100),
(0x100, chainerx.int32, 0x100),
(0x100, chainerx.int64, 0x100),
(0x100, chainerx.uint8, 0),
(0x10000, chainerx.bool_, True),
(0x10000, chainerx.int8, 0),
(0x10000, chainerx.int16, 0),
(0x10000, chainerx.int32, 0x10000),
(0x10000, chainerx.int64, 0x10000),
(0x10000, chainerx.uint8, 0),
(0x100000000, chainerx.bool_, True),
(0x100000000, chainerx.int8, 0),
(0x100000000, chainerx.int16, 0),
(0x100000000, chainerx.int32, 0),
(0x100000000, chainerx.int64, 0x100000000),
(0x100000000, chainerx.uint8, 0),
(0x7fffffffffffffff, chainerx.bool_, True),
(0x7fffffffffffffff, chainerx.int8, -1),
(0x7fffffffffffffff, chainerx.int16, -1),
(0x7fffffffffffffff, chainerx.int32, -1),
(0x7fffffffffffffff, chainerx.int64, 0x7fffffffffffffff),
(0x7fffffffffffffff, chainerx.uint8, 255),
])
def test_init_casted(value, cast_dtype, expected_value):
scalar = chainerx.Scalar(value, cast_dtype)
assert scalar.dtype == cast_dtype
if math.isnan(expected_value):
assert math.isnan(scalar.tolist())
else:
assert scalar.tolist() == expected_value
assert isinstance(scalar.tolist(), type(expected_value))
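# A small arithmetic sketch (not one of the original tests): the integer rows in
# test_init_casted behave like two's-complement truncation to the target width,
# e.g. -1 -> uint8 gives 0xff, 0x100 -> int8 drops to 0, and the low 16 bits of
# 0x7fffffffffffffff are all ones, i.e. the int16 value -1.
def _casting_truncation_sketch():
    assert (-1) & 0xff == 0xff
    assert 0x100 & 0xff == 0x00
    assert 0x7fffffffffffffff & 0xffff == 0xffff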
@pytest.mark.parametrize(
'value',
[0, 0.0, 1, 1.0, -1, 0x100, 0x10000, 0x100000000, 0x7fffffffffffffff])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_init_with_dtype(value, dtype_spec):
expected_dtype = chainerx.dtype(dtype_spec)
scalar = chainerx.Scalar(value, dtype_spec)
assert scalar.dtype == expected_dtype
assert scalar == chainerx.Scalar(value, expected_dtype)
@pytest.mark.parametrize('value1,value2', [
# TODO(niboshi): Support commented-out cases
(0, 0),
(1, 1),
# (1, 1.0),
(1.5, 1.5),
(-1.5, -1.5),
(True, True),
(False, False),
# (True, 1),
# (True, 1.0),
# (False, 0),
# (False, 0.0),
# (float('inf'), float('inf')),
])
def test_equality(value1, value2):
scalar1 = chainerx.Scalar(value1)
scalar2 = chainerx.Scalar(value2)
assert scalar1 == scalar2
assert scalar2 == scalar1
assert scalar1 == value1
assert value1 == scalar1
assert scalar2 == value2
assert value2 == scalar2
assert scalar2 == value1
assert value1 == scalar2
assert scalar1 == value2
assert value2 == scalar1
@pytest.mark.parametrize('value1,value2', [
(0, 1),
(-1, 1),
(-1.0001, -1.0),
(-1.0001, -1),
(True, False),
(True, 1.1),
(1.0001, 1.0002),
(float('nan'), float('nan')),
])
def test_inequality(value1, value2):
scalar1 = chainerx.Scalar(value1)
scalar2 = chainerx.Scalar(value2)
assert scalar1 != scalar2
assert scalar2 != scalar1
assert scalar2 != value1
assert value1 != scalar2
assert scalar1 != value2
assert value2 != scalar1
@pytest.mark.parametrize('value', [
-2, 1, -1.5, 2.3, True, False
])
def test_cast(value):
scalar = chainerx.Scalar(value)
_check_cast_scalar_equals_data(scalar, value)
_check_cast_scalar_equals_data(+scalar, +value)
if isinstance(value, bool):
with pytest.raises(chainerx.DtypeError):
-scalar # should not be able to negate bool
else:
_check_cast_scalar_equals_data(-scalar, -value)
@pytest.mark.parametrize('value', all_scalar_values)
def test_dtype(value):
scalar = chainerx.Scalar(value)
if isinstance(value, bool):
assert scalar.dtype == chainerx.bool_
elif isinstance(value, int):
assert scalar.dtype == chainerx.int64
elif isinstance(value, float):
assert scalar.dtype == chainerx.float64
else:
assert False
@pytest.mark.parametrize('value', all_scalar_values)
def test_repr(value):
scalar = chainerx.Scalar(value)
assert repr(scalar) == repr(value)
assert str(scalar) == str(value)
def test_init_invalid():
with pytest.raises(TypeError):
chainerx.Scalar("1") # string, which is not a numeric
| import math
import pytest
import chainerx
def _check_cast_scalar_equals_data(scalar, data):
assert bool(scalar) == bool(data)
assert int(scalar) == int(data)
assert float(scalar) == float(data)
all_scalar_values = [
-2, 1, -1.5, 2.3, True, False, float('inf'), float('nan')]
@pytest.mark.parametrize('value,dtype', [
(0, chainerx.int64),
(-1, chainerx.int64),
(0x7fffffffffffffff, chainerx.int64),
(-0x8000000000000000, chainerx.int64),
(0.0, chainerx.float64),
(float('inf'), chainerx.float64),
(float('nan'), chainerx.float64),
(True, chainerx.bool_),
(False, chainerx.bool_),
])
def test_init_without_dtype(value, dtype):
scalar = chainerx.Scalar(value)
assert scalar.dtype == dtype
if math.isnan(value):
assert math.isnan(scalar.tolist())
else:
assert scalar.tolist() == value
assert isinstance(scalar.tolist(), type(value))
@pytest.mark.parametrize('value,cast_dtype,expected_value', [
(0, chainerx.bool_, False),
(0, chainerx.int8, 0),
(0, chainerx.int16, 0),
(0, chainerx.int32, 0),
(0, chainerx.int64, 0),
(0, chainerx.uint8, 0),
(0, chainerx.float32, 0.0),
(0, chainerx.float64, 0.0),
(0.0, chainerx.bool_, False),
(0.0, chainerx.int8, 0),
(0.0, chainerx.int16, 0),
(0.0, chainerx.int32, 0),
(0.0, chainerx.int64, 0),
(0.0, chainerx.uint8, 0),
(0.0, chainerx.float32, 0.0),
(0.0, chainerx.float64, 0.0),
(1, chainerx.bool_, True),
(1, chainerx.int8, 1),
(1, chainerx.int16, 1),
(1, chainerx.int32, 1),
(1, chainerx.int64, 1),
(1, chainerx.uint8, 1),
(1, chainerx.float32, 1.0),
(1, chainerx.float64, 1.0),
(1.0, chainerx.bool_, True),
(1.0, chainerx.int8, 1),
(1.0, chainerx.int16, 1),
(1.0, chainerx.int32, 1),
(1.0, chainerx.int64, 1),
(1.0, chainerx.uint8, 1),
(1.0, chainerx.float32, 1.0),
(1.0, chainerx.float64, 1.0),
(-1, chainerx.bool_, True),
(-1, chainerx.int8, -1),
(-1, chainerx.int16, -1),
(-1, chainerx.int32, -1),
(-1, chainerx.int64, -1),
(-1, chainerx.uint8, 0xff),
(-1, chainerx.float32, -1.0),
(-1, chainerx.float64, -1.0),
(0x100, chainerx.bool_, True),
(0x100, chainerx.int8, 0),
(0x100, chainerx.int16, 0x100),
(0x100, chainerx.int32, 0x100),
(0x100, chainerx.int64, 0x100),
(0x100, chainerx.uint8, 0),
(0x10000, chainerx.bool_, True),
(0x10000, chainerx.int8, 0),
(0x10000, chainerx.int16, 0),
(0x10000, chainerx.int32, 0x10000),
(0x10000, chainerx.int64, 0x10000),
(0x10000, chainerx.uint8, 0),
(0x100000000, chainerx.bool_, True),
(0x100000000, chainerx.int8, 0),
(0x100000000, chainerx.int16, 0),
(0x100000000, chainerx.int32, 0),
(0x100000000, chainerx.int64, 0x100000000),
(0x100000000, chainerx.uint8, 0),
(0x7fffffffffffffff, chainerx.bool_, True),
(0x7fffffffffffffff, chainerx.int8, -1),
(0x7fffffffffffffff, chainerx.int16, -1),
(0x7fffffffffffffff, chainerx.int32, -1),
(0x7fffffffffffffff, chainerx.int64, 0x7fffffffffffffff),
(0x7fffffffffffffff, chainerx.uint8, 255),
])
def test_init_casted(value, cast_dtype, expected_value):
scalar = chainerx.Scalar(value, cast_dtype)
assert scalar.dtype == cast_dtype
if math.isnan(expected_value):
assert math.isnan(scalar.tolist())
else:
assert scalar.tolist() == expected_value
assert isinstance(scalar.tolist(), type(expected_value))
@pytest.mark.parametrize(
'value',
[0, 0.0, 1, 1.0, -1, 0x100, 0x10000, 0x100000000, 0x7fffffffffffffff])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_init_with_dtype(value, dtype_spec):
expected_dtype = chainerx.dtype(dtype_spec)
scalar = chainerx.Scalar(value, dtype_spec)
assert scalar.dtype == expected_dtype
assert scalar == chainerx.Scalar(value, expected_dtype)
@pytest.mark.parametrize('value1,value2', [
# TODO(niboshi): Support commented-out cases
(0, 0),
(1, 1),
# (1, 1.0),
(1.5, 1.5),
(-1.5, -1.5),
(True, True),
(False, False),
# (True, 1),
# (True, 1.0),
# (False, 0),
# (False, 0.0),
# (float('inf'), float('inf')),
])
def test_equality(value1, value2):
scalar1 = chainerx.Scalar(value1)
scalar2 = chainerx.Scalar(value2)
assert scalar1 == scalar2
assert scalar2 == scalar1
assert scalar1 == value1
assert value1 == scalar1
assert scalar2 == value2
assert value2 == scalar2
assert scalar2 == value1
assert value1 == scalar2
assert scalar1 == value2
assert value2 == scalar1
@pytest.mark.parametrize('value1,value2', [
(0, 1),
(-1, 1),
(-1.0001, -1.0),
(-1.0001, -1),
(True, False),
(True, 1.1),
(1.0001, 1.0002),
(float('nan'), float('nan')),
])
def test_inequality(value1, value2):
scalar1 = chainerx.Scalar(value1)
scalar2 = chainerx.Scalar(value2)
assert scalar1 != scalar2
assert scalar2 != scalar1
assert scalar2 != value1
assert value1 != scalar2
assert scalar1 != value2
assert value2 != scalar1
@pytest.mark.parametrize('value', [
-2, 1, -1.5, 2.3, True, False
])
def test_cast(value):
scalar = chainerx.Scalar(value)
_check_cast_scalar_equals_data(scalar, value)
_check_cast_scalar_equals_data(+scalar, +value)
if isinstance(value, bool):
with pytest.raises(chainerx.DtypeError):
-scalar # should not be able to negate bool
else:
_check_cast_scalar_equals_data(-scalar, -value)
@pytest.mark.parametrize('value', all_scalar_values)
def test_dtype(value):
scalar = chainerx.Scalar(value)
if isinstance(value, bool):
assert scalar.dtype == chainerx.bool_
elif isinstance(value, int):
assert scalar.dtype == chainerx.int64
elif isinstance(value, float):
assert scalar.dtype == chainerx.float64
else:
assert False
@pytest.mark.parametrize('value', all_scalar_values)
def test_repr(value):
scalar = chainerx.Scalar(value)
assert repr(scalar) == repr(value)
assert str(scalar) == str(value)
def test_init_invalid():
with pytest.raises(TypeError):
chainerx.Scalar("1") # string, which is not a numeric | en | 0.577369 | # TODO(niboshi): Support commented-out cases # (1, 1.0), # (True, 1), # (True, 1.0), # (False, 0), # (False, 0.0), # (float('inf'), float('inf')), # should not be able to negate bool # string, which is not a numeric | 2.490047 | 2 |
app.py | Tiemoue/SnakeGame | 0 | 564 | import sys
import pygame
from app_window import App_window
from button import Button
from snake import Snake
from food import Food
from settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED
class App:
def __init__(self):
pygame.init()
self.clock = pygame.time.Clock()
self.window = pygame.display.set_mode((WIDTH, HEIGHT))
self.gameover = pygame.font.SysFont("Comicsansms",
90,
bold=False,
italic=True)
self.font = pygame.font.SysFont(FONT, 20, bold=1)
self.running = True
self.state = "intro"
self.intro_buttons = []
self.playing_buttons = []
self.gameover_buttons = []
self.active_buttons = self.intro_buttons
self.app_window = App_window(self)
self.snake = Snake(self)
self.food = Food(self)
self.make_buttons()
def make_buttons(self):
# INTRO PLAY AND QUIT BUTTON
intro_play_button = Button(self,
50,
300,
WIDTH - 100,
50,
PLAY_BUTTON_COLOUR,
hover_colour=(49, 218, 46),
function=self.intro_to_play,
text="PLAY")
self.intro_buttons.append(intro_play_button)
intro_quit_button = Button(self,
50,
HEIGHT - 100,
WIDTH - 100,
50,
QUIT_BUTTON_COLOUR,
hover_colour=(219, 53, 43),
function=self.intro_quit,
text="QUIT")
self.intro_buttons.append(intro_quit_button)
# PLAYING QUIT BUTTON
playing_quit_button = Button(self, (WIDTH // 2) - 50,
20,
100,
33,
QUIT_BUTTON_COLOUR,
hover_colour=(219, 53, 43),
function=self.playing_quit,
text="QUIT")
self.playing_buttons.append(playing_quit_button)
# GAMEOVER BUTTON
gameover_play_again_button = Button(self,
50,
300,
WIDTH - 100,
50,
PLAY_BUTTON_COLOUR,
hover_colour=(36, 183, 23),
function=self.reset,
text="PLAY AGAIN")
self.gameover_buttons.append(gameover_play_again_button)
gameover_quit_button = Button(self,
50,
HEIGHT - 100,
WIDTH - 100,
50,
QUIT_BUTTON_COLOUR,
hover_colour=(216, 53, 43),
function=self.intro_quit,
text="QUIT")
self.gameover_buttons.append(gameover_quit_button)
def show_text(self, text, pos):
text = self.font.render(text, False, BLACK)
self.window.blit(text, (pos[0], pos[1]))
def reset(self):
# reset the game
self.state = "play"
self.active_buttons = self.playing_buttons
self.snake = Snake(self)
FPS[0] = 5
def run(self):
while self.running:
self.events()
self.update()
self.draw()
self.clock.tick(FPS[0])
pygame.quit()
sys.exit()
def events(self):
if self.state == "intro":
self.intro_events()
if self.state == "play":
self.playing_events()
if self.state == "dead":
self.gameover_events()
def update(self):
if self.state == "intro":
self.intro_update()
if self.state == "play":
self.playing_update()
if self.state == "dead":
self.gameover_update()
def draw(self):
self.window.fill(BG_COL)
if self.state == "intro":
self.intro_draw()
if self.state == "play":
self.playing_draw()
if self.state == "dead":
self.gameover_draw()
pygame.display.update()
# INTRO FUNCTIONS
def intro_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def intro_update(self):
for button in self.active_buttons:
button.update()
def intro_draw(self):
for button in self.active_buttons:
button.draw()
def intro_to_play(self):
self.state = "play"
self.active_buttons = self.playing_buttons
def intro_quit(self):
self.running = False
# PlAY FUNCTIONS
def playing_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
# checks if a key is pressed down
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.running = False
if event.key == pygame.K_LEFT and self.snake.direction != [
1, 0
]:
self.snake.direction = [-1, 0]
if event.key == pygame.K_RIGHT and self.snake.direction != [
-1, 0
]:
self.snake.direction = [1, 0]
if event.key == pygame.K_UP and self.snake.direction != [0, 1]:
self.snake.direction = [0, -1]
if event.key == pygame.K_DOWN and self.snake.direction != [
0, -1
]:
self.snake.direction = [0, 1]
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def playing_update(self):
for button in self.active_buttons:
button.update()
self.app_window.update()
def playing_draw(self):
self.app_window.draw()
for button in self.active_buttons:
button.draw()
self.show_text("Score: " + str(self.snake.length - 1), [20, 20])
def playing_quit(self):
self.running = False
# GAMEOVER FUNCTIONS
def gameover_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def gameover_update(self):
for button in self.active_buttons:
button.update()
def gameover_draw(self):
for button in self.active_buttons:
button.draw()
self.game_over("GAME OVER", [WIDTH - 440, 30])
def game_over(self, text, pos):
text = self.gameover.render(text, False, RED)
self.window.blit(text, (pos[0], pos[1]))
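# --- Illustrative sketch (not part of the original game code) ----------------
# The key handling in playing_events only accepts a turn whose vector is not the
# exact opposite of the current direction, so the snake can never reverse into
# itself. The same rule in isolation:
def _is_legal_turn(current, requested):
    return requested != [-current[0], -current[1]]
# _is_legal_turn([1, 0], [-1, 0]) -> False (reversal), _is_legal_turn([1, 0], [0, 1]) -> True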
| import sys
import pygame
from app_window import App_window
from button import Button
from snake import Snake
from food import Food
from settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED
class App:
def __init__(self):
pygame.init()
self.clock = pygame.time.Clock()
self.window = pygame.display.set_mode((WIDTH, HEIGHT))
self.gameover = pygame.font.SysFont("Comicsansms",
90,
bold=False,
italic=True)
self.font = pygame.font.SysFont(FONT, 20, bold=1)
self.running = True
self.state = "intro"
self.intro_buttons = []
self.playing_buttons = []
self.gameover_buttons = []
self.active_buttons = self.intro_buttons
self.app_window = App_window(self)
self.snake = Snake(self)
self.food = Food(self)
self.make_buttons()
def make_buttons(self):
# INTRO PLAY AND QUIT BUTTON
intro_play_button = Button(self,
50,
300,
WIDTH - 100,
50,
PLAY_BUTTON_COLOUR,
hover_colour=(49, 218, 46),
function=self.intro_to_play,
text="PLAY")
self.intro_buttons.append(intro_play_button)
intro_quit_button = Button(self,
50,
HEIGHT - 100,
WIDTH - 100,
50,
QUIT_BUTTON_COLOUR,
hover_colour=(219, 53, 43),
function=self.intro_quit,
text="QUIT")
self.intro_buttons.append(intro_quit_button)
# PLAYING QUIT BUTTON
playing_quit_button = Button(self, (WIDTH // 2) - 50,
20,
100,
33,
QUIT_BUTTON_COLOUR,
hover_colour=(219, 53, 43),
function=self.playing_quit,
text="QUIT")
self.playing_buttons.append(playing_quit_button)
# GAMEOVER BUTTON
gameover_play_again_button = Button(self,
50,
300,
WIDTH - 100,
50,
PLAY_BUTTON_COLOUR,
hover_colour=(36, 183, 23),
function=self.reset,
text="PLAY AGAIN")
self.gameover_buttons.append(gameover_play_again_button)
gameover_quit_button = Button(self,
50,
HEIGHT - 100,
WIDTH - 100,
50,
QUIT_BUTTON_COLOUR,
hover_colour=(216, 53, 43),
function=self.intro_quit,
text="QUIT")
self.gameover_buttons.append(gameover_quit_button)
def show_text(self, text, pos):
text = self.font.render(text, False, BLACK)
self.window.blit(text, (pos[0], pos[1]))
def reset(self):
# reset the game
self.state = "play"
self.active_buttons = self.playing_buttons
self.snake = Snake(self)
FPS[0] = 5
def run(self):
while self.running:
self.events()
self.update()
self.draw()
self.clock.tick(FPS[0])
pygame.quit()
sys.exit()
def events(self):
if self.state == "intro":
self.intro_events()
if self.state == "play":
self.playing_events()
if self.state == "dead":
self.gameover_events()
def update(self):
if self.state == "intro":
self.intro_update()
if self.state == "play":
self.playing_update()
if self.state == "dead":
self.gameover_update()
def draw(self):
self.window.fill(BG_COL)
if self.state == "intro":
self.intro_draw()
if self.state == "play":
self.playing_draw()
if self.state == "dead":
self.gameover_draw()
pygame.display.update()
# INTRO FUNCTIONS
def intro_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def intro_update(self):
for button in self.active_buttons:
button.update()
def intro_draw(self):
for button in self.active_buttons:
button.draw()
def intro_to_play(self):
self.state = "play"
self.active_buttons = self.playing_buttons
def intro_quit(self):
self.running = False
    # PLAY FUNCTIONS
def playing_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
# checks if a key is pressed down
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.running = False
if event.key == pygame.K_LEFT and self.snake.direction != [
1, 0
]:
self.snake.direction = [-1, 0]
if event.key == pygame.K_RIGHT and self.snake.direction != [
-1, 0
]:
self.snake.direction = [1, 0]
if event.key == pygame.K_UP and self.snake.direction != [0, 1]:
self.snake.direction = [0, -1]
if event.key == pygame.K_DOWN and self.snake.direction != [
0, -1
]:
self.snake.direction = [0, 1]
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def playing_update(self):
for button in self.active_buttons:
button.update()
self.app_window.update()
def playing_draw(self):
self.app_window.draw()
for button in self.active_buttons:
button.draw()
self.show_text("Score: " + str(self.snake.length - 1), [20, 20])
def playing_quit(self):
self.running = False
# GAMEOVER FUNCTIONS
def gameover_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def gameover_update(self):
for button in self.active_buttons:
button.update()
def gameover_draw(self):
for button in self.active_buttons:
button.draw()
self.game_over("GAME OVER", [WIDTH - 440, 30])
def game_over(self, text, pos):
text = self.gameover.render(text, False, RED)
self.window.blit(text, (pos[0], pos[1]))
 | en | 0.468207 | # INTRO PLAY AND QUIT BUTTON # PLAYING QUIT BUTTON # GAMEOVER BUTTON # reset the game # INTRO FUNCTIONS # PLAY FUNCTIONS # checks if a key is pressed down # GAMEOVER FUNCTIONS | 2.96103 | 3
pupa/importers/bills.py | datamade/pupa | 3 | 565 | <filename>pupa/importers/bills.py<gh_stars>1-10
from pupa.utils import fix_bill_id
from opencivicdata.legislative.models import (Bill, RelatedBill, BillAbstract, BillTitle,
BillIdentifier, BillAction, BillActionRelatedEntity,
BillSponsorship, BillSource, BillDocument,
BillVersion, BillDocumentLink, BillVersionLink)
from .base import BaseImporter
from ..exceptions import PupaInternalError
class BillImporter(BaseImporter):
_type = 'bill'
model_class = Bill
related_models = {'abstracts': (BillAbstract, 'bill_id', {}),
'other_titles': (BillTitle, 'bill_id', {}),
'other_identifiers': (BillIdentifier, 'bill_id', {}),
'actions': (BillAction, 'bill_id', {
'related_entities': (BillActionRelatedEntity, 'action_id', {})}),
'related_bills': (RelatedBill, 'bill_id', {}),
'sponsorships': (BillSponsorship, 'bill_id', {}),
'sources': (BillSource, 'bill_id', {}),
'documents': (BillDocument, 'bill_id', {
'links': (BillDocumentLink, 'document_id', {})}),
'versions': (BillVersion, 'bill_id', {
'links': (BillVersionLink, 'version_id', {})}),
}
preserve_order = {'actions'}
def __init__(self, jurisdiction_id, org_importer, person_importer):
super(BillImporter, self).__init__(jurisdiction_id)
self.org_importer = org_importer
self.person_importer = person_importer
def get_object(self, bill):
spec = {
'legislative_session_id': bill['legislative_session_id'],
'identifier': bill['identifier'],
}
if 'from_organization_id' in bill:
spec['from_organization_id'] = bill['from_organization_id']
return self.model_class.objects.prefetch_related('actions__related_entities',
'versions__links',
'documents__links',
).get(**spec)
def limit_spec(self, spec):
spec['legislative_session__jurisdiction_id'] = self.jurisdiction_id
return spec
def prepare_for_db(self, data):
data['identifier'] = fix_bill_id(data['identifier'])
data['legislative_session_id'] = self.get_session_id(data.pop('legislative_session'))
if data['from_organization']:
data['from_organization_id'] = self.org_importer.resolve_json_id(
data.pop('from_organization'))
for action in data['actions']:
action['organization_id'] = self.org_importer.resolve_json_id(
action['organization_id'])
for entity in action['related_entities']:
if 'organization_id' in entity:
entity['organization_id'] = self.org_importer.resolve_json_id(
entity['organization_id'])
elif 'person_id' in entity:
entity['person_id'] = self.person_importer.resolve_json_id(
entity['person_id'])
for sponsor in data['sponsorships']:
if 'person_id' in sponsor:
sponsor['person_id'] = self.person_importer.resolve_json_id(
sponsor['person_id'], allow_no_match=True)
if 'organization_id' in sponsor:
sponsor['organization_id'] = self.org_importer.resolve_json_id(
sponsor['organization_id'], allow_no_match=True)
return data
def postimport(self):
# go through all RelatedBill objs that are attached to a bill in this jurisdiction and
# are currently unresolved
for rb in RelatedBill.objects.filter(
bill__legislative_session__jurisdiction_id=self.jurisdiction_id,
related_bill=None):
candidates = list(Bill.objects.filter(
legislative_session__identifier=rb.legislative_session,
legislative_session__jurisdiction_id=self.jurisdiction_id,
identifier=rb.identifier)
)
if len(candidates) == 1:
rb.related_bill = candidates[0]
rb.save()
elif len(candidates) > 1: # pragma: no cover
# if we ever see this, we need to add additional fields on the relation
raise PupaInternalError('multiple related_bill candidates found for {}'.format(rb))
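# --- Illustrative sketch (not part of the pupa sources) ----------------------
# postimport() above resolves a RelatedBill only when the jurisdiction/session/
# identifier lookup yields exactly one bill; no match leaves it unresolved and
# several matches are an internal error. The decision rule on its own:
def _pick_related_bill(candidates):
    if len(candidates) == 1:
        return candidates[0]
    if len(candidates) > 1:
        raise PupaInternalError('multiple related_bill candidates')
    return None  # stays unresolved until a later import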
| <filename>pupa/importers/bills.py<gh_stars>1-10
from pupa.utils import fix_bill_id
from opencivicdata.legislative.models import (Bill, RelatedBill, BillAbstract, BillTitle,
BillIdentifier, BillAction, BillActionRelatedEntity,
BillSponsorship, BillSource, BillDocument,
BillVersion, BillDocumentLink, BillVersionLink)
from .base import BaseImporter
from ..exceptions import PupaInternalError
class BillImporter(BaseImporter):
_type = 'bill'
model_class = Bill
related_models = {'abstracts': (BillAbstract, 'bill_id', {}),
'other_titles': (BillTitle, 'bill_id', {}),
'other_identifiers': (BillIdentifier, 'bill_id', {}),
'actions': (BillAction, 'bill_id', {
'related_entities': (BillActionRelatedEntity, 'action_id', {})}),
'related_bills': (RelatedBill, 'bill_id', {}),
'sponsorships': (BillSponsorship, 'bill_id', {}),
'sources': (BillSource, 'bill_id', {}),
'documents': (BillDocument, 'bill_id', {
'links': (BillDocumentLink, 'document_id', {})}),
'versions': (BillVersion, 'bill_id', {
'links': (BillVersionLink, 'version_id', {})}),
}
preserve_order = {'actions'}
def __init__(self, jurisdiction_id, org_importer, person_importer):
super(BillImporter, self).__init__(jurisdiction_id)
self.org_importer = org_importer
self.person_importer = person_importer
def get_object(self, bill):
spec = {
'legislative_session_id': bill['legislative_session_id'],
'identifier': bill['identifier'],
}
if 'from_organization_id' in bill:
spec['from_organization_id'] = bill['from_organization_id']
return self.model_class.objects.prefetch_related('actions__related_entities',
'versions__links',
'documents__links',
).get(**spec)
def limit_spec(self, spec):
spec['legislative_session__jurisdiction_id'] = self.jurisdiction_id
return spec
def prepare_for_db(self, data):
data['identifier'] = fix_bill_id(data['identifier'])
data['legislative_session_id'] = self.get_session_id(data.pop('legislative_session'))
if data['from_organization']:
data['from_organization_id'] = self.org_importer.resolve_json_id(
data.pop('from_organization'))
for action in data['actions']:
action['organization_id'] = self.org_importer.resolve_json_id(
action['organization_id'])
for entity in action['related_entities']:
if 'organization_id' in entity:
entity['organization_id'] = self.org_importer.resolve_json_id(
entity['organization_id'])
elif 'person_id' in entity:
entity['person_id'] = self.person_importer.resolve_json_id(
entity['person_id'])
for sponsor in data['sponsorships']:
if 'person_id' in sponsor:
sponsor['person_id'] = self.person_importer.resolve_json_id(
sponsor['person_id'], allow_no_match=True)
if 'organization_id' in sponsor:
sponsor['organization_id'] = self.org_importer.resolve_json_id(
sponsor['organization_id'], allow_no_match=True)
return data
def postimport(self):
# go through all RelatedBill objs that are attached to a bill in this jurisdiction and
# are currently unresolved
for rb in RelatedBill.objects.filter(
bill__legislative_session__jurisdiction_id=self.jurisdiction_id,
related_bill=None):
candidates = list(Bill.objects.filter(
legislative_session__identifier=rb.legislative_session,
legislative_session__jurisdiction_id=self.jurisdiction_id,
identifier=rb.identifier)
)
if len(candidates) == 1:
rb.related_bill = candidates[0]
rb.save()
elif len(candidates) > 1: # pragma: no cover
# if we ever see this, we need to add additional fields on the relation
raise PupaInternalError('multiple related_bill candidates found for {}'.format(rb))
| en | 0.95957 | # go through all RelatedBill objs that are attached to a bill in this jurisdiction and # are currently unresolved # pragma: no cover # if we ever see this, we need to add additional fields on the relation | 1.980602 | 2 |
utilities/classify_ensemble.py | Hazel1994/Paraphrase-detection-on-Quora-and-MSRP | 2 | 566 | from sklearn.metrics import f1_score,accuracy_score
import numpy as np
from utilities.tools import load_model
import pandas as pd
def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0)
preds.append(preds_prob[:,1])
preds=np.asarray(preds)
final_labels=np.zeros(len(test_data_1),dtype=int)
    #average the prediction
for i in range(len(test_data_1)):
final_labels[i]=round(np.mean(preds[:,i]))
if i%100==0:
print(i ,' out of ',len(test_data_1))
print("test data accuracy: ", accuracy_score(final_labels,test_labels))
print("test data f_measure: ", f1_score(final_labels, test_labels))
submission = pd.DataFrame({"Quality": final_labels})
submission.to_csv("predictions/MSRP.tsv", index=True,index_label='test_id')
def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=125, verbose=0)
preds.append(preds_prob[:,1])
preds=np.asarray(preds)
final_labels=np.zeros(len(test_data_1),dtype=float)
    #average the prediction
for i in range(len(test_data_1)):
final_labels[i]=np.mean(preds[:,i])
if i%10000==0:
print(i ,' out of ',len(test_data_1))
    print('making the submission file')
submission = pd.DataFrame({"is_duplicate": final_labels})
submission.to_csv("predictions/Quora.tsv", index=True,index_label='test_id') | from sklearn.metrics import f1_score,accuracy_score
import numpy as np
from utilities.tools import load_model
import pandas as pd
def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0)
preds.append(preds_prob[:,1])
preds=np.asarray(preds)
final_labels=np.zeros(len(test_data_1),dtype=int)
    #average the prediction
for i in range(len(test_data_1)):
final_labels[i]=round(np.mean(preds[:,i]))
if i%100==0:
print(i ,' out of ',len(test_data_1))
print("test data accuracy: ", accuracy_score(final_labels,test_labels))
print("test data f_measure: ", f1_score(final_labels, test_labels))
submission = pd.DataFrame({"Quality": final_labels})
submission.to_csv("predictions/MSRP.tsv", index=True,index_label='test_id')
def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=125, verbose=0)
preds.append(preds_prob[:,1])
preds=np.asarray(preds)
final_labels=np.zeros(len(test_data_1),dtype=float)
    #average the prediction
for i in range(len(test_data_1)):
final_labels[i]=np.mean(preds[:,i])
if i%10000==0:
print(i ,' out of ',len(test_data_1))
    print('making the submission file')
submission = pd.DataFrame({"is_duplicate": final_labels})
submission.to_csv("predictions/Quora.tsv", index=True,index_label='test_id') | en | 0.322757 | #average the predicttion #average the predicttion | 2.710547 | 3 |
koino/plot/clusters.py | tritas/koino | 0 | 567 | # coding=utf-8
import logging
import traceback
from os import makedirs
from os.path import exists, join
from textwrap import fill
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from koino.plot import big_square, default_alpha
from matplotlib import cm
from ..utils.base import jaccard
def plot_silhouette(
X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg
):
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10))
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but here all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
y_lower = 10
for k in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k])
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(k) / n_clusters)
ax1.fill_betweenx(
np.arange(y_lower, y_upper),
0,
ith_cluster_silhouette_values,
facecolor=color,
edgecolor=color,
alpha=default_alpha,
)
# Label the silhouette plots with their cluster numbers at the
# middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# Construct cluster
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
# colors = y
ax2.scatter(X[:, 0], X[:, 1], marker=".", s=20, lw=0, alpha=default_alpha, c=colors)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(
("Silhouette analysis for KMeans " "with n_clusters = %d" % n_clusters),
fontsize=14,
fontweight="bold",
)
plt.savefig(figure_fp)
plt.close()
plt.clf()
def plot_cluster_assignments(
X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=""
):
"""Clustering assignments scatter plot
Notes
-----
Can use mean or median to fix cluster centroid coordinates."""
if cluster_names is None:
cluster_names = ["Cluster {}".format(i + 1) for i in range(n_clusters)]
# We first reorder the data points according to the centroids labels
X = np.vstack([X[y == i] for i in range(n_clusters)])
y = np.hstack([y[y == i] for i in range(n_clusters)])
# Choose a color palette with seaborn.
palette = np.array(sns.color_palette("hls", n_clusters))
fig, ax = plt.subplots(figsize=big_square)
# for i in range(n_clusters):
# mask = y == i
# ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i],
# label=cluster_names[i])
ax.set_title(title)
ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)])
ax.axis("off")
# Add the labels for each cluster.
for i in range(n_clusters):
# Position of each label.
samples = np.atleast_2d(X[y == i, :2])
if not len(samples):
logging.warning(
"Probably singular cluster {} (shape:{})".format(i + 1, X[y == i].shape)
)
continue
xtext, ytext = np.median(samples, axis=0)
name = fill(cluster_names[i], width=20)
assert np.isfinite(xtext)
assert np.isfinite(ytext)
txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha="left")
txt.set_path_effects(
[PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]
)
# plt.legend()
figure_fp = join(figures_dir, "Clustered {}.png".format(title))
fig.tight_layout()
try:
fig.savefig(figure_fp, transparent=transparent)
except ValueError:
logging.warning(traceback.format_exc())
finally:
plt.close()
plt.clf()
def overlap_jaccard(
indx,
y_a,
y_b,
names_a,
names_b,
n_a=None,
n_b=None,
figsize=None,
output_dir=None,
alabel="socio-demographic",
blabel="purchases",
transparent=False,
):
"""Compute and plot contingency tables based on set intersection and
jaccard score.
# TODO: Normaliser par len(sd_set) ou len(diet_set) ?
"""
if not (n_a or n_b) or not output_dir:
return
elif output_dir and not exists(output_dir):
makedirs(output_dir)
else:
assert n_a and n_b
assert len(indx) == len(y_a) == len(y_b)
assert len(names_a) == n_a
assert len(names_b) == n_b
a_sets = [set(indx[y_a == i]) for i in range(n_a)]
b_sets = [set(indx[y_b == i]) for i in range(n_b)]
inter_sets = np.asarray(
[[len(set_a & set_t) for set_a in a_sets] for set_t in b_sets], dtype=np.int_
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Overlap between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
inter_sets,
annot=True,
fmt="6.0f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
inter_path = join(output_dir, "Clusters Intersection.png")
plt.savefig(inter_path, transparent=transparent)
plt.close()
plt.clf()
jac_arr = np.asarray(
[[jaccard(set_a, set_b) for set_a in a_sets] for set_b in b_sets],
dtype=np.float_,
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Jaccard scores between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
jac_arr,
annot=True,
fmt=".3f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
jaccard_path = join(output_dir, "Clusters Jaccard.png")
plt.savefig(jaccard_path, transparent=transparent)
plt.close()
plt.clf()
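# --- Worked example (not part of the module) ----------------------------------
# overlap_jaccard() fills one cell per cluster pair: a raw intersection count and
# a Jaccard score, assumed here to be the usual |A & B| / |A | B| (the imported
# utils.base.jaccard implementation is not shown in this file).
def _jaccard_cell_example():
    set_a, set_b = {1, 2, 3, 4}, {3, 4, 5}
    overlap = len(set_a & set_b)          # 2 shared members
    score = overlap / len(set_a | set_b)  # 2 / 5 = 0.4
    return overlap, score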
| # coding=utf-8
import logging
import traceback
from os import makedirs
from os.path import exists, join
from textwrap import fill
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from koino.plot import big_square, default_alpha
from matplotlib import cm
from ..utils.base import jaccard
def plot_silhouette(
X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg
):
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10))
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but here all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
y_lower = 10
for k in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k])
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(k) / n_clusters)
ax1.fill_betweenx(
np.arange(y_lower, y_upper),
0,
ith_cluster_silhouette_values,
facecolor=color,
edgecolor=color,
alpha=default_alpha,
)
# Label the silhouette plots with their cluster numbers at the
# middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# Construct cluster
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
# colors = y
ax2.scatter(X[:, 0], X[:, 1], marker=".", s=20, lw=0, alpha=default_alpha, c=colors)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(
("Silhouette analysis for KMeans " "with n_clusters = %d" % n_clusters),
fontsize=14,
fontweight="bold",
)
plt.savefig(figure_fp)
plt.close()
plt.clf()
def plot_cluster_assignments(
X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=""
):
"""Clustering assignments scatter plot
Notes
-----
Can use mean or median to fix cluster centroid coordinates."""
if cluster_names is None:
cluster_names = ["Cluster {}".format(i + 1) for i in range(n_clusters)]
# We first reorder the data points according to the centroids labels
X = np.vstack([X[y == i] for i in range(n_clusters)])
y = np.hstack([y[y == i] for i in range(n_clusters)])
# Choose a color palette with seaborn.
palette = np.array(sns.color_palette("hls", n_clusters))
fig, ax = plt.subplots(figsize=big_square)
# for i in range(n_clusters):
# mask = y == i
# ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i],
# label=cluster_names[i])
ax.set_title(title)
ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)])
ax.axis("off")
# Add the labels for each cluster.
for i in range(n_clusters):
# Position of each label.
samples = np.atleast_2d(X[y == i, :2])
if not len(samples):
logging.warning(
"Probably singular cluster {} (shape:{})".format(i + 1, X[y == i].shape)
)
continue
xtext, ytext = np.median(samples, axis=0)
name = fill(cluster_names[i], width=20)
assert np.isfinite(xtext)
assert np.isfinite(ytext)
txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha="left")
txt.set_path_effects(
[PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]
)
# plt.legend()
figure_fp = join(figures_dir, "Clustered {}.png".format(title))
fig.tight_layout()
try:
fig.savefig(figure_fp, transparent=transparent)
except ValueError:
logging.warning(traceback.format_exc())
finally:
plt.close()
plt.clf()
def overlap_jaccard(
indx,
y_a,
y_b,
names_a,
names_b,
n_a=None,
n_b=None,
figsize=None,
output_dir=None,
alabel="socio-demographic",
blabel="purchases",
transparent=False,
):
"""Compute and plot contingency tables based on set intersection and
jaccard score.
# TODO: Normaliser par len(sd_set) ou len(diet_set) ?
"""
if not (n_a or n_b) or not output_dir:
return
elif output_dir and not exists(output_dir):
makedirs(output_dir)
else:
assert n_a and n_b
assert len(indx) == len(y_a) == len(y_b)
assert len(names_a) == n_a
assert len(names_b) == n_b
a_sets = [set(indx[y_a == i]) for i in range(n_a)]
b_sets = [set(indx[y_b == i]) for i in range(n_b)]
inter_sets = np.asarray(
[[len(set_a & set_t) for set_a in a_sets] for set_t in b_sets], dtype=np.int_
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Overlap between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
inter_sets,
annot=True,
fmt="6.0f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
inter_path = join(output_dir, "Clusters Intersection.png")
plt.savefig(inter_path, transparent=transparent)
plt.close()
plt.clf()
jac_arr = np.asarray(
[[jaccard(set_a, set_b) for set_a in a_sets] for set_b in b_sets],
dtype=np.float_,
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Jaccard scores between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
jac_arr,
annot=True,
fmt=".3f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
jaccard_path = join(output_dir, "Clusters Jaccard.png")
plt.savefig(jaccard_path, transparent=transparent)
plt.close()
plt.clf()
| en | 0.730928 | # coding=utf-8 # Create a subplot with 1 row and 2 columns # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but here all # lie within [-0.1, 1] # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them # Label the silhouette plots with their cluster numbers at the # middle # Compute the new y_lower for next plot # 10 for the 0 samples # The vertical line for average silhouette score of all the values # Clear the yaxis labels / ticks # Construct cluster # 2nd Plot showing the actual clusters formed # colors = y Clustering assignments scatter plot Notes ----- Can use mean or median to fix cluster centroid coordinates. # We first reorder the data points according to the centroids labels # Choose a color palette with seaborn. # for i in range(n_clusters): # mask = y == i # ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i], # label=cluster_names[i]) # Add the labels for each cluster. # Position of each label. # plt.legend() Compute and plot contingency tables based on set intersection and jaccard score. # TODO: Normaliser par len(sd_set) ou len(diet_set) ? | 2.724165 | 3 |
python/testData/stubs/FullyQualifiedTypingNamedTuple.py | jnthn/intellij-community | 2 | 568 | import typing
nt = typing.NamedTuple("name", [("field", str)]) | import typing
nt = typing.NamedTuple("name", [("field", str)]) | none | 1 | 2.085623 | 2 |
|
src/plat/index_news_remove.py | jack139/cnnc | 0 | 569 | <reponame>jack139/cnnc
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import web
import time
from bson.objectid import ObjectId
from config import setting
import helper
db = setting.db_web
# 删除聊天规则
url = ('/plat/index_news_remove')
class handler:
def GET(self):
if not helper.logged(helper.PRIV_USER, 'TALKBOT'):
raise web.seeother('/')
render = helper.create_render()
user_data = web.input(news_id='')
if user_data.news_id == '':
return render.info('参数错误!')
db.index_news.delete_one({'_id':ObjectId(user_data.news_id)})
return render.info('成功删除!', '/plat/index_news')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import web
import time
from bson.objectid import ObjectId
from config import setting
import helper
db = setting.db_web
# 删除聊天规则
url = ('/plat/index_news_remove')
class handler:
def GET(self):
if not helper.logged(helper.PRIV_USER, 'TALKBOT'):
raise web.seeother('/')
render = helper.create_render()
user_data = web.input(news_id='')
if user_data.news_id == '':
return render.info('参数错误!')
db.index_news.delete_one({'_id':ObjectId(user_data.news_id)})
return render.info('成功删除!', '/plat/index_news') | en | 0.303256 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # 删除聊天规则 | 2.128993 | 2 |
esppy/windows/score.py | PetreStegaroiu/python-esppy | 0 | 570 | <filename>esppy/windows/score.py<gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import pandas as pd
import six
from .base import BaseWindow, attribute
from .features import SchemaFeature, ModelsFeature, ConnectorsFeature
from .utils import get_args, ensure_element
class ScoreWindow(BaseWindow, SchemaFeature, ModelsFeature, ConnectorsFeature):
'''
Score window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
Attributes
----------
online_models : list-of-OnlineModels
List of online model objects
offline_models : list-of-OfflineModels
List of offline model objects
Returns
-------
:class:`ScoreWindow`
'''
window_type = 'score'
def __init__(self, name=None, schema=None, pubsub=None, description=None,
copyvars=None):
BaseWindow.__init__(self, **get_args(locals()))
# Set the online model for subclasses
if type(self).__name__ != 'ScoreWindow':
self.add_online_model(type(self).__name__)
def _create_schema_list(self, variables):
'''
Extract schema information from DataFrame
Parameters
----------
variables : DataFrame
The DataFrame containing schema information
Returns
-------
list
'''
labels = []
labels.append('id*:int64')
for name, dtype in zip(variables['Name'], variables['Type']):
if dtype == 'Num':
labels.append(name + ':double')
elif dtype == 'Char':
labels.append(name + ':string')
return labels
def import_schema_from_astore_output(self, output_variables_input):
'''
Import a schema from the astore CAS action output format
Parameters
----------
output_variables_input : DataFrame or list or string
The schema definition
'''
if isinstance(output_variables_input, six.string_types):
if os.path.isfile(output_variables_input):
output_variables_input = pd.read_csv(output_variables_input)
else:
output_variables_input = pd.read_csv(six.StringIO(output_variables_input))
if isinstance(output_variables_input, pd.DataFrame):
self.schema = self._create_schema_list(output_variables_input)
elif isinstance(output_variables_input, (tuple, list)):
self.schema = list(output_variables_input)
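# --- Illustrative example (not part of the esppy sources) --------------------
# _create_schema_list() prepends an id key and maps Num/Char variable types to
# double/string labels. With a hypothetical two-variable astore table (variable
# names invented for illustration):
def _schema_example():
    table = pd.DataFrame({'Name': ['P_Target', 'I_Target'], 'Type': ['Num', 'Char']})
    labels = ['id*:int64']
    for name, dtype in zip(table['Name'], table['Type']):
        labels.append(name + (':double' if dtype == 'Num' else ':string'))
    return labels  # ['id*:int64', 'P_Target:double', 'I_Target:string']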
| <filename>esppy/windows/score.py<gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import pandas as pd
import six
from .base import BaseWindow, attribute
from .features import SchemaFeature, ModelsFeature, ConnectorsFeature
from .utils import get_args, ensure_element
class ScoreWindow(BaseWindow, SchemaFeature, ModelsFeature, ConnectorsFeature):
'''
Score window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
Attributes
----------
online_models : list-of-OnlineModels
List of online model objects
offline_models : list-of-OfflineModels
List of offline model objects
Returns
-------
:class:`ScoreWindow`
'''
window_type = 'score'
def __init__(self, name=None, schema=None, pubsub=None, description=None,
copyvars=None):
BaseWindow.__init__(self, **get_args(locals()))
# Set the online model for subclasses
if type(self).__name__ != 'ScoreWindow':
self.add_online_model(type(self).__name__)
def _create_schema_list(self, variables):
'''
Extract schema information from DataFrame
Parameters
----------
variables : DataFrame
The DataFrame containing schema information
Returns
-------
list
'''
labels = []
labels.append('id*:int64')
for name, dtype in zip(variables['Name'], variables['Type']):
if dtype == 'Num':
labels.append(name + ':double')
elif dtype == 'Char':
labels.append(name + ':string')
return labels
def import_schema_from_astore_output(self, output_variables_input):
'''
Import a schema from the astore CAS action output format
Parameters
----------
output_variables_input : DataFrame or list or string
The schema definition
'''
if isinstance(output_variables_input, six.string_types):
if os.path.isfile(output_variables_input):
output_variables_input = pd.read_csv(output_variables_input)
else:
output_variables_input = pd.read_csv(six.StringIO(output_variables_input))
if isinstance(output_variables_input, pd.DataFrame):
self.schema = self._create_schema_list(output_variables_input)
elif isinstance(output_variables_input, (tuple, list)):
self.schema = list(output_variables_input)
| en | 0.620076 | #!/usr/bin/env python # encoding: utf-8 # # Copyright SAS Institute # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Score window Parameters ---------- name : string, optional The name of the window schema : Schema, optional The schema of the window pubsub : bool, optional Publish/subscribe mode for the window. When the project-level value of pubsub is manual, true enables publishing and subscribing for the window and false disables it. description : string, optional Description of the window Attributes ---------- online_models : list-of-OnlineModels List of online model objects offline_models : list-of-OfflineModels List of offline model objects Returns ------- :class:`ScoreWindow` # Set the online model for subclasses Extract schema information from DataFrame Parameters ---------- variables : DataFrame The DataFrame containing schema information Returns ------- list Import a schema from the astore CAS action output format Parameters ---------- output_variables_input : DataFrame or list or string The schema definition | 1.798721 | 2 |
Packs/Pwned/Integrations/PwnedV2/PwnedV2.py | diCagri/content | 799 | 571 | from CommonServerPython import *
''' IMPORTS '''
import re
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
VENDOR = 'Have I Been Pwned? V2'
MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1)
API_KEY = demisto.params().get('api_key')
USE_SSL = not demisto.params().get('insecure', False)
BASE_URL = 'https://haveibeenpwned.com/api/v3'
HEADERS = {
'hibp-api-key': API_KEY,
'user-agent': 'DBOT-API',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3
DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3
SUFFIXES = {
"email": '/breachedaccount/',
"domain": '/breaches?domain=',
"username": '/breachedaccount/',
"paste": '/pasteaccount/',
"email_truncate_verified": '?truncateResponse=false&includeUnverified=true',
"domain_truncate_verified": '&truncateResponse=false&includeUnverified=true',
"username_truncate_verified": '?truncateResponse=false&includeUnverified=true'
}
RETRIES_END_TIME = datetime.min
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
while True:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
params=params,
data=data,
headers=HEADERS
)
if res.status_code != 429:
# Rate limit response code
break
if datetime.now() > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
wait_regex = re.search(r'\d+', res.json()['message'])
if wait_regex:
wait_amount = wait_regex.group()
else:
demisto.error('failed extracting wait time will use default (5). Res body: {}'.format(res.text))
wait_amount = 5
if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
time.sleep(int(wait_amount))
if res.status_code == 404:
return None
if not res.status_code == 200:
if not res.status_code == 401:
demisto.error(
'Error in API call to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))
return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason))
return None
return res.json()
def html_description_to_human_readable(breach_description):
"""
Converting from html description to hr
:param breach_description: Description of breach from API response
:return: Description string that altered HTML urls to clickable urls
for better readability in war-room
"""
html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')
patterns_found = html_link_pattern.findall(breach_description)
for link in patterns_found:
html_actual_address = link[0]
html_readable_name = link[2]
link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')'
breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1)
return breach_description
def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None):
records_found = False
md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n'
if api_res:
records_found = True
for breach in api_res:
verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'
md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \
' records breached [' + verified_breach + ' breach]\n'
md += 'Date: **' + breach['BreachDate'] + '**\n\n'
md += html_description_to_human_readable(breach['Description']) + '\n'
md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n'
if api_paste_res:
records_found = True
pastes_list = []
for paste_breach in api_paste_res:
paste_entry = \
{
'Source': paste_breach['Source'],
'Title': paste_breach['Title'],
'ID': paste_breach['Id'],
'Date': '',
'Amount of emails in paste': str(paste_breach['EmailCount'])
}
if paste_breach['Date']:
paste_entry['Date'] = paste_breach['Date'].split('T')[0]
pastes_list.append(paste_entry)
md += tableToMarkdown('The email address was found in the following "Pastes":',
pastes_list,
['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste'])
if not records_found:
md += 'No records found'
return md
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
return {
'Indicator': indicator_value,
'Type': indicator_type,
'Vendor': VENDOR,
'Score': dbot_score
}
def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
context_dict = dict() # dict
if context_type == 'email':
context_dict['Address'] = context_main_value
else:
context_dict['Name'] = context_main_value
context_dict['Pwned-V2'] = {
'Compromised': {
'Vendor': VENDOR,
'Reporters': ', '.join(comp_sites + comp_pastes)
}
}
if malicious_score == 3:
context_dict['Malicious'] = add_malicious_to_context(context_type)
return context_dict
def add_malicious_to_context(malicious_type):
return {
'Vendor': VENDOR,
'Description': 'The ' + malicious_type + ' has been compromised'
}
def email_to_entry_context(email, api_email_res, api_paste_res):
dbot_score = 0
comp_email = dict() # type: dict
comp_sites = sorted([item['Title'] for item in api_email_res])
comp_pastes = sorted(set(item['Source'] for item in api_paste_res))
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_EMAIL
email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL)
comp_email[outputPaths['email']] = email_context
comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score)
return comp_email
def domain_to_entry_context(domain, api_res):
comp_sites = [item['Title'] for item in api_res]
comp_sites = sorted(comp_sites)
comp_domain = dict() # type: dict
dbot_score = 0
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)
comp_domain[outputPaths['domain']] = domain_context
comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)
return comp_domain
def set_retry_end_time():
global RETRIES_END_TIME
if MAX_RETRY_ALLOWED != -1:
RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED))
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(args_dict):
"""
If the http request was successful the test will return OK
:return: 3 arrays of outputs
"""
http_request('GET', SUFFIXES.get("username", '') + 'test')
return ['ok'], [None], [None]
def pwned_email_command(args_dict):
"""
Executing the pwned request for emails list, in order to support list input, the function returns 3 lists of outputs
:param args_dict: the demisto argument - in this case the email list is needed
:return: 3 arrays of outputs
"""
email_list = argToList(args_dict.get('email', ''))
api_email_res_list, api_paste_res_list = pwned_email(email_list)
md_list = []
ec_list = []
for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list):
md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))
ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))
return md_list, ec_list, api_email_res_list
def pwned_email(email_list):
"""
Executing the http requests
:param email_list: the email list that needed for the http requests
:return: 2 arrays of http requests outputs
"""
api_email_res_list = []
api_paste_res_list = []
for email in email_list:
email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
paste_suffix = SUFFIXES.get("paste") + email
api_email_res_list.append(http_request('GET', url_suffix=email_suffix))
api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix))
return api_email_res_list, api_paste_res_list
def pwned_domain_command(args_dict):
"""
Executing the pwned request for domains list, in order to support list input, the function returns 3 lists of
outputs
:param args_dict: the demisto argument - in this case the domain list is needed
:return: 3 arrays of outputs
"""
domain_list = argToList(args_dict.get('domain', ''))
api_res_list = pwned_domain(domain_list)
md_list = []
ec_list = []
for domain, api_res in zip(domain_list, api_res_list):
md_list.append(data_to_markdown('Domain', domain, api_res))
ec_list.append(domain_to_entry_context(domain, api_res or []))
return md_list, ec_list, api_res_list
def pwned_domain(domain_list):
"""
Executing the http request
:param domain_list: the domains list that needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for domain in domain_list:
suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
def pwned_username_command(args_dict):
"""
Executing the pwned request for usernames list, in order to support list input, the function returns 3 lists of
outputs
:param args_dict: the demisto argument - in this case the username list is needed
:return: 3 arrays of outputs
"""
username_list = argToList(args_dict.get('username', ''))
api_res_list = pwned_username(username_list)
md_list = []
ec_list = []
for username, api_res in zip(username_list, api_res_list):
md_list.append(data_to_markdown('Username', username, api_res))
ec_list.append(domain_to_entry_context(username, api_res or []))
return md_list, ec_list, api_res_list
def pwned_username(username_list):
"""
Executing the http request
:param username_list: the username list that needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for username in username_list:
suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
command = demisto.command()
LOG('Command being called is: {}'.format(command))
try:
handle_proxy()
set_retry_end_time()
commands = {
'test-module': test_module,
'email': pwned_email_command,
'pwned-email': pwned_email_command,
'domain': pwned_domain_command,
'pwned-domain': pwned_domain_command,
'pwned-username': pwned_username_command
}
if command in commands:
md_list, ec_list, api_email_res_list = commands[command](demisto.args())
for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list):
return_outputs(md, ec, api_paste_res)
# Log exceptions
except Exception as e:
return_error(str(e))
| from CommonServerPython import *
''' IMPORTS '''
import re
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
VENDOR = 'Have I Been Pwned? V2'
MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1)
API_KEY = demisto.params().get('api_key')
USE_SSL = not demisto.params().get('insecure', False)
BASE_URL = 'https://haveibeenpwned.com/api/v3'
HEADERS = {
'hibp-api-key': API_KEY,
'user-agent': 'DBOT-API',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3
DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3
SUFFIXES = {
"email": '/breachedaccount/',
"domain": '/breaches?domain=',
"username": '/breachedaccount/',
"paste": '/pasteaccount/',
"email_truncate_verified": '?truncateResponse=false&includeUnverified=true',
"domain_truncate_verified": '&truncateResponse=false&includeUnverified=true',
"username_truncate_verified": '?truncateResponse=false&includeUnverified=true'
}
RETRIES_END_TIME = datetime.min
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
while True:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
params=params,
data=data,
headers=HEADERS
)
if res.status_code != 429:
# Rate limit response code
break
if datetime.now() > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
wait_regex = re.search(r'\d+', res.json()['message'])
if wait_regex:
wait_amount = wait_regex.group()
else:
demisto.error('failed extracting wait time will use default (5). Res body: {}'.format(res.text))
wait_amount = 5
if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
time.sleep(int(wait_amount))
if res.status_code == 404:
return None
if not res.status_code == 200:
if not res.status_code == 401:
demisto.error(
'Error in API call to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))
return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason))
return None
return res.json()
def html_description_to_human_readable(breach_description):
"""
Converting from html description to hr
:param breach_description: Description of breach from API response
:return: Description string that altered HTML urls to clickable urls
for better readability in war-room
"""
html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')
patterns_found = html_link_pattern.findall(breach_description)
for link in patterns_found:
html_actual_address = link[0]
html_readable_name = link[2]
link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')'
breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1)
return breach_description
def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None):
records_found = False
md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n'
if api_res:
records_found = True
for breach in api_res:
verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'
md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \
' records breached [' + verified_breach + ' breach]\n'
md += 'Date: **' + breach['BreachDate'] + '**\n\n'
md += html_description_to_human_readable(breach['Description']) + '\n'
md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n'
if api_paste_res:
records_found = True
pastes_list = []
for paste_breach in api_paste_res:
paste_entry = \
{
'Source': paste_breach['Source'],
'Title': paste_breach['Title'],
'ID': paste_breach['Id'],
'Date': '',
'Amount of emails in paste': str(paste_breach['EmailCount'])
}
if paste_breach['Date']:
paste_entry['Date'] = paste_breach['Date'].split('T')[0]
pastes_list.append(paste_entry)
md += tableToMarkdown('The email address was found in the following "Pastes":',
pastes_list,
['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste'])
if not records_found:
md += 'No records found'
return md
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
return {
'Indicator': indicator_value,
'Type': indicator_type,
'Vendor': VENDOR,
'Score': dbot_score
}
def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
context_dict = dict() # dict
if context_type == 'email':
context_dict['Address'] = context_main_value
else:
context_dict['Name'] = context_main_value
context_dict['Pwned-V2'] = {
'Compromised': {
'Vendor': VENDOR,
'Reporters': ', '.join(comp_sites + comp_pastes)
}
}
if malicious_score == 3:
context_dict['Malicious'] = add_malicious_to_context(context_type)
return context_dict
def add_malicious_to_context(malicious_type):
return {
'Vendor': VENDOR,
'Description': 'The ' + malicious_type + ' has been compromised'
}
def email_to_entry_context(email, api_email_res, api_paste_res):
dbot_score = 0
comp_email = dict() # type: dict
comp_sites = sorted([item['Title'] for item in api_email_res])
comp_pastes = sorted(set(item['Source'] for item in api_paste_res))
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_EMAIL
email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL)
comp_email[outputPaths['email']] = email_context
comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score)
return comp_email
def domain_to_entry_context(domain, api_res):
comp_sites = [item['Title'] for item in api_res]
comp_sites = sorted(comp_sites)
comp_domain = dict() # type: dict
dbot_score = 0
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)
comp_domain[outputPaths['domain']] = domain_context
comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)
return comp_domain
def set_retry_end_time():
global RETRIES_END_TIME
if MAX_RETRY_ALLOWED != -1:
RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED))
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(args_dict):
"""
If the http request was successful the test will return OK
:return: 3 arrays of outputs
"""
http_request('GET', SUFFIXES.get("username", '') + 'test')
return ['ok'], [None], [None]
def pwned_email_command(args_dict):
"""
Executing the pwned request for emails list, in order to support list input, the function returns 3 lists of outputs
:param args_dict: the demisto argument - in this case the email list is needed
:return: 3 arrays of outputs
"""
email_list = argToList(args_dict.get('email', ''))
api_email_res_list, api_paste_res_list = pwned_email(email_list)
md_list = []
ec_list = []
for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list):
md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))
ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))
return md_list, ec_list, api_email_res_list
def pwned_email(email_list):
"""
Executing the http requests
:param email_list: the email list that needed for the http requests
:return: 2 arrays of http requests outputs
"""
api_email_res_list = []
api_paste_res_list = []
for email in email_list:
email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
paste_suffix = SUFFIXES.get("paste") + email
api_email_res_list.append(http_request('GET', url_suffix=email_suffix))
api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix))
return api_email_res_list, api_paste_res_list
def pwned_domain_command(args_dict):
"""
Executing the pwned request for domains list, in order to support list input, the function returns 3 lists of
outputs
:param args_dict: the demisto argument - in this case the domain list is needed
:return: 3 arrays of outputs
"""
domain_list = argToList(args_dict.get('domain', ''))
api_res_list = pwned_domain(domain_list)
md_list = []
ec_list = []
for domain, api_res in zip(domain_list, api_res_list):
md_list.append(data_to_markdown('Domain', domain, api_res))
ec_list.append(domain_to_entry_context(domain, api_res or []))
return md_list, ec_list, api_res_list
def pwned_domain(domain_list):
"""
Executing the http request
:param domain_list: the domains list that needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for domain in domain_list:
suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
def pwned_username_command(args_dict):
"""
Executing the pwned request for usernames list, in order to support list input, the function returns 3 lists of
outputs
:param args_dict: the demisto argument - in this case the username list is needed
:return: 3 arrays of outputs
"""
username_list = argToList(args_dict.get('username', ''))
api_res_list = pwned_username(username_list)
md_list = []
ec_list = []
for username, api_res in zip(username_list, api_res_list):
md_list.append(data_to_markdown('Username', username, api_res))
ec_list.append(domain_to_entry_context(username, api_res or []))
return md_list, ec_list, api_res_list
def pwned_username(username_list):
"""
Executing the http request
:param username_list: the username list that needed for the http requests
:return: an array of http requests outputs
"""
api_res_list = []
for username in username_list:
suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
command = demisto.command()
LOG('Command being called is: {}'.format(command))
try:
handle_proxy()
set_retry_end_time()
commands = {
'test-module': test_module,
'email': pwned_email_command,
'pwned-email': pwned_email_command,
'domain': pwned_domain_command,
'pwned-domain': pwned_domain_command,
'pwned-username': pwned_username_command
}
if command in commands:
md_list, ec_list, api_email_res_list = commands[command](demisto.args())
for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list):
return_outputs(md, ec, api_paste_res)
# Log exceptions
except Exception as e:
return_error(str(e))
| en | 0.680999 | IMPORTS # Disable insecure warnings GLOBALS/PARAMS HELPER FUNCTIONS # Rate limit response code Converting from html description to hr :param breach_description: Description of breach from API response :return: Description string that altered HTML urls to clickable urls for better readability in war-room ## Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n' ### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \ # dict # type: dict # type: dict COMMANDS + REQUESTS FUNCTIONS If the http request was successful the test will return OK :return: 3 arrays of outputs Executing the pwned request for emails list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the email list is needed :return: 3 arrays of outputs Executing the http requests :param email_list: the email list that needed for the http requests :return: 2 arrays of http requests outputs Executing the pwned request for domains list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the domain list is needed :return: 3 arrays of outputs Executing the http request :param domain_list: the domains list that needed for the http requests :return: an array of http requests outputs Executing the pwned request for usernames list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the username list is needed :return: 3 arrays of outputs Executing the http request :param username_list: the username list that needed for the http requests :return: an array of http requests outputs # Log exceptions | 2.183652 | 2 |
moshmosh/extensions/pipelines.py | Aloxaf/moshmosh | 114 | 572 | <reponame>Aloxaf/moshmosh<gh_stars>100-1000
from moshmosh.extension import Extension
from moshmosh.ast_compat import ast
class PipelineVisitor(ast.NodeTransformer):
"""
`a | f -> f(a)`, recursively
"""
def __init__(self, activation):
self.activation = activation
def visit_BinOp(self, n: ast.BinOp):
if n.lineno in self.activation and isinstance(n.op, ast.BitOr):
return ast.Call(
self.visit(n.right),
[self.visit(n.left)],
[],
lineno=n.lineno,
col_offset=n.col_offset
)
return self.generic_visit(n)
class Pipeline(Extension):
identifier = "pipeline"
def __init__(self):
self.visitor = PipelineVisitor(self.activation)
def rewrite_ast(self, node):
return self.visitor.visit(node)
| from moshmosh.extension import Extension
from moshmosh.ast_compat import ast
class PipelineVisitor(ast.NodeTransformer):
"""
`a | f -> f(a)`, recursively
"""
def __init__(self, activation):
self.activation = activation
def visit_BinOp(self, n: ast.BinOp):
if n.lineno in self.activation and isinstance(n.op, ast.BitOr):
return ast.Call(
self.visit(n.right),
[self.visit(n.left)],
[],
lineno=n.lineno,
col_offset=n.col_offset
)
return self.generic_visit(n)
class Pipeline(Extension):
identifier = "pipeline"
def __init__(self):
self.visitor = PipelineVisitor(self.activation)
def rewrite_ast(self, node):
return self.visitor.visit(node) | en | 0.573606 | `a | f -> f(a)`, recursively | 2.580588 | 3 |
postpatch.py | mr-ma/basic-self-checksumming | 1 | 573 | <reponame>mr-ma/basic-self-checksumming
import argparse
import os
import r2pipe
import struct
import mmap
import base64
from shutil import copyfile
import pprint
pp = pprint.PrettyPrinter(indent=4)
def precompute_hash(r2, offset, size):
print('Precomputing hash')
h = 0
print("r2 command to get the function body in base64:\np6e {}@{}".format(size, offset))
b64_func = r2.cmd("p6e {}@{}".format(size, offset))
func_bytes = bytearray(base64.b64decode(b64_func))
for b in func_bytes:
h = h ^ b
print('Precomuted hash:', hex(h))
return h
def patch_binary(mm, search_value, patch_value):
print("search value:{} patch value:{}".format(search_value, patch_value))
flag = "<I" # little-endian unsigned int
search_bytes = struct.pack(flag, search_value)
address = mm.find(search_bytes)
if address == -1:
mm.seek(0)
address = mm.find(search_bytes)
mm.seek(address, os.SEEK_SET)
patch_bytes = struct.pack(flag, patch_value)
mm.write(patch_bytes)
def get_protected_function_info(r2, function):
# find addresses and sizes of all functions
r2.cmd("aa")
r2.cmd("aac")
function_list = r2.cmdj("aflj")
# print(function_list)
funcs = {}
for func in function_list:
attr = {'size': func['size'], 'offset': func['offset']}
funcs[func['name']] = attr
# Basic search for mangled names
if function == 'main':
# main function is entry0 in the binary
function = 'entry0'
print("Cannot precompute the expected hash for the main function, why is that?")
exit(1)
match = 0
mangledName = ""
for name, attr in funcs.items():
# sometimes r2 prepends sym. to function names
if function in name:
mangledName = name
match += 1
if match != 1:
print("Failed to safely find function in the binary!")
pp.pprint(funcs)
exit(1)
return funcs[mangledName]
def main():
parser = argparse.ArgumentParser(
description='Postpatch protected C program.')
parser.add_argument('-b', action="store", dest="binary",
help="program.out protected program binary", required=True)
parser.add_argument('-f', action="store", dest="function",
help="protected function name", required=True)
parser.add_argument('-p', nargs="+", dest="placeholders",
help="list of used placeholders in the exact order of function, size, expected hash", required=True)
results = parser.parse_args()
print("python protect program", results)
r2 = r2pipe.open(results.binary)
funcInfo = get_protected_function_info(r2, results.function)
funcOffset = funcInfo["offset"]
funcSize = funcInfo["size"]
funcExpectedHash = precompute_hash(r2, funcOffset, funcSize)
print("funcOffset:{} funcSize:{} funcExpectedHash:{}".format(
funcOffset, funcSize, funcExpectedHash))
binaryFile, _ = os.path.splitext(results.binary)
patchedBinary = "{}-patched.out".format(binaryFile)
copyfile(results.binary, patchedBinary)
with open(patchedBinary, 'r+b') as binary:
mm = mmap.mmap(binary.fileno(), 0)
patch_binary(mm, int(results.placeholders[0]), int(funcSize))
patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash))
print("Successfully stored patched binary {}".format(patchedBinary))
status = os.system(
"chmod +x {}".format(patchedBinary))
if status != 0:
print("Error in setting permission, try:\n sudo chmod +x {}".format(patchedBinary))
exit(1)
if __name__ == '__main__':
main()
| import argparse
import os
import r2pipe
import struct
import mmap
import base64
from shutil import copyfile
import pprint
pp = pprint.PrettyPrinter(indent=4)
def precompute_hash(r2, offset, size):
print('Precomputing hash')
h = 0
print("r2 command to get the function body in base64:\np6e {}@{}".format(size, offset))
b64_func = r2.cmd("p6e {}@{}".format(size, offset))
func_bytes = bytearray(base64.b64decode(b64_func))
for b in func_bytes:
h = h ^ b
print('Precomuted hash:', hex(h))
return h
def patch_binary(mm, search_value, patch_value):
print("search value:{} patch value:{}".format(search_value, patch_value))
flag = "<I" # little-endian unsigned int
search_bytes = struct.pack(flag, search_value)
address = mm.find(search_bytes)
if address == -1:
mm.seek(0)
address = mm.find(search_bytes)
mm.seek(address, os.SEEK_SET)
patch_bytes = struct.pack(flag, patch_value)
mm.write(patch_bytes)
def get_protected_function_info(r2, function):
# find addresses and sizes of all functions
r2.cmd("aa")
r2.cmd("aac")
function_list = r2.cmdj("aflj")
# print(function_list)
funcs = {}
for func in function_list:
attr = {'size': func['size'], 'offset': func['offset']}
funcs[func['name']] = attr
# Basic search for mangled names
if function == 'main':
# main function is entry0 in the binary
function = 'entry0'
print("Cannot precompute the expected hash for the main function, why is that?")
exit(1)
match = 0
mangledName = ""
for name, attr in funcs.items():
# sometimes r2 prepends sym. to function names
if function in name:
mangledName = name
match += 1
if match != 1:
print("Failed to safely find function in the binary!")
pp.pprint(funcs)
exit(1)
return funcs[mangledName]
def main():
parser = argparse.ArgumentParser(
description='Postpatch protected C program.')
parser.add_argument('-b', action="store", dest="binary",
help="program.out protected program binary", required=True)
parser.add_argument('-f', action="store", dest="function",
help="protected function name", required=True)
parser.add_argument('-p', nargs="+", dest="placeholders",
help="list of used placeholders in the exact order of function, size, expected hash", required=True)
results = parser.parse_args()
print("python protect program", results)
r2 = r2pipe.open(results.binary)
funcInfo = get_protected_function_info(r2, results.function)
funcOffset = funcInfo["offset"]
funcSize = funcInfo["size"]
funcExpectedHash = precompute_hash(r2, funcOffset, funcSize)
print("funcOffset:{} funcSize:{} funcExpectedHash:{}".format(
funcOffset, funcSize, funcExpectedHash))
binaryFile, _ = os.path.splitext(results.binary)
patchedBinary = "{}-patched.out".format(binaryFile)
copyfile(results.binary, patchedBinary)
with open(patchedBinary, 'r+b') as binary:
mm = mmap.mmap(binary.fileno(), 0)
patch_binary(mm, int(results.placeholders[0]), int(funcSize))
patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash))
print("Successfully stored patched binary {}".format(patchedBinary))
status = os.system(
"chmod +x {}".format(patchedBinary))
if status != 0:
print("Error in setting permission, try:\n sudo chmod +x {}".format(patchedBinary))
exit(1)
if __name__ == '__main__':
main() | en | 0.767291 | # little-endian unsigned int # find addresses and sizes of all functions # print(function_list) # Basic search for mangled names # main function is entry0 in the binary # sometimes r2 prepends sym. to function names | 2.694561 | 3 |
sitewebapp/migrations/0011_auto_20210130_0150.py | deucaleon18/debsoc-nitdgp-website | 2 | 574 | # Generated by Django 2.2.15 on 2021-01-29 20:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sitewebapp', '0010_auditionanswers_auditionquestions_audtionrounds_candidates'),
]
operations = [
migrations.CreateModel(
name='auditionRounds',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('roundno', models.IntegerField(default=1)),
('candidate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inductees', to='sitewebapp.Candidates')),
],
),
migrations.AlterField(
model_name='auditionquestions',
name='round',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='round', to='sitewebapp.auditionRounds'),
),
migrations.DeleteModel(
name='audtionRounds',
),
]
| # Generated by Django 2.2.15 on 2021-01-29 20:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sitewebapp', '0010_auditionanswers_auditionquestions_audtionrounds_candidates'),
]
operations = [
migrations.CreateModel(
name='auditionRounds',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('roundno', models.IntegerField(default=1)),
('candidate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inductees', to='sitewebapp.Candidates')),
],
),
migrations.AlterField(
model_name='auditionquestions',
name='round',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='round', to='sitewebapp.auditionRounds'),
),
migrations.DeleteModel(
name='audtionRounds',
),
]
| en | 0.801606 | # Generated by Django 2.2.15 on 2021-01-29 20:20 | 1.568238 | 2 |
venv/lib/python3.6/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_availabilityset_info.py | usegalaxy-no/usegalaxy | 1 | 575 | <reponame>usegalaxy-no/usegalaxy
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_availabilityset_info
short_description: Get Azure Availability Set facts
description:
- Get facts for a specific availability set or all availability sets.
options:
name:
description:
- Limit results to a specific availability set.
resource_group:
description:
- The resource group to search for the desired availability set.
tags:
description:
- List of tags to be matched.
extends_documentation_fragment:
- azure.azcollection.azure
author:
- <NAME> (@julienstroheker)
deprecated:
removed_in: '2.0.0'
why: The Ansible collection community.azure is deprecated. Use azure.azcollection instead.
alternative: Use M(azure.azcollection.azure_rm_availabilityset_info) instead.
'''
EXAMPLES = '''
- name: Get facts for one availability set
community.azure.azure_rm_availabilityset_info:
name: Testing
resource_group: myResourceGroup
- name: Get facts for all availability sets in a specific resource group
community.azure.azure_rm_availabilityset_info:
resource_group: myResourceGroup
'''
RETURN = '''
azure_availabilityset:
description: List of availability sets dicts.
returned: always
type: complex
contains:
location:
description:
- Location where the resource lives.
type: str
sample: eastus2
name:
description:
- Resource name.
type: str
sample: myAvailabilitySet
properties:
description:
- The properties of the resource.
type: dict
contains:
platformFaultDomainCount:
description:
- Fault Domain count.
type: int
sample: 3
platformUpdateDomainCount:
description:
- Update Domain count.
type: int
sample: 2
virtualMachines:
description:
- A list of references to all virtualmachines in the availability set.
type: list
sample: []
sku:
description:
- Location where the resource lives.
type: str
sample: Aligned
type:
description:
- Resource type.
type: str
sample: "Microsoft.Compute/availabilitySets"
tags:
description:
- Resource tags.
type: dict
sample: { env: sandbox }
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'AvailabilitySet'
class AzureRMAvailabilitySetInfo(AzureRMModuleBase):
"""Utility class to get availability set facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_info=dict(
azure_availabilitysets=[]
)
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMAvailabilitySetInfo, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_availabilityset_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_availabilityset_facts' module has been renamed to 'azure_rm_availabilityset_info'",
version='3.0.0', collection_name='community.azure') # was 2.13
for key in self.module_args:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
if self.name:
self.results['ansible_info']['azure_availabilitysets'] = self.get_item()
else:
self.results['ansible_info']['azure_availabilitysets'] = self.list_items()
return self.results
def get_item(self):
"""Get a single availability set"""
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.compute_client.availability_sets.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
result = [avase]
return result
def list_items(self):
"""Get all availability sets"""
self.log('List all availability sets')
try:
response = self.compute_client.availability_sets.list(self.resource_group)
except CloudError as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
results.append(avase)
return results
def main():
"""Main module execution code path"""
AzureRMAvailabilitySetInfo()
if __name__ == '__main__':
main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_availabilityset_info
short_description: Get Azure Availability Set facts
description:
- Get facts for a specific availability set or all availability sets.
options:
name:
description:
- Limit results to a specific availability set.
resource_group:
description:
- The resource group to search for the desired availability set.
tags:
description:
- List of tags to be matched.
extends_documentation_fragment:
- azure.azcollection.azure
author:
- <NAME> (@julienstroheker)
deprecated:
removed_in: '2.0.0'
why: The Ansible collection community.azure is deprecated. Use azure.azcollection instead.
alternative: Use M(azure.azcollection.azure_rm_availabilityset_info) instead.
'''
EXAMPLES = '''
- name: Get facts for one availability set
community.azure.azure_rm_availabilityset_info:
name: Testing
resource_group: myResourceGroup
- name: Get facts for all availability sets in a specific resource group
community.azure.azure_rm_availabilityset_info:
resource_group: myResourceGroup
'''
RETURN = '''
azure_availabilityset:
description: List of availability sets dicts.
returned: always
type: complex
contains:
location:
description:
- Location where the resource lives.
type: str
sample: eastus2
name:
description:
- Resource name.
type: str
sample: myAvailabilitySet
properties:
description:
- The properties of the resource.
type: dict
contains:
platformFaultDomainCount:
description:
- Fault Domain count.
type: int
sample: 3
platformUpdateDomainCount:
description:
- Update Domain count.
type: int
sample: 2
virtualMachines:
description:
- A list of references to all virtualmachines in the availability set.
type: list
sample: []
sku:
description:
- Location where the resource lives.
type: str
sample: Aligned
type:
description:
- Resource type.
type: str
sample: "Microsoft.Compute/availabilitySets"
tags:
description:
- Resource tags.
type: dict
sample: { env: sandbox }
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'AvailabilitySet'
class AzureRMAvailabilitySetInfo(AzureRMModuleBase):
"""Utility class to get availability set facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_info=dict(
azure_availabilitysets=[]
)
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMAvailabilitySetInfo, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_availabilityset_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_availabilityset_facts' module has been renamed to 'azure_rm_availabilityset_info'",
version='3.0.0', collection_name='community.azure') # was 2.13
for key in self.module_args:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
if self.name:
self.results['ansible_info']['azure_availabilitysets'] = self.get_item()
else:
self.results['ansible_info']['azure_availabilitysets'] = self.list_items()
return self.results
def get_item(self):
"""Get a single availability set"""
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.compute_client.availability_sets.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
result = [avase]
return result
def list_items(self):
"""Get all availability sets"""
self.log('List all availability sets')
try:
response = self.compute_client.availability_sets.list(self.resource_group)
except CloudError as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
results.append(avase)
return results
def main():
"""Main module execution code path"""
AzureRMAvailabilitySetInfo()
if __name__ == '__main__':
main() | en | 0.580393 | #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2016, <NAME> <<EMAIL>> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) --- module: azure_rm_availabilityset_info short_description: Get Azure Availability Set facts description: - Get facts for a specific availability set or all availability sets. options: name: description: - Limit results to a specific availability set. resource_group: description: - The resource group to search for the desired availability set. tags: description: - List of tags to be matched. extends_documentation_fragment: - azure.azcollection.azure author: - <NAME> (@julienstroheker) deprecated: removed_in: '2.0.0' why: The Ansible collection community.azure is deprecated. Use azure.azcollection instead. alternative: Use M(azure.azcollection.azure_rm_availabilityset_info) instead. - name: Get facts for one availability set community.azure.azure_rm_availabilityset_info: name: Testing resource_group: myResourceGroup - name: Get facts for all availability sets in a specific resource group community.azure.azure_rm_availabilityset_info: resource_group: myResourceGroup azure_availabilityset: description: List of availability sets dicts. returned: always type: complex contains: location: description: - Location where the resource lives. type: str sample: eastus2 name: description: - Resource name. type: str sample: myAvailabilitySet properties: description: - The properties of the resource. type: dict contains: platformFaultDomainCount: description: - Fault Domain count. type: int sample: 3 platformUpdateDomainCount: description: - Update Domain count. type: int sample: 2 virtualMachines: description: - A list of references to all virtualmachines in the availability set. type: list sample: [] sku: description: - Location where the resource lives. type: str sample: Aligned type: description: - Resource type. type: str sample: "Microsoft.Compute/availabilitySets" tags: description: - Resource tags. type: dict sample: { env: sandbox } # handled in azure_rm_common Utility class to get availability set facts # was 2.13 Get a single availability set Get all availability sets Main module execution code path | 1.850753 | 2 |
tests/v3_api/common.py | sowmyav27/rancher | 0 | 576 | import inspect
import json
import os
import random
import subprocess
import time
import requests
import ast
import paramiko
import rancher
from rancher import ApiError
from lib.aws import AmazonWebServices
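# Test configuration (server URL, admin token, cluster name, timeouts) is read
# from environment variables; the values below are fallbacks for local runs.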
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80")
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_IMAGE = "sangeetha/mytestcontainer"
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
CLUSTER_NAME_2 = ""
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_client_for_token(token):
return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
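    # Return a cluster-scoped API client for the given token; behaves the same
    # as get_cluster_client_for_token above.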
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
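    # Poll until the object's state matches 'state', then return a freshly reloaded copy.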
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
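    # Reload the resource until check_function returns True; on timeout, raise an
    # exception that includes the source of the failed check (plus fail_handler output, if given).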
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
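    # Generic poll helper: repeat callback until it returns something other than
    # None or False, or raise once the timeout expires.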
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
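    # Create a project in the cluster and a namespace inside it; returns (project, namespace).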
client = get_client_for_token(token)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
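    # Create a project and bind the given pod security policy template (PSPT) to it.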
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
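    # Apply the pod security policy template to the project and wait for it to become active.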
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster):
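    # Write the cluster's generated kubeconfig to kube_fname so that
    # execute_kubectl_cmd can run kubectl against this cluster.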
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(kube_fname, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60):
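    # Wait for the workload to become active, then verify the expected pod count and
    # Running phase through both the Rancher API and kubectl, with type-specific status checks.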
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == pod_count
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == pod_count
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= pod_count
return
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
return pods_result["items"]
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
        pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
command = 'kubectl --kubeconfig {0} {1}'.format(
kube_fname, cmd)
if json_out:
command += ' -o json'
if stderr:
result = run_command_with_stderr(command)
else:
result = run_command(command)
if json_out:
result = json.loads(result)
print(result)
return result
def run_command(command):
return subprocess.check_output(command, shell=True, text=True)
def run_command_with_stderr(command):
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
print(returncode)
    # Return only the raw output bytes so callers can decode them directly
    # (kubectl_pod_exec consumers call .decode()/.strip() on the result).
    return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster):
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker:
schedulable_nodes.append(node)
return schedulable_nodes
def get_role_nodes(cluster, role):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster)
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = node.externalIpAddress
cmd = curl_args + " http://" + host_ip + path
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300):
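    # Poll the ingress until Rancher publishes a public endpoint whose
    # hostname starts with the ingress name, then curl that FQDN and expect
    # responses from every backing pod.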
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name):
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url):
try:
requests.get(url)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def validate_http_response(cmd, target_name_list, client_pod=None):
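    # Hit the endpoint repeatedly (up to 5x the number of targets) and
    # require that every pod name in target_name_list shows up in a response.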
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
result = run_command(curl_cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
print("cmd: \t" + cmd)
print("result: \t" + result)
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version=""):
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonset
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd")))
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
if not skipIngresscheck:
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
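    # Verify via `kubectl get cs` that the scheduler, controller-manager and
    # every etcd member report a Healthy condition.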
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
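    # Build the custom-cluster node registration command from the cluster
    # registration token, appending the requested roles and the node's
    # public/internal addresses.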
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete Cluster
client.delete(cluster)
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if (len(nodes) > 0):
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
nodes = client.list_node(clusterId=cluster.id).data
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststess*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
ip_list.append(node.externalIpAddress)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
print("Actual ping Response from " + pod1.name + ":" + str(response))
if allow_connectivity:
assert pod_ip in str(response) and " 0% packet loss" in str(response)
else:
assert pod_ip in str(response) and " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, username="root", password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for x in range(0, numofpods - 1):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
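    # Reload the object with exponential backoff (capped at 2s); a 403 from
    # the API means it is not visible yet, so keep retrying until timeout.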
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
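    # For each schedulable node, curl the host port and expect a response
    # only from the pod scheduled on that node (empty target list if none).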
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload):
url = get_endpoint_url_for_workload(p_client, workload)
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster):
source_port = workload.publicEndpoints[0]["port"]
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
print("\nuuid:")
print(multiClusterApp.uuid)
time.sleep(5)
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1
mapp = mcapps[0]
print(mapp.state)
while mapp.state != "active":
print(mapp.uuid)
print(mapp.state)
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def validate_mcapp_cluster(app_id, p_client):
mcapp = p_client.list_app(name=app_id).data
assert len(mcapp) == 1
app = mcapp[0]
return app
def wait_for_mcapp_cluster_level_to_active(client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
mcapps = client.list_app(name=app_id).data
start = time.time()
assert len(mcapps) == 1
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
apps = client.list_app(name=app_id).data
assert len(apps) == 1
mapp = apps[0]
return mapp
def get_admin_client_and_cluster_mcapp():
clusters = []
client = get_admin_client()
if CLUSTER_NAME == "" or CLUSTER_NAME_2 == "":
clusters = client.list_cluster().data
else:
clusters.append(client.list_cluster(name=CLUSTER_NAME).data)
clusters.append(client.list_cluster(name=CLUSTER_NAME_2).data)
assert len(clusters) == 2
return client, clusters
def validate_multi_cluster_app_cluster(app_id1, app_id2, p_client1, p_client2):
validate_mcapp_cluster(app_id1, p_client1)
if app_id2 != "":
validate_mcapp_cluster(app_id2, p_client2)
# verify app in cluster is active or not
wait_for_mcapp_cluster_level_to_active(p_client1, app_id1)
if app_id2 != "":
wait_for_mcapp_cluster_level_to_active(p_client2, app_id2) | import inspect
import json
import os
import random
import subprocess
import time
import requests
import ast
import paramiko
import rancher
from rancher import ApiError
from lib.aws import AmazonWebServices
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80")
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_IMAGE = "sangeetha/mytestcontainer"
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
CLUSTER_NAME_2 = ""
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_client_for_token(token):
return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
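# Typical usage (a sketch with hypothetical objects; mirrors the real call in
# validate_cluster_state below):
#   cluster = wait_for_condition(
#       client, cluster,
#       lambda c: c.state == "active",
#       lambda c: 'State is: ' + c.state,
#       timeout=MACHINE_TIMEOUT)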
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
client = get_client_for_token(token)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(kube_fname, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60):
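    # Wait for the workload to become active, then cross-check pod/replica
    # counts via the Rancher API and kubectl; cronJob workloads are verified
    # and returned early after the scheduled wait.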
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == pod_count
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == pod_count
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= pod_count
return
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
return pods_result["items"]
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
command = 'kubectl --kubeconfig {0} {1}'.format(
kube_fname, cmd)
if json_out:
command += ' -o json'
if stderr:
result = run_command_with_stderr(command)
else:
result = run_command(command)
if json_out:
result = json.loads(result)
print(result)
return result
def run_command(command):
return subprocess.check_output(command, shell=True, text=True)
def run_command_with_stderr(command):
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
print(returncode)
    # Return only the raw output bytes so callers can decode them directly
    # (kubectl_pod_exec consumers call .decode()/.strip() on the result).
    return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster):
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker:
schedulable_nodes.append(node)
return schedulable_nodes
def get_role_nodes(cluster, role):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster)
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = node.externalIpAddress
cmd = curl_args + " http://" + host_ip + path
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300):
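    # Poll the ingress until Rancher publishes a public endpoint whose
    # hostname starts with the ingress name, then curl that FQDN and expect
    # responses from every backing pod.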
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name):
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url):
try:
requests.get(url)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def validate_http_response(cmd, target_name_list, client_pod=None):
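    # Hit the endpoint repeatedly (up to 5x the number of targets) and
    # require that every pod name in target_name_list shows up in a response.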
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
result = run_command(curl_cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
print("cmd: \t" + cmd)
print("result: \t" + result)
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version=""):
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonset
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd")))
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
if not skipIngresscheck:
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
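    # Verify via `kubectl get cs` that the scheduler, controller-manager and
    # every etcd member report a Healthy condition.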
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
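    # Build the custom-cluster node registration command from the cluster
    # registration token, appending the requested roles and the node's
    # public/internal addresses.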
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete Cluster
client.delete(cluster)
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if (len(nodes) > 0):
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
nodes = client.list_node(clusterId=cluster.id).data
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststess*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
ip_list.append(node.externalIpAddress)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
print("Actual ping Response from " + pod1.name + ":" + str(response))
if allow_connectivity:
assert pod_ip in str(response) and " 0% packet loss" in str(response)
else:
assert pod_ip in str(response) and " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, username="root", password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for x in range(0, numofpods - 1):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
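    # Reload the object with exponential backoff (capped at 2s); a 403 from
    # the API means it is not visible yet, so keep retrying until timeout.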
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
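    # For each schedulable node, curl the host port and expect a response
    # only from the pod scheduled on that node (empty target list if none).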
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload):
url = get_endpoint_url_for_workload(p_client, workload)
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster):
source_port = workload.publicEndpoints[0]["port"]
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
print("\nuuid:")
print(multiClusterApp.uuid)
time.sleep(5)
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1
mapp = mcapps[0]
print(mapp.state)
while mapp.state != "active":
print(mapp.uuid)
print(mapp.state)
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def validate_mcapp_cluster(app_id, p_client):
mcapp = p_client.list_app(name=app_id).data
assert len(mcapp) == 1
app = mcapp[0]
return app
def wait_for_mcapp_cluster_level_to_active(client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
mcapps = client.list_app(name=app_id).data
start = time.time()
assert len(mcapps) == 1
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for app state to become 'active'")
time.sleep(.5)
apps = client.list_app(name=app_id).data
assert len(apps) == 1
mapp = apps[0]
return mapp
def get_admin_client_and_cluster_mcapp():
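    """Return the admin client and the two clusters used for mcapp tests.

    If CLUSTER_NAME or CLUSTER_NAME_2 is unset, the admin client's full
    cluster list is used (and must contain exactly two clusters); otherwise
    the two clusters are looked up by name.
    """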
clusters = []
client = get_admin_client()
if CLUSTER_NAME == "" or CLUSTER_NAME_2 == "":
clusters = client.list_cluster().data
else:
        # Index [0] so both branches return a list of cluster objects rather
        # than a list of one-element lists.
        clusters.append(client.list_cluster(name=CLUSTER_NAME).data[0])
        clusters.append(client.list_cluster(name=CLUSTER_NAME_2).data[0])
assert len(clusters) == 2
return client, clusters
def validate_multi_cluster_app_cluster(app_id1, app_id2, p_client1, p_client2):
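    """Verify that a multi-cluster app's per-cluster apps exist and become
    "active" in each target cluster; the second cluster is skipped when
    app_id2 is an empty string.
    """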
validate_mcapp_cluster(app_id1, p_client1)
if app_id2 != "":
validate_mcapp_cluster(app_id2, p_client2)
# verify app in cluster is active or not
wait_for_mcapp_cluster_level_to_active(p_client1, app_id1)
if app_id2 != "":
wait_for_mcapp_cluster_level_to_active(p_client2, app_id2) | en | 0.889824 | # For cronjob, wait for the first pod to get created after # scheduled wait time # Create Daemon set workload and have an Ingress with Workload # rule pointing to this daemonset # requires pod with `dig` available - TEST_IMAGE # requires pod with `dig` available - TEST_IMAGE # Handle the case of nodes getting auto deleted when they are part of # nodepools # Delete Cluster # Delete nodes(in cluster) from AWS for Imported and Custom Cluster # verify app in cluster is active or not | 1.942934 | 2 |
LeetCode/Python3/String/20. Valid Parentheses.py | WatsonWangZh/CodingPractice | 11 | 577 | # Given a string containing just the characters '(', ')', '{', '}', '[' and ']',
# determine if the input string is valid.
# An input string is valid if:
# Open brackets must be closed by the same type of brackets.
# Open brackets must be closed in the correct order.
# Note that an empty string is also considered valid.
# Example 1:
# Input: "()"
# Output: true
# Example 2:
# Input: "()[]{}"
# Output: true
# Example 3:
# Input: "(]"
# Output: false
# Example 4:
# Input: "([)]"
# Output: false
# Example 5:
# Input: "{[]}"
# Output: true
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
dict = {')':'(',']':'[','}':'{'}
stack = []
for ch in s:
if ch in dict.values():
stack.append(ch)
elif ch in dict.keys():
if len(stack) == 0 or (stack.pop() != dict[ch]):
return False
return len(stack) == 0
def main():
s = Solution()
print(s.isValid("()"))
print(s.isValid("()[]{}"))
print(s.isValid("(]"))
print(s.isValid("([)]"))
print(s.isValid("{[]}"))
if __name__ == "__main__":
main()
| # Given a string containing just the characters '(', ')', '{', '}', '[' and ']',
# determine if the input string is valid.
# An input string is valid if:
# Open brackets must be closed by the same type of brackets.
# Open brackets must be closed in the correct order.
# Note that an empty string is also considered valid.
# Example 1:
# Input: "()"
# Output: true
# Example 2:
# Input: "()[]{}"
# Output: true
# Example 3:
# Input: "(]"
# Output: false
# Example 4:
# Input: "([)]"
# Output: false
# Example 5:
# Input: "{[]}"
# Output: true
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
dict = {')':'(',']':'[','}':'{'}
stack = []
for ch in s:
if ch in dict.values():
stack.append(ch)
elif ch in dict.keys():
if len(stack) == 0 or (stack.pop() != dict[ch]):
return False
return len(stack) == 0
def main():
s = Solution()
print(s.isValid("()"))
print(s.isValid("()[]{}"))
print(s.isValid("(]"))
print(s.isValid("([)]"))
print(s.isValid("{[]}"))
if __name__ == "__main__":
main()
| en | 0.641886 | # Given a string containing just the characters '(', ')', '{', '}', '[' and ']', # determine if the input string is valid. # An input string is valid if: # Open brackets must be closed by the same type of brackets. # Open brackets must be closed in the correct order. # Note that an empty string is also considered valid. # Example 1: # Input: "()" # Output: true # Example 2: # Input: "()[]{}" # Output: true # Example 3: # Input: "(]" # Output: false # Example 4: # Input: "([)]" # Output: false # Example 5: # Input: "{[]}" # Output: true :type s: str :rtype: bool | 4.226375 | 4 |
backend/ibutsu_server/controllers/health_controller.py | rsnyman/ibutsu-server | 10 | 578 | <filename>backend/ibutsu_server/controllers/health_controller.py
from flask import current_app
from sqlalchemy.exc import InterfaceError
from sqlalchemy.exc import OperationalError
try:
from ibutsu_server.db.model import Result
IS_CONNECTED = True
except ImportError:
IS_CONNECTED = False
def get_health(token_info=None, user=None):
"""Get a health report
:rtype: Health
"""
return {"status": "OK", "message": "Service is running"}
def get_database_health(token_info=None, user=None):
"""Get a health report for the database
:rtype: Health
"""
response = ({"status": "Pending", "message": "Fetching service status"}, 200)
# Try to connect to the database, and handle various responses
try:
if not IS_CONNECTED:
response = ({"status": "Error", "message": "Incomplete database configuration"}, 500)
else:
Result.query.first()
response = ({"status": "OK", "message": "Service is running"}, 200)
except OperationalError:
response = ({"status": "Error", "message": "Unable to connect to the database"}, 500)
except InterfaceError:
response = ({"status": "Error", "message": "Incorrect connection configuration"}, 500)
except Exception as e:
response = ({"status": "Error", "message": str(e)}, 500)
return response
def get_health_info(token_info=None, user=None):
"""Get the information about this server
:rtype: HealthInfo
"""
return {
"frontend": current_app.config.get("FRONTEND_URL", "http://localhost:3000"),
"backend": current_app.config.get("BACKEND_URL", "http://localhost:8080"),
"api_ui": current_app.config.get("BACKEND_URL", "http://localhost:8080") + "/api/ui/",
}
| <filename>backend/ibutsu_server/controllers/health_controller.py
from flask import current_app
from sqlalchemy.exc import InterfaceError
from sqlalchemy.exc import OperationalError
try:
from ibutsu_server.db.model import Result
IS_CONNECTED = True
except ImportError:
IS_CONNECTED = False
def get_health(token_info=None, user=None):
"""Get a health report
:rtype: Health
"""
return {"status": "OK", "message": "Service is running"}
def get_database_health(token_info=None, user=None):
"""Get a health report for the database
:rtype: Health
"""
response = ({"status": "Pending", "message": "Fetching service status"}, 200)
# Try to connect to the database, and handle various responses
try:
if not IS_CONNECTED:
response = ({"status": "Error", "message": "Incomplete database configuration"}, 500)
else:
Result.query.first()
response = ({"status": "OK", "message": "Service is running"}, 200)
except OperationalError:
response = ({"status": "Error", "message": "Unable to connect to the database"}, 500)
except InterfaceError:
response = ({"status": "Error", "message": "Incorrect connection configuration"}, 500)
except Exception as e:
response = ({"status": "Error", "message": str(e)}, 500)
return response
def get_health_info(token_info=None, user=None):
"""Get the information about this server
:rtype: HealthInfo
"""
return {
"frontend": current_app.config.get("FRONTEND_URL", "http://localhost:3000"),
"backend": current_app.config.get("BACKEND_URL", "http://localhost:8080"),
"api_ui": current_app.config.get("BACKEND_URL", "http://localhost:8080") + "/api/ui/",
}
| en | 0.862558 | Get a health report :rtype: Health Get a health report for the database :rtype: Health # Try to connect to the database, and handle various responses Get the information about this server :rtype: HealthInfo | 2.642551 | 3 |
src/python/tsnecuda/TSNE.py | rappdw/tsne-cuda | 1 | 579 | """Bindings for the Barnes Hut TSNE algorithm with fast nearest neighbors
Refs:
References
[1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
import numpy as N
import ctypes
import os
import pkg_resources
def ord_string(s):
b = bytearray()
arr = b.extend(map(ord, s))
return N.array([x for x in b] + [0]).astype(N.uint8)
class TSNE(object):
def __init__(self,
n_components=2,
perplexity=50.0,
early_exaggeration=2.0,
learning_rate=200.0,
num_neighbors=1023,
force_magnify_iters=250,
pre_momentum=0.5,
post_momentum=0.8,
theta=0.5,
epssq=0.0025,
n_iter=1000,
n_iter_without_progress=1000,
min_grad_norm=1e-7,
perplexity_epsilon=1e-3,
metric='euclidean',
init='random',
return_style='once',
num_snapshots=5,
verbose=0,
random_seed=None,
use_interactive=False,
viz_timeout=10000,
viz_server="tcp://localhost:5556",
dump_points=False,
dump_file="dump.txt",
dump_interval=1,
print_interval=10,
device=0,
):
"""Initialization method for barnes hut T-SNE class.
"""
# Initialize the variables
self.n_components = int(n_components)
if self.n_components != 2:
raise ValueError('The current barnes-hut implementation does not support projection into dimensions other than 2 for now.')
self.perplexity = float(perplexity)
self.early_exaggeration = float(early_exaggeration)
self.learning_rate = float(learning_rate)
self.n_iter = int(n_iter)
self.n_iter_without_progress = int(n_iter_without_progress)
self.min_grad_norm = float(min_grad_norm)
if metric not in ['euclidean']:
raise ValueError('Non-Euclidean metrics are not currently supported. Please use metric=\'euclidean\' for now.')
else:
self.metric = metric
if init not in ['random']:
raise ValueError('Non-Random initialization is not currently supported. Please use init=\'random\' for now.')
else:
self.init = init
self.verbose = int(verbose)
# Initialize non-sklearn variables
self.num_neighbors = int(num_neighbors)
self.force_magnify_iters = int(force_magnify_iters)
self.perplexity_epsilon = float(perplexity_epsilon)
self.pre_momentum = float(pre_momentum)
self.post_momentum = float(post_momentum)
self.theta = float(theta)
self.epssq =float(epssq)
self.device = int(device)
self.print_interval = int(print_interval)
# Point dumpoing
self.dump_file = str(dump_file)
self.dump_points = bool(dump_points)
self.dump_interval = int(dump_interval)
# Viz
self.use_interactive = bool(use_interactive)
self.viz_server = str(viz_server)
self.viz_timeout = int(viz_timeout)
# Return style
if return_style not in ['once','snapshots']:
raise ValueError('Invalid return style...')
elif return_style == 'once':
self.return_style = 0
elif return_style == 'snapshots':
self.return_style = 1
self.num_snapshots = int(num_snapshots)
# Build the hooks for the BH T-SNE library
self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current location
# self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library
# self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library
self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes library
# Hook the BH T-SNE function
self._lib.pymodule_bh_tsne.restype = None
self._lib.pymodule_bh_tsne.argtypes = [
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points
ctypes.POINTER(N.ctypeslib.c_intp), # dims
ctypes.c_float, # Perplexity
ctypes.c_float, # Learning Rate
ctypes.c_float, # Magnitude Factor
ctypes.c_int, # Num Neighbors
ctypes.c_int, # Iterations
ctypes.c_int, # Iterations no progress
ctypes.c_int, # Force Magnify iterations
ctypes.c_float, # Perplexity search epsilon
ctypes.c_float, # pre-exaggeration momentum
ctypes.c_float, # post-exaggeration momentum
ctypes.c_float, # Theta
ctypes.c_float, # epssq
ctypes.c_float, # Minimum gradient norm
ctypes.c_int, # Initialization types
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data
ctypes.c_bool, # Dump points
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File
ctypes.c_int, # Dump interval
ctypes.c_bool, # Use interactive
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server
ctypes.c_int, # Viz timeout
ctypes.c_int, # Verbosity
ctypes.c_int, # Print interval
ctypes.c_int, # GPU Device
ctypes.c_int, # Return style
ctypes.c_int ] # Number of snapshots
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed output.
Arguments:
X {array} -- Input array, shape: (n_points, n_dimensions)
Keyword Arguments:
y {None} -- Ignored (default: {None})
"""
# Setup points/embedding requirements
self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED'])
self.embedding = N.zeros(shape=(X.shape[0],self.n_components))
self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])
# Handle Initialization
if y is None:
self.initialization_type = 1
self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED'])
else:
self.initialization_type = 3
self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED'])
# Handle dumping and viz strings
self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self._lib.pymodule_bh_tsne(
self.embedding, # result
self.points, # points
self.points.ctypes.shape, # dims
ctypes.c_float(self.perplexity), # Perplexity
ctypes.c_float(self.learning_rate), # Learning Rate
ctypes.c_float(self.early_exaggeration), # Magnitude Factor
ctypes.c_int(self.num_neighbors), # Num Neighbors
ctypes.c_int(self.n_iter), # Iterations
ctypes.c_int(self.n_iter_without_progress), # Iterations no progress
ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations
ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon
ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum
ctypes.c_float(self.post_momentum), # post-exaggeration momentum
ctypes.c_float(self.theta), # Theta
ctypes.c_float(self.epssq), # epssq
ctypes.c_float(self.min_grad_norm), # Minimum gradient norm
ctypes.c_int(self.initialization_type), # Initialization types
self.init_data, # Initialization Data
ctypes.c_bool(self.dump_points), # Dump points
self.dump_file_, # Dump File
ctypes.c_int(self.dump_interval), # Dump interval
ctypes.c_bool(self.use_interactive), # Use interactive
self.viz_server_, # Viz Server
ctypes.c_int(self.viz_timeout), # Viz timeout
ctypes.c_int(self.verbose), # Verbosity
ctypes.c_int(self.print_interval), # Print interval
ctypes.c_int(self.device), # GPU Device
ctypes.c_int(self.return_style), # Return style
ctypes.c_int(self.num_snapshots) ) # Number of snapshots
return self.embedding
| """Bindings for the Barnes Hut TSNE algorithm with fast nearest neighbors
Refs:
References
[1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
import numpy as N
import ctypes
import os
import pkg_resources
def ord_string(s):
b = bytearray()
arr = b.extend(map(ord, s))
return N.array([x for x in b] + [0]).astype(N.uint8)
class TSNE(object):
def __init__(self,
n_components=2,
perplexity=50.0,
early_exaggeration=2.0,
learning_rate=200.0,
num_neighbors=1023,
force_magnify_iters=250,
pre_momentum=0.5,
post_momentum=0.8,
theta=0.5,
epssq=0.0025,
n_iter=1000,
n_iter_without_progress=1000,
min_grad_norm=1e-7,
perplexity_epsilon=1e-3,
metric='euclidean',
init='random',
return_style='once',
num_snapshots=5,
verbose=0,
random_seed=None,
use_interactive=False,
viz_timeout=10000,
viz_server="tcp://localhost:5556",
dump_points=False,
dump_file="dump.txt",
dump_interval=1,
print_interval=10,
device=0,
):
"""Initialization method for barnes hut T-SNE class.
"""
# Initialize the variables
self.n_components = int(n_components)
if self.n_components != 2:
raise ValueError('The current barnes-hut implementation does not support projection into dimensions other than 2 for now.')
self.perplexity = float(perplexity)
self.early_exaggeration = float(early_exaggeration)
self.learning_rate = float(learning_rate)
self.n_iter = int(n_iter)
self.n_iter_without_progress = int(n_iter_without_progress)
self.min_grad_norm = float(min_grad_norm)
if metric not in ['euclidean']:
raise ValueError('Non-Euclidean metrics are not currently supported. Please use metric=\'euclidean\' for now.')
else:
self.metric = metric
if init not in ['random']:
raise ValueError('Non-Random initialization is not currently supported. Please use init=\'random\' for now.')
else:
self.init = init
self.verbose = int(verbose)
# Initialize non-sklearn variables
self.num_neighbors = int(num_neighbors)
self.force_magnify_iters = int(force_magnify_iters)
self.perplexity_epsilon = float(perplexity_epsilon)
self.pre_momentum = float(pre_momentum)
self.post_momentum = float(post_momentum)
self.theta = float(theta)
self.epssq =float(epssq)
self.device = int(device)
self.print_interval = int(print_interval)
# Point dumpoing
self.dump_file = str(dump_file)
self.dump_points = bool(dump_points)
self.dump_interval = int(dump_interval)
# Viz
self.use_interactive = bool(use_interactive)
self.viz_server = str(viz_server)
self.viz_timeout = int(viz_timeout)
# Return style
if return_style not in ['once','snapshots']:
raise ValueError('Invalid return style...')
elif return_style == 'once':
self.return_style = 0
elif return_style == 'snapshots':
self.return_style = 1
self.num_snapshots = int(num_snapshots)
# Build the hooks for the BH T-SNE library
self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current location
# self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library
# self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library
self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes library
# Hook the BH T-SNE function
self._lib.pymodule_bh_tsne.restype = None
self._lib.pymodule_bh_tsne.argtypes = [
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points
ctypes.POINTER(N.ctypeslib.c_intp), # dims
ctypes.c_float, # Perplexity
ctypes.c_float, # Learning Rate
ctypes.c_float, # Magnitude Factor
ctypes.c_int, # Num Neighbors
ctypes.c_int, # Iterations
ctypes.c_int, # Iterations no progress
ctypes.c_int, # Force Magnify iterations
ctypes.c_float, # Perplexity search epsilon
ctypes.c_float, # pre-exaggeration momentum
ctypes.c_float, # post-exaggeration momentum
ctypes.c_float, # Theta
ctypes.c_float, # epssq
ctypes.c_float, # Minimum gradient norm
ctypes.c_int, # Initialization types
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data
ctypes.c_bool, # Dump points
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File
ctypes.c_int, # Dump interval
ctypes.c_bool, # Use interactive
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server
ctypes.c_int, # Viz timeout
ctypes.c_int, # Verbosity
ctypes.c_int, # Print interval
ctypes.c_int, # GPU Device
ctypes.c_int, # Return style
ctypes.c_int ] # Number of snapshots
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed output.
Arguments:
X {array} -- Input array, shape: (n_points, n_dimensions)
Keyword Arguments:
y {None} -- Ignored (default: {None})
"""
# Setup points/embedding requirements
self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED'])
self.embedding = N.zeros(shape=(X.shape[0],self.n_components))
self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])
# Handle Initialization
if y is None:
self.initialization_type = 1
self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED'])
else:
self.initialization_type = 3
self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED'])
# Handle dumping and viz strings
self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self._lib.pymodule_bh_tsne(
self.embedding, # result
self.points, # points
self.points.ctypes.shape, # dims
ctypes.c_float(self.perplexity), # Perplexity
ctypes.c_float(self.learning_rate), # Learning Rate
ctypes.c_float(self.early_exaggeration), # Magnitude Factor
ctypes.c_int(self.num_neighbors), # Num Neighbors
ctypes.c_int(self.n_iter), # Iterations
ctypes.c_int(self.n_iter_without_progress), # Iterations no progress
ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations
ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon
ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum
ctypes.c_float(self.post_momentum), # post-exaggeration momentum
ctypes.c_float(self.theta), # Theta
ctypes.c_float(self.epssq), # epssq
ctypes.c_float(self.min_grad_norm), # Minimum gradient norm
ctypes.c_int(self.initialization_type), # Initialization types
self.init_data, # Initialization Data
ctypes.c_bool(self.dump_points), # Dump points
self.dump_file_, # Dump File
ctypes.c_int(self.dump_interval), # Dump interval
ctypes.c_bool(self.use_interactive), # Use interactive
self.viz_server_, # Viz Server
ctypes.c_int(self.viz_timeout), # Viz timeout
ctypes.c_int(self.verbose), # Verbosity
ctypes.c_int(self.print_interval), # Print interval
ctypes.c_int(self.device), # GPU Device
ctypes.c_int(self.return_style), # Return style
ctypes.c_int(self.num_snapshots) ) # Number of snapshots
return self.embedding
| en | 0.564891 | Bindings for the Barnes Hut TSNE algorithm with fast nearest neighbors Refs: References [1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html Initialization method for barnes hut T-SNE class. # Initialize the variables # Initialize non-sklearn variables # Point dumpoing # Viz # Return style # Build the hooks for the BH T-SNE library # Load from current location # self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library # self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library # Load the ctypes library # Hook the BH T-SNE function # result # points # dims # Perplexity # Learning Rate # Magnitude Factor # Num Neighbors # Iterations # Iterations no progress # Force Magnify iterations # Perplexity search epsilon # pre-exaggeration momentum # post-exaggeration momentum # Theta # epssq # Minimum gradient norm # Initialization types # Initialization Data # Dump points # Dump File # Dump interval # Use interactive # Viz Server # Viz timeout # Verbosity # Print interval # GPU Device # Return style # Number of snapshots Fit X into an embedded space and return that transformed output. Arguments: X {array} -- Input array, shape: (n_points, n_dimensions) Keyword Arguments: y {None} -- Ignored (default: {None}) # Setup points/embedding requirements # Handle Initialization # Handle dumping and viz strings # result # points # dims # Perplexity # Learning Rate # Magnitude Factor # Num Neighbors # Iterations # Iterations no progress # Force Magnify iterations # Perplexity search epsilon # pre-exaggeration momentum # post-exaggeration momentum # Theta # epssq # Minimum gradient norm # Initialization types # Initialization Data # Dump points # Dump File # Dump interval # Use interactive # Viz Server # Viz timeout # Verbosity # Print interval # GPU Device # Return style # Number of snapshots | 2.517498 | 3 |
school/admin/__init__.py | leyyin/university-SE | 3 | 580 | # contains any CRUD not related to strictly editing users info and courses info
from .views import admin
| # contains any CRUD not related to strictly editing users info and courses info
from .views import admin
| en | 0.779093 | # contains any CRUD not related to strictly editing users info and courses info | 0.999269 | 1 |
python/helpers/pydev/pydevd_file_utils.py | kirmerzlikin/intellij-community | 1 | 581 | <reponame>kirmerzlikin/intellij-community
r'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session in
one machine and debugging in another.
To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.
@note:
in this context, the server is where your python process is running
and the client is where eclipse is running.
E.g.:
If the server (your python process) has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be:
PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
alternatively, this can be set with an environment variable from the command line:
set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
import json
import os.path
import sys
import traceback
_os_normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join
try:
rPath = os.path.realpath # @UndefinedVariable
except:
# jython does not support os.path.realpath
# realpath is a no-op on systems without islink support
rPath = os.path.abspath
# defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# see module docstring for more details.
try:
PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n')
traceback.print_exc()
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n')
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
# Converting json lists to tuple
PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]
# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
# (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy',
# r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx')
# ]
convert_to_long_pathname = lambda filename:filename
convert_to_short_pathname = lambda filename:filename
get_path_with_real_case = lambda filename:filename
if sys.platform == 'win32':
try:
import ctypes
from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD
GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetLongPathName.restype = DWORD
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetShortPathName.restype = DWORD
def _convert_to_long_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetLongPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _convert_to_short_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetShortPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _get_path_with_real_case(filename):
ret = convert_to_long_pathname(convert_to_short_pathname(filename))
# This doesn't handle the drive letter properly (it'll be unchanged).
# Make sure the drive letter is always uppercase.
if len(ret) > 1 and ret[1] == ':' and ret[0].islower():
return ret[0].upper() + ret[1:]
return ret
# Check that it actually works
_get_path_with_real_case(__file__)
except:
# Something didn't quite work out, leave no-op conversions in place.
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
traceback.print_exc()
else:
convert_to_long_pathname = _convert_to_long_pathname
convert_to_short_pathname = _convert_to_short_pathname
get_path_with_real_case = _get_path_with_real_case
elif IS_JYTHON and IS_WINDOWS:
def get_path_with_real_case(filename):
from java.io import File
f = File(filename)
ret = f.getCanonicalPath()
if IS_PY2 and not isinstance(ret, str):
return ret.encode(getfilesystemencoding())
return ret
if IS_WINDOWS:
if IS_JYTHON:
def normcase(filename):
return filename.lower()
else:
def normcase(filename):
# `normcase` doesn't lower case on Python 2 for non-English locale, but Java
# side does it, so we should do it manually.
if '~' in filename:
filename = convert_to_long_pathname(filename)
filename = _os_normcase(filename)
return filename.lower()
else:
def normcase(filename):
return filename # no-op
_ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX'
def set_ide_os(os):
'''
We need to set the IDE os because the host where the code is running may be
actually different from the client (and the point is that we want the proper
paths to translate from the client to the server).
:param os:
'UNIX' or 'WINDOWS'
'''
global _ide_os
prev = _ide_os
if os == 'WIN': # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116)
os = 'WINDOWS'
assert os in ('WINDOWS', 'UNIX')
if prev != os:
_ide_os = os
# We need to (re)setup how the client <-> server translation works to provide proper separators.
setup_client_server_paths(_last_client_server_paths_set)
DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true')
# Caches filled as requested during the debug session.
NORM_PATHS_CONTAINER = {}
NORM_PATHS_AND_BASE_CONTAINER = {}
def _NormFile(filename):
abs_path, real_path = _NormPaths(filename)
return real_path
def _AbsFile(filename):
abs_path, real_path = _NormPaths(filename)
return abs_path
# Returns tuple of absolute path and real path for given filename
def _NormPaths(filename):
try:
return NORM_PATHS_CONTAINER[filename]
except KeyError:
if filename.__class__ != str:
raise AssertionError('Paths passed to _NormPaths must be str. Found: %s (%s)' % (filename, type(filename)))
abs_path = _NormPath(filename, os.path.abspath)
real_path = _NormPath(filename, rPath)
# cache it for fast access later
NORM_PATHS_CONTAINER[filename] = abs_path, real_path
return abs_path, real_path
def _NormPath(filename, normpath):
r = normpath(filename)
ind = r.find('.zip')
if ind == -1:
ind = r.find('.egg')
if ind != -1:
ind += 4
zip_path = r[:ind]
inner_path = r[ind:]
if inner_path.startswith('!'):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with 'exists'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
if inner_path:
r = join(normcase(zip_path), inner_path)
return r
r = normcase(r)
return r
_ZIP_SEARCH_CACHE = {}
_NOT_FOUND_SENTINEL = object()
def exists(file):
if os.path.exists(file):
return file
ind = file.find('.zip')
if ind == -1:
ind = file.find('.egg')
if ind != -1:
ind += 4
zip_path = file[:ind]
inner_path = file[ind:]
if inner_path.startswith("!"):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with '_NormPath'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL)
if zip_file_obj is None:
return False
elif zip_file_obj is _NOT_FOUND_SENTINEL:
try:
import zipfile
zip_file_obj = zipfile.ZipFile(zip_path, 'r')
_ZIP_SEARCH_CACHE[zip_path] = zip_file_obj
except:
_ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL
return False
try:
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
_info = zip_file_obj.getinfo(inner_path.replace('\\', '/'))
return join(zip_path, inner_path)
except KeyError:
return None
return None
# Now, let's do a quick test to see if we're working with a version of python that has no problems
# related to the names generated...
try:
try:
code = rPath.func_code
except AttributeError:
code = rPath.__code__
if not exists(_NormFile(code.co_filename)):
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.flush()
NORM_SEARCH_CACHE = {}
initial_norm_paths = _NormPaths
def _NormPaths(filename): # Let's redefine _NormPaths to work with paths that may be incorrect
try:
return NORM_SEARCH_CACHE[filename]
except KeyError:
abs_path, real_path = initial_norm_paths(filename)
if not exists(real_path):
# We must actually go on and check if we can find it as if it was a relative path for some of the paths in the pythonpath
for path in sys.path:
abs_path, real_path = initial_norm_paths(join(path, filename))
if exists(real_path):
break
else:
sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
abs_path = filename
real_path = filename
NORM_SEARCH_CACHE[filename] = abs_path, real_path
return abs_path, real_path
except:
# Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
traceback.print_exc()
# Note: as these functions may be rebound, users should always import
# pydevd_file_utils and then use:
#
# pydevd_file_utils.norm_file_to_client
# pydevd_file_utils.norm_file_to_server
#
# instead of importing any of those names to a given scope.
def _original_file_to_client(filename, cache={}):
try:
return cache[filename]
except KeyError:
cache[filename] = get_path_with_real_case(_AbsFile(filename))
return cache[filename]
_original_file_to_server = _NormFile
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
def _fix_path(path, sep):
if path.endswith('/') or path.endswith('\\'):
path = path[:-1]
if sep != '/':
path = path.replace('/', sep)
return path
_last_client_server_paths_set = []
def setup_client_server_paths(paths):
'''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON'''
global norm_file_to_client
global norm_file_to_server
global _last_client_server_paths_set
_last_client_server_paths_set = paths[:]
# Work on the client and server slashes.
python_sep = '\\' if IS_WINDOWS else '/'
eclipse_sep = '\\' if _ide_os == 'WINDOWS' else '/'
norm_filename_to_server_container = {}
norm_filename_to_client_container = {}
initial_paths = list(paths)
paths_from_eclipse_to_python = initial_paths[:]
# Apply normcase to the existing paths to follow the os preferences.
for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]):
if IS_PY2:
if isinstance(path0, unicode):
path0 = path0.encode(sys.getfilesystemencoding())
if isinstance(path1, unicode):
path1 = path1.encode(sys.getfilesystemencoding())
path0 = _fix_path(path0, eclipse_sep)
path1 = _fix_path(path1, python_sep)
initial_paths[i] = (path0, path1)
paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1))
if not paths_from_eclipse_to_python:
# no translation step needed (just inline the calls)
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
return
# only setup translation functions if absolutely needed!
def _norm_file_to_server(filename, cache=norm_filename_to_server_container):
# Eclipse will send the passed filename to be translated to the python process
# So, this would be 'NormFileFromEclipseToPython'
try:
return cache[filename]
except KeyError:
if eclipse_sep != python_sep:
# Make sure that the separators are what we expect from the IDE.
filename = filename.replace(python_sep, eclipse_sep)
# used to translate a path from the client to the debug server
translated = normcase(filename)
for eclipse_prefix, server_prefix in paths_from_eclipse_to_python:
if translated.startswith(eclipse_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
translated = translated.replace(eclipse_prefix, server_prefix)
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to server: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[0] for x in paths_from_eclipse_to_python]))
# Note that when going to the server, we do the replace first and only later do the norm file.
if eclipse_sep != python_sep:
translated = translated.replace(eclipse_sep, python_sep)
translated = _NormFile(translated)
cache[filename] = translated
return translated
def _norm_file_to_client(filename, cache=norm_filename_to_client_container):
# The result of this method will be passed to eclipse
# So, this would be 'NormFileFromPythonToEclipse'
try:
return cache[filename]
except KeyError:
# used to translate a path from the debug server to the client
translated = _NormFile(filename)
# After getting the real path, let's get it with the path with
# the real case and then obtain a new normalized copy, just in case
# the path is different now.
translated_proper_case = get_path_with_real_case(translated)
translated = _NormFile(translated_proper_case)
if IS_WINDOWS:
if translated.lower() != translated_proper_case.lower():
translated_proper_case = translated
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write(
'pydev debugger: _NormFile changed path (from: %s to %s)\n' % (
translated_proper_case, translated))
for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python):
if translated.startswith(python_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))
# Note: use the non-normalized version.
eclipse_prefix = initial_paths[i][0]
translated = eclipse_prefix + translated_proper_case[len(python_prefix):]
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to client: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[1] for x in paths_from_eclipse_to_python]))
translated = translated_proper_case
if eclipse_sep != python_sep:
translated = translated.replace(python_sep, eclipse_sep)
# The resulting path is not in the python process, so, we cannot do a _NormFile here,
# only at the beginning of this method.
cache[filename] = translated
return translated
norm_file_to_server = _norm_file_to_server
norm_file_to_client = _norm_file_to_client
setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON)
def _is_int(filename):
# isdigit() doesn't support negative numbers
try:
int(filename)
return True
except:
return False
def is_real_file(filename):
# Check for Jupyter cells
return not _is_int(filename) and not filename.startswith("<ipython-input")
# For given file f returns tuple of its absolute path, real path and base name
def get_abs_path_real_path_and_base_from_file(f):
try:
return NORM_PATHS_AND_BASE_CONTAINER[f]
except:
if _NormPaths is None: # Interpreter shutdown
return f
if f is not None:
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
if not is_real_file(f):
abs_path, real_path, base = f, f, f
else:
abs_path, real_path = _NormPaths(f)
base = basename(real_path)
ret = abs_path, real_path, base
NORM_PATHS_AND_BASE_CONTAINER[f] = ret
return ret
def get_abs_path_real_path_and_base_from_frame(frame):
try:
return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
# This one is just internal (so, does not need any kind of client-server translation)
f = frame.f_code.co_filename
if f is not None and f.startswith (('build/bdist.', 'build\\bdist.')):
# files from eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg>
f = frame.f_globals['__file__']
if get_abs_path_real_path_and_base_from_file is None: # Interpreter shutdown
return f
ret = get_abs_path_real_path_and_base_from_file(f)
# Also cache based on the frame.f_code.co_filename (if we had it inside build/bdist it can make a difference).
NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret
return ret
def get_fullname(mod_name):
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
try:
loader = pkgutil.get_loader(mod_name)
except:
return None
if loader is not None:
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
def get_package_dir(mod_name):
for path in sys.path:
mod_path = join(path, mod_name.replace('.', '/'))
if os.path.isdir(mod_path):
return mod_path
return None
| r'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session in
one machine and debugging in another.
To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.
@note:
in this context, the server is where your python process is running
and the client is where eclipse is running.
E.g.:
If the server (your python process) has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be:
PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
alternatively, this can be set with an environment variable from the command line:
set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
import json
import os.path
import sys
import traceback
_os_normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join
try:
rPath = os.path.realpath # @UndefinedVariable
except:
# jython does not support os.path.realpath
# realpath is a no-op on systems without islink support
rPath = os.path.abspath
# defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# see module docstring for more details.
try:
PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n')
traceback.print_exc()
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n')
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
# Converting json lists to tuple
PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]
# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
# (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy',
# r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx')
# ]
convert_to_long_pathname = lambda filename:filename
convert_to_short_pathname = lambda filename:filename
get_path_with_real_case = lambda filename:filename
if sys.platform == 'win32':
try:
import ctypes
from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD
GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetLongPathName.restype = DWORD
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetShortPathName.restype = DWORD
def _convert_to_long_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetLongPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _convert_to_short_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetShortPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _get_path_with_real_case(filename):
ret = convert_to_long_pathname(convert_to_short_pathname(filename))
# This doesn't handle the drive letter properly (it'll be unchanged).
# Make sure the drive letter is always uppercase.
if len(ret) > 1 and ret[1] == ':' and ret[0].islower():
return ret[0].upper() + ret[1:]
return ret
# Check that it actually works
_get_path_with_real_case(__file__)
except:
# Something didn't quite work out, leave no-op conversions in place.
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
traceback.print_exc()
else:
convert_to_long_pathname = _convert_to_long_pathname
convert_to_short_pathname = _convert_to_short_pathname
get_path_with_real_case = _get_path_with_real_case
elif IS_JYTHON and IS_WINDOWS:
def get_path_with_real_case(filename):
from java.io import File
f = File(filename)
ret = f.getCanonicalPath()
if IS_PY2 and not isinstance(ret, str):
return ret.encode(getfilesystemencoding())
return ret
if IS_WINDOWS:
if IS_JYTHON:
def normcase(filename):
return filename.lower()
else:
def normcase(filename):
# `normcase` doesn't lower case on Python 2 for non-English locale, but Java
# side does it, so we should do it manually.
if '~' in filename:
filename = convert_to_long_pathname(filename)
filename = _os_normcase(filename)
return filename.lower()
else:
def normcase(filename):
return filename # no-op
_ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX'
def set_ide_os(os):
'''
We need to set the IDE os because the host where the code is running may be
actually different from the client (and the point is that we want the proper
paths to translate from the client to the server).
:param os:
'UNIX' or 'WINDOWS'
'''
global _ide_os
prev = _ide_os
if os == 'WIN': # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116)
os = 'WINDOWS'
assert os in ('WINDOWS', 'UNIX')
if prev != os:
_ide_os = os
# We need to (re)setup how the client <-> server translation works to provide proper separators.
setup_client_server_paths(_last_client_server_paths_set)
DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true')
# Caches filled as requested during the debug session.
NORM_PATHS_CONTAINER = {}
NORM_PATHS_AND_BASE_CONTAINER = {}
def _NormFile(filename):
abs_path, real_path = _NormPaths(filename)
return real_path
def _AbsFile(filename):
abs_path, real_path = _NormPaths(filename)
return abs_path
# Returns tuple of absolute path and real path for given filename
def _NormPaths(filename):
try:
return NORM_PATHS_CONTAINER[filename]
except KeyError:
if filename.__class__ != str:
raise AssertionError('Paths passed to _NormPaths must be str. Found: %s (%s)' % (filename, type(filename)))
abs_path = _NormPath(filename, os.path.abspath)
real_path = _NormPath(filename, rPath)
# cache it for fast access later
NORM_PATHS_CONTAINER[filename] = abs_path, real_path
return abs_path, real_path
def _NormPath(filename, normpath):
r = normpath(filename)
ind = r.find('.zip')
if ind == -1:
ind = r.find('.egg')
if ind != -1:
ind += 4
zip_path = r[:ind]
inner_path = r[ind:]
if inner_path.startswith('!'):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with 'exists'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
if inner_path:
r = join(normcase(zip_path), inner_path)
return r
r = normcase(r)
return r
_ZIP_SEARCH_CACHE = {}
_NOT_FOUND_SENTINEL = object()
def exists(file):
if os.path.exists(file):
return file
ind = file.find('.zip')
if ind == -1:
ind = file.find('.egg')
if ind != -1:
ind += 4
zip_path = file[:ind]
inner_path = file[ind:]
if inner_path.startswith("!"):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with '_NormPath'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL)
if zip_file_obj is None:
return False
elif zip_file_obj is _NOT_FOUND_SENTINEL:
try:
import zipfile
zip_file_obj = zipfile.ZipFile(zip_path, 'r')
_ZIP_SEARCH_CACHE[zip_path] = zip_file_obj
except:
_ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL
return False
try:
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
_info = zip_file_obj.getinfo(inner_path.replace('\\', '/'))
return join(zip_path, inner_path)
except KeyError:
return None
return None
# Now, let's do a quick test to see if we're working with a version of python that has no problems
# related to the names generated...
try:
try:
code = rPath.func_code
except AttributeError:
code = rPath.__code__
if not exists(_NormFile(code.co_filename)):
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.flush()
NORM_SEARCH_CACHE = {}
initial_norm_paths = _NormPaths
def _NormPaths(filename): # Let's redefine _NormPaths to work with paths that may be incorrect
try:
return NORM_SEARCH_CACHE[filename]
except KeyError:
abs_path, real_path = initial_norm_paths(filename)
if not exists(real_path):
# We must actually go on and check if we can find it as if it was a relative path for some of the paths in the pythonpath
for path in sys.path:
abs_path, real_path = initial_norm_paths(join(path, filename))
if exists(real_path):
break
else:
sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
abs_path = filename
real_path = filename
NORM_SEARCH_CACHE[filename] = abs_path, real_path
return abs_path, real_path
except:
# Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
traceback.print_exc()
# Note: as these functions may be rebound, users should always import
# pydevd_file_utils and then use:
#
# pydevd_file_utils.norm_file_to_client
# pydevd_file_utils.norm_file_to_server
#
# instead of importing any of those names to a given scope.
def _original_file_to_client(filename, cache={}):
try:
return cache[filename]
except KeyError:
cache[filename] = get_path_with_real_case(_AbsFile(filename))
return cache[filename]
_original_file_to_server = _NormFile
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
def _fix_path(path, sep):
if path.endswith('/') or path.endswith('\\'):
path = path[:-1]
if sep != '/':
path = path.replace('/', sep)
return path
_last_client_server_paths_set = []
def setup_client_server_paths(paths):
'''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON'''
global norm_file_to_client
global norm_file_to_server
global _last_client_server_paths_set
_last_client_server_paths_set = paths[:]
# Work on the client and server slashes.
python_sep = '\\' if IS_WINDOWS else '/'
eclipse_sep = '\\' if _ide_os == 'WINDOWS' else '/'
norm_filename_to_server_container = {}
norm_filename_to_client_container = {}
initial_paths = list(paths)
paths_from_eclipse_to_python = initial_paths[:]
# Apply normcase to the existing paths to follow the os preferences.
for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]):
if IS_PY2:
if isinstance(path0, unicode):
path0 = path0.encode(sys.getfilesystemencoding())
if isinstance(path1, unicode):
path1 = path1.encode(sys.getfilesystemencoding())
path0 = _fix_path(path0, eclipse_sep)
path1 = _fix_path(path1, python_sep)
initial_paths[i] = (path0, path1)
paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1))
if not paths_from_eclipse_to_python:
# no translation step needed (just inline the calls)
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
return
# only setup translation functions if absolutely needed!
def _norm_file_to_server(filename, cache=norm_filename_to_server_container):
# Eclipse will send the passed filename to be translated to the python process
# So, this would be 'NormFileFromEclipseToPython'
try:
return cache[filename]
except KeyError:
if eclipse_sep != python_sep:
# Make sure that the separators are what we expect from the IDE.
filename = filename.replace(python_sep, eclipse_sep)
# used to translate a path from the client to the debug server
translated = normcase(filename)
for eclipse_prefix, server_prefix in paths_from_eclipse_to_python:
if translated.startswith(eclipse_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
translated = translated.replace(eclipse_prefix, server_prefix)
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to server: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[0] for x in paths_from_eclipse_to_python]))
# Note that when going to the server, we do the replace first and only later do the norm file.
if eclipse_sep != python_sep:
translated = translated.replace(eclipse_sep, python_sep)
translated = _NormFile(translated)
cache[filename] = translated
return translated
def _norm_file_to_client(filename, cache=norm_filename_to_client_container):
# The result of this method will be passed to eclipse
# So, this would be 'NormFileFromPythonToEclipse'
try:
return cache[filename]
except KeyError:
# used to translate a path from the debug server to the client
translated = _NormFile(filename)
# After getting the real path, let's get it with the path with
# the real case and then obtain a new normalized copy, just in case
# the path is different now.
translated_proper_case = get_path_with_real_case(translated)
translated = _NormFile(translated_proper_case)
if IS_WINDOWS:
if translated.lower() != translated_proper_case.lower():
translated_proper_case = translated
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write(
'pydev debugger: _NormFile changed path (from: %s to %s)\n' % (
translated_proper_case, translated))
for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python):
if translated.startswith(python_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))
# Note: use the non-normalized version.
eclipse_prefix = initial_paths[i][0]
translated = eclipse_prefix + translated_proper_case[len(python_prefix):]
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to client: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[1] for x in paths_from_eclipse_to_python]))
translated = translated_proper_case
if eclipse_sep != python_sep:
translated = translated.replace(python_sep, eclipse_sep)
# The resulting path is not in the python process, so, we cannot do a _NormFile here,
# only at the beginning of this method.
cache[filename] = translated
return translated
norm_file_to_server = _norm_file_to_server
norm_file_to_client = _norm_file_to_client
setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON)
def _is_int(filename):
# isdigit() doesn't support negative numbers
try:
int(filename)
return True
except:
return False
def is_real_file(filename):
# Check for Jupyter cells
return not _is_int(filename) and not filename.startswith("<ipython-input")
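# Illustrative only (not part of the original module): Jupyter/IPython cells
# produce pseudo-filenames that should not be treated as files on disk, e.g.
#   is_real_file("<ipython-input-1-abcdef>")   ->  False
#   is_real_file("123")                        ->  False  (int-like cell id)
#   is_real_file("/home/user/project/app.py")  ->  True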
# For given file f returns tuple of its absolute path, real path and base name
def get_abs_path_real_path_and_base_from_file(f):
try:
return NORM_PATHS_AND_BASE_CONTAINER[f]
except:
if _NormPaths is None: # Interpreter shutdown
return f
if f is not None:
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
if not is_real_file(f):
abs_path, real_path, base = f, f, f
else:
abs_path, real_path = _NormPaths(f)
base = basename(real_path)
ret = abs_path, real_path, base
NORM_PATHS_AND_BASE_CONTAINER[f] = ret
return ret
def get_abs_path_real_path_and_base_from_frame(frame):
try:
return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
# This one is just internal (so, does not need any kind of client-server translation)
f = frame.f_code.co_filename
if f is not None and f.startswith (('build/bdist.', 'build\\bdist.')):
# files from eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg>
f = frame.f_globals['__file__']
if get_abs_path_real_path_and_base_from_file is None: # Interpreter shutdown
return f
ret = get_abs_path_real_path_and_base_from_file(f)
# Also cache based on the frame.f_code.co_filename (if we had it inside build/bdist it can make a difference).
NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret
return ret
def get_fullname(mod_name):
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
try:
loader = pkgutil.get_loader(mod_name)
except:
return None
if loader is not None:
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
def get_package_dir(mod_name):
for path in sys.path:
mod_path = join(path, mod_name.replace('.', '/'))
if os.path.isdir(mod_path):
return mod_path
return None | en | 0.881846 | This module provides utilities to get the absolute filenames so that we can be sure that: - The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit). - Providing means for the user to make path conversions when doing a remote debugging session in one machine and debugging in another. To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths. @note: in this context, the server is where your python process is running and the client is where eclipse is running. E.g.: If the server (your python process) has the structure /user/projects/my_project/src/package/module1.py and the client has: c:\my_project\src\package\module1.py the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be: PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')] alternatively, this can be set with an environment variable from the command line: set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']] @note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations @note: the case of the paths is important! Note that this can be tricky to get right when one machine uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON). @note: all the paths with breakpoints must be translated (otherwise they won't be found in the server) @note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation) import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend) see parameter docs on pydevd.py @note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target machine for the paths that'll actually have breakpoints). # @UndefinedVariable # jython does not support os.path.realpath # realpath is a no-op on systems without islink support # defined as a list of tuples where the 1st element of the tuple is the path in the client machine # and the 2nd element is the path in the server machine. # see module docstring for more details. # Converting json lists to tuple # example: # PATHS_FROM_ECLIPSE_TO_PYTHON = [ # (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy', # r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx') # ] # This doesn't handle the drive letter properly (it'll be unchanged). # Make sure the drive letter is always uppercase. # Check that it actually works # Something didn't quite work out, leave no-op conversions in place. # `normcase` doesn't lower case on Python 2 for non-English locale, but Java # side does it, so we should do it manually. # no-op We need to set the IDE os because the host where the code is running may be actually different from the client (and the point is that we want the proper paths to translate from the client to the server). :param os: 'UNIX' or 'WINDOWS' # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116) # We need to (re)setup how the client <-> server translation works to provide proper separators. # Caches filled as requested during the debug session. 
# Returns tuple of absolute path and real path for given filename # cache it for fast access later # Note (fabioz): although I can replicate this by creating a file ending as # .zip! or .egg!, I don't really know what's the real-world case for this # (still kept as it was added by @jetbrains, but it should probably be reviewed # later on). # Note 2: it goes hand-in-hand with 'exists'. # Note (fabioz): although I can replicate this by creating a file ending as # .zip! or .egg!, I don't really know what's the real-world case for this # (still kept as it was added by @jetbrains, but it should probably be reviewed # later on). # Note 2: it goes hand-in-hand with '_NormPath'. # Now, let's do a quick test to see if we're working with a version of python that has no problems # related to the names generated... # Let's redefine _NormPaths to work with paths that may be incorrect # We must actually go on and check if we can find it as if it was a relative path for some of the paths in the pythonpath # Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that # Note: as these functions may be rebound, users should always import # pydevd_file_utils and then use: # # pydevd_file_utils.norm_file_to_client # pydevd_file_utils.norm_file_to_server # # instead of importing any of those names to a given scope. paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON # Work on the client and server slashes. # Apply normcase to the existing paths to follow the os preferences. # no translation step needed (just inline the calls) # only setup translation functions if absolutely needed! # Eclipse will send the passed filename to be translated to the python process # So, this would be 'NormFileFromEclipseToPython' # Make sure that the separators are what we expect from the IDE. # used to translate a path from the client to the debug server # Note that when going to the server, we do the replace first and only later do the norm file. # The result of this method will be passed to eclipse # So, this would be 'NormFileFromPythonToEclipse' # used to translate a path from the debug server to the client # After getting the real path, let's get it with the path with # the real case and then obtain a new normalized copy, just in case # the path is different now. # Note: use the non-normalized version. # The resulting path is not in the python process, so, we cannot do a _NormFile here, # only at the beginning of this method. # isdigit() doesn't support negative numbers # Check for Jupyter cells # For given file f returns tuple of its absolute path, real path and base name # Interpreter shutdown # This one is just internal (so, does not need any kind of client-server translation) # files from eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg> # Interpreter shutdown # Also cache based on the frame.f_code.co_filename (if we had it inside build/bdist it can make a difference). | 2.51387 | 3 |
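The pydevd path utilities above translate breakpoint paths between the IDE (the "client") and the debugged process (the "server"). A minimal usage sketch, not part of the original file, assuming the module is importable as pydevd_file_utils; the mapping reuses the made-up Windows/Linux paths from the module's own docstring:

import pydevd_file_utils

pydevd_file_utils.setup_client_server_paths([
    (r'c:\my_project\src', r'/user/projects/my_project/src'),
])

server_path = pydevd_file_utils.norm_file_to_server(r'c:\my_project\src\package\module1.py')
client_path = pydevd_file_utils.norm_file_to_client(server_path)

Per the note in the module itself, norm_file_to_client and norm_file_to_server are rebound by setup_client_server_paths, so they should always be accessed through the module rather than imported into the caller's scope.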
src/networking/SessionsManager.py | OfekHarel/Orion-Connection-Software | 1 | 582 | <gh_stars>1-10
import os
import socket
from random import randint
from src import Constants
from src.Constants import Network
from src.networking import NetworkPackets, Actions
from src.networking.Client import Client
from src.utils.DH_Encryption import Encryption
from src.utils.Enum import Enum
class SessionManager:
"""
This class is responsible for dealing with any flow of net msgs.
"""
def __init__(self):
address = (Network.SERVER_IP, Network.SERVER_PORT)
self.client = Client(str(socket.gethostname()), address)
self.val = self.client.connect()
if not self.val:
Network.IS_ONLINE = False
def go_crypto(self):
msg = NetworkPackets.split(self.client.receive())
g = int(msg[1])
n = int(msg[2])
g_pow_a_mod_n = int(msg[3])
crypto = Encryption(g, n)
crypto.get_full_key(g_pow_a_mod_n)
self.client.send(NetworkPackets.assemble(NetworkPackets.NetLogicIncomes.CONNECT.value,
str(crypto.get_partial_key())))
self.client.crypto = crypto
def gen_id(self) -> str:
num = str(randint(1, 9999))
num = num.zfill(4)
return num
def open_id_file(self):
try:
open(Constants.Files.ID, 'r+').close()
except FileNotFoundError:
open(Constants.Files.ID, 'x').close()
finally:
file = open(Constants.Files.ID, 'r+')
return file
def sync(self):
"""
This function contains the full process of the sync phase.
"""
if Network.IS_ONLINE:
self.go_crypto()
num = ""
file = self.open_id_file()
if os.path.getsize(Constants.Files.ID) == 0: # Empty
is_valid = False
while not is_valid:
num = self.gen_id()
self.client.send(NetworkPackets.assemble("COMPUTER", "ID_VAL", num))
msg = NetworkPackets.split(self.client.receive())
is_valid = msg[0] == NetworkPackets.NetLogicIncomes.VALID.value
file.write(num)
else:
is_valid = False
num = file.read()
while not is_valid:
self.client.send(NetworkPackets.assemble("COMPUTER", "ID_VAL", num))
msg = NetworkPackets.split(self.client.receive())
is_valid = msg[0] == NetworkPackets.NetLogicIncomes.VALID.value
if not is_valid:
num = self.gen_id()
if num != file.read():
file.close()
os.remove(Constants.Files.ID)
file = self.open_id_file()
file.write(num)
file.close()
def manage(self, incoming: str):
"""
        This function deals with the execution of the required operations.
:param incoming: Raw net msg.
"""
if Network.IS_ONLINE:
incoming = NetworkPackets.split(incoming)[0]
if incoming in Operation.list():
if incoming == Operation.VOL_UP.value:
Actions.vol_up()
elif incoming == Operation.VOL_DOWN.value:
Actions.vol_down()
elif incoming == Operation.PAUSE_PLAY_TOGGLE.value:
Actions.play_pause()
elif incoming == Operation.SKIP.value:
Actions.next_song()
elif incoming == Operation.PREV.value:
Actions.prev_song()
elif incoming == Operation.MUTE.value:
Actions.mute()
elif incoming == Operation.OFF.value:
Actions.shut_down()
elif incoming == Operation.SLEEP.value:
Actions.sleep()
elif incoming == Operation.RESTART.value:
Actions.restart()
elif incoming == Operation.LOCK.value:
Actions.lock()
elif incoming == Operation.LOG_OUT.value:
Actions.log_out()
elif incoming == Operation.MAGIC_BTN.value:
Actions.run_file()
elif incoming == Operation.USAGE.value:
self.client.send(NetworkPackets.assemble(arr=Actions.COMPUTER.get_use_as_str_arr()))
elif incoming == Operation.DISCONNECT.value:
self.client.send(NetworkPackets.assemble(Operation.DISCONNECT.value))
return Operation.DISCONNECT
elif incoming in NetworkPackets.NetLogicIncomes.list():
if incoming == NetworkPackets.NetLogicIncomes.PAIRED.value:
Constants.Network.IS_PAIRING = True
self.client.send(NetworkPackets.assemble(arr=Actions.COMPUTER.get_specs_as_str_arr()))
                elif incoming == NetworkPackets.NetLogicIncomes.INVALID.value:  # compare the value, matching the other branches
pass
class Operation(Enum):
"""
All the operations that can be asked to execute.
"""
VOL_UP = "VOL_UP"
VOL_DOWN = "VOL_DOWN"
PAUSE_PLAY_TOGGLE = "PTT"
SKIP = "SKIP"
PREV = "PREV"
MUTE = "MUTE"
OFF = "OFF"
SLEEP = "SLEEP"
RESTART = "RESTRT"
LOCK = "LCK"
LOG_OUT = "LGOT"
DISCONNECT = "DISCON"
MAGIC_BTN = "MAGIC"
SPECS_INFO = "SPECS"
USAGE = "USE"
| import os
import socket
from random import randint
from src import Constants
from src.Constants import Network
from src.networking import NetworkPackets, Actions
from src.networking.Client import Client
from src.utils.DH_Encryption import Encryption
from src.utils.Enum import Enum
class SessionManager:
"""
This class is responsible for dealing with any flow of net msgs.
"""
def __init__(self):
address = (Network.SERVER_IP, Network.SERVER_PORT)
self.client = Client(str(socket.gethostname()), address)
self.val = self.client.connect()
if not self.val:
Network.IS_ONLINE = False
def go_crypto(self):
msg = NetworkPackets.split(self.client.receive())
g = int(msg[1])
n = int(msg[2])
g_pow_a_mod_n = int(msg[3])
crypto = Encryption(g, n)
crypto.get_full_key(g_pow_a_mod_n)
self.client.send(NetworkPackets.assemble(NetworkPackets.NetLogicIncomes.CONNECT.value,
str(crypto.get_partial_key())))
self.client.crypto = crypto
def gen_id(self) -> str:
num = str(randint(1, 9999))
num = num.zfill(4)
return num
def open_id_file(self):
try:
open(Constants.Files.ID, 'r+').close()
except FileNotFoundError:
open(Constants.Files.ID, 'x').close()
finally:
file = open(Constants.Files.ID, 'r+')
return file
def sync(self):
"""
This function contains the full process of the sync phase.
"""
if Network.IS_ONLINE:
self.go_crypto()
num = ""
file = self.open_id_file()
if os.path.getsize(Constants.Files.ID) == 0: # Empty
is_valid = False
while not is_valid:
num = self.gen_id()
self.client.send(NetworkPackets.assemble("COMPUTER", "ID_VAL", num))
msg = NetworkPackets.split(self.client.receive())
is_valid = msg[0] == NetworkPackets.NetLogicIncomes.VALID.value
file.write(num)
else:
is_valid = False
num = file.read()
while not is_valid:
self.client.send(NetworkPackets.assemble("COMPUTER", "ID_VAL", num))
msg = NetworkPackets.split(self.client.receive())
is_valid = msg[0] == NetworkPackets.NetLogicIncomes.VALID.value
if not is_valid:
num = self.gen_id()
if num != file.read():
file.close()
os.remove(Constants.Files.ID)
file = self.open_id_file()
file.write(num)
file.close()
def manage(self, incoming: str):
"""
        This function deals with the execution of the required operations.
:param incoming: Raw net msg.
"""
if Network.IS_ONLINE:
incoming = NetworkPackets.split(incoming)[0]
if incoming in Operation.list():
if incoming == Operation.VOL_UP.value:
Actions.vol_up()
elif incoming == Operation.VOL_DOWN.value:
Actions.vol_down()
elif incoming == Operation.PAUSE_PLAY_TOGGLE.value:
Actions.play_pause()
elif incoming == Operation.SKIP.value:
Actions.next_song()
elif incoming == Operation.PREV.value:
Actions.prev_song()
elif incoming == Operation.MUTE.value:
Actions.mute()
elif incoming == Operation.OFF.value:
Actions.shut_down()
elif incoming == Operation.SLEEP.value:
Actions.sleep()
elif incoming == Operation.RESTART.value:
Actions.restart()
elif incoming == Operation.LOCK.value:
Actions.lock()
elif incoming == Operation.LOG_OUT.value:
Actions.log_out()
elif incoming == Operation.MAGIC_BTN.value:
Actions.run_file()
elif incoming == Operation.USAGE.value:
self.client.send(NetworkPackets.assemble(arr=Actions.COMPUTER.get_use_as_str_arr()))
elif incoming == Operation.DISCONNECT.value:
self.client.send(NetworkPackets.assemble(Operation.DISCONNECT.value))
return Operation.DISCONNECT
elif incoming in NetworkPackets.NetLogicIncomes.list():
if incoming == NetworkPackets.NetLogicIncomes.PAIRED.value:
Constants.Network.IS_PAIRING = True
self.client.send(NetworkPackets.assemble(arr=Actions.COMPUTER.get_specs_as_str_arr()))
                elif incoming == NetworkPackets.NetLogicIncomes.INVALID.value:  # compare the value, matching the other branches
pass
class Operation(Enum):
"""
All the operations that can be asked to execute.
"""
VOL_UP = "VOL_UP"
VOL_DOWN = "VOL_DOWN"
PAUSE_PLAY_TOGGLE = "PTT"
SKIP = "SKIP"
PREV = "PREV"
MUTE = "MUTE"
OFF = "OFF"
SLEEP = "SLEEP"
RESTART = "RESTRT"
LOCK = "LCK"
LOG_OUT = "LGOT"
DISCONNECT = "DISCON"
MAGIC_BTN = "MAGIC"
SPECS_INFO = "SPECS"
USAGE = "USE" | en | 0.926078 | This class is responsible for dealing with any flow of net msgs. This function contains the full process of the sync phase. # Empty This functions deals with the execution of the required operations. :param incoming: Raw net msg. All the operations that can be asked to execute. | 2.450482 | 2 |
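A usage sketch for the SessionManager above, not taken from the original project, assuming the Orion server configured in Constants.Network is reachable; manage() returns Operation.DISCONNECT when the remote side ends the session:

manager = SessionManager()
manager.sync()                      # key exchange plus ID validation against the server
while True:
    raw = manager.client.receive()  # blocking read of the next net msg
    if manager.manage(raw) == Operation.DISCONNECT:
        break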
influxdb_service_sdk/model/container/resource_requirements_pb2.py | easyopsapis/easyops-api-python | 5 | 583 | <filename>influxdb_service_sdk/model/container/resource_requirements_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: resource_requirements.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='resource_requirements.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x38influxdb_service_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,])
_RESOURCEREQUIREMENTS = _descriptor.Descriptor(
name='ResourceRequirements',
full_name='container.ResourceRequirements',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='limits', full_name='container.ResourceRequirements.limits', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='requests', full_name='container.ResourceRequirements.requests', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=206,
)
_RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
_RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEREQUIREMENTS,
'__module__' : 'resource_requirements_pb2'
# @@protoc_insertion_point(class_scope:container.ResourceRequirements)
})
_sym_db.RegisterMessage(ResourceRequirements)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| <filename>influxdb_service_sdk/model/container/resource_requirements_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: resource_requirements.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='resource_requirements.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x38influxdb_service_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,])
_RESOURCEREQUIREMENTS = _descriptor.Descriptor(
name='ResourceRequirements',
full_name='container.ResourceRequirements',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='limits', full_name='container.ResourceRequirements.limits', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='requests', full_name='container.ResourceRequirements.requests', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=206,
)
_RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
_RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST
DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEREQUIREMENTS,
'__module__' : 'resource_requirements_pb2'
# @@protoc_insertion_point(class_scope:container.ResourceRequirements)
})
_sym_db.RegisterMessage(ResourceRequirements)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| en | 0.527987 | # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: resource_requirements.proto # @@protoc_insertion_point(imports) # @@protoc_insertion_point(class_scope:container.ResourceRequirements) # @@protoc_insertion_point(module_scope) | 1.329192 | 1 |
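The file above is compiler-generated ("DO NOT EDIT"), so the useful thing to show is how the generated class is consumed. A small round-trip sketch, assuming the protobuf runtime and the package's import path are available:

from influxdb_service_sdk.model.container import resource_requirements_pb2 as rr_pb2

req = rr_pb2.ResourceRequirements()
req.limits.SetInParent()            # mark the nested ResourceList field as present
payload = req.SerializeToString()   # wire-format bytes
decoded = rr_pb2.ResourceRequirements.FromString(payload)
assert decoded == req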
tests/test_http_client.py | bhch/async-stripe | 8 | 584 | from __future__ import absolute_import, division, print_function
import pytest
import json
import asyncio
import stripe
import urllib3
from stripe import six, util
from async_stripe.http_client import TornadoAsyncHTTPClient
pytestmark = pytest.mark.asyncio
VALID_API_METHODS = ("get", "post", "delete")
class StripeClientTestCase(object):
REQUEST_LIBRARIES = ["AsyncHTTPClient"]
@pytest.fixture
def request_mocks(self, mocker):
request_mocks = {}
for lib in self.REQUEST_LIBRARIES:
request_mocks[lib] = mocker.patch("async_stripe.http_client.%s" % (lib,))
return request_mocks
class TestNewDefaultHttpClient(StripeClientTestCase):
@pytest.fixture(autouse=True)
def setup_warnings(self, request_mocks):
original_filters = stripe.http_client.warnings.filters[:]
stripe.http_client.warnings.simplefilter("ignore")
yield
stripe.http_client.warnings.filters = original_filters
def check_default(self, none_libs, expected):
for lib in none_libs:
setattr(stripe.http_client, lib, None)
inst = stripe.http_client.new_default_http_client()
assert isinstance(inst, expected)
def test_new_default_http_client_tornado(self):
self.check_default((), TornadoAsyncHTTPClient)
class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase):
from contextlib import contextmanager
def assert_sleep_times(self, client, expected):
until = len(expected)
actual = list(
map(lambda i: client._sleep_time_seconds(i + 1), range(until))
)
assert expected == actual
@contextmanager
def mock_max_delay(self, new_value):
original_value = stripe.http_client.HTTPClient.MAX_DELAY
stripe.http_client.HTTPClient.MAX_DELAY = new_value
try:
yield self
finally:
stripe.http_client.HTTPClient.MAX_DELAY = original_value
def test_sleep_time_exponential_back_off(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
with self.mock_max_delay(10):
self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0])
def test_initial_delay_as_minimum(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t * 0.001
initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY
self.assert_sleep_times(client, [initial_delay] * 5)
def test_maximum_delay(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
max_delay = stripe.http_client.HTTPClient.MAX_DELAY
expected = [0.5, 1.0, max_delay, max_delay, max_delay]
self.assert_sleep_times(client, expected)
def test_retry_after_header(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
# Prefer retry-after if it's bigger
assert 30 == client._sleep_time_seconds(
2, (None, 409, {"retry-after": "30"})
)
# Prefer default if it's bigger
assert 2 == client._sleep_time_seconds(
3, (None, 409, {"retry-after": "1"})
)
# Ignore crazy-big values
assert 1 == client._sleep_time_seconds(
2, (None, 409, {"retry-after": "300"})
)
def test_randomness_added(self):
client = stripe.http_client.new_default_http_client()
random_value = 0.8
client._add_jitter_time = lambda t: t * random_value
base_value = stripe.http_client.HTTPClient.INITIAL_DELAY * random_value
with self.mock_max_delay(10):
expected = [
stripe.http_client.HTTPClient.INITIAL_DELAY,
base_value * 2,
base_value * 4,
base_value * 8,
base_value * 16,
]
self.assert_sleep_times(client, expected)
def test_jitter_has_randomness_but_within_range(self):
client = stripe.http_client.new_default_http_client()
jittered_ones = set(
map(lambda _: client._add_jitter_time(1), list(range(100)))
)
assert len(jittered_ones) > 1
assert all(0.5 <= val <= 1 for val in jittered_ones)
class TestRetryConditionsDefaultHttpClient(StripeClientTestCase):
def test_should_retry_on_codes(self):
one_xx = list(range(100, 104))
two_xx = list(range(200, 209))
three_xx = list(range(300, 308))
four_xx = list(range(400, 431))
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
codes = one_xx + two_xx + three_xx + four_xx
codes.remove(409)
# These status codes should not be retried by default.
for code in codes:
assert client._should_retry((None, code, None), None, 0) is False
# These status codes should be retried by default.
assert client._should_retry((None, 409, None), None, 0) is True
assert client._should_retry((None, 500, None), None, 0) is True
assert client._should_retry((None, 503, None), None, 0) is True
def test_should_retry_on_error(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
api_connection_error = mocker.Mock()
api_connection_error.should_retry = True
assert client._should_retry(None, api_connection_error, 0) is True
api_connection_error.should_retry = False
assert client._should_retry(None, api_connection_error, 0) is False
def test_should_retry_on_stripe_should_retry_true(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
headers = {"stripe-should-retry": "true"}
# Ordinarily, we would not retry a 400, but with the header as true, we would.
assert client._should_retry((None, 400, {}), None, 0) is False
assert client._should_retry((None, 400, headers), None, 0) is True
def test_should_retry_on_stripe_should_retry_false(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
headers = {"stripe-should-retry": "false"}
# Ordinarily, we would retry a 500, but with the header as false, we would not.
assert client._should_retry((None, 500, {}), None, 0) is True
assert client._should_retry((None, 500, headers), None, 0) is False
def test_should_retry_on_num_retries(self, mocker):
client = stripe.http_client.new_default_http_client()
max_test_retries = 10
client._max_network_retries = lambda: max_test_retries
api_connection_error = mocker.Mock()
api_connection_error.should_retry = True
assert (
client._should_retry(
None, api_connection_error, max_test_retries + 1
)
is False
)
assert (
client._should_retry((None, 409, None), None, max_test_retries + 1)
is False
)
class TestHTTPClient(object):
@pytest.fixture(autouse=True)
def setup_stripe(self):
orig_attrs = {"enable_telemetry": stripe.enable_telemetry}
stripe.enable_telemetry = False
yield
stripe.enable_telemetry = orig_attrs["enable_telemetry"]
async def test_sends_telemetry_on_second_request(self, mocker):
class TestClient(stripe.http_client.HTTPClient):
pass
stripe.enable_telemetry = True
url = "http://fake.url"
client = TestClient()
response_future = asyncio.Future()
response_future.set_result(["", 200, {"Request-Id": "req_123"}])
client.request = mocker.MagicMock(
return_value=response_future
)
_, code, _ = await client.request_with_retries("get", url, {}, None)
assert code == 200
client.request.assert_called_with("get", url, {}, None)
response_future = asyncio.Future()
response_future.set_result(["", 200, {"Request-Id": "req_234"}])
client.request = mocker.MagicMock(
return_value=response_future
)
_, code, _ = await client.request_with_retries("get", url, {}, None)
assert code == 200
args, _ = client.request.call_args
assert "X-Stripe-Client-Telemetry" in args[2]
telemetry = json.loads(args[2]["X-Stripe-Client-Telemetry"])
assert telemetry["last_request_metrics"]["request_id"] == "req_123"
class ClientTestBase(object):
@pytest.fixture
def request_mock(self, request_mocks):
return request_mocks[self.REQUEST_CLIENT.name]
@property
def valid_url(self, path="/foo"):
return "https://api.stripe.com%s" % (path,)
def make_request(self, method, url, headers, post_data):
client = self.REQUEST_CLIENT(verify_ssl_certs=True)
return client.request_with_retries(method, url, headers, post_data)
async def make_request_stream(self, method, url, headers, post_data):
client = self.REQUEST_CLIENT(verify_ssl_certs=True)
return await client.request_stream_with_retries(
method, url, headers, post_data
)
@pytest.fixture
def mock_response(self):
def mock_response(mock, body, code):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return mock_response
@pytest.fixture
def mock_error(self):
def mock_error(mock, error):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return mock_error
@pytest.fixture
def check_call(self):
def check_call(
mock, method, abs_url, headers, params, is_streaming=False
):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return check_call
def test_request(self, request_mock, mock_response, check_call):
mock_response(request_mock, '{"foo": "baz"}', 200)
for method in VALID_API_METHODS:
abs_url = self.valid_url
data = ""
if method != "post":
abs_url = "%s?%s" % (abs_url, data)
data = None
headers = {"my-header": "header val"}
body, code, _ = self.make_request(method, abs_url, headers, data)
assert code == 200
assert body == '{"foo": "baz"}'
check_call(request_mock, method, abs_url, data, headers)
def test_request_stream(
self, mocker, request_mock, mock_response, check_call
):
for method in VALID_API_METHODS:
mock_response(request_mock, "some streamed content", 200)
abs_url = self.valid_url
data = ""
if method != "post":
abs_url = "%s?%s" % (abs_url, data)
data = None
headers = {"my-header": "header val"}
print(dir(self))
print("make_request_stream" in dir(self))
stream, code, _ = self.make_request_stream(
method, abs_url, headers, data
)
assert code == 200
# Here we need to convert and align all content on one type (string)
# as some clients return a string stream others a byte stream.
body_content = stream.read()
if hasattr(body_content, "decode"):
body_content = body_content.decode("utf-8")
assert body_content == "some streamed content"
mocker.resetall()
def test_exception(self, request_mock, mock_error):
mock_error(request_mock)
with pytest.raises(stripe.error.APIConnectionError):
self.make_request("get", self.valid_url, {}, None)
class TestTornadoAsyncHTTPClient:
# :TODO: Write tests for tornado client
pass
class TestAPIEncode(StripeClientTestCase):
def test_encode_dict(self):
body = {"foo": {"dob": {"month": 1}, "name": "bat"}}
values = [t for t in stripe.api_requestor._api_encode(body)]
assert ("foo[dob][month]", 1) in values
assert ("foo[name]", "bat") in values
def test_encode_array(self):
body = {"foo": [{"dob": {"month": 1}, "name": "bat"}]}
values = [t for t in stripe.api_requestor._api_encode(body)]
assert ("foo[0][dob][month]", 1) in values
assert ("foo[0][name]", "bat") in values
| from __future__ import absolute_import, division, print_function
import pytest
import json
import asyncio
import stripe
import urllib3
from stripe import six, util
from async_stripe.http_client import TornadoAsyncHTTPClient
pytestmark = pytest.mark.asyncio
VALID_API_METHODS = ("get", "post", "delete")
class StripeClientTestCase(object):
REQUEST_LIBRARIES = ["AsyncHTTPClient"]
@pytest.fixture
def request_mocks(self, mocker):
request_mocks = {}
for lib in self.REQUEST_LIBRARIES:
request_mocks[lib] = mocker.patch("async_stripe.http_client.%s" % (lib,))
return request_mocks
class TestNewDefaultHttpClient(StripeClientTestCase):
@pytest.fixture(autouse=True)
def setup_warnings(self, request_mocks):
original_filters = stripe.http_client.warnings.filters[:]
stripe.http_client.warnings.simplefilter("ignore")
yield
stripe.http_client.warnings.filters = original_filters
def check_default(self, none_libs, expected):
for lib in none_libs:
setattr(stripe.http_client, lib, None)
inst = stripe.http_client.new_default_http_client()
assert isinstance(inst, expected)
def test_new_default_http_client_tornado(self):
self.check_default((), TornadoAsyncHTTPClient)
class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase):
from contextlib import contextmanager
def assert_sleep_times(self, client, expected):
until = len(expected)
actual = list(
map(lambda i: client._sleep_time_seconds(i + 1), range(until))
)
assert expected == actual
@contextmanager
def mock_max_delay(self, new_value):
original_value = stripe.http_client.HTTPClient.MAX_DELAY
stripe.http_client.HTTPClient.MAX_DELAY = new_value
try:
yield self
finally:
stripe.http_client.HTTPClient.MAX_DELAY = original_value
def test_sleep_time_exponential_back_off(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
with self.mock_max_delay(10):
self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0])
def test_initial_delay_as_minimum(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t * 0.001
initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY
self.assert_sleep_times(client, [initial_delay] * 5)
def test_maximum_delay(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
max_delay = stripe.http_client.HTTPClient.MAX_DELAY
expected = [0.5, 1.0, max_delay, max_delay, max_delay]
self.assert_sleep_times(client, expected)
def test_retry_after_header(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
# Prefer retry-after if it's bigger
assert 30 == client._sleep_time_seconds(
2, (None, 409, {"retry-after": "30"})
)
# Prefer default if it's bigger
assert 2 == client._sleep_time_seconds(
3, (None, 409, {"retry-after": "1"})
)
# Ignore crazy-big values
assert 1 == client._sleep_time_seconds(
2, (None, 409, {"retry-after": "300"})
)
def test_randomness_added(self):
client = stripe.http_client.new_default_http_client()
random_value = 0.8
client._add_jitter_time = lambda t: t * random_value
base_value = stripe.http_client.HTTPClient.INITIAL_DELAY * random_value
with self.mock_max_delay(10):
expected = [
stripe.http_client.HTTPClient.INITIAL_DELAY,
base_value * 2,
base_value * 4,
base_value * 8,
base_value * 16,
]
self.assert_sleep_times(client, expected)
def test_jitter_has_randomness_but_within_range(self):
client = stripe.http_client.new_default_http_client()
jittered_ones = set(
map(lambda _: client._add_jitter_time(1), list(range(100)))
)
assert len(jittered_ones) > 1
assert all(0.5 <= val <= 1 for val in jittered_ones)
class TestRetryConditionsDefaultHttpClient(StripeClientTestCase):
def test_should_retry_on_codes(self):
one_xx = list(range(100, 104))
two_xx = list(range(200, 209))
three_xx = list(range(300, 308))
four_xx = list(range(400, 431))
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
codes = one_xx + two_xx + three_xx + four_xx
codes.remove(409)
# These status codes should not be retried by default.
for code in codes:
assert client._should_retry((None, code, None), None, 0) is False
# These status codes should be retried by default.
assert client._should_retry((None, 409, None), None, 0) is True
assert client._should_retry((None, 500, None), None, 0) is True
assert client._should_retry((None, 503, None), None, 0) is True
def test_should_retry_on_error(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
api_connection_error = mocker.Mock()
api_connection_error.should_retry = True
assert client._should_retry(None, api_connection_error, 0) is True
api_connection_error.should_retry = False
assert client._should_retry(None, api_connection_error, 0) is False
def test_should_retry_on_stripe_should_retry_true(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
headers = {"stripe-should-retry": "true"}
# Ordinarily, we would not retry a 400, but with the header as true, we would.
assert client._should_retry((None, 400, {}), None, 0) is False
assert client._should_retry((None, 400, headers), None, 0) is True
def test_should_retry_on_stripe_should_retry_false(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
headers = {"stripe-should-retry": "false"}
# Ordinarily, we would retry a 500, but with the header as false, we would not.
assert client._should_retry((None, 500, {}), None, 0) is True
assert client._should_retry((None, 500, headers), None, 0) is False
def test_should_retry_on_num_retries(self, mocker):
client = stripe.http_client.new_default_http_client()
max_test_retries = 10
client._max_network_retries = lambda: max_test_retries
api_connection_error = mocker.Mock()
api_connection_error.should_retry = True
assert (
client._should_retry(
None, api_connection_error, max_test_retries + 1
)
is False
)
assert (
client._should_retry((None, 409, None), None, max_test_retries + 1)
is False
)
class TestHTTPClient(object):
@pytest.fixture(autouse=True)
def setup_stripe(self):
orig_attrs = {"enable_telemetry": stripe.enable_telemetry}
stripe.enable_telemetry = False
yield
stripe.enable_telemetry = orig_attrs["enable_telemetry"]
async def test_sends_telemetry_on_second_request(self, mocker):
class TestClient(stripe.http_client.HTTPClient):
pass
stripe.enable_telemetry = True
url = "http://fake.url"
client = TestClient()
response_future = asyncio.Future()
response_future.set_result(["", 200, {"Request-Id": "req_123"}])
client.request = mocker.MagicMock(
return_value=response_future
)
_, code, _ = await client.request_with_retries("get", url, {}, None)
assert code == 200
client.request.assert_called_with("get", url, {}, None)
response_future = asyncio.Future()
response_future.set_result(["", 200, {"Request-Id": "req_234"}])
client.request = mocker.MagicMock(
return_value=response_future
)
_, code, _ = await client.request_with_retries("get", url, {}, None)
assert code == 200
args, _ = client.request.call_args
assert "X-Stripe-Client-Telemetry" in args[2]
telemetry = json.loads(args[2]["X-Stripe-Client-Telemetry"])
assert telemetry["last_request_metrics"]["request_id"] == "req_123"
class ClientTestBase(object):
@pytest.fixture
def request_mock(self, request_mocks):
return request_mocks[self.REQUEST_CLIENT.name]
@property
def valid_url(self, path="/foo"):
return "https://api.stripe.com%s" % (path,)
def make_request(self, method, url, headers, post_data):
client = self.REQUEST_CLIENT(verify_ssl_certs=True)
return client.request_with_retries(method, url, headers, post_data)
async def make_request_stream(self, method, url, headers, post_data):
client = self.REQUEST_CLIENT(verify_ssl_certs=True)
return await client.request_stream_with_retries(
method, url, headers, post_data
)
@pytest.fixture
def mock_response(self):
def mock_response(mock, body, code):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return mock_response
@pytest.fixture
def mock_error(self):
def mock_error(mock, error):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return mock_error
@pytest.fixture
def check_call(self):
def check_call(
mock, method, abs_url, headers, params, is_streaming=False
):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return check_call
def test_request(self, request_mock, mock_response, check_call):
mock_response(request_mock, '{"foo": "baz"}', 200)
for method in VALID_API_METHODS:
abs_url = self.valid_url
data = ""
if method != "post":
abs_url = "%s?%s" % (abs_url, data)
data = None
headers = {"my-header": "header val"}
body, code, _ = self.make_request(method, abs_url, headers, data)
assert code == 200
assert body == '{"foo": "baz"}'
check_call(request_mock, method, abs_url, data, headers)
def test_request_stream(
self, mocker, request_mock, mock_response, check_call
):
for method in VALID_API_METHODS:
mock_response(request_mock, "some streamed content", 200)
abs_url = self.valid_url
data = ""
if method != "post":
abs_url = "%s?%s" % (abs_url, data)
data = None
headers = {"my-header": "header val"}
print(dir(self))
print("make_request_stream" in dir(self))
stream, code, _ = self.make_request_stream(
method, abs_url, headers, data
)
assert code == 200
# Here we need to convert and align all content on one type (string)
# as some clients return a string stream others a byte stream.
body_content = stream.read()
if hasattr(body_content, "decode"):
body_content = body_content.decode("utf-8")
assert body_content == "some streamed content"
mocker.resetall()
def test_exception(self, request_mock, mock_error):
mock_error(request_mock)
with pytest.raises(stripe.error.APIConnectionError):
self.make_request("get", self.valid_url, {}, None)
class TestTornadoAsyncHTTPClient:
# :TODO: Write tests for tornado client
pass
class TestAPIEncode(StripeClientTestCase):
def test_encode_dict(self):
body = {"foo": {"dob": {"month": 1}, "name": "bat"}}
values = [t for t in stripe.api_requestor._api_encode(body)]
assert ("foo[dob][month]", 1) in values
assert ("foo[name]", "bat") in values
def test_encode_array(self):
body = {"foo": [{"dob": {"month": 1}, "name": "bat"}]}
values = [t for t in stripe.api_requestor._api_encode(body)]
assert ("foo[0][dob][month]", 1) in values
assert ("foo[0][name]", "bat") in values
| en | 0.879804 | # Prefer retry-after if it's bigger # Prefer default if it's bigger # Ignore crazy-big values # These status codes should not be retried by default. # These status codes should be retried by default. # Ordinarily, we would not retry a 400, but with the header as true, we would. # Ordinarily, we would retry a 500, but with the header as false, we would not. # Here we need to convert and align all content on one type (string) # as some clients return a string stream others a byte stream. # :TODO: Write tests for tornado client | 1.946393 | 2 |
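The retry tests above pin down the backoff behaviour of the client: an initial delay of 0.5s that doubles per retry, a cap at MAX_DELAY, jitter that keeps the result within [delay/2, delay], and a Retry-After header that can raise the wait but is ignored when unreasonably large. A rough standalone reimplementation consistent with those assertions (a sketch, not the library's actual code; MAX_RETRY_AFTER is an assumed cut-off) could look like:

import random

INITIAL_DELAY = 0.5
MAX_DELAY = 2.0
MAX_RETRY_AFTER = 60  # assumption: larger Retry-After values are ignored

def sleep_time_seconds(num_retries, retry_after=None):
    delay = min(INITIAL_DELAY * (2 ** (num_retries - 1)), MAX_DELAY)
    delay *= random.uniform(0.5, 1.0)  # jitter keeps the value in [delay/2, delay]
    if retry_after is not None and retry_after <= MAX_RETRY_AFTER:
        delay = max(delay, retry_after)
    return delay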
http/static/jsonvis.py | cheeseywhiz/cheeseywhiz | 0 | 585 | <filename>http/static/jsonvis.py
"""\
Provides html file visualization of a json dataset
"""
import json
import subprocess
class JsonVis:
def _open_list(self):
self.instructions.append(('open_list', None))
def _list_item(self, data):
self.instructions.append(('list_item', str(data)))
def _horiz_rule(self):
self.instructions.append(('horiz_rule', None))
def _close_list(self):
self.instructions.append(('close_list', None))
def _iterate(self, data: iter):
if isinstance(data, dict):
for key, value in data.items():
self._iterate(key)
self._open_list()
self._iterate(value)
self._close_list()
elif isinstance(data, list):
self._open_list()
for item in data:
self._iterate(item)
self._horiz_rule()
self._close_list()
else:
self._list_item(data)
def download(self, url: str):
"""
Store a python dictionary generated from json data at <url> in
self.data. Returns self.
"""
data = subprocess.run(
f"curl '{url}'", # Quotes required around url for URL parameters
stdout=subprocess.PIPE,
shell=True
).stdout
self.data = json.loads(data)
return self
def make_instructions(self):
"""
Take self.data and return a list of instructions about its html
visualization that is parsed by json.html.
"""
self.instructions = []
self._open_list()
self._iterate(self.data)
self._close_list()
return self.instructions
| <filename>http/static/jsonvis.py
"""\
Provides html file visualization of a json dataset
"""
import json
import subprocess
class JsonVis:
def _open_list(self):
self.instructions.append(('open_list', None))
def _list_item(self, data):
self.instructions.append(('list_item', str(data)))
def _horiz_rule(self):
self.instructions.append(('horiz_rule', None))
def _close_list(self):
self.instructions.append(('close_list', None))
def _iterate(self, data: iter):
if isinstance(data, dict):
for key, value in data.items():
self._iterate(key)
self._open_list()
self._iterate(value)
self._close_list()
elif isinstance(data, list):
self._open_list()
for item in data:
self._iterate(item)
self._horiz_rule()
self._close_list()
else:
self._list_item(data)
def download(self, url: str):
"""
Store a python dictionary generated from json data at <url> in
self.data. Returns self.
"""
data = subprocess.run(
f"curl '{url}'", # Quotes required around url for URL parameters
stdout=subprocess.PIPE,
shell=True
).stdout
self.data = json.loads(data)
return self
def make_instructions(self):
"""
Take self.data and return a list of instructions about its html
visualization that is parsed by json.html.
"""
self.instructions = []
self._open_list()
self._iterate(self.data)
self._close_list()
return self.instructions
| en | 0.657096 | \ Provides html file visualization of a json dataset Store a python dictionary generated from json data at <url> in self.data. Returns self. # Quotes required around url for URL parameters Take self.data and return a list of instructions about its html visualization that is parsed by json.html. | 3.186198 | 3 |
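A short usage sketch for JsonVis, not part of the original file; the URL is a placeholder, and the (instruction, value) tuples it yields are what the accompanying json.html page is expected to consume:

vis = JsonVis()
instructions = vis.download("https://api.example.com/data.json").make_instructions()
for instruction, value in instructions:
    print(instruction, value)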
sim2d_game_analyzer/MainWindow.py | goncamateus/sim2d_game_analyzer | 1 | 586 | import sys
from PyQt5 import QtGui
from PyQt5.QtCore import QEvent, QPoint, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow,
QTabWidget, QVBoxLayout, QWidget)
from sim2d_game_analyzer.fmdb_tab import FMDBTab
class MainWindow(QMainWindow):
title = "Sim2d Game Analyzer"
top = 500
left = 100
width = 70*4
height = 130*4
def __init__(self):
QMainWindow.__init__(self)
self.setGeometry(self.screen().geometry())
self.setWindowTitle(self.title)
self.setWindowIcon(QIcon("sim2d_game_analyzer/figures/icon.png"))
vbox = QVBoxLayout()
tabWidget = QTabWidget()
tabWidget.setFont(QtGui.QFont("Sanserif", 12))
self.fmdb_tab = FMDBTab()
tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME)
vbox.addWidget(tabWidget)
wid = QWidget(self)
self.setCentralWidget(wid)
wid.setLayout(vbox)
if __name__ == "__main__":
app = QApplication(sys.argv)
mainwindow = MainWindow()
sys.exit(app.exec())
| import sys
from PyQt5 import QtGui
from PyQt5.QtCore import QEvent, QPoint, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow,
QTabWidget, QVBoxLayout, QWidget)
from sim2d_game_analyzer.fmdb_tab import FMDBTab
class MainWindow(QMainWindow):
title = "Sim2d Game Analyzer"
top = 500
left = 100
width = 70*4
height = 130*4
def __init__(self):
QMainWindow.__init__(self)
self.setGeometry(self.screen().geometry())
self.setWindowTitle(self.title)
self.setWindowIcon(QIcon("sim2d_game_analyzer/figures/icon.png"))
vbox = QVBoxLayout()
tabWidget = QTabWidget()
tabWidget.setFont(QtGui.QFont("Sanserif", 12))
self.fmdb_tab = FMDBTab()
tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME)
vbox.addWidget(tabWidget)
wid = QWidget(self)
self.setCentralWidget(wid)
wid.setLayout(vbox)
if __name__ == "__main__":
app = QApplication(sys.argv)
mainwindow = MainWindow()
sys.exit(app.exec())
| none | 1 | 2.336098 | 2 |
|
cmd/extractor.py | Grammarian/sicle | 0 | 587 | # pip install openpyxl
# pip install cuid
import os.path
import json
import datetime
from openpyxl import load_workbook
import cuid # https://github.com/necaris/cuid.py - create uuid's in the format that graphcool expects
SOURCE_XLSX = "./data/CLP_combined.xlsx"
EXTRACT_OUTPUT_DIR = "../server/extract"
SCHOOL_TITLES = ["ORGANISATION_ID", "ORGANISATION_NAME", "ORG_ELECTORATE", "P_ADDRESS1", "P_SUBURB", "P_STATE",
"P_POSTCODE", "S_ADDRESS1", "S_SUBURB", "S_STATE", "S_POSTCODE", "SCHOOL_NAME", "SCH_ELECTORATE",
"SCHOOL_ID", "SCHOOL_P_ADDRESS1",
"SCHOOL_P_SUBURB", "SCHOOL_P_STATE", "SCHOOL_P_POSTCODE", "SCHOOL_S_ADDRESS1", "SCHOOL_S_SUBURB",
"SCHOOL_S_STATE", "SCHOOL_S_POSTCODE", "LOCATION_NAME", "LOC_ELECTORATE", "LOC_S_ADDRESS1",
"LOC_S_SUBURB", "LOC_S_STATE", "LOC_S_POSTCODE"]
ORGANISATION_FIELDS = {"ORGANISATION_ID": "CLP_ORGANISATION_ID", "ORGANISATION_NAME": "NAME",
"ORG_ELECTORATE": "ELECTORATE", "S_ADDRESS1": "ADDRESS", "S_SUBURB": "SUBURB",
"S_STATE": "STATE", "S_POSTCODE": "POSTCODE", }
SCHOOL_FIELDS = {"SCHOOL_NAME": "NAME", "SCH_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID",
"ORGANISATION_ID": "CLP_ORGANISATION_ID",
"SCHOOL_S_ADDRESS1": "ADDRESS", "SCHOOL_S_SUBURB": "SUBURB", "SCHOOL_S_STATE": "STATE",
"SCHOOL_S_POSTCODE": "POSTCODE", }
LOCATION_FIELDS = {"LOCATION_NAME": "NAME", "LOC_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID",
"LOC_S_ADDRESS1": "ADDRESS", "LOC_S_SUBURB": "SUBURB", "LOC_S_STATE": "STATE",
"LOC_S_POSTCODE": "POSTCODE"}
TEACHER_TITLES = ["TEACHER_ID", "ORGANISATION_NAME", "SCHOOL_NAME", "TEACHER_NAME", "TITLE", "LNAME", "FNAME",
"TEACHER_LANGUAGES", "P_ADDRESS1", "P_ADDRESS2", "P_SUBURB", "P_STATE", "P_POSTCODE",
"TELEPHONE", "TEL_EVENING", "EMAIL", "MOBILE", "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION",
"FIELD_OF_EDUCATION", "DEGREE_COUNTRY", "DEGREE_YEAR", "ORGANISATION_ID", "SCHOOL_ID"]
STUDENT_TITLES = ["SCHOOL_NAME", "SCHOOL_ID", "STUDENT_ID", "STUDENT_SRN", "LOCATION_NAME",
"STUDENT_LNAME", "STUDENT_FNAME", "DOB", "TEL", "LOCATION_NAME_1"]
TEACHER_FIELDS = {"TEACHER_ID": "CLP_TEACHER_ID", "ORGANISATION_NAME": "ORGANISATION_NAME",
"SCHOOL_NAME": "SCHOOL_NAME", "TITLE": "TITLE",
"LNAME": "FAMILY_NAME", "FNAME": "GIVEN_NAMES", "TEACHER_LANGUAGES": "LANGUAGES",
"P_ADDRESS1": "ADDRESS1", "P_ADDRESS2": "ADDRESS2", "P_SUBURB": "SUBURB",
"P_STATE": "STATE", "P_POSTCODE": "POSTCODE",
"TELEPHONE": "DAY_PHONE", "TEL_EVENING": "EVENING_PHONE", "EMAIL": "EMAIL", "MOBILE": "MOBILE",
"LEVEL_TAUGHT": "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION": "EDUCATION_LEVEL",
"FIELD_OF_EDUCATION": "EDUCATION_FIELD", "DEGREE_COUNTRY": "EDUCATION_COUNTRY",
"DEGREE_YEAR": "EDUCATION_YEAR",
"ORGANISATION_ID": "ORGANISATION_ID", "SCHOOL_ID": "SCHOOL_ID", }
STUDENT_FIELDS = {"SCHOOL_NAME": "SCHOOL_NAME", "SCHOOL_ID": "SCHOOL_ID", "STUDENT_ID": "CLP_STUDENT_ID",
"STUDENT_SRN": "SRN", "LOCATION_NAME": "LOCATION",
"STUDENT_LNAME": "FAMILY_NAME", "STUDENT_FNAME": "GIVEN_NAMES", "DOB": "DATE_OF_BIRTH",
"TEL": "PHONE", "LOCATION_NAME_1": "DAY_SCHOOL", }
class Sheet:
"Data container object to hold the contents of one sheet within an excel spreadsheet"
def __init__(self, name, titles=None, rows=None):
self.name = name
self.titles = titles or []
self.rows = rows or []
def convert_row_to_dict(titles, row):
data = {}
for (i, cell) in enumerate(row):
        if cell.value is not None:  # openpyxl cells expose .value (lowercase)
data[titles[i]] = str(cell.value)
return data
def convert_xlsx(xlsx_file):
"""Convert the given XLSX spreadsheet to iterable of Sheet objects,
    in which each row has been converted into a dictionary"""
work_book = load_workbook(filename=xlsx_file, read_only=True, data_only=True)
for sheet in work_book:
rows = [x for x in sheet.iter_rows()]
if rows:
titles = [cell.value for cell in rows[0]]
dicts = [convert_row_to_dict(titles, row) for row in rows[1:]]
yield Sheet(sheet.title, titles, dicts)
else:
yield Sheet(sheet.title)
def to_camel(s):
"""Convert an underscored title into camel case. 'PARENT_ORGANISATION_ID' => 'parentOrganisationId'"""
bits = [(x.lower() if i == 0 else x.title())
for (i, x) in enumerate(s.split("_"))]
return "".join(bits)
def relative_to_absolute(relative_path):
path_to_py = os.path.abspath(os.path.dirname(__file__))
return os.path.join(path_to_py, relative_path)
def extract(fields, row_as_dict):
data = {}
for (k, v) in fields.items():
data[to_camel(v)] = row_as_dict[k]
return data
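# Illustrative sketch (not part of the original script): how extract() and to_camel()
# map one raw spreadsheet row using the ORGANISATION_FIELDS definition above.
# The row values here are made up.
def _demo_extract():
    row = {"ORGANISATION_ID": "123", "ORGANISATION_NAME": "Example Org",
           "ORG_ELECTORATE": "Somewhere", "S_ADDRESS1": "1 Main St",
           "S_SUBURB": "Suburbia", "S_STATE": "NSW", "S_POSTCODE": "2000"}
    return extract(ORGANISATION_FIELDS, row)
    # -> {"clpOrganisationId": "123", "name": "Example Org", "electorate": "Somewhere",
    #     "address": "1 Main St", "suburb": "Suburbia", "state": "NSW", "postcode": "2000"}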
def process_sheet(sheet, titles, field_defns):
if titles != sheet.titles:
print("Sheet doesn't have expected titles:", [(i, x) for (i, x) in enumerate(titles) if x != sheet.titles[i]])
return []
structs = [[extract(defn, x) for x in sheet.rows] for defn in field_defns]
return structs
def unique(key, dicts):
t = {x[key]: x for x in dicts}
return t.values()
def now_as_iso8601():
return datetime.datetime.now().replace(microsecond=0).isoformat() + "Z"
def inject_required(type_name, dicts):
"Inject the required fields that graphcool import required"
for x in dicts:
x["_typeName"] = type_name
x["id"] = cuid.cuid()
x["createdAt"] = x["updatedAt"] = now_as_iso8601()
return list(dicts)
def prepare_organisations(organisations):
unique_orgs = unique("clpOrganisationId", organisations)
fat_orgs = inject_required("ClpOrganisation", unique_orgs)
return fat_orgs
def prepare_schools(schools):
uniques = unique("clpSchoolId", schools)
injected = inject_required("ClpSchool", uniques)
return injected
def prepare_locations(locations):
# There are multiple rows per location, each of which is identical except for being related to a different school.
# We have to collect all the schools that meet at the same location.
uniques = {}
for x in locations:
# get an existing location with the given name, or add the new location
location = uniques.setdefault(x["name"], x)
related_schools = location.setdefault("schools", list())
related_schools.append(x.pop("clpSchoolId"))
injected = inject_required("ClpLocation", uniques.values())
# FIX THIS - Current extract doesn't include the CLP location id :( Make one up for the time being
for x in injected:
x["clpLocationId"] = cuid.cuid()
return injected
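# Illustrative sketch (made-up ids): the setdefault() grouping above collapses duplicate
# location rows into a single node whose "schools" list holds every related school id.
def _demo_location_grouping():
    rows = [{"name": "Hall A", "clpSchoolId": "S1"},
            {"name": "Hall A", "clpSchoolId": "S2"}]
    grouped = prepare_locations(rows)
    # -> one ClpLocation dict, with grouped[0]["schools"] == ["S1", "S2"]
    return grouped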
def convert_dob_to_datetime(s):
"Convert the string from 99/MON/YY to a ISO date"
dt = datetime.datetime.strptime(s, "%d/%b/%y")
return dt.isoformat() + ".0Z" # GraphCool import insists on microseconds, hence the ".0"
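# Illustrative check (hypothetical date): Python's %y places two-digit years 00-68
# in 2000-2068, so "01/Jan/05" parses as 1 January 2005.
def _demo_dob_conversion():
    return convert_dob_to_datetime("01/Jan/05")  # -> "2005-01-01T00:00:00.0Z"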
def prepare_students(students):
uniques = unique("clpStudentId", students)
injected = inject_required("ClpStudent", uniques)
for x in injected:
x["dateOfBirth"] = convert_dob_to_datetime(x["dateOfBirth"])
return injected
def prepare_teachers(teachers):
# Like locations, the same teacher can have multiple records,
# each of which is identical except for being related to a different school.
# We have to collect all the schools that the same teacher is teaching at.
uniques = {}
for x in teachers:
# get an existing teacher with that id, or add the new teacher record
teacher = uniques.setdefault(x["clpTeacherId"], x)
related_schools = teacher.setdefault("schools", list())
related_schools.append(x.pop("schoolId"))
injected = inject_required("ClpTeacher", uniques.values())
return injected
def extract_from_xlsx(file_path):
for sheet in convert_xlsx(file_path):
if sheet.name == "SCHOOL-ORG":
(organisations, schools, locations) = process_sheet(
sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS])
elif sheet.name == "Teacher":
(teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS])
elif sheet.name == "Student":
(students, ) = process_sheet(sheet, STUDENT_TITLES, [STUDENT_FIELDS])
else:
print("Ignoring sheet:", sheet.name)
return (organisations, schools, locations, teachers, students)
def copy_without(dicts, *keys_to_remove):
"Return iterable that contains copies of the given dictionary with all the given keys removed"
copies = [x.copy() for x in dicts]
for d in copies:
for to_remove in keys_to_remove:
d.pop(to_remove, None)
return copies
def write_nodes(*list_of_lists):
for (i, one_list) in enumerate(list_of_lists):
nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), "nodes"))
os.makedirs(nodes_dir, exist_ok=True)
path = os.path.join(nodes_dir, "1.json")
with open(path, "w") as f:
nodes = {
"valueType": "nodes",
"values": one_list
}
f.write(json.dumps(nodes))
def write_relations(list_of_lists):
for (i, one_list) in enumerate(list_of_lists):
nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + "-relations" + str(i), "relations"))
os.makedirs(nodes_dir, exist_ok=True)
path = os.path.join(nodes_dir, "1.json")
with open(path, "w") as f:
nodes = {
"valueType": "relations",
"values": list(one_list)
}
f.write(json.dumps(nodes))
def chunks(n, l):
"""Yield n successive similar-sized chunks from l."""
chunk_size = 1 + len(l) // n
for i in range(0, len(l), chunk_size):
yield l[i:i + chunk_size]
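# Illustrative check: with n=3 and ten items the chunk size is 1 + 10 // 3 = 4,
# so the list is split into chunks of 4, 4 and 2 elements.
def _demo_chunks():
    return list(chunks(3, list(range(10))))
    # -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]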
def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers, raw_students):
return (
prepare_organisations(raw_organisations),
prepare_schools(raw_schools),
prepare_locations(raw_locations),
prepare_teachers(raw_teachers),
prepare_students(raw_students)
)
def make_relation(entity1, id1, field1, entity2, id2, field2):
return [
{"_typeName": entity1, "id": id1, "fieldName": field1},
{"_typeName": entity2, "id": id2, "fieldName": field2}
]
def generate_relations(organisations, schools, locations, teachers, students):
# Build school -> organisation relations
org_keys = {x["clpOrganisationId"]: x["id"] for x in organisations}
yield [make_relation("ClpOrganisation", org_keys[x["clpOrganisationId"]], "schools",
"ClpSchool", x["id"], "organisation") for x in schools]
# Build location -> school relations
school_keys = {x["clpSchoolId"]: x["id"] for x in schools}
yield [make_relation("ClpLocation", location["id"], "schools",
"ClpSchool", school_keys[schoolId], "locations")
for location in locations for schoolId in location.get("schools", [])]
# Build teacher -> school relations
yield [make_relation("ClpTeacher", teacher["id"], "schools",
"ClpSchool", school_keys[schoolId], "teachers")
for teacher in teachers for schoolId in teacher.get("schools", [])]
# Build student -> school relations
yield [make_relation("ClpStudent", student["id"], "school",
"ClpSchool", school_keys[student["schoolId"]], "students")
for student in students if student["schoolId"] in school_keys]
def main():
xlsx_path = relative_to_absolute(SOURCE_XLSX)
raw_collections = extract_from_xlsx(xlsx_path)
(organisations, schools, locations, teachers, students) = prepare(*raw_collections)
write_nodes(
organisations,
copy_without(schools, "clpOrganisationId"),
copy_without(locations, "schools"),
copy_without(teachers, "organisationId", "organisationName", "schools", "schoolName"),
*chunks(3, copy_without(students, "schoolId", "schoolName", "location")))
write_relations(generate_relations(organisations, schools, locations, teachers, students))
if __name__ == "__main__":
main()
| # pip install openpyxl
# pip install cuid
import os.path
import json
import datetime
from openpyxl import load_workbook
import cuid # https://github.com/necaris/cuid.py - create uuid's in the format that graphcool expects
SOURCE_XLSX = "./data/CLP_combined.xlsx"
EXTRACT_OUTPUT_DIR = "../server/extract"
SCHOOL_TITLES = ["ORGANISATION_ID", "ORGANISATION_NAME", "ORG_ELECTORATE", "P_ADDRESS1", "P_SUBURB", "P_STATE",
"P_POSTCODE", "S_ADDRESS1", "S_SUBURB", "S_STATE", "S_POSTCODE", "SCHOOL_NAME", "SCH_ELECTORATE",
"SCHOOL_ID", "SCHOOL_P_ADDRESS1",
"SCHOOL_P_SUBURB", "SCHOOL_P_STATE", "SCHOOL_P_POSTCODE", "SCHOOL_S_ADDRESS1", "SCHOOL_S_SUBURB",
"SCHOOL_S_STATE", "SCHOOL_S_POSTCODE", "LOCATION_NAME", "LOC_ELECTORATE", "LOC_S_ADDRESS1",
"LOC_S_SUBURB", "LOC_S_STATE", "LOC_S_POSTCODE"]
ORGANISATION_FIELDS = {"ORGANISATION_ID": "CLP_ORGANISATION_ID", "ORGANISATION_NAME": "NAME",
"ORG_ELECTORATE": "ELECTORATE", "S_ADDRESS1": "ADDRESS", "S_SUBURB": "SUBURB",
"S_STATE": "STATE", "S_POSTCODE": "POSTCODE", }
SCHOOL_FIELDS = {"SCHOOL_NAME": "NAME", "SCH_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID",
"ORGANISATION_ID": "CLP_ORGANISATION_ID",
"SCHOOL_S_ADDRESS1": "ADDRESS", "SCHOOL_S_SUBURB": "SUBURB", "SCHOOL_S_STATE": "STATE",
"SCHOOL_S_POSTCODE": "POSTCODE", }
LOCATION_FIELDS = {"LOCATION_NAME": "NAME", "LOC_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID",
"LOC_S_ADDRESS1": "ADDRESS", "LOC_S_SUBURB": "SUBURB", "LOC_S_STATE": "STATE",
"LOC_S_POSTCODE": "POSTCODE"}
TEACHER_TITLES = ["TEACHER_ID", "ORGANISATION_NAME", "SCHOOL_NAME", "TEACHER_NAME", "TITLE", "LNAME", "FNAME",
"TEACHER_LANGUAGES", "P_ADDRESS1", "P_ADDRESS2", "P_SUBURB", "P_STATE", "P_POSTCODE",
"TELEPHONE", "TEL_EVENING", "EMAIL", "MOBILE", "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION",
"FIELD_OF_EDUCATION", "DEGREE_COUNTRY", "DEGREE_YEAR", "ORGANISATION_ID", "SCHOOL_ID"]
STUDENT_TITLES = ["SCHOOL_NAME", "SCHOOL_ID", "STUDENT_ID", "STUDENT_SRN", "LOCATION_NAME",
"STUDENT_LNAME", "STUDENT_FNAME", "DOB", "TEL", "LOCATION_NAME_1"]
TEACHER_FIELDS = {"TEACHER_ID": "CLP_TEACHER_ID", "ORGANISATION_NAME": "ORGANISATION_NAME",
"SCHOOL_NAME": "SCHOOL_NAME", "TITLE": "TITLE",
"LNAME": "FAMILY_NAME", "FNAME": "GIVEN_NAMES", "TEACHER_LANGUAGES": "LANGUAGES",
"P_ADDRESS1": "ADDRESS1", "P_ADDRESS2": "ADDRESS2", "P_SUBURB": "SUBURB",
"P_STATE": "STATE", "P_POSTCODE": "POSTCODE",
"TELEPHONE": "DAY_PHONE", "TEL_EVENING": "EVENING_PHONE", "EMAIL": "EMAIL", "MOBILE": "MOBILE",
"LEVEL_TAUGHT": "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION": "EDUCATION_LEVEL",
"FIELD_OF_EDUCATION": "EDUCATION_FIELD", "DEGREE_COUNTRY": "EDUCATION_COUNTRY",
"DEGREE_YEAR": "EDUCATION_YEAR",
"ORGANISATION_ID": "ORGANISATION_ID", "SCHOOL_ID": "SCHOOL_ID", }
STUDENT_FIELDS = {"SCHOOL_NAME": "SCHOOL_NAME", "SCHOOL_ID": "SCHOOL_ID", "STUDENT_ID": "CLP_STUDENT_ID",
"STUDENT_SRN": "SRN", "LOCATION_NAME": "LOCATION",
"STUDENT_LNAME": "FAMILY_NAME", "STUDENT_FNAME": "GIVEN_NAMES", "DOB": "DATE_OF_BIRTH",
"TEL": "PHONE", "LOCATION_NAME_1": "DAY_SCHOOL", }
class Sheet:
"Data container object to hold the contents of one sheet within an excel spreadsheet"
def __init__(self, name, titles=None, rows=None):
self.name = name
self.titles = titles or []
self.rows = rows or []
def convert_row_to_dict(titles, row):
data = {}
for (i, cell) in enumerate(row):
if cell.value is not None:
data[titles[i]] = str(cell.value)
return data
def convert_xlsx(xlsx_file):
"""Convert the given XLSX spreadsheet to iterable of Sheet objects,
in which row has been converted into a dictionary"""
work_book = load_workbook(filename=xlsx_file, read_only=True, data_only=True)
for sheet in work_book:
rows = [x for x in sheet.iter_rows()]
if rows:
titles = [cell.value for cell in rows[0]]
dicts = [convert_row_to_dict(titles, row) for row in rows[1:]]
yield Sheet(sheet.title, titles, dicts)
else:
yield Sheet(sheet.title)
def to_camel(s):
"""Convert an underscored title into camel case. 'PARENT_ORGANISATION_ID' => 'parentOrganisationId'"""
bits = [(x.lower() if i == 0 else x.title())
for (i, x) in enumerate(s.split("_"))]
return "".join(bits)
def relative_to_absolute(relative_path):
path_to_py = os.path.abspath(os.path.dirname(__file__))
return os.path.join(path_to_py, relative_path)
def extract(fields, row_as_dict):
data = {}
for (k, v) in fields.items():
data[to_camel(v)] = row_as_dict[k]
return data
def process_sheet(sheet, titles, field_defns):
if titles != sheet.titles:
print("Sheet doesn't have expected titles:", [(i, x) for (i, x) in enumerate(titles) if x != sheet.titles[i]])
return []
structs = [[extract(defn, x) for x in sheet.rows] for defn in field_defns]
return structs
def unique(key, dicts):
t = {x[key]: x for x in dicts}
return t.values()
def now_as_iso8601():
return datetime.datetime.now().replace(microsecond=0).isoformat() + "Z"
def inject_required(type_name, dicts):
"Inject the required fields that graphcool import required"
for x in dicts:
x["_typeName"] = type_name
x["id"] = cuid.cuid()
x["createdAt"] = x["updatedAt"] = now_as_iso8601()
return list(dicts)
def prepare_organisations(organisations):
unique_orgs = unique("clpOrganisationId", organisations)
fat_orgs = inject_required("ClpOrganisation", unique_orgs)
return fat_orgs
def prepare_schools(schools):
uniques = unique("clpSchoolId", schools)
injected = inject_required("ClpSchool", uniques)
return injected
def prepare_locations(locations):
# There are multiple rows per location, each of which is identical except for being related to a different school.
# We have to collect all the schools that meet at the same location.
uniques = {}
for x in locations:
# get an existing location with the given name, or add the new location
location = uniques.setdefault(x["name"], x)
related_schools = location.setdefault("schools", list())
related_schools.append(x.pop("clpSchoolId"))
injected = inject_required("ClpLocation", uniques.values())
# FIX THIS - Current extract doesn't include the CLP location id :( Make one up for the time being
for x in injected:
x["clpLocationId"] = cuid.cuid()
return injected
def convert_dob_to_datetime(s):
"Convert the string from 99/MON/YY to a ISO date"
dt = datetime.datetime.strptime(s, "%d/%b/%y")
return dt.isoformat() + ".0Z" # GraphCool import insists on microseconds, hence the ".0"
def prepare_students(students):
uniques = unique("clpStudentId", students)
injected = inject_required("ClpStudent", uniques)
for x in injected:
x["dateOfBirth"] = convert_dob_to_datetime(x["dateOfBirth"])
return injected
def prepare_teachers(teachers):
# Like locations, the same teacher can have multiple records,
# each of which is identical except for being related to a different school.
# We have to collect all the schools that the same teacher is teaching at.
uniques = {}
for x in teachers:
# get an existing teacher with that id, or add the new teacher record
teacher = uniques.setdefault(x["clpTeacherId"], x)
related_schools = teacher.setdefault("schools", list())
related_schools.append(x.pop("schoolId"))
injected = inject_required("ClpTeacher", uniques.values())
return injected
def extract_from_xlsx(file_path):
for sheet in convert_xlsx(file_path):
if sheet.name == "SCHOOL-ORG":
(organisations, schools, locations) = process_sheet(
sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS])
elif sheet.name == "Teacher":
(teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS])
elif sheet.name == "Student":
(students, ) = process_sheet(sheet, STUDENT_TITLES, [STUDENT_FIELDS])
else:
print("Ignoring sheet:", sheet.name)
return (organisations, schools, locations, teachers, students)
def copy_without(dicts, *keys_to_remove):
"Return iterable that contains copies of the given dictionary with all the given keys removed"
copies = [x.copy() for x in dicts]
for d in copies:
for to_remove in keys_to_remove:
d.pop(to_remove, None)
return copies
def write_nodes(*list_of_lists):
for (i, one_list) in enumerate(list_of_lists):
nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), "nodes"))
os.makedirs(nodes_dir, exist_ok=True)
path = os.path.join(nodes_dir, "1.json")
with open(path, "w") as f:
nodes = {
"valueType": "nodes",
"values": one_list
}
f.write(json.dumps(nodes))
def write_relations(list_of_lists):
for (i, one_list) in enumerate(list_of_lists):
nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + "-relations" + str(i), "relations"))
os.makedirs(nodes_dir, exist_ok=True)
path = os.path.join(nodes_dir, "1.json")
with open(path, "w") as f:
nodes = {
"valueType": "relations",
"values": list(one_list)
}
f.write(json.dumps(nodes))
def chunks(n, l):
"""Yield n successive similar-sized chunks from l."""
chunk_size = 1 + len(l) // n
for i in range(0, len(l), chunk_size):
yield l[i:i + chunk_size]
def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers, raw_students):
return (
prepare_organisations(raw_organisations),
prepare_schools(raw_schools),
prepare_locations(raw_locations),
prepare_teachers(raw_teachers),
prepare_students(raw_students)
)
def make_relation(entity1, id1, field1, entity2, id2, field2):
return [
{"_typeName": entity1, "id": id1, "fieldName": field1},
{"_typeName": entity2, "id": id2, "fieldName": field2}
]
def generate_relations(organisations, schools, locations, teachers, students):
# Build school -> organisation relations
org_keys = {x["clpOrganisationId"]: x["id"] for x in organisations}
yield [make_relation("ClpOrganisation", org_keys[x["clpOrganisationId"]], "schools",
"ClpSchool", x["id"], "organisation") for x in schools]
# Build location -> school relations
school_keys = {x["clpSchoolId"]: x["id"] for x in schools}
yield [make_relation("ClpLocation", location["id"], "schools",
"ClpSchool", school_keys[schoolId], "locations")
for location in locations for schoolId in location.get("schools", [])]
# Build teacher -> school relations
yield [make_relation("ClpTeacher", teacher["id"], "schools",
"ClpSchool", school_keys[schoolId], "teachers")
for teacher in teachers for schoolId in teacher.get("schools", [])]
# Build student -> school relations
yield [make_relation("ClpStudent", student["id"], "school",
"ClpSchool", school_keys[student["schoolId"]], "students")
for student in students if student["schoolId"] in school_keys]
def main():
xlsx_path = relative_to_absolute(SOURCE_XLSX)
raw_collections = extract_from_xlsx(xlsx_path)
(organisations, schools, locations, teachers, students) = prepare(*raw_collections)
write_nodes(
organisations,
copy_without(schools, "clpOrganisationId"),
copy_without(locations, "schools"),
copy_without(teachers, "organisationId", "organisationName", "schools", "schoolName"),
*chunks(3, copy_without(students, "schoolId", "schoolName", "location")))
write_relations(generate_relations(organisations, schools, locations, teachers, students))
if __name__ == "__main__":
main()
| en | 0.939394 | # pip install openpyxl # pip install cuid # https://github.com/necaris/cuid.py - create uuid's in the format that graphcool expects Convert the given XLSX spreadsheet to iterable of Sheet objects, in which row has been converted into a dictionary Convert an underscored title into camel case. 'PARENT_ORGANISATION_ID' => 'parentOrganisationId' # There are multiple locations, each of which is identitical except that for being related to a different school. # We have to collect all the schools that meet at the same location. # get an existing location with the given name, or add the new location # FIX THIS - Current extract doesn't include the CLP location id :( Make one up for the time being # GraphCool import insists on microseconds, hence the ".0" # Like locations, the same teacher can have multiple records, # each of which is identitical except that for being related to a different school. # We have to collect all the schools that the same teacher is teaching at. # get an existing teacher with that id, or add the new teacher record Yield n successive similar-sized chunks from l. # Build school -> organisation relations # Build location -> school relations # Build teacher -> school relations # Build student -> school relations | 2.49263 | 2 |
fHDHR/origin/origin_channels.py | crackers8199/fHDHR_USTVGO | 0 | 588 | import os
import sys
from lxml import html
import pathlib
import json
import m3u8
from seleniumwire import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.firefox.options import Options as FirefoxOptions
IFRAME_CSS_SELECTOR = '.iframe-container>iframe'
# Disable
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
class OriginChannels():
def __init__(self, fhdhr, origin):
self.fhdhr = fhdhr
self.origin = origin
self.cache_dir = self.fhdhr.config.dict["filedir"]["epg_cache"]["origin"]["top"]
self.m3ucache = pathlib.Path(self.cache_dir).joinpath('m3ucache.json')
self.cached_m3u = {}
self.load_m3u_cache()
def load_m3u_cache(self):
if os.path.isfile(self.m3ucache):
self.fhdhr.logger.info("Loading Previously Saved Channel m3u.")
with open(self.m3ucache, 'r') as m3ufile:
self.cached_m3u = json.load(m3ufile)
def save_m3u_cache(self):
self.fhdhr.logger.info("Saving Channel m3u cache.")
with open(self.m3ucache, 'w') as m3ufile:
m3ufile.write(json.dumps(self.cached_m3u, indent=4))
def get_channels(self):
channel_list = []
chan_names, chan_urls = self.scrape_channels()
chan_number_index = 1
for name, url in zip(chan_names, chan_urls):
chan_dict = {
"name": name.rstrip(),
"number": chan_number_index,
"callsign": self.format_callsign(url),
}
channel_list.append(chan_dict)
chan_number_index += 1
return channel_list
def get_channel_stream(self, chandict, allchandict):
caching = True
streamlist = []
streamdict = {}
if chandict["callsign"] in list(self.cached_m3u):
streamurl = self.cached_m3u[chandict["callsign"]]
else:
streamurl = self.get_ustvgo_stream(chandict)
# if self.fhdhr.config.dict["origin"]["force_best"]:
streamurl = self.m3u8_beststream(streamurl)
streamdict = {"number": chandict["number"], "stream_url": streamurl}
streamlist.append(streamdict)
return streamlist, caching
def m3u8_beststream(self, m3u8_url):
bestStream = None
videoUrlM3u = m3u8.load(m3u8_url)
if not videoUrlM3u.is_variant:
return m3u8_url
for videoStream in videoUrlM3u.playlists:
if not bestStream:
bestStream = videoStream
elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth:
bestStream = videoStream
if bestStream:
return bestStream.absolute_uri
else:
return m3u8_url
def scrape_channels(self):
channels_url = "https://ustvgo.tv/"
chanpage = self.fhdhr.web.session.get(channels_url)
tree = html.fromstring(chanpage.content)
channel_names_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/text()"
channel_urls_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href"
chan_names = tree.xpath(channel_names_xpath)
chan_urls = tree.xpath(channel_urls_xpath)
return chan_names, chan_urls
def format_callsign(self, url):
callsign = (url
.split('/')[-2]
.replace('-live', '')
.replace('-channel', '')
.replace('-free', '')
.replace('-streaming', ''))
return callsign
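# Illustrative walk-through (hypothetical URL following the site's pattern):
# "https://ustvgo.tv/abc-live-streaming-free/".split('/')[-2] gives
# "abc-live-streaming-free"; stripping "-live", "-free" and "-streaming"
# leaves the callsign "abc".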
def get_ustvgo_stream(self, chandict):
driver = self.get_firefox_driver()
blockPrint()
driver.get("https://ustvgo.tv/" + chandict["callsign"])
enablePrint()
# Get iframe
iframe = None
try:
iframe = driver.find_element_by_css_selector(IFRAME_CSS_SELECTOR)
except NoSuchElementException:
self.fhdhr.logger.error('Video frame is not found for channel')
return None
# Detect VPN-required channels
try:
driver.switch_to.frame(iframe)
driver.find_element_by_xpath("//*[text()='This channel requires our VPN to watch!']")
need_vpn = True
except NoSuchElementException:
need_vpn = False
finally:
driver.switch_to.default_content()
if need_vpn:
self.fhdhr.logger.warning('Channel needs VPN to be grabbed.')
return None
# Autoplay
iframe.click()
try:
playlist = driver.wait_for_request('/playlist.m3u8', timeout=10)
except TimeoutException:
self.fhdhr.logger.error('Channel m3u8 not found.')
return None
streamurl = str(playlist)
driver.close()
driver.quit()
self.cached_m3u[chandict["callsign"]] = streamurl
self.save_m3u_cache()
return streamurl
def get_firefox_driver(self):
ff_options = FirefoxOptions()
ff_options.add_argument('--headless')
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.set_preference('permissions.default.image', 2)
firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
firefox_profile.set_preference('dom.disable_beforeunload', True)
firefox_profile.set_preference('browser.tabs.warnOnClose', False)
firefox_profile.set_preference('media.volume_scale', '0.0')
set_seleniumwire_options = {
'connection_timeout': None,
'verify_ssl': False,
'suppress_connection_errors': True
}
driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options, options=ff_options, firefox_profile=firefox_profile)
return driver
| import os
import sys
from lxml import html
import pathlib
import json
import m3u8
from seleniumwire import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.firefox.options import Options as FirefoxOptions
IFRAME_CSS_SELECTOR = '.iframe-container>iframe'
# Disable
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
class OriginChannels():
def __init__(self, fhdhr, origin):
self.fhdhr = fhdhr
self.origin = origin
self.cache_dir = self.fhdhr.config.dict["filedir"]["epg_cache"]["origin"]["top"]
self.m3ucache = pathlib.Path(self.cache_dir).joinpath('m3ucache.json')
self.cached_m3u = {}
self.load_m3u_cache()
def load_m3u_cache(self):
if os.path.isfile(self.m3ucache):
self.fhdhr.logger.info("Loading Previously Saved Channel m3u.")
with open(self.m3ucache, 'r') as m3ufile:
self.cached_m3u = json.load(m3ufile)
def save_m3u_cache(self):
self.fhdhr.logger.info("Saving Channel m3u cache.")
with open(self.m3ucache, 'w') as m3ufile:
m3ufile.write(json.dumps(self.cached_m3u, indent=4))
def get_channels(self):
channel_list = []
chan_names, chan_urls = self.scrape_channels()
chan_number_index = 1
for name, url in zip(chan_names, chan_urls):
chan_dict = {
"name": name.rstrip(),
"number": chan_number_index,
"callsign": self.format_callsign(url),
}
channel_list.append(chan_dict)
chan_number_index += 1
return channel_list
def get_channel_stream(self, chandict, allchandict):
caching = True
streamlist = []
streamdict = {}
if chandict["callsign"] in list(self.cached_m3u):
streamurl = self.cached_m3u[chandict["callsign"]]
else:
streamurl = self.get_ustvgo_stream(chandict)
# if self.fhdhr.config.dict["origin"]["force_best"]:
streamurl = self.m3u8_beststream(streamurl)
streamdict = {"number": chandict["number"], "stream_url": streamurl}
streamlist.append(streamdict)
return streamlist, caching
def m3u8_beststream(self, m3u8_url):
bestStream = None
videoUrlM3u = m3u8.load(m3u8_url)
if not videoUrlM3u.is_variant:
return m3u8_url
for videoStream in videoUrlM3u.playlists:
if not bestStream:
bestStream = videoStream
elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth:
bestStream = videoStream
if bestStream:
return bestStream.absolute_uri
else:
return m3u8_url
def scrape_channels(self):
channels_url = "https://ustvgo.tv/"
chanpage = self.fhdhr.web.session.get(channels_url)
tree = html.fromstring(chanpage.content)
channel_names_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/text()"
channel_urls_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href"
chan_names = tree.xpath(channel_names_xpath)
chan_urls = tree.xpath(channel_urls_xpath)
return chan_names, chan_urls
def format_callsign(self, url):
callsign = (url
.split('/')[-2]
.replace('-live', '')
.replace('-channel', '')
.replace('-free', '')
.replace('-streaming', ''))
return callsign
def get_ustvgo_stream(self, chandict):
driver = self.get_firefox_driver()
blockPrint()
driver.get("https://ustvgo.tv/" + chandict["callsign"])
enablePrint()
# Get iframe
iframe = None
try:
iframe = driver.find_element_by_css_selector(IFRAME_CSS_SELECTOR)
except NoSuchElementException:
self.fhdhr.logger.error('Video frame is not found for channel')
return None
# Detect VPN-required channels
try:
driver.switch_to.frame(iframe)
driver.find_element_by_xpath("//*[text()='This channel requires our VPN to watch!']")
need_vpn = True
except NoSuchElementException:
need_vpn = False
finally:
driver.switch_to.default_content()
if need_vpn:
self.fhdhr.logger.warning('Channel needs VPN to be grabbed.')
return None
# Autoplay
iframe.click()
try:
playlist = driver.wait_for_request('/playlist.m3u8', timeout=10)
except TimeoutException:
self.fhdhr.logger.error('Channel m3u8 not found.')
return None
streamurl = str(playlist)
driver.close()
driver.quit()
self.cached_m3u[chandict["callsign"]] = streamurl
self.save_m3u_cache()
return streamurl
def get_firefox_driver(self):
ff_options = FirefoxOptions()
ff_options.add_argument('--headless')
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.set_preference('permissions.default.image', 2)
firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
firefox_profile.set_preference('dom.disable_beforeunload', True)
firefox_profile.set_preference('browser.tabs.warnOnClose', False)
firefox_profile.set_preference('media.volume_scale', '0.0')
set_seleniumwire_options = {
'connection_timeout': None,
'verify_ssl': False,
'suppress_connection_errors': True
}
driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options, options=ff_options, firefox_profile=firefox_profile)
return driver
| en | 0.283704 | # Disable # Restore # if self.fhdhr.config.dict["origin"]["force_best"]: # Get iframe # Detect VPN-required channels # Autoplay | 2.133891 | 2 |
version.py | XioNoX/ansible-junos-stdlib-old | 0 | 589 | VERSION = "1.4.0"
DATE = "2016-Sept-21"
| VERSION = "1.4.0"
DATE = "2016-Sept-21"
| none | 1 | 1.086322 | 1 |
|
Episode11-Menu/Pygame/explosion.py | Inksaver/Shmup_With_Pygame_Love2D_Monogame | 1 | 590 | import pygame
import shared
class Explosion():
def __init__(self, images:list, centre:tuple, key:str) -> None:
''' Class variables. key: 'sm', 'lg', 'player' '''
self.images = images # list of 8 images
self.centre = centre # use for all frames
self.key = key # key used later
self.image = images[key][0] # set to first image in the sequence
self.rect = self.image.get_rect() # define rectangle from image size
self.rect.center = self.centre # set centre for all frames
self.frame = 0 # no of first frame
self.time_passed = 0 # set timer to 0
self.frame_rate = 0.1 # 8 images played at 1 frame per 0.1 secs = 0.8 seconds
self.active = True
def update(self, dt):
self.time_passed += dt
if self.time_passed >= self.frame_rate: # 0.1 seconds has passed
self.time_passed = 0 # reset timer
self.frame += 1 # increase frame number
if self.frame >= len(self.images[self.key]): # check if end of list?
self.active = False # animation finished
else:
self.image = self.images[self.key][self.frame] # next frame
self.rect = self.image.get_rect() # new rectangle
self.rect.center = self.centre # set centre to parameter value
return self.active
def draw(self):
shared.screen.blit(self.image, self.rect) # draw current frame | import pygame
import shared
class Explosion():
def __init__(self, images:list, centre:tuple, key:str) -> None:
''' Class variables. key: 'sm', 'lg', 'player' '''
self.images = images # list of 8 images
self.centre = centre # use for all frames
self.key = key # key used later
self.image = images[key][0] # set to first image in the sequence
self.rect = self.image.get_rect() # define rectangle from image size
self.rect.center = self.centre # set centre for all frames
self.frame = 0 # no of first frame
self.time_passed = 0 # set timer to 0
self.frame_rate = 0.1 # 8 images played at 1 frame per 0.1 secs = 0.8 seconds
self.active = True
def update(self, dt):
self.time_passed += dt
if self.time_passed >= self.frame_rate: # 0.1 seconds has passed
self.time_passed = 0 # reset timer
self.frame += 1 # increase frame number
if self.frame >= len(self.images[self.key]): # check if end of list?
self.active = False # animation finished
else:
self.image = self.images[self.key][self.frame] # next frame
self.rect = self.image.get_rect() # new rectangle
self.rect.center = self.centre # set centre to parameter value
return self.active
def draw(self):
shared.screen.blit(self.image, self.rect) # draw current frame | en | 0.729279 | Class variables. key: 'sm', 'lg', 'player # list of 8 images # use for all frames # key used later # set to first image in the sequence # define rectangle from image size # set centre for all frames # no of first frame # set timer to 0 # 8 images played at 1 frame per 0.1 secs = 0.8 seconds # 0.1 seconds has passed # reset timer # increase frame number # check if end of list? # animation finished # next frame # new rectangle # set centre to parameter value # draw current frame | 3.204575 | 3 |
funcx_endpoint/funcx_endpoint/strategies/base.py | arokem/funcX | 1 | 591 | import sys
import threading
import logging
import time
logger = logging.getLogger("interchange.strategy.base")
class BaseStrategy(object):
"""Implements threshold-interval based flow control.
The overall goal is to trap the flow of apps from the
workflow, measure it and redirect it the appropriate executors for
processing.
This is based on the following logic:
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
count = get_events_since(start)
if count >= THRESHOLD :
break
callback()
This logic ensures that the callbacks are activated with a maximum delay
of `interval` for systems with infrequent events as well as systems which would
generate large bursts of events.
Once a callback is triggered, the callback generally runs a strategy
method on the sites available as well as the task queue.
TODO: When the debug logs are enabled this module emits duplicate messages.
This issue needs more debugging. What I've learnt so far is that the duplicate
messages are present only when the timer thread is started, so this could be
from a duplicate logger being added by the thread.
"""
def __init__(self, *args, threshold=20, interval=5):
"""Initialize the flowcontrol object.
We start the timer thread here
Parameters
----------
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interchange = None
self.threshold = threshold
self.interval = interval
self.cb_args = args
self.callback = self.strategize
self._handle = None
self._event_count = 0
self._event_buffer = []
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
def start(self, interchange):
"""Actually start the strategy
Parameters
----------
interchange: funcx.executors.high_throughput.interchange.Interchange
Interchange to bind the strategy to
"""
self.interchange = interchange
if hasattr(interchange.config, 'provider'):
logger.debug("Strategy bounds-> init:{}, min:{}, max:{}".format(
interchange.config.provider.init_blocks,
interchange.config.provider.min_blocks,
interchange.config.provider.max_blocks))
self._thread.start()
def strategize(self, *args, **kwargs):
""" Strategize is called everytime the threshold or the interval is hit
"""
logger.debug("Strategize called with {} {}".format(args, kwargs))
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def notify(self, event_id):
"""Let the FlowControl system know that there is an event.
This method is to be called from the Interchange to notify the flowcontrol
"""
self._event_buffer.extend([event_id])
self._event_count += 1
if self._event_count >= self.threshold:
logger.debug("Eventcount >= threshold")
self.make_callback(kind="event")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
KWargs:
- kind (str): Default=None, used to pass information on what
triggered the callback
"""
self._wake_up_time = time.time() + self.interval
self.callback(tasks=self._event_buffer, kind=kind)
self._event_buffer = []
def close(self):
"""Merge the threads and terminate."""
self._kill_event.set()
self._thread.join()
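# Illustrative sketch (not part of the original module): a minimal subclass that only
# logs what triggered each callback. make_callback() above passes the buffered event
# ids as 'tasks' and the trigger kind ('event' or 'timer').
class _LoggingStrategy(BaseStrategy):
    def strategize(self, *args, **kwargs):
        logger.info("strategize: %d queued events, trigger=%s",
                    len(kwargs.get("tasks", [])), kwargs.get("kind"))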
class Timer(object):
"""This timer is a simplified version of the FlowControl timer.
This timer does not employ notify events.
This is based on the following logic :
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
wait()
break
callback()
"""
def __init__(self, callback, *args, interval=5):
"""Initialize the flowcontrol object
We start the timer thread here
Args:
- dfk (DataFlowKernel) : DFK object to track parsl progress
KWargs:
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interval = interval
self.cb_args = args
self.callback = callback
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
self._thread.start()
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
# Sleep till time to wake up
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
"""
self._wake_up_time = time.time() + self.interval
self.callback(*self.cb_args)
def close(self):
"""Merge the threads and terminate.
"""
self._kill_event.set()
self._thread.join()
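# Illustrative usage sketch (hypothetical values): a Timer fires its callback roughly
# once per interval until close() is called.
def _demo_timer(seconds=5, interval=2):
    ticks = []
    t = Timer(ticks.append, "tick", interval=interval)  # each callback runs ticks.append("tick")
    time.sleep(seconds)
    t.close()
    return len(ticks)  # roughly seconds // interval callbacks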
| import sys
import threading
import logging
import time
logger = logging.getLogger("interchange.strategy.base")
class BaseStrategy(object):
"""Implements threshold-interval based flow control.
The overall goal is to trap the flow of apps from the
workflow, measure it and redirect it the appropriate executors for
processing.
This is based on the following logic:
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
count = get_events_since(start)
if count >= THRESHOLD :
break
callback()
This logic ensures that the callbacks are activated with a maximum delay
of `interval` for systems with infrequent events as well as systems which would
generate large bursts of events.
Once a callback is triggered, the callback generally runs a strategy
method on the sites available as well as the task queue.
TODO: When the debug logs are enabled this module emits duplicate messages.
This issue needs more debugging. What I've learnt so far is that the duplicate
messages are present only when the timer thread is started, so this could be
from a duplicate logger being added by the thread.
"""
def __init__(self, *args, threshold=20, interval=5):
"""Initialize the flowcontrol object.
We start the timer thread here
Parameters
----------
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interchange = None
self.threshold = threshold
self.interval = interval
self.cb_args = args
self.callback = self.strategize
self._handle = None
self._event_count = 0
self._event_buffer = []
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
def start(self, interchange):
"""Actually start the strategy
Parameters
----------
interchange: funcx.executors.high_throughput.interchange.Interchange
Interchange to bind the strategy to
"""
self.interchange = interchange
if hasattr(interchange.config, 'provider'):
logger.debug("Strategy bounds-> init:{}, min:{}, max:{}".format(
interchange.config.provider.init_blocks,
interchange.config.provider.min_blocks,
interchange.config.provider.max_blocks))
self._thread.start()
def strategize(self, *args, **kwargs):
""" Strategize is called everytime the threshold or the interval is hit
"""
logger.debug("Strategize called with {} {}".format(args, kwargs))
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def notify(self, event_id):
"""Let the FlowControl system know that there is an event.
This method is to be called from the Interchange to notify the flowcontrol
"""
self._event_buffer.extend([event_id])
self._event_count += 1
if self._event_count >= self.threshold:
logger.debug("Eventcount >= threshold")
self.make_callback(kind="event")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
KWargs:
- kind (str): Default=None, used to pass information on what
triggered the callback
"""
self._wake_up_time = time.time() + self.interval
self.callback(tasks=self._event_buffer, kind=kind)
self._event_buffer = []
def close(self):
"""Merge the threads and terminate."""
self._kill_event.set()
self._thread.join()
class Timer(object):
"""This timer is a simplified version of the FlowControl timer.
This timer does not employ notify events.
This is based on the following logic :
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
wait()
break
callback()
"""
def __init__(self, callback, *args, interval=5):
"""Initialize the flowcontrol object
We start the timer thread here
Args:
- dfk (DataFlowKernel) : DFK object to track parsl progress
KWargs:
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interval = interval
self.cb_args = args
self.callback = callback
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
self._thread.start()
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
# Sleep till time to wake up
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
"""
self._wake_up_time = time.time() + self.interval
self.callback(*self.cb_args)
def close(self):
"""Merge the threads and terminate.
"""
self._kill_event.set()
self._thread.join()
| en | 0.883779 | Implements threshold-interval based flow control. The overall goal is to trap the flow of apps from the workflow, measure it and redirect it the appropriate executors for processing. This is based on the following logic: .. code-block:: none BEGIN (INTERVAL, THRESHOLD, callback) : start = current_time() while (current_time()-start < INTERVAL) : count = get_events_since(start) if count >= THRESHOLD : break callback() This logic ensures that the callbacks are activated with a maximum delay of `interval` for systems with infrequent events as well as systems which would generate large bursts of events. Once a callback is triggered, the callback generally runs a strategy method on the sites available as well asqeuque TODO: When the debug logs are enabled this module emits duplicate messages. This issue needs more debugging. What I've learnt so far is that the duplicate messages are present only when the timer thread is started, so this could be from a duplicate logger being added by the thread. Initialize the flowcontrol object. We start the timer thread here Parameters ---------- - threshold (int) : Tasks after which the callback is triggered - interval (int) : seconds after which timer expires Actually start the strategy Parameters ---------- interchange: funcx.executors.high_throughput.interchange.Interchange Interchange to bind the strategy to Strategize is called everytime the threshold or the interval is hit Internal. This is the function that the thread will execute. waits on an event so that the thread can make a quick exit when close() is called Args: - kill_event (threading.Event) : Event to wait on # Waiting for the event returns True only when the event # is set, usually by the parent thread Let the FlowControl system know that there is an event. This method is to be called from the Interchange to notify the flowcontrol Makes the callback and resets the timer. KWargs: - kind (str): Default=None, used to pass information on what triggered the callback Merge the threads and terminate. This timer is a simplified version of the FlowControl timer. This timer does not employ notify events. This is based on the following logic : .. code-block:: none BEGIN (INTERVAL, THRESHOLD, callback) : start = current_time() while (current_time()-start < INTERVAL) : wait() break callback() Initialize the flowcontrol object We start the timer thread here Args: - dfk (DataFlowKernel) : DFK object to track parsl progress KWargs: - threshold (int) : Tasks after which the callback is triggered - interval (int) : seconds after which timer expires Internal. This is the function that the thread will execute. waits on an event so that the thread can make a quick exit when close() is called Args: - kill_event (threading.Event) : Event to wait on # Sleep till time to wake up # Waiting for the event returns True only when the event # is set, usually by the parent thread Makes the callback and resets the timer. Merge the threads and terminate. | 2.668815 | 3 |
hw3 cnn and vis/gradcam.py | mtang1001/ML-Exploration | 0 | 592 | import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from captum.attr import GuidedGradCam, GuidedBackprop
from captum.attr import LayerActivation, LayerConductance, LayerGradCam
from data_utils import *
from image_utils import *
from captum_utils import *
import numpy as np
from visualizers import GradCam
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
X, y, class_names = load_imagenet_val(num=5)
# FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model we will use explicitly for GradCAM for this.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
gc = GradCam()
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
# Guided Back-Propagation
gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gbp_result.shape[0]):
plt.subplot(1, 5, i + 1)
img = gbp_result[i]
img = rescale(img)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_backprop.png')
# GradCam
# GradCAM. We have given you which module(=layer) that we need to capture gradients from, which you can see in conv_module variable below
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
for param in gc_model.parameters():
param.requires_grad = True
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gradcam_val = gradcam_result[i]
img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255)
img = img / np.max(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/gradcam.png')
# As a final step, we can combine GradCam and Guided Backprop to get Guided GradCam.
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gbp_val = gbp_result[i]
gradcam_val = np.expand_dims(gradcam_result[i], axis=2)
# Pointwise multiplication and normalization of the gradcam and guided backprop results (2 lines)
img = gradcam_val * gbp_val
img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
img = np.float32(img)
img = torch.from_numpy(img)
img = deprocess(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_gradcam.png')
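# Illustrative helper (not part of the assignment code): the same Guided Grad-CAM
# combination as above, packaged as a function. Assumes `cam` is an HxW map and
# `gbp` is an HxWx3 guided-backprop gradient for the same image.
def _combine_guided_gradcam(cam, gbp):
    combined = np.expand_dims(cam, axis=2) * gbp               # broadcast cam over the colour channels
    combined = combined / (np.max(np.abs(combined)) + 1e-8)    # normalise into [-1, 1]
    return combined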
# **************************************************************************************** #
# Captum
model = torchvision.models.squeezenet1_1(pretrained=True)
# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
param.requires_grad = False
# Convert X and y from numpy arrays to Torch Tensors
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
y_tensor = torch.LongTensor(y)
conv_module = model.features[12]
##############################################################################
# TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. #
# visualize_attr_maps function from captum_utils.py is useful for #
# visualizing captum outputs #
# Use conv_module as the convolution layer for gradcam #
##############################################################################
# Computing Guided GradCam
ggc = GuidedGradCam(model, conv_module)
attribution_gcc = compute_attributions(ggc, X_tensor, target = y_tensor)
# print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape)
visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam'])
# Computing Guided BackProp
gbp = GuidedBackprop(model)
attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor)
visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Try out different layers and observe how the attributions change
layer = model.features[3]
# Example visualization for using layer visualizations
# layer_act = LayerActivation(model, layer)
# layer_act_attr = compute_attributions(layer_act, X_tensor)
# layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True)
##############################################################################
# TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar #
# to what we did for the other captum sections, using our helper methods), #
# but with some preprocessing calculations. #
# #
# You can refer to the LayerActivation example above and you should be #
# using 'layer' given above for this section #
# #
# Also note that, you would need to customize your 'attr_preprocess' #
# parameter that you send along to 'visualize_attr_maps' as the default #
# 'attr_preprocess' is written to only to handle multi channel attributions. #
# #
# For layer gradcam look at the usage of the parameter relu_attributions #
##############################################################################
# Layer gradcam aggregates across all channels
from captum.attr import LayerAttribution
N, C, H, W = X_tensor.shape
LC = LayerConductance(model, layer)
LC_attr = compute_attributions(LC, X_tensor, target = y_tensor)
LC_attr_sum = LC_attr.mean(axis = 1, keepdim = True)
LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) )
LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names, [LC_attr_int], ['LayerConductance'])
LGC = LayerGradCam(model, layer)
LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor)
LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True)
LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W))
LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps ('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
| import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from captum.attr import GuidedGradCam, GuidedBackprop
from captum.attr import LayerActivation, LayerConductance, LayerGradCam
from data_utils import *
from image_utils import *
from captum_utils import *
import numpy as np
from visualizers import GradCam
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
X, y, class_names = load_imagenet_val(num=5)
# FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model we will use explicitly for GradCAM for this.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
gc = GradCam()
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
# Guided Back-Propagation
gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gbp_result.shape[0]):
plt.subplot(1, 5, i + 1)
img = gbp_result[i]
img = rescale(img)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_backprop.png')
# GradCam
# GradCAM. We have given you which module(=layer) that we need to capture gradients from, which you can see in conv_module variable below
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
for param in gc_model.parameters():
param.requires_grad = True
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gradcam_val = gradcam_result[i]
img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255)
img = img / np.max(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/gradcam.png')
# As a final step, we can combine GradCam and Guided Backprop to get Guided GradCam.
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gbp_val = gbp_result[i]
gradcam_val = np.expand_dims(gradcam_result[i], axis=2)
# Pointwise multiplication and normalization of the gradcam and guided backprop results (2 lines)
img = gradcam_val * gbp_val
img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
img = np.float32(img)
img = torch.from_numpy(img)
img = deprocess(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_gradcam.png')
# **************************************************************************************** #
# Captum
model = torchvision.models.squeezenet1_1(pretrained=True)
# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
param.requires_grad = False
# Convert X and y from numpy arrays to Torch Tensors
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
y_tensor = torch.LongTensor(y)
conv_module = model.features[12]
##############################################################################
# TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. #
# visualize_attr_maps function from captum_utils.py is useful for #
# visualizing captum outputs #
# Use conv_module as the convolution layer for gradcam #
##############################################################################
# Computing Guided GradCam
ggc = GuidedGradCam(model, conv_module)
attribution_gcc = compute_attributions(ggc, X_tensor, target = y_tensor)
# print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape)
visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam'])
# Computing Guided BackProp
gbp = GuidedBackprop(model)
attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor)
visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Try out different layers and observe how the attributions change
layer = model.features[3]
# Example visualization for using layer visualizations
# layer_act = LayerActivation(model, layer)
# layer_act_attr = compute_attributions(layer_act, X_tensor)
# layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True)
##############################################################################
# TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar #
# to what we did for the other captum sections, using our helper methods), #
# but with some preprocessing calculations. #
# #
# You can refer to the LayerActivation example above and you should be #
# using 'layer' given above for this section #
# #
# Also note that, you would need to customize your 'attr_preprocess' #
# parameter that you send along to 'visualize_attr_maps' as the default #
# 'attr_preprocess' is written only to handle multi-channel attributions.    #
# #
# For layer gradcam look at the usage of the parameter relu_attributions #
##############################################################################
# Layer gradcam aggregates across all channels
from captum.attr import LayerAttribution
N, C, H, W = X_tensor.shape
LC = LayerConductance(model, layer)
LC_attr = compute_attributions(LC, X_tensor, target = y_tensor)
LC_attr_sum = LC_attr.mean(axis = 1, keepdim = True)
LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) )
LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1)
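# The layer attributions are upsampled to the input resolution and repeated across
# 3 channels so visualize_attr_maps can display them like image attributions.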
visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names, [LC_attr_int], ['LayerConductance'])
LGC = LayerGradCam(model, layer)
LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor)
LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True)
LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W))
LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps ('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
| en | 0.458202 | # set default size of plots # FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model we will use explicitly for GradCAM for this. # Guided Back-Propagation # GradCam # GradCAM. We have given you which module(=layer) that we need to capture gradients from, which you can see in conv_module variable below # As a final step, we can combine GradCam and Guided Backprop to get Guided GradCam. # Pointwise multiplication and normalization of the gradcam and guided backprop results (2 lines) # **************************************************************************************** # # Captum # We don't want to train the model, so tell PyTorch not to compute gradients # with respect to model parameters. # Convert X and y from numpy arrays to Torch Tensors ############################################################################## # TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. # # visualize_attr_maps function from captum_utils.py is useful for # # visualizing captum outputs # # Use conv_module as the convolution layer for gradcam # ############################################################################## # Computing Guided GradCam # print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape) # Computing Guided BackProp ############################################################################## # END OF YOUR CODE # ############################################################################## # Try out different layers and see observe how the attributions change # Example visualization for using layer visualizations # layer_act = LayerActivation(model, layer) # layer_act_attr = compute_attributions(layer_act, X_tensor) # layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True) ############################################################################## # TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar # # to what we did for the other captum sections, using our helper methods), # # but with some preprocessing calculations. # # # # You can refer to the LayerActivation example above and you should be # # using 'layer' given above for this section # # # # Also note that, you would need to customize your 'attr_preprocess' # # parameter that you send along to 'visualize_attr_maps' as the default # # 'attr_preprocess' is written to only to handle multi channel attributions. # # # # For layer gradcam look at the usage of the parameter relu_attributions # ############################################################################## # Layer gradcam aggregates across all channels ############################################################################## # END OF YOUR CODE # ############################################################################## | 2.559934 | 3 |
src/genotypes.py | k8lion/admmdarts | 0 | 593 | from collections import namedtuple
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
CRBPRIMITIVES = [
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
NASNet = Genotype(
normal=[
('sep_conv_5x5', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 0),
('sep_conv_3x3', 0),
('avg_pool_3x3', 1),
('skip_connect', 0),
('avg_pool_3x3', 0),
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('skip_connect', 1),
],
normal_concat=[2, 3, 4, 5, 6],
reduce=[
('sep_conv_5x5', 1),
('sep_conv_7x7', 0),
('max_pool_3x3', 1),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('sep_conv_5x5', 0),
('skip_connect', 3),
('avg_pool_3x3', 2),
('sep_conv_3x3', 2),
('max_pool_3x3', 1),
],
reduce_concat=[4, 5, 6],
)
AmoebaNet = Genotype(
normal=[
('avg_pool_3x3', 0),
('max_pool_3x3', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 2),
('sep_conv_3x3', 0),
('avg_pool_3x3', 3),
('sep_conv_3x3', 1),
('skip_connect', 1),
('skip_connect', 0),
('avg_pool_3x3', 1),
],
normal_concat=[4, 5, 6],
reduce=[
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('max_pool_3x3', 0),
('sep_conv_7x7', 2),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('max_pool_3x3', 0),
('max_pool_3x3', 1),
('conv_7x1_1x7', 0),
('sep_conv_3x3', 5),
],
reduce_concat=[3, 4, 6]
)
DARTS_V1 = Genotype(
normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0),
('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5],
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0),
('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5])
DARTS_V2 = Genotype(
normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1),
('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5],
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0),
('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])
DARTS = DARTS_V2
BATH = Genotype(
normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0),
('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6),
reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3),
('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6))
BATH2 = Genotype(
normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1),
('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6),
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1),
('skip_connect', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 4)], reduce_concat=range(2, 6))
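# Usage sketch: each entry in `normal`/`reduce` pairs a primitive name with the index
# of the node it takes input from, and the `*_concat` fields list which node outputs
# are concatenated into the cell output. For example, with the definitions above:
#   DARTS.normal[0]       # -> ('sep_conv_3x3', 0)
#   DARTS.normal_concat   # -> [2, 3, 4, 5]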
| none | 1 | 1.761603 | 2 |
|
colab_logica.py | smdesai/logica | 0 | 594 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for using Logica in CoLab."""
from .common import color
from .common import concertina_lib
from .compiler import functors
from .compiler import rule_translate
from .compiler import universe
import IPython
from IPython.core.magic import register_cell_magic
from IPython.display import display
import os
import pandas
from .parser_py import parse
from .common import sqlite3_logica
BQ_READY = True # By default.
try:
from google.cloud import bigquery
except:
BQ_READY = False
print('Could not import google.cloud.bigquery.')
try:
from google.colab import auth
except:
BQ_READY = False
  print('Could not import google.colab.auth.')
try:
from google.colab import widgets
WIDGETS_IMPORTED = True
except:
WIDGETS_IMPORTED = False
print('Could not import google.colab.widgets.')
PROJECT = None
# TODO: Should this be renamed to PSQL_ENGINE, PSQL_CONNECTION?
DB_ENGINE = None
DB_CONNECTION = None
USER_AUTHENTICATED = False
TABULATED_OUTPUT = True
SHOW_FULL_QUERY = True
PREAMBLE = None
def SetPreamble(preamble):
global PREAMBLE
PREAMBLE = preamble
def SetProject(project):
global PROJECT
PROJECT = project
def SetDbConnection(connection):
global DB_CONNECTION
DB_CONNECTION = connection
def EnsureAuthenticatedUser():
global USER_AUTHENTICATED
global PROJECT
if USER_AUTHENTICATED:
return
auth.authenticate_user()
if PROJECT is None:
print("Please enter project_id to use for BigQuery queries.")
PROJECT = input()
print("project_id is set to %s" % PROJECT)
print("You can change it with logica.colab_logica.SetProject command.")
USER_AUTHENTICATED = True
def SetTabulatedOutput(tabulated_output):
global TABULATED_OUTPUT
global SHOW_FULL_QUERY
TABULATED_OUTPUT = tabulated_output
SHOW_FULL_QUERY = TABULATED_OUTPUT
if not WIDGETS_IMPORTED:
SetTabulatedOutput(False)
def TabBar(*args):
"""Returns a real TabBar or a mock. Useful for UIs that don't support JS."""
if TABULATED_OUTPUT:
return widgets.TabBar(*args)
class MockTab:
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, *x):
pass
class MockTabBar:
def __init__(self):
pass
def output_to(self, x):
return MockTab()
return MockTabBar()
@register_cell_magic
def logica(line, cell):
Logica(line, cell, run_query=True)
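# Usage sketch (in a Colab/Jupyter cell, once this module is loaded): the magic line
# lists the predicates to compute and the cell body holds the Logica program, e.g.
#   %%logica Greeting
#   Greeting(greeting: "Hello world!");
# The result table is pushed into the notebook namespace as `Greeting` and the
# generated SQL as `Greeting_sql` (see the ip.push calls in Logica below).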
def ParseList(line):
line = line.strip()
if not line:
predicates = []
else:
predicates = [p.strip() for p in line.split(',')]
return predicates
def RunSQL(sql, engine, connection=None, is_final=False):
if engine == 'bigquery':
client = bigquery.Client(project=PROJECT)
return client.query(sql).to_dataframe()
elif engine == 'psql':
if is_final:
return pandas.read_sql(sql, connection)
else:
return connection.execute(sql)
elif engine == 'sqlite':
statements = parse.SplitRaw(sql, ';')
connection.executescript(sql)
if is_final:
return pandas.read_sql(statements[-1], connection)
else:
pass
return None
else:
raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite '
'for now.')
class SqliteRunner(object):
def __init__(self):
self.connection = sqlite3_logica.SqliteConnect()
# TODO: Sqlite runner should not be accepting an engine.
def __call__(self, sql, engine, is_final):
return RunSQL(sql, engine, self.connection, is_final)
class PostgresRunner(object):
def __init__(self):
global DB_CONNECTION
global DB_ENGINE
if DB_CONNECTION:
self.engine = DB_ENGINE
self.connection = DB_CONNECTION
else:
(self.engine, self.connection) = PostgresJumpStart()
DB_ENGINE = self.engine
DB_CONNECTION = self.connection
def __call__(self, sql, engine, is_final):
return RunSQL(sql, engine, self.connection, is_final)
def ShowError(error_text):
print(color.Format('[ {error}Error{end} ] ' + error_text))
def Logica(line, cell, run_query):
"""Running Logica predicates and storing results."""
predicates = ParseList(line)
if not predicates:
ShowError('No predicates to run.')
return
try:
program = ';\n'.join(s for s in [PREAMBLE, cell] if s)
parsed_rules = parse.ParseFile(program)['rule']
except parse.ParsingException as e:
e.ShowMessage()
return
try:
program = universe.LogicaProgram(parsed_rules)
except functors.FunctorError as e:
e.ShowMessage()
return
engine = program.annotations.Engine()
if engine == 'bigquery' and not BQ_READY:
ShowError(
        'BigQuery client and/or authentication is not installed. \n'
'It is the easiest to run BigQuery requests from Google CoLab:\n'
' https://colab.research.google.com/.\n'
'Note that running Logica on SQLite requires no installation.\n'
'This could be a good fit for working with small data or learning Logica.\n'
'Use {warning}@Engine("sqlite");{end} annotation in your program to use SQLite.')
return
bar = TabBar(predicates + ['(Log)'])
logs_idx = len(predicates)
executions = []
sub_bars = []
ip = IPython.get_ipython()
for idx, predicate in enumerate(predicates):
with bar.output_to(logs_idx):
try:
sql = program.FormattedPredicateSql(predicate)
executions.append(program.execution)
ip.push({predicate + '_sql': sql})
except rule_translate.RuleCompileException as e:
print('Encountered error when compiling %s.' % predicate)
e.ShowMessage()
return
# Publish output to Colab cell.
with bar.output_to(idx):
sub_bar = TabBar(['SQL', 'Result'])
sub_bars.append(sub_bar)
with sub_bar.output_to(0):
if SHOW_FULL_QUERY:
print(
color.Format(
'The following query is stored at {warning}%s{end} '
'variable.' % (
predicate + '_sql')))
print(sql)
else:
print('Query is stored at %s variable.' %
color.Warn(predicate + '_sql'))
with bar.output_to(logs_idx):
if engine == 'sqlite':
sql_runner = SqliteRunner()
elif engine == 'psql':
sql_runner = PostgresRunner()
elif engine == 'bigquery':
EnsureAuthenticatedUser()
sql_runner = RunSQL
else:
raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite '
'for now.')
result_map = concertina_lib.ExecuteLogicaProgram(
executions, sql_runner=sql_runner, sql_engine=engine)
for idx, predicate in enumerate(predicates):
t = result_map[predicate]
ip.push({predicate: t})
with bar.output_to(idx):
with sub_bars[idx].output_to(1):
if run_query:
print(
color.Format(
'The following table is stored at {warning}%s{end} '
'variable.' %
predicate))
display(t)
else:
print('The query was not run.')
print(' ') # To activate the tabbar.
def PostgresJumpStart():
# Install postgresql server.
print("Installing and configuring an empty PostgreSQL database.")
result = 0
result += os.system('sudo apt-get -y -qq update')
result += os.system('sudo apt-get -y -qq install postgresql')
result += os.system('sudo service postgresql start')
# Ignoring user creation error, as they may already exist.
result += 0 * os.system(
'sudo -u postgres psql -c "CREATE USER logica WITH SUPERUSER"')
result += os.system(
'sudo -u postgres psql -c "ALTER USER logica PASSWORD \'<PASSWORD>\';"')
result += os.system(
'sudo -u postgres psql -U postgres -c \'CREATE DATABASE logica;\'')
if result != 0:
print("""Installation failed. Please try the following manually:
# Install Logica.
!pip install logica
# Install postgresql server.
!sudo apt-get -y -qq update
!sudo apt-get -y -qq install postgresql
!sudo service postgresql start
# Prepare database for Logica.
!sudo -u postgres psql -c "CREATE USER logica WITH SUPERUSER"
!sudo -u postgres psql -c "ALTER USER logica PASSWORD '<PASSWORD>';"
!sudo -u postgres psql -U postgres -c 'CREATE DATABASE logica;'
# Connect to the database.
from logica import colab_logica
from sqlalchemy import create_engine
import pandas
engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600);
connection = engine.connect();
colab_logica.SetDbConnection(connection)""")
return
print('Installation succeeded. Connecting...')
# Connect to the database.
from logica import colab_logica
from sqlalchemy import create_engine
import pandas
engine = create_engine('postgresql+psycopg2://logica:logica@127.0.0.1', pool_recycle=3600)
connection = engine.connect()
print('Connected.')
return engine, connection
| en | 0.672578 | 2.209812 | 2 |
pyccel/ast/basic.py | toddrme2178/pyccel | 0 | 595 | from sympy.core.basic import Basic as sp_Basic
class Basic(sp_Basic):
"""Basic class for Pyccel AST."""
_fst = None
def set_fst(self, fst):
"""Sets the redbaron fst."""
self._fst = fst
@property
def fst(self):
return self._fst
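# Usage sketch (with a hypothetical subclass `Node(Basic)`): AST nodes can record the
# RedBaron node ("fst") they were parsed from, e.g.
#   node = Node(); node.set_fst(red_node); node.fst  # -> red_node, for error reporting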
| en | 0.353293 | Basic class for Pyccel AST. Sets the redbaron fst. | 2.448946 | 2 |
alibi_detect/utils/tests/test_discretize.py | Clusks/alibi-detect | 1,227 | 596 | from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.discretizer import Discretizer
x = np.random.rand(10, 4)
n_features = x.shape[1]
feature_names = [str(_) for _ in range(n_features)]
categorical_features = [[], [1, 3]]
percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]
tests = list(product(categorical_features, percentiles))
n_tests = len(tests)
@pytest.fixture
def cats_and_percentiles(request):
cat, perc = tests[request.param]
return cat, perc
@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)
def test_discretizer(cats_and_percentiles):
cat, perc = cats_and_percentiles
disc = Discretizer(x, cat, feature_names, perc)
to_disc = list(disc.names.keys())
assert len(to_disc) == (x.shape[1] - len(cat))
x_disc = disc.discretize(x)
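    # Discretized columns should hold integer bin labels between 0 and len(perc),
    # which is what the assertions below check.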
for k, v in disc.names.items():
assert len(v) <= len(perc) + 1
assert callable(disc.lambdas[k])
assert (x_disc[:, k].min() == 0).all()
assert (x_disc[:, k].max() == len(perc)).all()
for i in range(x.shape[1]):
if i not in to_disc:
assert (x_disc[:, i] == x[:, i]).all()
| none | 1 | 2.055551 | 2 |
|
tinc/tests/parameter_space_test.py | AlloSphere-Research-Group/tinc-python | 1 | 597 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 11:49:43 2021
@author: Andres
"""
import sys,time
import unittest
from tinc import *
class ParameterSpaceTest(unittest.TestCase):
def test_parameter(self):
p1 = Parameter("param1")
p2 = Parameter("param2")
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
def test_process(self):
p1 = Parameter("param1")
p1.values = [0, 1,2,3,4]
p2 = Parameter("param2")
p2.values = [-0.3,-0.2, -0.1, 0]
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
def func(param1, param2):
return param1 * param2
result = ps.run_process(func)
self.assertAlmostEqual(result, p1.value * p2.value)
p1.value = 3
p2.value = -0.1
result = ps.run_process(func)
self.assertAlmostEqual(result, p1.value * p2.value)
p1.value = 3
p2.value = -0.1
def test_sweep_cache(self):
p1 = Parameter("param1")
p1.values = [0, 1,2,3,4]
p2 = Parameter("param2")
p2.values = [-0.3,-0.2, -0.1, 0]
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
ps.enable_cache("ps_test")
def func(param1, param2):
return param1 * param2
ps.sweep(func)
def test_data_directories(self):
dim1 = Parameter("dim1")
dim1.values = [0.1,0.2,0.3,0.4, 0.5]
dim2 = Parameter("dim2")
dim2.set_space_representation_type(parameter_space_representation_types.INDEX)
dim2.values = [0.1,0.2,0.3,0.4, 0.5]
dim3 = Parameter("dim3")
dim3.set_space_representation_type(parameter_space_representation_types.ID)
        dim3.values = [0.1,0.2,0.3,0.4, 0.5]
ps = ParameterSpace("ps")
ps.register_parameters([dim1, dim2, dim3])
ps.set_current_path_template("file_%%dim1%%_%%dim2:INDEX%%")
dim1.value=0.2
dim2.value=0.2
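        # 'file_%%dim1%%_%%dim2:INDEX%%' substitutes dim1's value (0.2) and the index
        # of dim2's current value in its value list (1), hence 'file_0.2_1'.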
self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1')
# TODO ML complete tests see C++ tests for parameter space
def test_common_id(self):
dim1 = Parameter("dim1")
dim1.values = [0.1, 0.1, 0.2, 0.2, 0.3, 0.3]
dim1.ids = ["0.1_1" ,"0.1_2","0.2_1" ,"0.2_2", "0.3_1" ,"0.3_2"]
dim2 = Parameter("dim2")
dim2.set_space_representation_type(parameter_space_representation_types.INDEX)
dim2.values = [1,1,1,2,2,2]
dim2.ids = ["0.1_1", "0.2_1", "0.3_1", "0.1_2", "0.2_2", "0.3_2"]
ps = ParameterSpace("ps")
ps.register_parameters([dim1, dim2])
dim1.value = 0.1
dim2.value = 1
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.1_1")
dim1.value = 0.2
dim2.value = 1
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.2_1")
dim1.value = 0.1
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.1_2")
dim1.value = 0.2
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.2_2")
dim1.value = 0.3
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.3_2")
if __name__ == '__main__':
unittest.main()
| en | 0.390301 | # -*- coding: utf-8 -*- Created on Mon Jun 14 11:49:43 2021 @author: Andres # TODO ML complete tests see C++ tests for parameter space | 2.924619 | 3 |
interpretable_ddts/runfiles/gym_runner.py | CORE-Robotics-Lab/Interpretable_DDTS_AISTATS2020 | 5 | 598 | # Created by <NAME> on 8/28/19
import gym
import numpy as np
import torch
from interpretable_ddts.agents.ddt_agent import DDTAgent
from interpretable_ddts.agents.mlp_agent import MLPAgent
from interpretable_ddts.opt_helpers.replay_buffer import discount_reward
import torch.multiprocessing as mp
import argparse
import copy
import random
def run_episode(q, agent_in, ENV_NAME, seed=0):
agent = agent_in.duplicate()
if ENV_NAME == 'lunar':
env = gym.make('LunarLander-v2')
elif ENV_NAME == 'cart':
env = gym.make('CartPole-v1')
else:
raise Exception('No valid environment selected')
done = False
torch.manual_seed(seed)
env.seed(seed)
np.random.seed(seed)
env.action_space.seed(seed)
random.seed(seed)
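    # Seed torch, the environment, numpy, the action space and Python's RNG so
    # rollouts are reproducible for a given seed.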
state = env.reset() # Reset environment and record the starting state
while not done:
action = agent.get_action(state)
# Step through environment using chosen action
state, reward, done, _ = env.step(action)
# env.render()
# Save reward
agent.save_reward(reward)
if done:
break
reward_sum = np.sum(agent.replay_buffer.rewards_list)
rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list,
agent.replay_buffer.value_list,
agent.replay_buffer.deeper_value_list)
agent.replay_buffer.rewards_list = rewards_list
agent.replay_buffer.advantage_list = advantage_list
agent.replay_buffer.deeper_advantage_list = deeper_advantage_list
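    # The buffer state is deep-copied below so it can be returned directly (or pushed
    # through the multiprocessing queue) to the learner.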
to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())]
if q is not None:
try:
q.put(to_return)
except RuntimeError as e:
print(e)
return to_return
return to_return
def main(episodes, agent, ENV_NAME):
running_reward_array = []
for episode in range(episodes):
reward = 0
returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME)
reward += returned_object[0]
running_reward_array.append(returned_object[0])
agent.replay_buffer.extend(returned_object[1])
if reward >= 499:
agent.save('../models/'+str(episode)+'th')
agent.end_episode(reward)
running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array)))
if episode % 50 == 0:
print(f'Episode {episode} Last Reward: {reward} Average Reward: {running_reward}')
if episode % 500 == 0:
agent.save('../models/'+str(episode)+'th')
return running_reward_array
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--agent_type", help="architecture of agent to run", type=str, default='ddt')
parser.add_argument("-e", "--episodes", help="how many episodes", type=int, default=2000)
parser.add_argument("-l", "--num_leaves", help="number of leaves for DDT/DRL ", type=int, default=8)
parser.add_argument("-n", "--num_hidden", help="number of hidden layers for MLP ", type=int, default=0)
parser.add_argument("-env", "--env_type", help="environment to run on", type=str, default='cart')
parser.add_argument("-gpu", help="run on GPU?", action='store_true')
args = parser.parse_args()
AGENT_TYPE = args.agent_type # 'ddt', 'mlp'
NUM_EPS = args.episodes # num episodes Default 1000
ENV_TYPE = args.env_type # 'cart' or 'lunar' Default 'cart'
USE_GPU = args.gpu # Applies for 'prolo' only. use gpu? Default false
if ENV_TYPE == 'lunar':
init_env = gym.make('LunarLander-v2')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
elif ENV_TYPE == 'cart':
init_env = gym.make('CartPole-v1')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
else:
raise Exception('No valid environment selected')
print(f"Agent {AGENT_TYPE} on {ENV_TYPE} ")
# mp.set_start_method('spawn')
mp.set_sharing_strategy('file_system')
for i in range(5):
bot_name = AGENT_TYPE + ENV_TYPE
if USE_GPU:
bot_name += 'GPU'
if AGENT_TYPE == 'ddt':
policy_agent = DDTAgent(bot_name=bot_name,
input_dim=dim_in,
output_dim=dim_out,
rule_list=False,
num_rules=args.num_leaves)
elif AGENT_TYPE == 'mlp':
policy_agent = MLPAgent(input_dim=dim_in,
bot_name=bot_name,
output_dim=dim_out,
num_hidden=args.num_hidden)
else:
raise Exception('No valid network selected')
reward_array = main(NUM_EPS, policy_agent, ENV_TYPE)
| en | 0.540288 | # Created by <NAME> on 8/28/19 # Reset environment and record the starting state # Step through environment using chosen action # env.render() # Save reward # 'ddt', 'mlp' # num episodes Default 1000 # 'cart' or 'lunar' Default 'cart' # Applies for 'prolo' only. use gpu? Default false # mp.set_start_method('spawn') | 2.215842 | 2 |
Week 08/tw10_words_by_prefix.py | andrewn488/OMSBA-5061 | 0 | 599 | """ TW10: Words by Prefix
Team: <NAME>, <NAME>
For: OMSBA 2061, Seattle University
Date: 11/3/2020
"""
def wordByPrefix(prefix_length, word):
    # First attempt: map each word in the collection to its prefix of the given length.
    my_dict = {}
    for key in word:
        my_dict[key] = key[:prefix_length]
    return my_dict
question_2 = ['able', 'ability', 'apple', 'tryst', 'trial', 'tremendous', 'tree']
my_list = []
for elem in question_2:
prefix = elem[:2]
my_list.append(prefix)
print(my_list)
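# With prefix length 2 this prints ['ab', 'ab', 'ap', 'tr', 'tr', 'tr', 'tr'].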
def question_3(prefix_length, word):
my_list = []
for key in word:
prefix = key[:prefix_length]
my_list.append(prefix)
return my_list
def wordByPrefix(prefix_length, word):
my_list = []
#count = 0
for key in word:
prefix = key[:prefix_length]
my_list.append(prefix)
count = {}
for letter in my_list:
if letter.isalpha():
if letter not in count:
count[letter] = 0
count[letter] += 1
return count
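# Example: counting how many question_2 words share each 2-letter prefix:
#   wordByPrefix(2, question_2)  # -> {'ab': 2, 'ap': 1, 'tr': 4}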
def wordByPrefix(prefix_length, word):
    # Final attempt: group the words themselves by their prefix of the given length.
    count = {}
    for key in word:
        prefix = key[:prefix_length]
        if prefix.isalpha():
            if prefix not in count:
                count[prefix] = []
            count[prefix].append(key)
    return count
| en | 0.654792 | TW10: Words by Prefix
Team: <NAME>, <NAME>
For: OMSBA 2061, Seattle University
Date: 11/3/2020 #count = 0 #count = 0 | 3.513327 | 4 |