function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
sequence
def test_empty_heterogeneous_tuples(self):
    """An empty expanding IN against the mixed-type (x, z) tuple matches no rows."""
    table = self.tables.some_table
    stmt = (
        select([table.c.id])
        .where(
            tuple_(table.c.x, table.c.z).in_(
                bindparam("q", expanding=True)
            )
        )
        .order_by(table.c.id)
    )
    self._assert_result(stmt, [], params={"q": []})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_empty_homogeneous_tuples(self):
    """An empty expanding IN against the all-integer (x, y) tuple matches no rows."""
    table = self.tables.some_table
    stmt = (
        select([table.c.id])
        .where(
            tuple_(table.c.x, table.c.y).in_(
                bindparam("q", expanding=True)
            )
        )
        .order_by(table.c.id)
    )
    self._assert_result(stmt, [], params={"q": []})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_bound_in_two_tuple(self):
    """Expanding IN bound to (int, int) pairs returns exactly the matching ids."""
    table = self.tables.some_table
    stmt = (
        select([table.c.id])
        .where(
            tuple_(table.c.x, table.c.y).in_(
                bindparam("q", expanding=True)
            )
        )
        .order_by(table.c.id)
    )
    self._assert_result(
        stmt, [(2,), (3,), (4,)], params={"q": [(2, 3), (3, 4), (4, 5)]}
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_bound_in_heterogeneous_two_tuple(self):
    """Expanding IN bound to (int, str) pairs returns exactly the matching ids."""
    table = self.tables.some_table
    stmt = (
        select([table.c.id])
        .where(
            tuple_(table.c.x, table.c.z).in_(
                bindparam("q", expanding=True)
            )
        )
        .order_by(table.c.id)
    )
    self._assert_result(
        stmt,
        [(2,), (3,), (4,)],
        params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]},
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_empty_set_against_integer_negation(self):
    """NOT IN with an empty expanding list matches every row (integer column)."""
    table = self.tables.some_table
    stmt = (
        select([table.c.id])
        .where(table.c.x.notin_(bindparam("q", expanding=True)))
        .order_by(table.c.id)
    )
    self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_empty_set_against_string_negation(self):
    """NOT IN with an empty expanding list matches every row (string column)."""
    table = self.tables.some_table
    stmt = (
        select([table.c.id])
        .where(table.c.z.notin_(bindparam("q", expanding=True)))
        .order_by(table.c.id)
    )
    self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def define_tables(cls, metadata):
    """Create the LIKE-escaping fixture table: integer pk plus a 50-char data column."""
    Table(
        "some_table",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("data", String(50)),
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def insert_data(cls, connection):
    """Populate some_table with strings exercising LIKE metacharacters (%, _, /, #, 9)."""
    connection.execute(
        cls.tables.some_table.insert(),
        [
            {"id": 1, "data": "abcdefg"},
            {"id": 2, "data": "ab/cdefg"},
            {"id": 3, "data": "ab%cdefg"},
            {"id": 4, "data": "ab_cdefg"},
            {"id": 5, "data": "abcde/fg"},
            {"id": 6, "data": "abcde%fg"},
            {"id": 7, "data": "ab#cdefg"},
            {"id": 8, "data": "ab9cdefg"},
            {"id": 9, "data": "abcde#fg"},
            {"id": 10, "data": "abcd9fg"},
        ],
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_startswith_unescaped(self):
    """Without autoescape, '%' in the prefix acts as a LIKE wildcard and matches all rows."""
    col = self.tables.some_table.c.data
    self._test(col.startswith("ab%c"), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_startswith_sqlexpr(self):
    """A SQL-expression prefix is passed through verbatim, so '%' stays a wildcard."""
    col = self.tables.some_table.c.data
    self._test(
        col.startswith(literal_column("'ab%c'")),
        {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_startswith_autoescape_escape(self):
    """autoescape with an explicit escape char makes '%' and '#' match literally."""
    col = self.tables.some_table.c.data
    self._test(col.startswith("ab%c", autoescape=True, escape="#"), {3})
    self._test(col.startswith("ab#c", autoescape=True, escape="#"), {7})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_endswith_sqlexpr(self):
    """A SQL-expression suffix keeps '%' as a wildcard, matching most rows."""
    col = self.tables.some_table.c.data
    self._test(
        col.endswith(literal_column("'e%fg'")), {1, 2, 3, 4, 5, 6, 7, 8, 9}
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_endswith_escape(self):
    """An explicit escape char makes '##' match a literal '#' in the suffix."""
    col = self.tables.some_table.c.data
    self._test(col.endswith("e##fg", escape="#"), {9})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_contains_unescaped(self):
    """Without autoescape, '%' inside contains() acts as a wildcard."""
    col = self.tables.some_table.c.data
    self._test(col.contains("b%cde"), {1, 2, 3, 4, 5, 6, 7, 8, 9})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_contains_escape(self):
    """An explicit escape char makes '##' match a literal '#' inside the string."""
    col = self.tables.some_table.c.data
    self._test(col.contains("b##cde", escape="#"), {7})
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def define_tables(cls, metadata):
    """Create 'square' with two server-computed columns: area = side*side, perimeter = 4*side."""
    Table(
        "square",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("side", Integer),
        Column("area", Integer, Computed("side * side")),
        Column("perimeter", Integer, Computed("4 * side")),
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def insert_data(cls, connection):
    """Insert two rows; only 'side' is supplied, the computed columns derive from it."""
    connection.execute(
        cls.tables.square.insert(),
        [{"id": 1, "side": 10}, {"id": 10, "side": 42}],
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_select_columns(self):
    """Computed columns evaluate to side*side and 4*side for the inserted rows."""
    with config.db.connect() as conn:
        res = conn.execute(
            select(
                [self.tables.square.c.area, self.tables.square.c.perimeter]
            )
            .select_from(self.tables.square)
            .order_by(self.tables.square.c.id)
        ).fetchall()
        eq_(res, [(100, 40), (1764, 168)])
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def define_tables(cls, metadata):
    """Create the EXISTS fixture table 'stuff': integer pk plus a 50-char data column."""
    Table(
        "stuff",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("data", String(50)),
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def insert_data(cls, connection):
    """Populate 'stuff': three duplicate rows and one distinct row."""
    connection.execute(
        cls.tables.stuff.insert(),
        [
            {"id": 1, "data": "some data"},
            {"id": 2, "data": "some data"},
            {"id": 3, "data": "some data"},
            {"id": 4, "data": "some other data"},
        ],
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_select_exists_false(self, connection):
    """EXISTS over a predicate matching no rows yields an empty result set."""
    stuff = self.tables.stuff
    eq_(
        connection.execute(
            select([literal(1)]).where(
                exists().where(stuff.c.data == "no data")
            )
        ).fetchall(),
        [],
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def define_tables(cls, metadata):
    """Create the IS DISTINCT FROM fixture table with two nullable integer columns."""
    Table(
        "is_distinct_test",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("col_a", Integer, nullable=True),
        Column("col_b", Integer, nullable=True),
    )
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def test_is_or_isnot_distinct_from( self, col_a_value, col_b_value, expected_row_count_for_is, connection
gltn/stdm
[ 26, 29, 26, 55, 1401777923 ]
def __call__(cls, verb, *args):
    """Metaclass call: intern instances by *verb* so each verb maps to one singleton.

    NOTE(review): on a cache hit the extra *args are silently ignored, so a
    later call with a different description keeps the first one — confirm
    this is intended.
    """
    if verb not in cls.verbs:
        cls.verbs[verb] = super(_VerbSingleton, cls).__call__(verb, *args)
    return cls.verbs[verb]
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __init__(self, verb, description):
    """ Initialize EventType.

    :verb: Scim verb
    :description: HR description text
    """
    self.verb = verb
    self.description = description
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __eq__(self, other):
    """Equality: two EventTypes are equal iff their verbs match.

    NOTE(review): no __hash__ is visible here; if none is defined on the
    class, defining __eq__ makes instances unhashable on Python 3 — confirm.
    """
    return isinstance(other, EventType) and other.verb == self.verb
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __init__(self, entity_id, entity_type, ident):
    """Reference to an entity.

    :param entity_id: numeric entity id (coerced to int)
    :param entity_type: type designation of the entity
    :param ident: human-readable identifier for the entity
    """
    self.entity_id = int(entity_id)
    self.entity_type = entity_type
    self.ident = ident
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __eq__(self, other):
    """EntityRefs compare equal on entity_id alone; entity_type and ident are ignored."""
    return (isinstance(other, EntityRef) and
            self.entity_id == other.entity_id)
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __init__(self, slot):
    """ Creates a new datetime descriptor.

    :param str slot:
        The attribute name where the actual value is stored.
    """
    self.slot = slot
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __get__(self, obj, cls=None):
    """Descriptor read: return the value stored in *obj*'s slot, or None.

    When accessed on the class itself (obj is None), return the descriptor
    object, matching standard descriptor protocol behavior.

    Fix: the original used ``if not obj``, which wrongly returns the
    descriptor for any *falsy* instance (e.g. one whose __bool__ returns
    False); class access must be detected with an identity check.
    """
    if obj is None:  # attribute looked up on the class, not an instance
        return self
    return getattr(obj, self.slot, None)
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __delete__(self, obj):
    """Descriptor delete: remove the stored value from *obj*, if present."""
    if hasattr(obj, self.slot):
        delattr(obj, self.slot)
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def __init__(self, event_type, subject=None, objects=None, context=None,
             attributes=None, timestamp=None, scheduled=None):
    """
    :param EventType event_type: the type of event
    :param EntityRef subject: reference to the affected entity
    :param list objects: sequence of other affected objects (EntityRef)
    :param list context: sequence of affected systems (str)
    :param list attributes: sequence of affected attributes (str)
    :param datetime timestamp: when the event originated
    :param datetime scheduled: when the event should be issued
    """
    self.event_type = event_type
    self.subject = subject
    self.timestamp = timestamp
    self.scheduled = scheduled
    # collections are normalized to sets (deduplicated, order-free)
    self.objects = set(objects or [])
    self.context = set(context or [])
    self.attributes = set(attributes or [])
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def mergeable(self, other):
    """Can this event be merged with other."""
    # Scheduled events must fire at their own time; never merge them.
    if self.scheduled is not None:
        return False
    # Only events about the same entity can merge.
    if self.subject != other.subject:
        return False
    # A CREATE absorbs anything except a later deactivate/remove.
    if self.event_type == CREATE:
        return other.event_type not in (DEACTIVATE, REMOVE)
    # A DELETE absorbs most follow-up modifications.
    if self.event_type == DELETE:
        return other.event_type in (REMOVE, DEACTIVATE, ADD, ACTIVATE,
                                    MODIFY, PASSWORD)
    if (self.event_type == other.event_type and
            self.event_type in (ADD, REMOVE, ACTIVATE, DEACTIVATE)):
        return True
    # NOTE(review): the context comparison runs only after the rules above,
    # so CREATE/DELETE merges ignore context — confirm this is intended.
    if self.context != other.context:
        return False
    return True
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def ret_self():
    """Closure helper: fold *other*'s objects into this event and return it as a one-element list."""
    self.objects.update(other.objects)
    return [self]
unioslo/cerebrum
[ 9, 3, 9, 40, 1396362121 ]
def get_path():
    """Return the add-on's install path as unicode (Python 2 bytes -> utf-8)."""
    return addon.getAddonInfo('path').decode('utf-8')
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def translate_path(path):
    """Resolve a special:// Kodi path to a real filesystem path, as unicode."""
    return xbmc.translatePath(path).decode('utf-8')
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def get_version():
    """Return the add-on's version string from its metadata."""
    return addon.getAddonInfo('version')
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def get_name():
    """Return the add-on's display name from its metadata."""
    return addon.getAddonInfo('name')
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def end_of_directory(cache_to_disc=True):
    """Signal Kodi that the directory listing is complete (sys.argv[1] is the plugin handle)."""
    xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=cache_to_disc)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def create_item(queries, label, thumb='', fanart='', is_folder=None,
                is_playable=None, total_items=0, menu_items=None,
                replace_menu=False):
    """Build a ListItem for *label*/*thumb* and hand it to add_item with the remaining options."""
    list_item = xbmcgui.ListItem(label, iconImage=thumb, thumbnailImage=thumb)
    add_item(queries, list_item, fanart, is_folder, is_playable, total_items,
             menu_items, replace_menu)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def parse_query(query):
    """Parse a plugin URL query string into a dict.

    Always supplies a default mode of 'main'.  Single-valued parameters
    are flattened to their value; multi-valued ones stay as lists.
    """
    result = {'mode': 'main'}
    if query.startswith('?'):
        query = query[1:]
    for key, values in urlparse.parse_qs(query).items():
        result[key] = values[0] if len(values) == 1 else values
    return result
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def __init__(self, options):
    """Configure the PHP config-file security rule (name, description, target files)."""
    MasterRule.__init__(self, options)
    self.short_name="php"
    self.long_name="Checks security problems on php config file"
    self.type="config"
    # Candidate php.ini locations across distributions.
    self.required_files = ['/etc/php5/apache2/php.ini',
                           '/etc/php5/cli/php.ini', '/etc/php.ini']
hdoria/HnTool
[ 48, 22, 48, 5, 1331344993 ]
def setUp(self):
    """Build an MDA wired to a 9x14 mock character generator."""
    self.cg = CharacterGeneratorMock(width = 9, height = 14)
    self.mda = MonochromeDisplayAdapter(self.cg)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def reset_testable(self):
    """Test hook: count reset invocations instead of performing a real reset."""
    self.reset_count += 1
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_ports_list(self):
    """The adapter claims the full MDA I/O port range 0x3B0-0x3BB."""
    self.assertEqual(self.mda.get_ports_list(), [0x03B0, 0x03B1, 0x03B2,
                                                 0x03B3, 0x03B4, 0x03B5,
                                                 0x03B6, 0x03B7, 0x03B8,
                                                 0x03B9, 0x03BA, 0x03BB])
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_get_memory_size(self):
    """The adapter exposes 4KB of video memory."""
    self.assertEqual(self.mda.get_memory_size(), 4096)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_initial_state(self):
    """Fresh adapter: zeroed control register, no screen, mock char gen, 4KB RAM."""
    self.assertEqual(self.mda.control_reg, 0x00)
    # NOTE(review): this duplicates the previous assertion — possibly meant
    # to check a different register (e.g. a status register); confirm intent.
    self.assertEqual(self.mda.control_reg, 0x00)
    self.assertEqual(self.mda.screen, None)
    self.assertEqual(self.mda.char_generator, self.cg)
    self.assertEqual(len(self.mda.video_ram), 4096)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_byte_updates_video_ram(self):
    """A byte written to video memory is stored in the backing RAM array."""
    self.mda.mem_write_byte(0x0000, 0x41)
    self.assertEqual(self.mda.video_ram[0x0000], 0x41)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_byte_calls_char_generator_top_left(self):
    """Writing offset 0 blits the glyph at screen position (0, 0)."""
    self.mda.mem_write_byte(0x0000, 0x41)
    self.assertEqual(self.cg.last_blit,
                     (None, (0, 0), 0x41, MDA_GREEN, MDA_BLACK))
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_byte_calls_char_generator_bottom_right(self):
    """Writing the last character cell (offset 3998) blits at (711, 336)."""
    self.mda.mem_write_byte(3998, 0xFF)
    self.assertEqual(self.cg.last_blit,
                     (None, (711, 336), 0xFF, MDA_GREEN, MDA_BLACK))
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_byte_char_before_attribute(self):
    """Writing the char byte first, then its attribute, re-blits with the new color."""
    self.mda.mem_write_byte(3998, 0xFF)
    self.assertEqual(self.cg.last_blit,
                     (None, (711, 336), 0xFF, MDA_GREEN, MDA_BLACK))
    self.mda.mem_write_byte(3999, MDA_ATTR_INTENSITY)
    self.assertEqual(self.cg.last_blit,
                     (None, (711, 336), 0xFF, MDA_BRIGHT_GREEN, MDA_BLACK))
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_byte_attribute_before_char(self):
    """Writing the attribute first blits the (still 0x00) char, then the real char."""
    self.mda.mem_write_byte(3999, MDA_ATTR_INTENSITY)
    self.assertEqual(self.cg.last_blit,
                     (None, (711, 336), 0x00, MDA_BRIGHT_GREEN, MDA_BLACK))
    self.mda.mem_write_byte(3998, 0xFF)
    self.assertEqual(self.cg.last_blit,
                     (None, (711, 336), 0xFF, MDA_BRIGHT_GREEN, MDA_BLACK))
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_byte_write_off_screen(self):
    """A write just past the 4000-byte text area triggers no blit."""
    self.mda.mem_write_byte(4000, 0xFF)
    self.assertEqual(self.cg.last_blit, None)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_read_byte(self):
    """Reading video memory returns the byte previously stored there."""
    self.mda.video_ram[77] = 0xA5
    self.assertEqual(self.mda.mem_read_byte(77), 0xA5)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_read_byte_off_screen(self):
    """Reads past the text area return 0x00 rather than raising."""
    self.assertEqual(self.mda.mem_read_byte(4000), 0x00)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_reset_on_high_resolution_enable(self):
    """No reset has occurred before the high-resolution bit is written."""
    self.assertEqual(self.reset_count, 0)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_word_at_top_left(self): self.mda.mem_write_word(0x0000, 0x0841) # 'A' with intensity. self.assertEqual(self.mda.video_ram[0x0000], 0x41) self.assertEqual(self.mda.video_ram[0x0001], 0x08) self.assertEqual(self.cg.last_blit, (None, (0, 0), 0x41, MDA_BRIGHT_GREEN, MDA_BLACK))
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_word_at_bottom_right(self): self.mda.mem_write_word(3998, 0x085A) # 'Z' with intensity. self.assertEqual(self.mda.video_ram[3998], 0x5A) self.assertEqual(self.mda.video_ram[3999], 0x08) self.assertEqual(self.cg.last_blit, (None, (711, 336), 0x5A, MDA_BRIGHT_GREEN, MDA_BLACK))
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_write_word_at_bottom_right_just_past(self): self.mda.mem_write_word(3999, 0xFF08) # 'Z' with intensity. self.assertEqual(self.mda.video_ram[3998], 0x00) # Should be unmodified. self.assertEqual(self.mda.video_ram[3999], 0x08) self.assertEqual(self.cg.last_blit, (None, (711, 336), 0x00, MDA_BRIGHT_GREEN, MDA_BLACK))
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_read_word(self):
    """A word read combines two adjacent bytes little-endian."""
    self.mda.video_ram[0x0000] = 0x41
    self.mda.video_ram[0x0001] = 0x08
    self.assertEqual(self.mda.mem_read_word(0x0000), 0x0841)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_mem_read_word_just_past_the_end(self):
    """A word read straddling the end returns only the in-range low byte."""
    self.mda.video_ram[3998] = 0x12
    self.mda.video_ram[3999] = 0x34
    self.assertEqual(self.mda.mem_read_word(3999), 0x0034)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_horizontal_retrace_toggles(self):
    """Successive status-register reads toggle the retrace bit (bit 0)."""
    self.assertEqual(self.mda.io_read_byte(0x3BA), 0xF0)
    self.assertEqual(self.mda.io_read_byte(0x3BA), 0xF1)
    self.assertEqual(self.mda.io_read_byte(0x3BA), 0xF0)
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_current_pixel_updates_on_status_read(self):
    """Each status read advances the simulated beam by one pixel."""
    self.assertEqual(self.mda.current_pixel, [0, 0])
    self.mda.io_read_byte(0x3BA)
    self.assertEqual(self.mda.current_pixel, [1, 0])
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_current_pixel_wraps_right(self):
    """The beam wraps from the last column (719) to the start of the next row."""
    self.mda.current_pixel = [719, 0]
    self.mda.io_read_byte(0x3BA)
    self.assertEqual(self.mda.current_pixel, [0, 1])
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def test_current_pixel_wraps_bottom(self):
    """The beam wraps from the last pixel (719, 349) back to (0, 0)."""
    self.mda.current_pixel = [719, 349]
    self.mda.io_read_byte(0x3BA)
    self.assertEqual(self.mda.current_pixel, [0, 0])
astamp/PyXT
[ 4, 1, 4, 15, 1434075737 ]
def _getSmartIndenter(indenterName, qpart, indenter):
    """Look up an indenter class by name and return an instance of it.

    Available indenters: none, normal, cstyle, haskell, lilypond, lisp,
    python, ruby, scheme, xml.  The name is matched case-insensitively.

    haskell and lilypond are not implemented yet and fall back to the
    normal indenter (with a warning), so their dedicated import branches
    from the original (which were unreachable dead code behind the first
    check) have been removed.

    Raises KeyError if the name is unknown.
    """
    indenterName = indenterName.lower()

    if indenterName in ('haskell', 'lilypond'):  # not supported yet
        logger.warning('Smart indentation for %s not supported yet. But you could be a hero who implemented it' % indenterName)
        from qutepart.indenter.base import IndentAlgNormal as indenterClass
    elif 'none' == indenterName:
        from qutepart.indenter.base import IndentAlgBase as indenterClass
    elif 'normal' == indenterName:
        from qutepart.indenter.base import IndentAlgNormal as indenterClass
    elif 'cstyle' == indenterName:
        from qutepart.indenter.cstyle import IndentAlgCStyle as indenterClass
    elif 'python' == indenterName:
        from qutepart.indenter.python import IndentAlgPython as indenterClass
    elif 'ruby' == indenterName:
        from qutepart.indenter.ruby import IndentAlgRuby as indenterClass
    elif 'xml' == indenterName:
        from qutepart.indenter.xmlindent import IndentAlgXml as indenterClass
    elif 'lisp' == indenterName:
        from qutepart.indenter.lisp import IndentAlgLisp as indenterClass
    elif 'scheme' == indenterName:
        from qutepart.indenter.scheme import IndentAlgScheme as indenterClass
    else:
        raise KeyError("Indenter %s not found" % indenterName)

    return indenterClass(qpart, indenter)
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def __init__(self, qpart):
    """Create an indenter bound to *qpart* with default width/tab settings and the 'normal' algorithm."""
    self._qpart = qpart
    self.width = self._DEFAULT_INDENT_WIDTH
    self.useTabs = self._DEFAULT_INDENT_USE_TABS
    self._smartIndenter = _getSmartIndenter('normal', self._qpart, self)
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def text(self):
    """One unit of indentation: a tab character, or ``width`` spaces."""
    return '\t' if self.useTabs else ' ' * self.width
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def autoIndentBlock(self, block, char = '\n'):
    """Indent block after Enter pressed or trigger character typed
    """
    cursor = QTextCursor(block)
    currentText = block.text()
    # Current leading whitespace of the line.
    spaceAtStartLen = len(currentText) - len(currentText.lstrip())
    currentIndent = currentText[:spaceAtStartLen]
    indent = self._smartIndenter.computeIndent(block, char)
    # Only touch the document when the computed indent actually differs.
    if indent is not None and indent != currentIndent:
        self._qpart.replaceText(block.position(), spaceAtStartLen, indent)
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def blockIndentation(block):
    """Return the leading whitespace of *block*'s text."""
    line = block.text()
    stripped = line.lstrip()
    return line[:len(line) - len(stripped)]
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def indentBlock(block):
    """Closure helper: append one indent unit (or a single space if withSpace) at the end of *block*'s leading whitespace."""
    cursor = cursorAtSpaceEnd(block)
    cursor.insertText(' ' if withSpace else self.text())
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def unIndentBlock(block):
    """Closure helper: remove one indent level (or one char if withSpace) from *block*."""
    currentIndent = blockIndentation(block)

    if currentIndent.endswith('\t'):
        # A trailing tab is always removed as a single character.
        charsToRemove = 1
    elif withSpace:
        charsToRemove = 1 if currentIndent else 0
    else:
        if self.useTabs:
            charsToRemove = min(spacesCount(currentIndent), self.width)
        else:  # spaces
            if currentIndent.endswith(self.text()):  # remove indent level
                charsToRemove = self.width
            else:  # remove all spaces
                charsToRemove = min(spacesCount(currentIndent), self.width)

    if charsToRemove:
        cursor = cursorAtSpaceEnd(block)
        cursor.setPosition(cursor.position() - charsToRemove, QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def onShortcutIndentAfterCursor(self):
    """Tab pressed and no selection. Insert text after cursor
    """
    cursor = self._qpart.textCursor()

    def insertIndent():
        # Insert a tab, or pad with spaces up to the next indent column.
        if self.useTabs:
            cursor.insertText('\t')
        else:  # indent to integer count of indents from line start
            charsToInsert = self.width - (len(self._qpart.textBeforeCursor()) % self.width)
            cursor.insertText(' ' * charsToInsert)

    if cursor.positionInBlock() == 0:  # if no any indent - indent smartly
        block = cursor.block()
        self.autoIndentBlock(block, '')

        # if no smart indentation - just insert one indent
        if self._qpart.textBeforeCursor() == '':
            insertIndent()
    else:
        insertIndent()
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def onAutoIndentTriggered(self):
    """Indent current line or selected lines
    """
    cursor = self._qpart.textCursor()

    startBlock = self._qpart.document().findBlock(cursor.selectionStart())
    endBlock = self._qpart.document().findBlock(cursor.selectionEnd())

    if startBlock != endBlock:  # indent multiply lines
        stopBlock = endBlock.next()
        block = startBlock

        # Group the whole multi-line operation into one undo step.
        with self._qpart:
            while block != stopBlock:
                self.autoIndentBlock(block, '')
                block = block.next()
    else:  # indent 1 line
        self.autoIndentBlock(startBlock, '')
amirgeva/coide
[ 4, 3, 4, 7, 1410892521 ]
def setup(*args, **kwargs):
    """Plugin setup hook: verify the leptonica shared library is loadable, else raise NidabaPluginException."""
    try:
        ctypes.cdll.LoadLibrary(leptlib)
    except Exception as e:
        # NOTE(review): e.message is Python 2 only — this re-raise breaks on
        # Python 3; confirm the project is py2-only.
        raise NidabaPluginException(e.message)
OpenPhilology/nidaba
[ 82, 12, 82, 5, 1428784500 ]
def sauvola(doc, method=u'sauvola', whsize=10, factor=0.35):
    """ Binarizes an input document utilizing Sauvola thresholding as described
    in [0]. Expects 8bpp grayscale images as input.

    [0] Sauvola, Jaakko, and Matti Pietikäinen. "Adaptive document image
    binarization." Pattern recognition 33.2 (2000): 225-236.

    Args:
        doc (unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files
        whsize (int): The window width and height that local statistics are
                      calculated on are twice the value of whsize. The minimal
                      value is 2.
        factor (float): The threshold reduction factor due to variance. 0 =<
                        factor < 1.

    Returns:
        (unicode, unicode): Storage tuple of the output file

    Raises:
        NidabaInvalidParameterException: Input parameters are outside the
                                         valid range.
    """
    input_path = storage.get_abs_path(*doc)
    # Output name encodes the method and its parameters.
    output_path = storage.insert_suffix(input_path, method, unicode(whsize),
                                        unicode(factor))
    lept_sauvola(input_path, output_path, whsize, factor)
    return storage.get_storage_path(output_path)
OpenPhilology/nidaba
[ 82, 12, 82, 5, 1428784500 ]
def dewarp(doc, method=u'dewarp'):
    """ Removes perspective distortion (as commonly exhibited by overhead scans)
    from an 1bpp input image.

    Args:
        doc (unicode, unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files.

    Returns:
        (unicode, unicode): Storage tuple of the output file
    """
    input_path = storage.get_abs_path(*doc)
    output_path = storage.insert_suffix(input_path, method)
    lept_dewarp(input_path, output_path)
    return storage.get_storage_path(output_path)
OpenPhilology/nidaba
[ 82, 12, 82, 5, 1428784500 ]
def deskew(doc, method=u'deskew'):
    """ Removes skew (rotational distortion) from an 1bpp input image.

    Args:
        doc (unicode, unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files.

    Returns:
        (unicode, unicode): Storage tuple of the output file
    """
    input_path = storage.get_abs_path(*doc)
    output_path = storage.insert_suffix(input_path, method)
    lept_deskew(input_path, output_path)
    return storage.get_storage_path(output_path)
OpenPhilology/nidaba
[ 82, 12, 82, 5, 1428784500 ]
def init():
    """Initialize the plugin manager (stub, not yet implemented)."""
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def loadPlugin(plugin_name):
    """ Load the named plugin (stub, not yet implemented).

    @type plugin_name: str
    """
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def unLoadPlugin(plugin_name):
    """ Unload the named plugin (stub, not yet implemented).

    @type plugin_name: str
    """
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def registerPlugin(plugin_instance):
    """ Register a plugin instance with the manager (stub, not yet implemented).

    @type plugin_instance: L{amsn2.plugins.developers.aMSNPlugin}
    """
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def getPlugins():
    """Return all known plugins (stub, not yet implemented)."""
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def getPluginsWithStatus():
    """Return all known plugins with their load status (stub, not yet implemented)."""
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def getLoadedPlugins():
    """Return only the currently loaded plugins (stub, not yet implemented)."""
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def findPlugin(plugin_name):
    """ Find a plugin by name (stub, not yet implemented).

    @type plugin_name: str
    """
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def saveConfig(plugin_name, data):
    """ Persist configuration data for the named plugin (stub, not yet implemented).

    @type plugin_name: str
    @type data: object
    """
    pass
amsn/amsn2
[ 100, 67, 100, 1, 1259403254 ]
def _rematcher(regex):
    """compile the regexp with the best available regexp engine and
    return a matcher function"""
    m = util.re.compile(regex)
    try:
        # slightly faster, provided by facebook's re2 bindings
        return m.test_match
    except AttributeError:
        return m.match
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def _expandsubinclude(kindpats, root):
    """Returns the list of subinclude matcher args and the kindpats without the
    subincludes in it."""
    relmatchers = []
    other = []

    for kind, pat, source in kindpats:
        if kind == "subinclude":
            # Resolve the subinclude file relative to the file that named it.
            sourceroot = pathutil.dirname(util.normpath(source))
            pat = util.pconvert(pat)
            path = pathutil.join(sourceroot, pat)

            newroot = pathutil.dirname(path)
            matcherargs = (newroot, "", [], ["include:%s" % path])

            # The prefix is the path of the subinclude root relative to *root*.
            prefix = pathutil.canonpath(root, root, newroot)
            if prefix:
                prefix += "/"
            relmatchers.append((prefix, matcherargs))
        else:
            other.append((kind, pat, source))

    return relmatchers, other
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def match( root, cwd, patterns=None, include=None, exclude=None, default="glob", exact=False, auditor=None, ctx=None, warn=None, badfn=None, icasefs=False,
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def normalize(patterns, default, root, cwd, auditor, warn):
    """Closure helper: normalize patterns for a case-insensitive filesystem.

    NOTE(review): dsnormalize and dirstate come from the enclosing scope —
    presumably dirstate-backed case folding; confirm against the caller.
    """
    kp = _donormalize(patterns, default, root, cwd, auditor, warn)
    kindpats = []
    for kind, pats, source in kp:
        if kind not in ("re", "relre"):  # regex can't be normalized
            p = pats
            pats = dsnormalize(pats)

            # Preserve the original to handle a case only rename.
            if p != pats and p in dirstate:
                kindpats.append((kind, p, source))

        kindpats.append((kind, pats, source))
    return kindpats
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def exact(root, cwd, files, badfn=None):
    """Return a matcher that matches exactly the given list of *files*."""
    return exactmatcher(root, cwd, files, badfn=badfn)
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def never(root, cwd):
    """Return a matcher that matches nothing."""
    return nevermatcher(root, cwd)
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def badmatch(match, badfn):
    """Return a shallow copy of *match* whose ``bad`` method is *badfn*.

    The matcher passed in is left untouched.
    """
    clone = copy.copy(match)
    clone.bad = badfn
    return clone
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def _testrefastpath(repat):
    """Test if a re pattern can use fast path.

    That is, for every "$A/$B" path the pattern matches, "$A" must also be
    matched.  Proving this in general is impractical, so only a couple of
    known-safe shapes are recognized; everything else conservatively
    returns False.

    # XXX: It's very hard to implement this. These are what need to be
    # supported in production and tests. Very hacky. But we plan to get rid
    # of re matchers eventually.
    """
    # Negative-lookahead prefix rules such as "(?!experimental/)".
    is_single_lookahead = (
        repat.startswith("(?!")
        and repat.endswith(")")
        and repat.count(")") == 1
    )
    if is_single_lookahead:
        return True
    # The exact rule used in doctests.
    return repat == "(i|j)$"
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def __init__(self, *args, **kwargs):
    """Initialize a pattern-tree node with empty match state, then defer to the base mapping."""
    # If True, avoid entering subdirectories, and match everything recursively,
    # unconditionally.
    self.matchrecursive = False

    # If True, avoid entering subdirectories, and return "unsure" for
    # everything. This is set to True when complex re patterns (potentially
    # including "/") are used.
    self.unsurerecursive = False

    # Patterns for matching paths in this directory.
    self._kindpats = []

    # Glob patterns used to match parent directories of another glob
    # pattern.
    self._globdirpats = []

    super(_tree, self).__init__(*args, **kwargs)
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def visitdir(self, path):
    """Similar to matcher.visitdir"""
    path = normalizerootdir(path, "visitdir")
    if self.matchrecursive:
        return "all"
    elif self.unsurerecursive:
        return True
    elif path == "":
        # The root directory is always worth visiting.
        return True

    if self._kindpats and self._compiledpats(path):
        # XXX: This is incorrect. But re patterns are already used in
        # production. We should kill them!
        # Need to test "if every string starting with 'path' matches".
        # Obviously it's impossible to test *every* string with the
        # standard regex API, therefore pick a random strange path to test
        # it approximately.
        if self._compiledpats("%s/*/_/-/0/*" % path):
            return "all"
        else:
            return True

    if self._globdirpats and self._compileddirpats(path):
        return True

    # Recurse into the child node for the first path component.
    subdir, rest = self._split(path)
    subtree = self.get(subdir)
    if subtree is None:
        return False
    else:
        return subtree.visitdir(rest)
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def _compiledpats(self):
    """Matcher function compiled from this node's kindpats.

    NOTE(review): called like an attribute elsewhere, so this is presumably
    decorated as a cached property outside this view — confirm.
    """
    pat, matchfunc = _buildregexmatch(self._kindpats, "")
    return matchfunc
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def _compileddirpats(self):
    """Matcher function compiled from this node's glob *directory* patterns (anchored with '$').

    NOTE(review): like _compiledpats, presumably a cached property — confirm
    the decorator outside this view.
    """
    pat, matchfunc = _buildregexmatch(
        [("glob", p, "") for p in self._globdirpats], "$"
    )
    return matchfunc
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]
def _remainingpats(pat, prefix):
    """list of patterns with prefix stripped

    >>> _remainingpats("a/b/c", "")
    ['a/b/c']
    >>> _remainingpats("a/b/c", "a")
    ['b/c']
    >>> _remainingpats("a/b/c", "a/b")
    ['c']
    >>> _remainingpats("a/b/c", "a/b/c")
    []
    >>> _remainingpats("", "")
    []
    """
    if not prefix:
        # No prefix: the pattern survives whole (unless it is empty).
        return [pat] if pat else []
    if prefix == pat:
        return []
    # Otherwise prefix must be a proper path-component prefix of pat.
    assert pat[len(prefix)] == "/"
    return [pat[len(prefix) + 1:]]
facebookexperimental/eden
[ 4737, 192, 4737, 106, 1462467227 ]