Dataset columns:
  rem     — string, lengths 0–322k
  add     — string, lengths 0–2.05M
  context — string, lengths 8–228k
def interfaceConfiguration(name, ip):
def interfaceConfiguration(name):
def interfaceConfiguration(name, ip, out=None):
    """Write one olsrd "Interface" stanza to the configuration stream.

    @param name: Interface name (e.g. "wlan0")
    @param ip: Source IPv4 address, or None to omit the IPv4Src directive
    @param out: Optional file-like destination; defaults to the module-level
                stream ``f`` that the original code wrote to, so existing
                callers are unaffected
    """
    # Generalized: the output stream used to be the hard-coded global `f`.
    dest = out if out is not None else f
    dest.write('Interface "{0}"\n'.format(name))
    dest.write('{\n')
    dest.write(' IPv4Multicast 255.255.255.255\n')
    # The explicit source address is only pinned when one was allocated.
    if ip is not None:
        dest.write(' IPv4Src {0}\n'.format(ip))
    # Protocol timer values in seconds — presumably tuned for this mesh
    # deployment; TODO confirm against the network's olsrd policy.
    dest.write(' HelloInterval 5.0\n')
    dest.write(' HelloValidityTime 40.0\n')
    dest.write(' TcInterval 7.0\n')
    dest.write(' TcValidityTime 161.0\n')
    dest.write(' MidInterval 18.0\n')
    dest.write(' MidValidityTime 324.0\n')
    dest.write(' HnaInterval 18.0\n')
    dest.write(' HnaValidityTime 324.0\n')
    dest.write('}\n')
    dest.write('\n')
if ip is not None: f.write(' IPv4Src {0}\n'.format(ip))
def interfaceConfiguration(name, ip):
    """Emit the olsrd configuration block for a single interface to `f`."""
    # Stanza header and the always-present multicast setting.
    for chunk in (
        'Interface "{0}"\n'.format(name),
        '{\n',
        ' IPv4Multicast 255.255.255.255\n',
    ):
        f.write(chunk)
    # The source address directive is optional.
    if ip is not None:
        f.write(' IPv4Src {0}\n'.format(ip))
    # Fixed protocol timers, then the closing brace and a separator line.
    for chunk in (
        ' HelloInterval 5.0\n',
        ' HelloValidityTime 40.0\n',
        ' TcInterval 7.0\n',
        ' TcValidityTime 161.0\n',
        ' MidInterval 18.0\n',
        ' MidValidityTime 324.0\n',
        ' HnaInterval 18.0\n',
        ' HnaValidityTime 324.0\n',
        '}\n',
        '\n',
    ):
        f.write(chunk)
interfaceConfiguration(interface['name'], interface['ip'])
interfaceConfiguration(interface['name'])
def interfaceConfiguration(name, ip):
    # Writes one olsrd "Interface" stanza to the module-level config file `f`.
    # `name` is the interface name; `ip`, when not None, becomes the IPv4Src.
    f.write('Interface "{0}"\n'.format(name))
    f.write('{\n')
    f.write(' IPv4Multicast 255.255.255.255\n')
    # The explicit source address is only emitted when one was supplied.
    if ip is not None:
        f.write(' IPv4Src {0}\n'.format(ip))
    # Protocol timer values (seconds) — presumably tuned for this mesh
    # deployment; TODO confirm against the network's olsrd policy.
    f.write(' HelloInterval 5.0\n')
    f.write(' HelloValidityTime 40.0\n')
    f.write(' TcInterval 7.0\n')
    f.write(' TcValidityTime 161.0\n')
    f.write(' MidInterval 18.0\n')
    f.write(' MidValidityTime 324.0\n')
    f.write(' HnaInterval 18.0\n')
    f.write(' HnaValidityTime 324.0\n')
    f.write('}\n')
    f.write('\n')
f.write('\n') f.write(' f.write('config alias routerid\n') f.write('\toption interface loopback\n') f.write('\toption proto static\n') f.write('\toption ipaddr %s\n' % self.ip) f.write('\toption netmask 255.255.255.255\n')
def __generateNetworkConfig(self, f):
    """Write the VLAN switch (when present) and static loopback sections."""
    # A tuple-valued port layout indicates a board with a configurable switch.
    layout = portLayouts[self.portLayout]
    if isinstance(layout, tuple):
        # Boards with a known switch id use it; everything else is "eth0".
        switch_name = switchIds[self.portLayout] if self.portLayout in switchIds else "eth0"
        f.write('#### VLAN configuration\n')
        f.write('config switch %s\n' % switch_name)
        f.write('\toption vlan0 "%s"\n' % layout[0])
        f.write('\toption vlan1 "%s"\n' % layout[1])
        f.write('\n')

    # Loopback configuration (static)
    f.write('#### Loopback configuration\n')
    f.write('config interface loopback\n')
    f.write('\toption ifname "lo"\n')
    f.write('\toption proto static\n')
    f.write('\toption ipaddr 127.0.0.1\n')
    f.write('\toption netmask 255.0.0.0\n')
    f.write('\n')
if not self.profile:
from wlanlj.generator.models import Profile try: self.profile except Profile.DoesNotExist:
def adapt_to_router_type(self):
    """
    Ensures that new router type is compatible with current configuration.
    """
    # Nodes without an image-generator profile need no adaptation.
    if not self.profile:
        return

    # Run every transition plugin registered for this template, lowest
    # priority first, letting each one mutate this node in turn.
    chain = self.profile.template.adaptation_chain.all().order_by("priority")
    for entry in chain:
        plugin_cls = load_plugin(entry.class_name, required_super = RouterTransition)
        plugin_cls().adapt(self)
if db_backend.startswith('postgresql'):
if db_backend.find('postgresql') != -1:
def ensure_success(errcode):
    """Abort the whole script when a shell command returned non-zero.

    @param errcode: Exit status of the command that just ran
    """
    if errcode != 0:
        # Parenthesized print is valid under both Python 2 and 3;
        # sys.exit is preferred over the site-provided exit() builtin.
        import sys
        print("ERROR: Command failed to execute, aborting!")
        sys.exit(1)
elif db_backend.startswith('sqlite'):
elif db_backend.find('sqlite') != -1:
def ensure_success(errcode):
    """Terminate the script if the given command exit status is non-zero.

    @param errcode: Exit status of the command that just ran
    """
    if errcode != 0:
        # print(...) works identically on Python 2 and 3 for one string;
        # sys.exit is the reliable spelling (exit() is a site extra).
        import sys
        print("ERROR: Command failed to execute, aborting!")
        sys.exit(1)
elif db_backend.startswith('mysql'):
elif db_backend.find('mysql') != -1:
def ensure_success(errcode):
    """Stop the whole setup script on a failed command exit status.

    @param errcode: Exit status of the command that just ran
    """
    if errcode != 0:
        # Python 2/3 compatible print; sys.exit instead of the exit() builtin.
        import sys
        print("ERROR: Command failed to execute, aborting!")
        sys.exit(1)
return _("unknown nodes")
return _("Unknown nodes")
def node_type_as_string_plural(self):
    """
    Returns node type as string.
    """
    # Translated plural label per node type. The fallback label is
    # capitalized ("Unknown nodes") for consistency with its siblings —
    # the original lowercase "unknown nodes" was the odd one out.
    if self.node_type == NodeType.Mesh:
        return _("Mesh nodes")
    elif self.node_type == NodeType.Server:
        return _("Server nodes")
    elif self.node_type == NodeType.Test:
        return _("Test nodes")
    elif self.node_type == NodeType.Mobile:
        return _("Mobile nodes")
    else:
        return _("Unknown nodes")
twitter_api = twitter.Api(username=settings.TWITTER_USERNAME, settings.TWITTER_PASSWORD)
twitter_api = twitter.Api(username = settings.TWITTER_USERNAME, password = settings.TWITTER_PASSWORD)
def generate_new_node_tweet(node):
    """Tweet about a newly connected node (best effort, never raises).

    @param node: The node that has just come up
    """
    if not tweets_enabled():
        return

    try:
        bit_api = bitly.Api(login=settings.BITLY_LOGIN, apikey=settings.BITLY_API_KEY)
        # BUG FIX: the password was passed positionally after a keyword
        # argument (a SyntaxError); it must be passed as a keyword too.
        twitter_api = twitter.Api(username=settings.TWITTER_USERNAME,
                                  password=settings.TWITTER_PASSWORD)
        node_link = bit_api.shorten(node.get_url())
        msg = "A new node %s has just connected to the mesh %s" % (node.name, node_link)
        twitter_api.PostUpdate(msg)
    except:
        # Tweeting is strictly best-effort; log the traceback and carry on.
        logging.warning(format_exc())
if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi
try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' 
% (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000 except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except 
Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check 
mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages 
and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
l = hna.setdefault(ip, []) l.append('%s/32' % alias)
for x in alias: l = hna.setdefault(ip, []) l.append('%s/32' % x)
def parse_tables(data):
    """
    Parses the OLSR routing tables.
    """
    nodes = {}
    hna = {}
    in_table = False
    skip_header = False
    table_name = ''

    for raw in data.splitlines():
        row = raw.strip()

        # Start of one of the table sections we care about.
        if row[0:6] == 'Table:' and row[7:] in ('Topology', 'HNA', 'MID'):
            in_table = True
            skip_header = True
            table_name = row[7:]
            continue

        # The first row after a table marker is the column header.
        if in_table and skip_header:
            skip_header = False
            continue

        # A blank row terminates the current table.
        if in_table and not row:
            in_table = False
            table_name = ''
            continue

        if table_name == 'Topology':
            srcIp, dstIp, LQ, ILQ, ETX = row.split('\t')
            try:
                if not float(ETX):
                    continue
            except ValueError:
                # Newer OLSR versions can use INFINITE as ETX
                continue

            src_node = create_node(srcIp, nodes, hna)
            create_node(dstIp, nodes, hna)
            src_node.links.append((dstIp, LQ, ILQ, ETX))
        elif table_name == 'HNA':
            try:
                network, cidr, gwIp = row.split('\t')
            except ValueError:
                # Newer OLSR versions have changed the format
                network, gwIp = row.split('\t')
                network, cidr = network.split('/')
            hna.setdefault(gwIp, []).append('%s/%s' % (network, cidr))
        elif table_name == 'MID':
            # Treat MIDs as /32 HNAs
            ip, alias = row.split('\t')
            hna.setdefault(ip, []).append('%s/32' % alias)

    return nodes, hna
return item.get_full_url()
return item.node.get_full_url()
def item_link(self, item):
    """Return the permalink URL for the given feed item."""
    return item.get_full_url()
"LINE1:loss_def r'GPRINT:loss_def:LAST: Current\:%8.2lf', r'GPRINT:loss_def:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_def:MAX:Maximum\:%8.2lf\n', "LINE1:loss_100 r'GPRINT:loss_100:LAST: Current\:%8.2lf', r'GPRINT:loss_100:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_100:MAX:Maximum\:%8.2lf\n', "LINE1:loss_500 r'GPRINT:loss_500:LAST: Current\:%8.2lf', r'GPRINT:loss_500:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_500:MAX:Maximum\:%8.2lf\n', "LINE1:loss_1000 r'GPRINT:loss_1000:LAST:Current\:%8.2lf', r'GPRINT:loss_1000:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_1000:MAX:Maximum\:%8.2lf\n', "LINE1:loss_1480 r'GPRINT:loss_1480:LAST:Current\:%8.2lf', r'GPRINT:loss_1480:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_1480:MAX:Maximum\:%8.2lf\n', '--alt-y-grid', '--units-exponent', '0',
"CDEF:nloss_def=loss_def,100,/", "CDEF:nloss_100=loss_100,100,/", "CDEF:nloss_500=loss_500,100,/", "CDEF:nloss_1000=loss_1000,100,/", "CDEF:nloss_1480=loss_1480,100,/", "LINE1:nloss_def r'GPRINT:nloss_def:LAST: Current\:%8.2lf', r'GPRINT:nloss_def:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_def:MAX:Maximum\:%8.2lf\n', "LINE1:nloss_100 r'GPRINT:nloss_100:LAST: Current\:%8.2lf', r'GPRINT:nloss_100:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_100:MAX:Maximum\:%8.2lf\n', "LINE1:nloss_500 r'GPRINT:nloss_500:LAST: Current\:%8.2lf', r'GPRINT:nloss_500:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_500:MAX:Maximum\:%8.2lf\n', "LINE1:nloss_1000 r'GPRINT:nloss_1000:LAST:Current\:%8.2lf', r'GPRINT:nloss_1000:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_1000:MAX:Maximum\:%8.2lf\n', "LINE1:nloss_1480 r'GPRINT:nloss_1480:LAST:Current\:%8.2lf', r'GPRINT:nloss_1480:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_1480:MAX:Maximum\:%8.2lf\n',
def __str__(self):
    """Serialize this data source as an RRDtool DS definition string."""
    # DS:<name>:<type>:<heartbeat>:<min>:<max> with unbounded (U) min/max.
    fields = (self.name, self.type, self.heartbeat)
    return "DS:{0}:{1}:{2}:U:U".format(*fields)
'--upper-limit', '100'
'--upper-limit', '1'
def __str__(self):
    # RRDtool data-source definition: DS:<name>:<type>:<heartbeat>:<min>:<max>,
    # with unbounded (U) minimum and maximum values.
    return "DS:%s:%s:%s:U:U" % (self.name, self.type, self.heartbeat)
if len(wifiSubnet) and n.clients >= ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4:
if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4):
def process_node(node_ip, ping_results, is_duped, peers): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in peers: lq_avg += float(peer[1]) ilq_avg += float(peer[2]) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average 
Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.src.all(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile if not n.profile or n.profile.use_captive_portal: 
NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check for VPN statistics if 'vpn' in info: n.vpn_mac = info['vpn']['mac'] # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients >= ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4: Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic 
statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state'], 1), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or 
{} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning(format_exc()) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
last_updated = graph.last_update,
last_update = graph.last_update,
def draw_graph(graph_id, timespan):
    """
    Draws the specified graph.

    @param graph_id: Graph primary key (positive = DB graph, non-positive =
                     one of the hardcoded GLOBAL_GRAPHS entries)
    @param timespan: Timespan to draw the graph for
    @return: True on success, False on failure
    """
    # NOTE(review): `draw_graph.get_logger()` implies this is a task object
    # (e.g. a celery task) — the decorator is outside this view; confirm.
    logger = draw_graph.get_logger()

    # First check that we haven't drawn this graph already
    result = cache.get('nodewatcher.graphs.drawn.{0}.{1}'.format(graph_id, timespan))
    if result is not None:
        return bool(result)

    # Since the graph has not yet been drawn, let's draw it
    try:
        graph_id = int(graph_id)

        # XXX Check for hardcoded graphs
        if graph_id > 0:
            graph = nodes_models.GraphItem.objects.get(pk = graph_id)
            archive_path = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', graph.rra))

            # Actually draw the graph.
            # NOTE(review): the keyword `last_updated` differs from the model
            # field name `last_update` used elsewhere — verify against
            # rrd.RRA.graph's signature that this spelling is correct.
            rrd.RRA.graph(
                graphs.RRA_CONF_MAP[graph.type],
                str(graph.title),
                graph.id,
                archive_path,
                end_time = int(time.mktime(graph.last_update.timetuple())),
                dead = graph.dead,
                last_updated = graph.last_update,
                timespan = timespan
            )
        else:
            # XXX One of the hardcoded graphs
            conf, title, rrd_path = GLOBAL_GRAPHS[graph_id]
            archive_path = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', rrd_path))

            # Actually draw the graph
            rrd.RRA.graph(conf, title, graph_id, archive_path, timespan = timespan)

        result = True
    except:
        # Any failure is logged and reported as an unsuccessful draw rather
        # than propagated to the caller.
        logger.error(traceback.format_exc())
        result = False

    # Mark the graph as drawn — both success and failure are cached so the
    # same graph is not retried within the cache window.
    cache.set('nodewatcher.graphs.drawn.{0}.{1}'.format(graph_id, timespan), result)
    return result
upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000
offset = -3 unit = 1000 if 'Kbit' in info['net']['vpn']['upload_limit']: offset = -4 unit = 1 upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' 
% (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000 except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except 
Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check 
mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = 
nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
NodeWarning.create(n, WarningCode.AnnounceConflict, EventSource.Monitor)
NodeWarning.create(n, WarningCode.UnregisteredAnnounce, EventSource.Monitor)
def check_mesh_status(): """ Performs a mesh status check. """ # Initialize the state of nodes and subnets, remove out of date ap clients and graph items Node.objects.all().update(visible = False) Subnet.objects.all().update(visible = False) APClient.objects.filter(last_update__lt = datetime.now() - timedelta(minutes = 11)).delete() GraphItem.objects.filter(last_update__lt = datetime.now() - timedelta(days = 30)).delete() # Reset some states NodeWarning.objects.all().update(source = EventSource.Monitor, dirty = False) Node.objects.all().update(warnings = False, conflicting_subnets = False) Link.objects.all().delete() # Fetch routing tables from OLSR try: nodes, hna = wifi_utils.get_tables(settings.MONITOR_OLSR_HOST) except TypeError: logging.error("Unable to fetch routing tables from '%s'!" % settings.MONITOR_OLSR_HOST) return # Ping nodes present in the database and visible in OLSR dbNodes = {} nodesToPing = [] for nodeIp in nodes.keys(): try: # Try to get the node from the database n = Node.get_exclusive(ip = nodeIp) n.visible = True n.peers = len(nodes[nodeIp].links) # If we have succeeded, add to list (if not invalid) if not n.is_invalid(): if n.awaiting_renumber: # Reset any status from awaiting renumber to invalid for notice in n.renumber_notices.all(): try: rn = Node.objects.get(ip = notice.original_ip) if rn.status == NodeStatus.AwaitingRenumber: rn.status = NodeStatus.Invalid rn.node_type = NodeType.Unknown rn.awaiting_renumber = False rn.save() except Node.DoesNotExist: pass notice.delete() n.awaiting_renumber = False n.save() nodesToPing.append(nodeIp) else: n.last_seen = datetime.now() n.peers = len(nodes[nodeIp].links) # Create a warning since node is not registered NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor) n.save() dbNodes[nodeIp] = n except Node.DoesNotExist: # Node does not exist, create an invalid entry for it n = Node(ip = nodeIp, status = NodeStatus.Invalid, last_seen = datetime.now()) n.visible = True n.node_type = 
NodeType.Unknown n.peers = len(nodes[nodeIp].links) # Check if there are any renumber notices for this IP address try: notice = RenumberNotice.objects.get(original_ip = nodeIp) n.status = NodeStatus.AwaitingRenumber n.node_type = notice.node.node_type n.awaiting_renumber = True except RenumberNotice.DoesNotExist: pass n.save(force_insert = True) dbNodes[nodeIp] = n # Create an event and append a warning since an unknown node has appeared NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor) Event.create_event(n, EventCode.UnknownNodeAppeared, '', EventSource.Monitor) # Add a warning to all nodes that have been stuck in renumbering state for over a week for node in Node.objects.filter(renumber_notices__renumbered_at__lt = datetime.now() - timedelta(days = 7)): NodeWarning.create(node, WarningCode.LongRenumber, EventSource.Monitor) node.save() # Mark invisible nodes as down for node in Node.objects.exclude(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber)): oldStatus = node.status if node.ip not in dbNodes: if node.status == NodeStatus.New: node.status = NodeStatus.Pending elif node.status != NodeStatus.Pending: node.status = NodeStatus.Down node.save() if oldStatus in (NodeStatus.Up, NodeStatus.Visible, NodeStatus.Duped) and node.status == NodeStatus.Down: Event.create_event(node, EventCode.NodeDown, '', EventSource.Monitor) # Invalidate uptime credit for this node node.uptime_last = None node.save() # Setup all node peerings for nodeIp, node in nodes.iteritems(): n = dbNodes[nodeIp] oldRedundancyLink = n.redundancy_link n.redundancy_link = False for peerIp, lq, ilq, etx, vtime in node.links: l = Link(src = n, dst = dbNodes[peerIp], lq = float(lq), ilq = float(ilq), etx = float(etx), vtime = vtime) l.save() # Check if any of the peers has never peered with us before if n.is_adjacency_important() and l.dst.is_adjacency_important() and not n.peer_history.filter(pk = l.dst.pk).count(): n.peer_history.add(l.dst) Event.create_event(n, 
EventCode.AdjacencyEstablished, '', EventSource.Monitor, data = 'Peer node: %s' % l.dst, aggregate = False) Event.create_event(l.dst, EventCode.AdjacencyEstablished, '', EventSource.Monitor, data = 'Peer node: %s' % n, aggregate = False) # Check if we have a peering with any border routers if l.dst.border_router: n.redundancy_link = True if not n.is_invalid(): if oldRedundancyLink and not n.redundancy_link: Event.create_event(n, EventCode.RedundancyLoss, '', EventSource.Monitor) elif not oldRedundancyLink and n.redundancy_link: Event.create_event(n, EventCode.RedundancyRestored, '', EventSource.Monitor) if n.redundancy_req and not n.redundancy_link: NodeWarning.create(n, WarningCode.NoBorderPeering, EventSource.Monitor) n.save() # Add nodes to topology map and generate output if not getattr(settings, 'MONITOR_DISABLE_GRAPHS', None): # Only generate topology when graphing is not disabled topology = DotTopologyPlotter() for node in dbNodes.values(): topology.addNode(node) topology.save(os.path.join(settings.GRAPH_DIR, 'mesh_topology.png'), os.path.join(settings.GRAPH_DIR, 'mesh_topology.dot')) # Update valid subnet status in the database for nodeIp, subnets in hna.iteritems(): if nodeIp not in dbNodes: continue for subnet in subnets: subnet, cidr = subnet.split("/") try: s = Subnet.objects.get(node__ip = nodeIp, subnet = subnet, cidr = int(cidr)) s.last_seen = datetime.now() s.visible = True if s.status == SubnetStatus.Subset: pass elif s.status in (SubnetStatus.AnnouncedOk, SubnetStatus.NotAnnounced): s.status = SubnetStatus.AnnouncedOk elif not s.node.border_router or s.status == SubnetStatus.Hijacked: NodeWarning.create(s.node, WarningCode.UnregisteredAnnounce, EventSource.Monitor) s.node.save() # Recheck if this is a more specific prefix announce for an allocated prefix if s.status == SubnetStatus.NotAllocated and s.is_more_specific(): s.status = SubnetStatus.Subset s.save() except Subnet.DoesNotExist: # Subnet does not exist, prepare one s = Subnet(node = 
dbNodes[nodeIp], subnet = subnet, cidr = int(cidr), last_seen = datetime.now()) s.visible = True # Check if this is a more specific prefix announce for an allocated prefix if s.is_more_specific(): s.status = SubnetStatus.Subset else: s.status = SubnetStatus.NotAllocated s.save() # Check if this is a hijack n = dbNodes[nodeIp] try: origin = Subnet.objects.ip_filter( # Subnet overlaps with another one ip_subnet__contains = '%s/%s' % (subnet, cidr) ).exclude( # Of another node (= filter all subnets belonging to current node) node = s.node ).get( # That is allocated and visible allocated = True, visible = True ) s.status = SubnetStatus.Hijacked s.save() # Generate an event Event.create_event(n, EventCode.SubnetHijacked, '', EventSource.Monitor, data = 'Subnet: %s/%s\n Allocated to: %s' % (s.subnet, s.cidr, origin.node)) except Subnet.DoesNotExist: pass # Flag node entry with warnings flag (if not a border router) if s.status != SubnetStatus.Subset and (not n.border_router or s.status == SubnetStatus.Hijacked): NodeWarning.create(n, WarningCode.AnnounceConflict, EventSource.Monitor) n.save() # Detect subnets that cause conflicts and raise warning flags for all involved # nodes if s.is_conflicting(): NodeWarning.create(s.node, WarningCode.AnnounceConflict, EventSource.Monitor) s.node.conflicting_subnets = True s.node.save() for cs in s.get_conflicting_subnets(): NodeWarning.create(cs.node, WarningCode.AnnounceConflict, EventSource.Monitor) cs.node.conflicting_subnets = True cs.node.save() # Remove (or change their status) subnets that are not visible Subnet.objects.filter(status__in = (SubnetStatus.NotAllocated, SubnetStatus.Subset), visible = False).delete() Subnet.objects.filter(status = SubnetStatus.AnnouncedOk, visible = False).update(status = SubnetStatus.NotAnnounced) for subnet in Subnet.objects.filter(status = SubnetStatus.NotAnnounced, node__visible = True): NodeWarning.create(subnet.node, WarningCode.OwnNotAnnounced, EventSource.Monitor) subnet.node.save() # 
Remove subnets that were hijacked but are not visible anymore for s in Subnet.objects.filter(status = SubnetStatus.Hijacked, visible = False): Event.create_event(s.node, EventCode.SubnetRestored, '', EventSource.Monitor, data = 'Subnet: %s/%s' % (s.subnet, s.cidr)) s.delete() # Remove invisible unknown nodes for node in Node.objects.filter(status = NodeStatus.Invalid, visible = False).all(): # Create an event since an unknown node has disappeared Event.create_event(node, EventCode.UnknownNodeDisappeared, '', EventSource.Monitor) Node.objects.filter(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber), visible = False).delete() # Ping the nodes to prepare information for later node processing varsize_results = {} results, dupes = wifi_utils.ping_hosts(10, nodesToPing) for packet_size in (100, 500, 1000, 1480): r, d = wifi_utils.ping_hosts(10, nodesToPing, packet_size - 8) for node_ip in nodesToPing: varsize_results.setdefault(node_ip, []).append(r[node_ip][3] if node_ip in r else None) if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None): # Multiprocessing is disabled (the MONITOR_DISABLE_MULTIPROCESSING option is usually # used for debug purpuses where a single process is prefered) for node_ip in nodesToPing: process_node(node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip)) # Commit the transaction here since we do everything in the same session transaction.commit() else: # We MUST commit the current transaction here, because we will be processing # some transactions in parallel and must ensure that this transaction that has # modified the nodes is commited. Otherwise this will deadlock! 
transaction.commit() worker_results = [] for node_ip in nodesToPing: worker_results.append( WORKER_POOL.apply_async(process_node, (node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip))) ) # Wait for all workers to finish processing objects = {} for result in worker_results: try: k, v = result.get() objects[k] = v except Exception, e: logging.warning(format_exc()) # When GC debugging is enabled make some additional computations if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): global _MAX_GC_OBJCOUNT objcount = sum(objects.values()) if '_MAX_GC_OBJCOUNT' not in globals(): _MAX_GC_OBJCOUNT = objcount logging.debug("GC object count: %d %s" % (objcount, "!M" if objcount > _MAX_GC_OBJCOUNT else "")) _MAX_GC_OBJCOUNT = max(_MAX_GC_OBJCOUNT, objcount) # Cleanup all out of date warnings NodeWarning.clear_obsolete_warnings(EventSource.Monitor)
self.fields['prefix_%s' % subnet.pk] = forms.IntegerField()
self.fields['prefix_%s' % subnet.pk] = forms.IntegerField(required = False, initial = 27)
def __init__(self, user, node, *args, **kwargs): """ Class constructor. """ super(RenumberForm, self).__init__(*args, **kwargs) self.__node = node # Use renumber with subnet only when this is possible self.fields['primary_ip'] = forms.ChoiceField( choices = [ (RenumberAction.SetManually, _("Set manually")) ], initial = RenumberAction.SetManually ) if node.is_primary_ip_in_subnet(): self.fields['primary_ip'].choices.insert(0, (RenumberAction.Renumber, _("Renumber with subnet")) ) self.fields['primary_ip'].initial = RenumberAction.Renumber else: self.fields['primary_ip'].choices.insert(0, (RenumberAction.Keep, _("Keep")), ) self.fields['primary_ip'].initial = RenumberAction.Keep if not user.is_staff: del self.fields['primary_ip'].choices[1] # Setup dynamic form fields, depending on how may subnets a node has primary = node.subnet_set.ip_filter(ip_subnet__contains = "%s/32" % node.ip).filter(allocated = True).exclude(cidr = 0) for subnet in node.subnet_set.filter(allocated = True).order_by('ip_subnet'): pools = [] for pool in node.project.pools.exclude(status = PoolStatus.Full).order_by('network'): pools.append((pool.pk, _("Renumber to %s [%s/%s]") % (pool.description, pool.network, pool.cidr))) choices = [ (RenumberAction.Keep, _("Keep")), (RenumberAction.Remove, _("Remove")) ] # Primary subnets should not be removed if primary and primary[0] == subnet: del choices[1] self.fields['subnet_%s' % subnet.pk] = forms.ChoiceField( choices = choices + pools, initial = RenumberAction.Keep, widget = forms.Select(attrs = { 'class' : 'subnet' }) ) # Field for choosing new subnet prefix size self.fields['prefix_%s' % subnet.pk] = forms.IntegerField()
return _("If this is not intentional, you are using an old firmware version or it is a bug. In the later case please report it. If it is intentional, please get in contact with network administrators to arrange a configuration option in the firmware for it.")
return _("If this is not intentional, it is a bug. Please report it. If it is intentional, please get in contact with network administrators to arrange a configuration option in the firmware for it.")
def to_help_string(code): """ A helper method for transforming a warning code to a human readable help string.
for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None
def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: 
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in peers: lq_avg += float(peer[1]) ilq_avg += float(peer[2]) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.src.all(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) if n.has_time_sync_problems(): NodeWarning.create(n, 
WarningCode.TimeOutOfSync, EventSource.Monitor) # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile if not n.profile or n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check for VPN statistics if 'vpn' in info: n.vpn_mac = info['vpn']['mac'] # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 
'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], 
info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state'], 1), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning(format_exc()) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None
states.get(info['solar']['state'], 1),
states.get(info['solar']['state']),
def process_node(node_ip, ping_results, is_duped, peers, varsize_results):
  """
  Processes a single node: updates its status, records monitoring graphs and
  emits events/warnings for any state changes detected since the last pass.

  @param node_ip: Node's IP address
  @param ping_results: Results obtained from ICMP ECHO tests
  @param is_duped: True if duplicate echos received
  @param peers: Peering info from routing daemon
  @param varsize_results: Results of ICMP ECHO tests with variable payloads
  @return: A (pid, object count) tuple when GC debugging is enabled, (None, None) otherwise
  """
  transaction.set_dirty()

  try:
    n = Node.get_exclusive(ip = node_ip)
  except Node.DoesNotExist:
    # This might happen when we were in the middle of a renumbering and
    # did not yet have access to the node. Then after the node has been
    # renumbered we gain access, but the IP has been changed. In this
    # case we must ignore processing of this node.
    return

  oldStatus = n.status

  # Determine node status
  if ping_results is not None:
    n.status = NodeStatus.Up
    n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results

    # Add RTT graph
    add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max)

    # Add uptime credit
    if n.uptime_last:
      n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds

    n.uptime_last = datetime.now()
  else:
    n.status = NodeStatus.Visible

  # Measure packet loss with different packet sizes and generate a graph
  if ping_results is not None and varsize_results is not None:
    losses = [n.pkt_loss] + varsize_results
    add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses)

  if is_duped:
    n.status = NodeStatus.Duped
    NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor)

  # Generate status change events
  if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible):
    if oldStatus in (NodeStatus.New, NodeStatus.Pending):
      n.first_seen = datetime.now()
      if n.node_type == NodeType.Mesh:
        generate_new_node_tweet(n)

    Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor)
  elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped:
    Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor)

  # Add olsr peer count graph
  add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers)

  # Add LQ/ILQ graphs
  if n.peers > 0:
    lq_avg = ilq_avg = 0.0
    for peer in peers:
      lq_avg += float(peer[1])
      ilq_avg += float(peer[2])

    lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers)

    for peer in n.src.all():
      add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph)

  n.last_seen = datetime.now()

  # Check if we have fetched nodewatcher data
  info = nodewatcher.fetch_node_info(node_ip)
  if info is not None and 'general' in info:
    try:
      oldUptime = n.uptime or 0
      oldChannel = n.channel or 0
      oldVersion = n.firmware_version
      n.firmware_version = info['general']['version']
      n.local_time = safe_date_convert(info['general']['local_time'])
      n.bssid = info['wifi']['bssid']
      n.essid = info['wifi']['essid']
      n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency'])
      n.clients = 0
      n.uptime = safe_uptime_convert(info['general']['uptime'])

      if 'uuid' in info['general']:
        n.reported_uuid = info['general']['uuid']
        if n.reported_uuid and n.reported_uuid != n.uuid:
          NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor)

      if oldVersion != n.firmware_version:
        Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version))

      if oldUptime > n.uptime:
        Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime))

      if oldChannel != n.channel and oldChannel != 0:
        Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel))

      if n.has_time_sync_problems():
        NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor)

      # Parse nodogsplash client information
      oldNdsStatus = n.captive_portal_status
      if 'nds' in info:
        if 'down' in info['nds'] and info['nds']['down'] == '1':
          n.captive_portal_status = False

          # Create a node warning when captive portal is down and the node has it
          # selected in its image generator profile
          if not n.profile or n.profile.use_captive_portal:
            NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor)
        else:
          n.captive_portal_status = True
          for cid, client in info['nds'].iteritems():
            if not cid.startswith('client'):
              continue

            try:
              c = APClient.objects.get(node = n, ip = client['ip'])
            except APClient.DoesNotExist:
              c = APClient(node = n)
              n.clients_so_far += 1

            n.clients += 1
            c.ip = client['ip']
            c.connected_at = safe_date_convert(client['added_at'])
            c.uploaded = safe_int_convert(client['up'])
            c.downloaded = safe_int_convert(client['down'])
            c.last_update = datetime.now()
            c.save()
      else:
        n.captive_portal_status = True

      # Check for captive portal status change
      if oldNdsStatus and not n.captive_portal_status:
        Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor)
      elif not oldNdsStatus and n.captive_portal_status:
        Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor)

      # Generate a graph for number of wifi cells
      if 'cells' in info['wifi']:
        add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0)

      # Update node's MAC address on wifi iface
      if 'mac' in info['wifi']:
        n.wifi_mac = info['wifi']['mac']

      # Update node's RTS and fragmentation thresholds
      if 'rts' in info['wifi'] and 'frag' in info['wifi']:
        n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347
        n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347

      # Check for VPN statistics
      if 'vpn' in info:
        n.vpn_mac = info['vpn']['mac']

      # Generate a graph for number of clients
      add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients)

      # Check for IP shortage
      wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True)
      if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4):
        Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients))

      # Record interface traffic statistics for all interfaces
      for iid, iface in info['iface'].iteritems():
        if iid not in ('wifi0', 'wmaster0'):
          # Check mappings for known wifi interfaces so we can handle hardware changes while
          # the node is up and not generate useless intermediate graphs
          if n.profile:
            iface_wifi = n.profile.template.iface_wifi
            if Template.objects.filter(iface_wifi = iid).count() >= 1:
              iid = iface_wifi

          add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down'])

      # Generate load average statistics
      if 'loadavg' in info['general']:
        n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg'])
        add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min)
        add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc)

      # Generate free memory statistics
      if 'memfree' in info['general']:
        n.memfree = safe_int_convert(info['general']['memfree'])
        buffers = safe_int_convert(info['general'].get('buffers', 0))
        cached = safe_int_convert(info['general'].get('cached', 0))
        add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached)

      # Generate solar statistics when available
      if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]):
        states = {
          'boost'      : 1,
          'equalize'   : 2,
          'absorption' : 3,
          'float'      : 4
        }

        # No default for unknown states: recording an unrecognized state as
        # 'boost' (1) would silently falsify the graph; None is stored as an
        # unknown sample instead
        add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar',
          info['solar']['batvoltage'],
          info['solar']['solvoltage'],
          info['solar']['charge'],
          states.get(info['solar']['state']),
          info['solar']['load']
        )

      # Check for installed package versions (every hour)
      try:
        last_pkg_update = n.installedpackage_set.all()[0].last_update
      except:
        last_pkg_update = None

      if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1):
        packages = nodewatcher.fetch_installed_packages(n.ip) or {}

        # Remove removed packages and update existing package versions
        for package in n.installedpackage_set.all():
          if package.name not in packages:
            package.delete()
          else:
            package.version = packages[package.name]
            package.last_update = datetime.now()
            package.save()
            del packages[package.name]

        # Add added packages
        for packageName, version in packages.iteritems():
          package = InstalledPackage(node = n)
          package.name = packageName
          package.version = version
          package.last_update = datetime.now()
          package.save()

      # Check if DNS works
      if 'dns' in info:
        old_dns_works = n.dns_works
        n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0'
        if not n.dns_works:
          NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor)

        if old_dns_works != n.dns_works:
          # Generate a proper event when the state changes
          if n.dns_works:
            Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor)
          else:
            Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor)
    except:
      # Best effort: a malformed nodewatcher feed must not abort node processing
      logging.warning(format_exc())

  n.save()

  # When GC debugging is enabled perform some more work
  if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
    gc.collect()
    return os.getpid(), len(gc.get_objects())

  return None, None
result = "%s-%s-%s%s-%s%s" % (d['hostname'], d['router_name'], version, ("-%s" % type if type else ""), filechecksum, ext)
router_name = d['router_name'].replace('-', '') result = "%s-%s-%s%s-%s%s" % (d['hostname'], router_name, version, ("-%s" % type if type else "-all"), filechecksum, ext)
def generate_image(d):
  """
  Generates a firmware image (or just its configuration archive) according
  to the given configuration and e-mails the download links to the user.

  @param d: A dictionary describing the requested image (router type,
            network parameters, optional packages, ...)
  @raise Exception: When an unknown imagebuilder is specified
  """
  logging.debug(repr(d))

  if d['imagebuilder'] not in IMAGEBUILDERS:
    raise Exception("Invalid imagebuilder specified!")

  x = OpenWrtConfig()
  x.setUUID(d['uuid'])
  x.setOpenwrtVersion(d['openwrt_ver'])
  x.setArch(d['arch'])
  x.setPortLayout(d['port_layout'])
  x.setWifiIface(d['iface_wifi'], d['driver'], d['channel'])
  x.setWifiAnt(d['rx_ant'], d['tx_ant'])
  x.setLanIface(d['iface_lan'])
  x.setNodeType("adhoc")
  x.setPassword(d['root_pass'])
  x.setHostname(d['hostname'])
  x.setIp(d['ip'])
  x.setSSID(d['ssid'])

  # Add WAN interface and all subnets
  if d['wan_dhcp']:
    x.addInterface("wan", d['iface_wan'], init = True)
  else:
    x.addInterface("wan", d['iface_wan'], d['wan_ip'], d['wan_cidr'], d['wan_gw'], init = True)

  for subnet in d['subnets']:
    x.addSubnet(str(subnet['iface']), str(subnet['network']), subnet['cidr'], subnet['dhcp'], True)

  x.setCaptivePortal(d['captive_portal'])
  if d['vpn']:
    x.setVpn(d['vpn_username'], d['vpn_password'], d['vpn_mac'], d['vpn_limit'])

  if d['lan_wifi_bridge']:
    x.enableLanWifiBridge()

  if d['lan_wan_switch']:
    x.switchWanToLan()

  # Add optional packages
  for package in d['opt_pkg']:
    x.addPackage(package)

  # Cleanup stuff from previous builds
  os.chdir(WORKDIR)
  os.system("rm -rf build/files/*")
  os.system("rm -rf build/%s/bin/*" % d['imagebuilder'])
  os.mkdir("build/files/etc")
  x.generate("build/files/etc")

  if d['only_config']:
    # Just pack configuration and send it
    prefix = hashlib.md5(os.urandom(32)).hexdigest()[0:16]
    temp_path = os.path.join(DESTINATION, prefix + "-config.zip")
    archive = ZipFile(temp_path, 'w', ZIP_DEFLATED)
    os.chdir('build/files')
    for root, dirs, files in os.walk("etc"):
      for cfg_file in files:
        archive.write(os.path.join(root, cfg_file))
    archive.close()

    # Generate checksum; open in binary mode so the digest is byte-accurate
    f = open(temp_path, 'rb')
    checksum = hashlib.md5(f.read())
    f.close()

    # We can take just first 22 characters as checksums are fixed size and we can reconstruct it
    filechecksum = urlsafe_b64encode(checksum.digest())[:22]
    checksum = checksum.hexdigest()

    result = "%s-%s-config-%s.zip" % (d['hostname'], d['router_name'], filechecksum)
    destination = os.path.join(DESTINATION, result)
    os.rename(temp_path, destination)

    # Send an e-mail
    t = loader.get_template('generator/email_config.txt')
    c = Context({
      'hostname'  : d['hostname'],
      'ip'        : d['ip'],
      'username'  : d['vpn_username'],
      'config'    : result,
      'checksum'  : checksum,
      'network'   : { 'name'        : settings.NETWORK_NAME,
                      'home'        : settings.NETWORK_HOME,
                      'contact'     : settings.NETWORK_CONTACT,
                      'description' : getattr(settings, 'NETWORK_DESCRIPTION', None)
                    },
      'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None)
    })

    send_mail(
      settings.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])),
      t.render(c),
      settings.EMAIL_IMAGE_GENERATOR_SENDER,
      [d['email']],
      fail_silently = False
    )
  else:
    # Generate full image
    x.build("build/%s" % d['imagebuilder'])

    # Read image version
    try:
      f = open(glob('%s/build/%s/build_dir/target-*/root-*/etc/version' % (WORKDIR, d['imagebuilder']))[0], 'r')
      version = f.read().strip().replace('.', '_')
      f.close()
    except:
      version = 'unknown'

    # Get resulting image
    files = []
    for img_file, img_type in d['imagefiles']:
      img_file = str(img_file)
      source = "%s/build/%s/bin/%s" % (WORKDIR, d['imagebuilder'], img_file)

      # Hash the image in binary mode so the digest is byte-accurate
      f = open(source, 'rb')
      checksum = hashlib.md5(f.read())
      f.close()

      # We can take just first 22 characters as checksums are fixed size and we can reconstruct it
      filechecksum = urlsafe_b64encode(checksum.digest())[:22]
      checksum = checksum.hexdigest()
      ext = os.path.splitext(img_file)[1]

      # Strip dashes from the router name so the dash-separated filename
      # fields remain unambiguous, and always emit a type marker ('-all'
      # when the image has no specific type)
      router_name = d['router_name'].replace('-', '')
      result = "%s-%s-%s%s-%s%s" % (d['hostname'], router_name, version, ("-%s" % img_type if img_type else "-all"), filechecksum, ext)

      destination = os.path.join(DESTINATION, result)
      os.rename(source, destination)
      files.append({ 'name' : result, 'checksum' : checksum })

    # Send an e-mail
    t = loader.get_template('generator/email.txt')
    c = Context({
      'hostname'  : d['hostname'],
      'ip'        : d['ip'],
      'username'  : d['vpn_username'],
      'files'     : files,
      'network'   : { 'name'        : settings.NETWORK_NAME,
                      'home'        : settings.NETWORK_HOME,
                      'contact'     : settings.NETWORK_CONTACT,
                      'description' : getattr(settings, 'NETWORK_DESCRIPTION', None)
                    },
      'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None)
    })

    send_mail(
      settings.EMAIL_SUBJECT_PREFIX + (_("Router images for %s/%s") % (d['hostname'], d['ip'])),
      t.render(c),
      settings.EMAIL_IMAGE_GENERATOR_SENDER,
      [d['email']],
      fail_silently = False
    )
raise TemplateSyntaxError("'%s' tag expected format is 'as name'" % args[0])
raise template.TemplateSyntaxError("'%s' tag expected format is 'as name'" % args[0])
def setcontext(parser, token):
  """
  Sets (updates) current template context with the rendered output of the
  block inside tags.

  Expected tag format: {% setcontext as name %} ... {% endsetcontext %}

  @param parser: Template parser instance
  @param token: Tag token being parsed
  @raise template.TemplateSyntaxError: When the tag arguments are malformed
  """
  nodelist = parser.parse(('endsetcontext',))
  args = list(token.split_contents())

  if len(args) != 3 or args[1] != "as":
    # Qualify via the template module; the bare TemplateSyntaxError name is
    # not imported in this module
    raise template.TemplateSyntaxError("'%s' tag expected format is 'as name'" % args[0])

  variable = args[2]
  parser.delete_first_token()
  return SetContextNode(nodelist, variable)
raise TemplateSyntaxError("'%s' tag requires at most two arguments" % args[0])
raise template.TemplateSyntaxError("'%s' tag requires at most two arguments" % args[0])
def notice(parser, token):
  """
  Renders notice.

  Expected tag format: {% notice [type [classes]] %} ... {% endnotice %}

  @param parser: Template parser instance
  @param token: Tag token being parsed
  @raise template.TemplateSyntaxError: When more than two arguments are given
  """
  nodelist = parser.parse(('endnotice',))
  args = list(token.split_contents())

  if len(args) > 3:
    # Qualify via the template module; the bare TemplateSyntaxError name is
    # not imported in this module
    raise template.TemplateSyntaxError("'%s' tag requires at most two arguments" % args[0])

  # Optional arguments default to an empty quoted string so they compile as filters
  classes = args[2] if len(args) > 2 else '""'
  notice_type = args[1] if len(args) > 1 else '""'
  parser.delete_first_token()

  notice_type = parser.compile_filter(notice_type)
  classes = parser.compile_filter(classes)
  return NoticeNode(nodelist, notice_type, classes)
form = InfoStickerForm({
form = InfoStickerForm(initial = {
def sticker(request):
  """
  Displays a form for generating an info sticker.

  @param request: The HTTP request
  @return: A redirect to the generated sticker on success, otherwise the
           rendered sticker form
  """
  user = UserAccount.for_user(request.user)
  # We want disabled error to show only after POST (to be same as image generation behavior)
  disabled = False

  if request.method == 'POST':
    form = InfoStickerForm(request.POST)
    if form.is_valid():
      if getattr(settings, 'STICKERS_ENABLED', None):
        return HttpResponseRedirect(form.save(user))
      else:
        disabled = True
  else:
    # Prefill via initial=, not as form data: passing the dict positionally
    # would make the form bound and run validation (showing errors) before
    # the user has submitted anything
    form = InfoStickerForm(initial = {
      'name'    : user.name,
      'phone'   : user.phone,
      'project' : user.project.id if user.project else 0
    })

  return render_to_response('nodes/sticker.html',
    { 'form' : form,
      'stickers_disabled' : disabled },
    context_instance = RequestContext(request)
  )