Tuesday 14 February 2017

Monitoring network links by sending ping probe to both interfaces

Today I faced a scenario where I needed to monitor a link without the help of SNMP (Simple Network Management Protocol). To solve the problem, I wrote a Python script; the details of the script can be found at the following link:

https://github.com/lkpatel/link_monitoring_using_ping/blob/master/monitor_links.py

Please note that SNMP is still the better choice, as it also gives link performance statistics.

Tuesday 7 February 2017

To check if a HTTP web proxy is working

Some time back I wrote a useful Python script to check whether a web proxy is working. Save the following code in a file named check_proxy.py:

#!/usr/bin/python

#This code check which proxies are working in a given list of proxies
#you need to provide proxy usernamd and password


import argparse
import requests
parser = argparse.ArgumentParser()
parser.add_argument('--username', help='Give your user name',required=True)
parser.add_argument('--password', help='Give your password',required=True)
args = vars(parser.parse_args())
bare_proxy_list = ['10.10.10.10','10.10.10.11',]
proxy_list = {}
error_guide ="""
General Error Code Information :-

1xx -- Informational
2xx -- Success
3xx -- Redirection
4xx -- client error
5xx -- Server error; 503 = Service unavailable, 504 = Gateway timeout
"""
print error_guide
for proxy in bare_proxy_list:
    my_proxy = "http://"+args['username']+":"+args['password']+"@"+proxy+":8080"
    proxy_list['http']=my_proxy
    proxy_list['https']=my_proxy
    try:
      result = requests.get("http://google.co.in", proxies=proxy_list)
      if result.status_code == 200:
         print proxy + " is working"
      else:
         print proxy + " is not working with response code = "+str(result.status_code)
    except IOError: pass

To run the above code type
python check_proxy.py   --username <username> --password <password>

Source :-
https://github.com/lkpatel/some-scripts.git


Monday 6 February 2017

Netflow version 9 collector in python

In my last post, I provided a code snippet showing how to write a NetFlow version 5 collector in Python. In this post, I will give a code snippet of a NetFlow version 9 collector. Please note that this code is not production ready; I wrote it only as a proof of concept.

import socket, struct
import threading,logging
from socket import inet_ntoa
from utils.enums import template_field
from utils.parse import parse

# Log destination (only used if the file-based basicConfig line is re-enabled).
LOG_FILENAME = 'log.out'
#logging.basicConfig(filename=LOG_FILENAME,format='%(levelname)s:%(message)s',level=logging.DEBUG,)
# Log to the console at DEBUG level.
logging.basicConfig(format='%(levelname)s:%(message)s',level=logging.DEBUG,)

# NetFlow v9 packet header is 20 bytes.
SIZE_OF_HEADER = 20

# Example of a learned template kept for reference:
#templates = [{"id":265,"data_length":48,"description":[{"field_type": 21, "field_length": 4}, {"field_type": 22, "field_length": 4}, {"field_type": 1, "field_length": 4}, {"field_type": 2, "field_length": 4}, {"field_type": 10, "field_length": 2}, {"field_type": 14, "field_length": 2}, {"field_type": 8, "field_length": 4}, {"field_type": 12, "field_length": 4}, {"field_type": 4, "field_length": 1}, {"field_type": 5, "field_length": 1}, {"field_type": 7, "field_length": 2}, {"field_type": 11, "field_length": 2}, {"field_type": 48, "field_length": 1}, {"field_type": 51, "field_length":1}, {"field_type": 15, "field_length": 4}, {"field_type": 13, "field_length": 1}, {"field_type": 9, "field_length": 1}, {"field_type": 6, "field_length": 1}, {"field_type": 61, "field_length": 1}, {"field_type": 17, "field_length": 2}, {"field_type": 16, "field_length": 2}]}]

# Cache of templates learned from exporters; each entry records the
# template id, per-field description, total record length and exporter IP.
templates=[]

# UDP socket listening for NetFlow v9 exports on port 9998.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 9998))

def processPacket(data, addr):
    """Parse one NetFlow v9 export packet received from ``addr``.

    Template flowsets (id 0) and option template flowsets (id 1) are
    cached in the module-level ``templates`` list, keyed by template id
    and exporter address.  Data flowsets (id > 255) are decoded using a
    previously cached template and logged.  Returns None.
    """
    (version, count) = struct.unpack('!HH', data[0:4])
    logging.debug("Version %s, count %s "%(version,count))
    if version != 9:
        logging.error("Not NetFlow v9!")
        return None
    uptime = socket.ntohl(struct.unpack('I', data[4:8])[0])
    epochseconds = socket.ntohl(struct.unpack('I', data[8:12])[0])
    logging.debug("Uptime %s , epochseconds %s "% (uptime, epochseconds))
    data = data[SIZE_OF_HEADER:]
    # Walk the flowsets one at a time; each starts with (id, length).
    while len(data) > 0:
        (flow_set_id, flow_set_length) = struct.unpack('!HH', data[0:4])
        logging.debug("flow_set_id %d, flow_set_length %d "%(flow_set_id,flow_set_length))
        my_data = data[4:flow_set_length]
        data = data[flow_set_length:]

        if flow_set_id == 0:
            # Data template found.
            template = {}
            (template_id, field_count) = struct.unpack('!HH', my_data[0:4])
            logging.debug("template_id %d, template_field_length %d "%(template_id,field_count))
            my_data = my_data[4:]
            template['id'] = template_id
            template['description'] = []
            template['data_length'] = 0
            template['address'] = addr[0]
            # Each field descriptor is 4 bytes: 2-byte type, 2-byte length.
            # (The original reused the loop-bound variable for the per-field
            # length; it only worked because xrange evaluates its bound once.)
            # NOTE(review): only the first template of the flowset is parsed;
            # a template flowset may carry several templates back to back.
            for i in xrange(0, field_count * 4, 4):
                template_element = {}
                template_element['field_type'] = parse(my_data[i:i+2], "INT", 2)
                field_length = parse(my_data[i+2:i+4], "INT", 2)
                template_element['field_length'] = field_length
                template['data_length'] += field_length
                template['description'].append(template_element)
            # Drop any stale template with the same id before re-adding.
            for temp in templates:
                if temp["id"] == template_id:
                    templates.remove(temp)
                    break
            templates.append(template)
            logging.debug(templates)

        if flow_set_id == 1:  # options template found.Lets add it to template list
            while len(my_data) > 6:
                template = {}
                (template_id, option_scope_length) = struct.unpack('!HH', my_data[0:4])
                logging.debug("option template_id %d, option_scope_length %d "%(template_id,option_scope_length))
                option_length = struct.unpack('!H', my_data[4:6])[0]
                my_data = my_data[6:]
                if template_id == 0 or option_scope_length > 0:
                    # Probably padding or a scoped option template;
                    # not handled right now.
                    my_data = my_data[option_scope_length:]
                    break
                else:
                    template['id'] = template_id
                    template['description'] = []
                    template['data_length'] = 0
                    template['address'] = addr[0]
                    for i in xrange(0, option_length, 4):
                        template_element = {}
                        template_element['field_type'] = parse(my_data[i:i+2], "INT", 2)
                        field_length = parse(my_data[i+2:i+4], "INT", 2)
                        template_element['field_length'] = field_length
                        template['data_length'] += field_length
                        template['description'].append(template_element)
                    for temp in templates:
                        if temp["id"] == template_id:
                            templates.remove(temp)
                            break
                    templates.append(template)
                    logging.debug(templates)
                my_data = my_data[option_length:]

        if flow_set_id > 255:
            # Data flowset: find a matching template learned from the
            # SAME exporter IP (ids are only unique per exporter).
            # (This block was mis-indented in the original and never ran.)
            my_template = None
            for template in templates:
                if flow_set_id == template["id"] and addr[0] == template['address']:
                    my_template = template
                    break
            if not my_template:
                logging.debug("No suitable template found")
            else:
                nf_data = []
                template_total_data_length = my_template['data_length']
                # Decode fixed-size records until the remainder is padding.
                while len(my_data) >= template_total_data_length:
                    for field in my_template['description']:
                        field_name = template_field[field['field_type']]['name']
                        field_type = template_field[field['field_type']]['data_type']
                        field_length = field['field_length']
                        if field_length == 0:
                            # Zero length means "use the type's default size".
                            field_length = template_field[field['field_type']]['default']
                        logging.debug("Data length = %d "%(field_length))
                        ext_data = parse(my_data[:field_length], field_type, field_length)
                        logging.debug ("%s : %s"%(field_name, ext_data))
                        nf_data.append({field_name: ext_data})
                        my_data = my_data[field_length:]
                logging.info(nf_data)


# Receive NetFlow datagrams forever; each packet is handled in its own thread.
while True:
    packet, sender = sock.recvfrom(1500)
    worker = threading.Thread(target=processPacket, args=(packet, sender))
    worker.start()



Netflow version 5 collector in python

I am going to provide brief code snippet of netflow version 5 collector in python.
In netflow version 5, the template of data is fixed. It simply means that you know what will come at what place. Netflow version 5 packet structure is as following:-

Packet header Structure

BytesContent labelBrief Description
0-1     versionNetFlow export format version number
2-3     countNumber of flows exported in this packet (1-30)
4-7     sys_uptimeCurrent time in milliseconds since the export device booted
8-11     unix_secsCurrent count of seconds since 0000 UTC 1970
12-15     unix_nsecsResidual nanoseconds since 0000 UTC 1970
16-19     flow_sequenceSequence counter of total flows seen
20     engine_typeType of flow-switching engine
21     engine_idSlot number of the flow-switching engine
22-23      sampling_interval       First two bits hold the sampling mode; remaining 14 bits hold value of sampling interval

Packet record format


Bytes    Content Label     Brief Description
0-3srcaddrSource IP address
4-7dstaddrDestination IP address
8-11nexthopIP address of next hop router
12-13inputSNMP index of input interface
14-15outputSNMP index of output interface
16-19dPktsPackets in the flow
20-23dOctetsTotal number of Layer 3 bytes in the packets of the flow
24-27firstSysUptime at start of flow
28-31lastSysUptime at the time the last packet of the flow was received
32-33srcportTCP/UDP source port number or equivalent
34-35dstportTCP/UDP destination port number or equivalent
36pad1Unused (zero) bytes
37tcp_flagsCumulative OR of TCP flags
38protIP protocol type (for example, TCP = 6; UDP = 17)
39tosIP type of service (ToS)
40-41src_asAutonomous system number of the source, either origin or peer
42-43dst_asAutonomous system number of the destination, either origin or peer
44src_maskSource address prefix mask bits
45dst_maskDestination address prefix mask bits
46-47pad2Unused (zero) bytes


A network device like a router, switch etc. can send NetFlow data to a NetFlow collector. NetFlow is a proprietary protocol of Cisco, but many other devices also support it. So if you have a Cisco router and you want to write your own NetFlow collector, you can do it in the following ways:-

1. Using standard python library
 
import socket, struct
import threading
from socket import inet_ntoa

# NetFlow v5 header is 24 bytes; each flow record is a fixed 48 bytes.
SIZE_OF_HEADER = 24
SIZE_OF_RECORD = 48

# UDP socket listening for NetFlow v5 exports on port 9999.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 9999))

def processPacket(buf,addr):
        (version, count) = struct.unpack('!HH',buf[0:4])
        if version != 5:
                print "Not NetFlow v5!"
                #continue

        # It's pretty unlikely you'll ever see more then 1000 records in a 1500 byte  UDP packet
        if count <= 0 or count >= 1000:
                print "Invalid count %s" % count
                #continue

        uptime = socket.ntohl(struct.unpack('I',buf[4:8])[0])
        epochseconds = socket.ntohl(struct.unpack('I',buf[8:12])[0])

        for i in range(0, count):
                try:
                        base = SIZE_OF_HEADER+(i*SIZE_OF_RECORD)

                        data = struct.unpack('!IIIIHH',buf[base+16:base+36])

                        nfdata = {}
                        nfdata['saddr'] = inet_ntoa(buf[base+0:base+4])
                        nfdata['daddr'] = inet_ntoa(buf[base+4:base+8])
                        nfdata['pcount'] = data[0]
                        nfdata['bcount'] = data[1]
                        nfdata['stime'] = data[2]
                        nfdata['etime'] = data[3]
                        nfdata['sport'] = data[4]
                        nfdata['dport'] = data[5]
                        nfdata['protocol'] = ord(buf[base+38])
                except:
                        continue

        # Do something with the netflow record..
        print "%s:%s -> %s:%s" % (nfdata['saddr'],nfdata['sport'],nfdata['daddr'],nfdata['dport'])


# Main loop: hand every received datagram to a worker thread for decoding.
while True:
    packet, sender = sock.recvfrom(1500)
    worker = threading.Thread(target=processPacket, args=(packet, sender))
    worker.start()
               


2. Using twisted library

     
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import socket
from socket import inet_ntoa
import struct
SIZE_OF_HEADER = 24
SIZE_OF_RECORD = 48


class CaptureNetflowPacket(DatagramProtocol):

    def datagramReceived(self, data, addr):
        #print("received %r from %s" % (data, addr))
        #self.transport.write(data, addr)
         (version, count) = struct.unpack('!HH',data[0:4])
         print "Version %s, count %s "%(version,count)
         if version != 5:
            print "Not NetFlow v5!"
         uptime = socket.ntohl(struct.unpack('I',data[4:8])[0])
         epochseconds = socket.ntohl(struct.unpack('I',data[8:12])[0])
         print "Uptime %s , epochseconds %s "% (uptime, epochseconds)
         for i in range(0, count):
                #try:
                base = SIZE_OF_HEADER+(i*SIZE_OF_RECORD)

                fdata = struct.unpack('!IIIIHH',data[base+16:base+36])

                nfdata = {}
                nfdata['saddr'] = inet_ntoa(data[base+0:base+4])
                nfdata['daddr'] = inet_ntoa(data[base+4:base+8])
                nfdata['pcount'] = fdata[0]
                nfdata['bcount'] = fdata[1]
                nfdata['stime'] = fdata[2]
                nfdata['etime'] = fdata[3]
                nfdata['sport'] = fdata[4]
                nfdata['dport'] = fdata[5]
                nfdata['protocol'] = ord(data[base+38])
                #except:
                        #continue
                        #print "Exception occured"

                # Do something with the netflow record..
                print "%s:%s -> %s:%s" % (nfdata['saddr'],nfdata['sport'],nfdata['daddr'],nfdata['dport'])
                print "%s : %s : %s " %(nfdata['pcount'],nfdata['bcount'],nfdata['protocol'])




# Start the collector: listen for NetFlow v5 exports on UDP port 9999
# and run the Twisted event loop until interrupted.
reactor.listenUDP(9999, CaptureNetflowPacket())
reactor.run()
    

Please note that collector is listening on port 9999 over UDP socket.
This code simply prints packet data on console but one can extend to save the packet data in database.