diff --git a/TODO b/TODO
index 1abae8f..ab1368c 100644
--- a/TODO
+++ b/TODO
@@ -1,3 +1,5 @@
Update the INSTALL file describing what has to be done, noting that ./util/install.sh is intrusive
Document that the dependencies can be installed with pip install -r requirements.txt
+
+Check whether watchdog and logrotate can be used
diff --git a/config/__init__.py b/config/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/config/vcflow.conf b/config/vcflow.conf
new file mode 100644
index 0000000..88f454f
--- /dev/null
+++ b/config/vcflow.conf
@@ -0,0 +1,15 @@
+{
+ "configuration_database": "/etc/vcflow/vcflow_configuration_database.conf",
+
+ "identification_database": "/tmp/.vcflow_identification_database.conf",
+
+ "hybrid_topology_discovery":{
+ "use_hybrid_topology_discovery": true,
+ "lldp_vlan_id": 0,
+ "lldp_vlan_pcp": 7
+ },
+
+ "l2vpn_vlan_circuits":{
+ "first_transport_vlan_id_of_circuits": 2
+ }
+}
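vcflow.conf is plain JSON, so it can be read with the standard library; a minimal sketch of how the keys above map to Python values (the installed path and key names come from this patch, the variable names are illustrative):

import json

with open('/etc/vcflow/vcflow.conf') as f:
    conf = json.load(f)

configuration_db = conf['configuration_database']
identification_db = conf['identification_database']
use_hybrid_discovery = conf['hybrid_topology_discovery']['use_hybrid_topology_discovery']
first_transport_vlan = conf['l2vpn_vlan_circuits']['first_transport_vlan_id_of_circuits']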
diff --git a/config/vcflow_configuration_database.conf b/config/vcflow_configuration_database.conf
new file mode 100644
index 0000000..0e4f03a
--- /dev/null
+++ b/config/vcflow_configuration_database.conf
@@ -0,0 +1,10 @@
+# This is the VCFlow circuits configuration file. This file provides
+# the description of each virtual circuit to be installed on the network
+# by VCFlow.
+#
+# Configuration data is parsed as follows:
+# DPID Endpoint 1, Interface Endpoint 1, VLAN ID Endpoint 1, DPID Endpoint 2, Interface Endpoint 2, VLAN ID Endpoint 2
+#
+# The items of each configuration entry are separated by single spaces. Example:
+#1 s1-eth1 10 2 s2-eth1 10
+
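Each non-comment line therefore carries six space-separated fields; a minimal parsing sketch (the function name parse_circuit_line and the returned field names are illustrative, not part of VCFlow):

def parse_circuit_line(line):
    # Skip comments and blank lines
    line = line.strip()
    if not line or line.startswith('#'):
        return None
    fields = line.split(' ')
    if len(fields) != 6:
        raise ValueError('expected 6 fields, got %d' % len(fields))
    dpid_1, iface_1, vlan_1, dpid_2, iface_2, vlan_2 = fields
    return {
        'dpid_1': int(dpid_1), 'iface_1': iface_1, 'vlan_1': int(vlan_1),
        'dpid_2': int(dpid_2), 'iface_2': iface_2, 'vlan_2': int(vlan_2),
    }

# Example: parse_circuit_line('1 s1-eth1 10 2 s2-eth1 10')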
diff --git a/requirements.txt b/requirements.txt
index 78ddf63..96133f0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
Flask>=0.10
networkx>=1.8.1
requests
-ryu>=3.6
diff --git a/spc/spf.py b/spc/spf.py
new file mode 100644
index 0000000..d539e36
--- /dev/null
+++ b/spc/spf.py
@@ -0,0 +1,181 @@
+import logging
+
+class portas:
+ def __init__(self, int_saida_src, int_entrada_dst, custo=1):
+ self.int_saida_src = int_saida_src #outgoing interface on the source switch
+ self.int_entrada_dst = int_entrada_dst #incoming interface on the destination switch
+ self.custo = custo # link cost
+
+ def __repr__(self):
+ return "[" + str(self.int_saida_src) + "," + str(self.int_entrada_dst) + "," + str(self.custo) + "]"
+
+
+class dados_dijkstra:
+ def __init__(self, precedente, estimativa, visitado):
+ self.precedente = precedente # preceding switch on the path (switch id)
+ self.estimativa = estimativa # cost estimate to reach this switch (integer)
+ self.visitado = visitado # whether this switch has already been processed ("sim"/"nao")
+
+ def __repr__(self):
+ return str(self.precedente) + "," + str(self.estimativa) + "," + str(self.visitado)
+
+
+class saida:
+ def __init__(self, switch, porta_saida, num_salto):
+ self.switch = switch
+ self.porta_saida = porta_saida
+ self.num_salto = num_salto
+
+ def __repr__(self):
+ return "salto: " + str(self.num_salto) + "," + " switch: " + str(self.switch) + "," + " porta_saida: " + str(self.porta_saida) + ";"
+
+
+class calculo_dijkstra: # Returns an object holding the routing table
+ def __init__(self, topologia):
+ self.tabela_dijkstra = {}
+ self.tabela_roteamento_completa = {}
+ verificador = []
+ prosseguimento = "sim"
+ self.logger = logging.getLogger('spf_calculation_application')
+ if not len(self.logger.handlers):
+ self._set_logger()
+
+ self.logger.info('Routing table calculation has been initiated.')
+ self.logger.info('Topology object is: %s.', str(topologia))
+
+ """Calcula a tabela dijkstra para todos os switches da topologia"""
+ """Para realizar o calculo tem que verificar se para cada switch dest de cada switch origem existe um switch origem (verifica se existe o par switch_origem:switch_destino e switch_destino_switch_origem)"""
+ for switch_origem in topologia.keys():
+ for switch_destino in topologia[switch_origem]:
+ if topologia.has_key(switch_destino):
+ if topologia[switch_destino].has_key(switch_origem):
+ verificador.append("ok")
+ else:
+ verificador.append("nok")
+ else:
+ verificador.append("nok")
+
+ for i in verificador:
+ if (i == "nok"):
+ self.logger.info('Proceeding could not be completed due to topology mismatch.')
+ prosseguimento = "nao"
+
+ if (prosseguimento == "sim"):
+ for switch in topologia.keys():
+ self.tabela_dijkstra[switch] = self.calcula(self.inicializacao(topologia), topologia, switch)
+
+ self.tabela_roteamento_completa = self.monta_tabela_roteamento(self.tabela_dijkstra, topologia)
+
+ def _set_logger(self):
+ self.logger.setLevel(logging.INFO)
+ self.logger.propagate = False
+ hdlr = logging.StreamHandler()
+ fmt_str = '[SPF][%(levelname)s] %(funcName)s | %(created)f | %(asctime)s: %(message)s'
+ hdlr.setFormatter(logging.Formatter(fmt_str))
+ self.logger.addHandler(hdlr)
+
+ def __repr__(self): # Defines how instances of this class are printed
+ return str(self.tabela_roteamento_completa)
+
+ def inicializacao(self, topologia): # initializes all vertices
+ # Creates a dictionary whose keys are topologia.keys() and whose values are dados_dijkstra objects
+ # This table is used by the Dijkstra computation (one table per topology)
+ tabela_dijkstra = {}
+ for switch in topologia.keys():
+ tabela_dijkstra[switch] = dados_dijkstra(None, float("inf"), "nao")
+
+ self.logger.debug('Routing table has been initiated. %s', tabela_dijkstra)
+ return tabela_dijkstra
+
+ def calcula(self, tabela_inicializada, topologia, switch_raiz):
+ # Dijkstra table computation
+ # Set the minimum-cost estimate of vertex s (the search root) to zero and all other estimates to infinity
+ # Mark the root as its own predecessor
+ tabela_inicializada[switch_raiz].estimativa = 0
+ tabela_inicializada[switch_raiz].precedente = switch_raiz
+
+ for switch_vizinho_raiz in topologia[switch_raiz].keys():
+ tabela_inicializada[switch_vizinho_raiz].estimativa = topologia[switch_raiz][switch_vizinho_raiz].custo # assign the link cost
+ tabela_inicializada[switch_vizinho_raiz].precedente = switch_raiz
+
+ """Marque a raiz como visitada"""
+ tabela_inicializada[switch_raiz].visitado = "sim"
+
+ switches_nao_visitados = 1
+ """Enquanto existirem vertices (switches) nao visitados"""
+ while switches_nao_visitados == 1:
+ """Escolha um vertice k ainda nao visitado cuja estimativa seja a menor dentre todos os vertices nao visitados"""
+ #Cria um dicionatio com as estimativas do switches vizinhos nao visitados
+ estimativas = {}
+ for switch in tabela_inicializada.keys():
+ if (tabela_inicializada[switch].visitado == "nao"):
+ estimativas[switch] = tabela_inicializada[switch].estimativa
+ # Reference: Stack Overflow, "get key with the least value from a dictionary"
+ switch_vizinho_menor_estimativa = min(estimativas, key=estimativas.get) #returns the switch with the smallest estimate
+
+ """Para todos os vizinhos de k"""
+ for switches_vizinhos_switch_menor_estimativa in topologia[switch_vizinho_menor_estimativa].keys():
+ """Some a estimativa do vertice k com o custo do arco que une k (switch_vizinho_menor_estimativa) a j(switches_vizinhos_switch_menor_estimativa)"""
+ nova_estimativa = tabela_inicializada[switch_vizinho_menor_estimativa].estimativa + topologia[switches_vizinhos_switch_menor_estimativa][switch_vizinho_menor_estimativa].custo
+ if ( nova_estimativa < tabela_inicializada[switches_vizinhos_switch_menor_estimativa].estimativa ):
+ tabela_inicializada[switches_vizinhos_switch_menor_estimativa].estimativa = nova_estimativa
+ tabela_inicializada[switches_vizinhos_switch_menor_estimativa].precedente = switch_vizinho_menor_estimativa
+
+ """Marque k (switch_vizinho_menor_estimativa) como visitado"""
+ tabela_inicializada[switch_vizinho_menor_estimativa].visitado = "sim"
+
+ """Enquanto existirem vertices (switches) nao visitados"""
+ switches_nao_visitados = 0
+ for switch in tabela_inicializada.keys():
+ if (tabela_inicializada[switch].visitado == "nao"):
+ switches_nao_visitados = 1
+
+ return tabela_inicializada
+
+ def monta_tabela_roteamento(self, tabela_todos_switches, topologia):
+ #Example input: {1: {1: {'visitado': 'sim', 'precedente': 1, 'estimativa': 0}, 2: {'visitado': 'sim', 'precedente': 1, 'estimativa': 1}, 3: {'visitado': 'nao', 'precedente': 2, 'estimativa': 2}, 4: {'visitado': 'sim', 'precedente': 1, 'estimativa': 1}, 5: {'visitado': 'sim', 'precedente': 1, 'estimativa': 1}}
+ #tabela_roteamento = {switch_origem:{switch_destino:[switch, porta_saida; switch, porta_saida; ultimo_switch, None]}}
+ tabela_roteamento = {}
+
+ # Check whether the topology is split into two or more partitions. If it is, return an empty routing table
+ for sw_ori in tabela_todos_switches.keys():
+ for sw_dest in tabela_todos_switches[sw_ori].keys():
+ if ( tabela_todos_switches[sw_ori][sw_dest].precedente is None ): # If any switch in tabela_dijkstra has no predecessor (i.e. it never entered the neighbour computation), the topology is disconnected, so stop here
+ return tabela_roteamento
+
+ for switch_origem in tabela_todos_switches.keys():
+ tabela_roteamento[switch_origem] = {}
+ for switch_destino in tabela_todos_switches[switch_origem].keys():
+ if ( switch_destino != switch_origem ):
+ #print "A topologia antes do encontra caminho e: ", topologia
+ #print "Antes do encontra caminho o switch origem e: ", switch_origem, "e o switch destino e: ", switch_destino
+ tabela_roteamento[switch_origem][switch_destino] = self.encontra_caminho(switch_origem, switch_destino, tabela_todos_switches, topologia)
+
+ return tabela_roteamento
+
+ def encontra_caminho(self, sw_origem, sw_destino, tabela_todos_switches, topologia):
+ """Encontra caminho entre um switch de origem e um destino atraves da topologia"""
+ #Exemplo do recebimento tabela_todos_switches {1: {1: {'visitado': 'sim', 'precedente': 1, 'estimativa': 0}, 2: {'visitado': 'sim', 'precedente': 1, 'estimativa': 1}, 3: {'visitado': 'nao', 'precedente': 2, 'estimativa': 2}, 4: {'visitado': 'sim', 'precedente': 1, 'estimativa': 1}, 5: {'visitado': 'sim', 'precedente': 1, 'estimativa': 1}}
+
+ caminho_switches = [] #List of switch dpids along the path in reverse order, from sw_destino back to sw_origem
+ salto_anterior = sw_destino
+ while ( salto_anterior != sw_origem ):
+ caminho_switches.append(salto_anterior)
+ salto_anterior = tabela_todos_switches[sw_origem][salto_anterior].precedente
+
+ caminho_switches.append(salto_anterior) # Append the source switch
+ caminho_switches = caminho_switches[::-1] # Reverse the list so the path runs from sw_origem to sw_destino
+
+ caminho = []
+ contador_saltos = 0
+ for posicao_switch_saida in range(len(caminho_switches)):
+ if ( caminho_switches[posicao_switch_saida] != sw_destino ):
+ interface_saida = topologia[caminho_switches[posicao_switch_saida]][caminho_switches[posicao_switch_saida+1]].int_saida_src
+
+ else:
+ interface_saida = None
+
+ caminho.append(saida(caminho_switches[posicao_switch_saida], interface_saida, contador_saltos))
+ contador_saltos = contador_saltos + 1
+
+ return caminho
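For reference, calculo_dijkstra expects a topology of the form {src_dpid: {dst_dpid: portas(out_port, in_port, cost)}}, exactly as vcflow.py builds it; a small usage sketch over a three-switch triangle (the DPIDs and port numbers below are illustrative):

from spc.spf import portas, calculo_dijkstra

topologia = {
    1: {2: portas(1, 1), 3: portas(2, 1)},
    2: {1: portas(1, 1), 3: portas(2, 2)},
    3: {1: portas(1, 2), 2: portas(2, 2)},
}

rotas = calculo_dijkstra(topologia)
# rotas.tabela_roteamento_completa[1][3] lists the hops (switch, output port) from switch 1 to switch 3
print(rotas.tabela_roteamento_completa[1][3])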
diff --git a/util/install.sh b/util/install.sh
index b51610b..e530b05 100644
--- a/util/install.sh
+++ b/util/install.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
-# Mininet install script for Ubuntu (and Debian Wheezy+)
-# Brandon Heller (brandonh@stanford.edu)
+# VCFlow install script for Ubuntu (and Debian Wheezy+)
+# Pedro Diniz (phds@cbpf.br)
# Fail on error
set -e
@@ -9,21 +9,18 @@ set -e
# Fail on unset var usage
set -o nounset
-# Get directory containing mininet folder
-MININET_DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd -P )"
+# Get directory containing VCFlow folder
+VCFLOW_DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd -P )"
# Set up build directory, which by default is the working directory
-# unless the working directory is a subdirectory of mininet,
-# in which case we use the directory containing mininet
+# unless the working directory is a subdirectory of vcflow,
+# in which case we use the directory containing vcflow
BUILD_DIR="$(pwd -P)"
case $BUILD_DIR in
- $MININET_DIR/*) BUILD_DIR=$MININET_DIR;; # currect directory is a subdirectory
+ $VCFLOW_DIR/*) BUILD_DIR=$VCFLOW_DIR;; # current directory is a subdirectory
*) BUILD_DIR=$BUILD_DIR;;
esac
-# Location of CONFIG_NET_NS-enabled kernel(s)
-KERNEL_LOC=http://www.openflow.org/downloads/mininet
-
# Attempt to identify Linux release
DIST=Unknown
@@ -103,71 +100,19 @@ function version_ge {
}
-# Kernel Deb pkg to be removed:
-KERNEL_IMAGE_OLD=linux-image-2.6.26-33-generic
-
-DRIVERS_DIR=/lib/modules/${KERNEL_NAME}/kernel/drivers/net
-
-OVS_RELEASE=1.4.0
-OVS_PACKAGE_LOC=https://github.com/downloads/mininet/mininet
-OVS_BUILDSUFFIX=-ignore # was -2
-OVS_PACKAGE_NAME=ovs-$OVS_RELEASE-core-$DIST_LC-$RELEASE-$ARCH$OVS_BUILDSUFFIX.tar
-OVS_TAG=v$OVS_RELEASE
-
-OF13_SWITCH_REV=${OF13_SWITCH_REV:-""}
-
-
-function kernel {
- echo "Install Mininet-compatible kernel if necessary"
- $update update
- if ! $install linux-image-$KERNEL_NAME; then
- echo "Could not install linux-image-$KERNEL_NAME"
- echo "Skipping - assuming installed kernel is OK."
- fi
-}
-
-function kernel_clean {
- echo "Cleaning kernel..."
-
- # To save disk space, remove previous kernel
- if ! $remove $KERNEL_IMAGE_OLD; then
- echo $KERNEL_IMAGE_OLD not installed.
- fi
-
- # Also remove downloaded packages:
- rm -f $HOME/linux-headers-* $HOME/linux-image-*
-}
-
-# Install Mininet deps
-function mn_deps {
- echo "Installing Mininet dependencies"
+# Install VCFlow deps
+function vcflow_deps {
+ echo "Installing VCFlow dependencies"
if [ "$DIST" = "Fedora" -o "$DIST" = "RedHatEnterpriseServer" ]; then
- $install gcc make socat psmisc xterm openssh-clients iperf \
- iproute telnet python-setuptools libcgroup-tools \
- ethtool help2man pyflakes pylint python-pep8 python-pexpect
+ $install python-pip
elif [ "$DIST" = "SUSE LINUX" ]; then
- $install gcc make socat psmisc xterm openssh iperf \
- iproute telnet python-setuptools libcgroup-tools \
- ethtool help2man python-pyflakes python3-pylint python-pep8 python-pexpect
+ $install python-pip
else
- $install gcc make socat psmisc xterm ssh iperf iproute telnet \
- python-setuptools cgroup-bin ethtool help2man \
- pyflakes pylint pep8 python-pexpect
- fi
-
- echo "Installing Mininet core"
- pushd $MININET_DIR/mininet
- sudo make install
- popd
-}
-
-# Install Mininet developer dependencies
-function mn_dev {
- echo "Installing Mininet developer dependencies"
- $install doxygen doxypy texlive-fonts-recommended
- if ! $install doxygen-latex; then
- echo "doxygen-latex not needed"
+ $install python-pip
fi
+
+ python_pip=`which pip`
+ $python_pip install -r $VCFLOW_DIR/requirements.txt
}
# The following will cause a full OF install, covering:
@@ -217,26 +162,6 @@ function of13 {
fi
fi
- # Install netbee
- if [ "$DIST" = "Ubuntu" ] && version_ge $RELEASE 14.04; then
- NBEESRC="nbeesrc-feb-24-2015"
- NBEEDIR="netbee"
- else
- NBEESRC="nbeesrc-jan-10-2013"
- NBEEDIR="nbeesrc-jan-10-2013"
- fi
-
- NBEEURL=${NBEEURL:-http://www.nbee.org/download/}
- wget -nc ${NBEEURL}${NBEESRC}.zip
- unzip ${NBEESRC}.zip
- cd ${NBEEDIR}/src
- cmake .
- make
- cd $BUILD_DIR/
- sudo cp ${NBEEDIR}/bin/libn*.so /usr/local/lib
- sudo ldconfig
- sudo cp -R ${NBEEDIR}/include/ /usr/
-
# Resume the install:
cd $BUILD_DIR/ofsoftswitch13
./boot.sh
@@ -247,214 +172,6 @@ function of13 {
}
-function install_wireshark {
- if ! which wireshark; then
- echo "Installing Wireshark"
- if [ "$DIST" = "Fedora" -o "$DIST" = "RedHatEnterpriseServer" ]; then
- $install wireshark wireshark-gnome
- elif [ "$DIST" = "SUSE LINUX" ]; then
- $install wireshark
- else
- $install wireshark tshark
- fi
- fi
-
- # Copy coloring rules: OF is white-on-blue:
- echo "Optionally installing wireshark color filters"
- mkdir -p $HOME/.wireshark
- cp -n $MININET_DIR/mininet/util/colorfilters $HOME/.wireshark
-
- echo "Checking Wireshark version"
- WSVER=`wireshark -v | egrep -o '[0-9\.]+' | head -1`
- if version_ge $WSVER 1.12; then
- echo "Wireshark version $WSVER >= 1.12 - returning"
- return
- fi
-
- echo "Cloning LoxiGen and building openflow.lua dissector"
- cd $BUILD_DIR
- git clone https://github.com/floodlight/loxigen.git
- cd loxigen
- make wireshark
-
- # Copy into plugin directory
- # libwireshark0/ on 11.04; libwireshark1/ on later
- WSDIR=`find /usr/lib -type d -name 'libwireshark*' | head -1`
- WSPLUGDIR=$WSDIR/plugins/
- PLUGIN=loxi_output/wireshark/openflow.lua
- sudo cp $PLUGIN $WSPLUGDIR
- echo "Copied openflow plugin $PLUGIN to $WSPLUGDIR"
-
- cd $BUILD_DIR
-}
-
-
-# Install Open vSwitch specific version Ubuntu package
-function ubuntuOvs {
- echo "Creating and Installing Open vSwitch packages..."
-
- OVS_SRC=$BUILD_DIR/openvswitch
- OVS_TARBALL_LOC=http://openvswitch.org/releases
-
- if ! echo "$DIST" | egrep "Ubuntu|Debian" > /dev/null; then
- echo "OS must be Ubuntu or Debian"
- $cd BUILD_DIR
- return
- fi
- if [ "$DIST" = "Ubuntu" ] && ! version_ge $RELEASE 12.04; then
- echo "Ubuntu version must be >= 12.04"
- cd $BUILD_DIR
- return
- fi
- if [ "$DIST" = "Debian" ] && ! version_ge $RELEASE 7.0; then
- echo "Debian version must be >= 7.0"
- cd $BUILD_DIR
- return
- fi
-
- rm -rf $OVS_SRC
- mkdir -p $OVS_SRC
- cd $OVS_SRC
-
- if wget $OVS_TARBALL_LOC/openvswitch-$OVS_RELEASE.tar.gz 2> /dev/null; then
- tar xzf openvswitch-$OVS_RELEASE.tar.gz
- else
- echo "Failed to find OVS at $OVS_TARBALL_LOC/openvswitch-$OVS_RELEASE.tar.gz"
- cd $BUILD_DIR
- return
- fi
-
- # Remove any old packages
-
- $remove openvswitch-common openvswitch-datapath-dkms openvswitch-pki openvswitch-switch \
- openvswitch-controller || true
-
- # Get build deps
- $install build-essential fakeroot debhelper autoconf automake libssl-dev \
- pkg-config bzip2 openssl python-all procps python-qt4 \
- python-zopeinterface python-twisted-conch dkms dh-python dh-autoreconf \
- uuid-runtime
-
- # Build OVS
- parallel=`grep processor /proc/cpuinfo | wc -l`
- cd $BUILD_DIR/openvswitch/openvswitch-$OVS_RELEASE
- DEB_BUILD_OPTIONS='parallel=$parallel nocheck' fakeroot debian/rules binary
- cd ..
- for pkg in common datapath-dkms pki switch; do
- pkg=openvswitch-${pkg}_$OVS_RELEASE*.deb
- echo "Installing $pkg"
- $pkginst $pkg
- done
- if $pkginst openvswitch-controller_$OVS_RELEASE*.deb 2>/dev/null; then
- echo "Ignoring error installing openvswitch-controller"
- fi
-
- /sbin/modinfo openvswitch
- sudo ovs-vsctl show
- # Switch can run on its own, but
- # Mininet should control the controller
- # This appears to only be an issue on Ubuntu/Debian
- if sudo service openvswitch-controller stop 2>/dev/null; then
- echo "Stopped running controller"
- fi
- if [ -e /etc/init.d/openvswitch-controller ]; then
- sudo update-rc.d openvswitch-controller disable
- fi
-}
-
-
-# Install Open vSwitch
-
-function ovs {
- echo "Installing Open vSwitch..."
-
- if [ "$DIST" = "Fedora" -o "$DIST" = "RedHatEnterpriseServer" ]; then
- $install openvswitch openvswitch-controller
- return
- fi
-
- if [ "$DIST" = "Ubuntu" ] && ! version_ge $RELEASE 14.04; then
- # Older Ubuntu versions need openvswitch-datapath/-dkms
- # Manually installing openvswitch-datapath may be necessary
- # for manually built kernel .debs using Debian's defective kernel
- # packaging, which doesn't yield usable headers.
- if ! dpkg --get-selections | grep openvswitch-datapath; then
- # If you've already installed a datapath, assume you
- # know what you're doing and don't need dkms datapath.
- # Otherwise, install it.
- $install openvswitch-datapath-dkms
- fi
- fi
-
- $install openvswitch-switch
- OVSC=""
- if $install openvswitch-controller; then
- OVSC="openvswitch-controller"
- else
- echo "Attempting to install openvswitch-testcontroller"
- if $install openvswitch-testcontroller; then
- OVSC="openvswitch-testcontroller"
- else
- echo "Failed - skipping openvswitch-testcontroller"
- fi
- fi
- if [ "$OVSC" ]; then
- # Switch can run on its own, but
- # Mininet should control the controller
- # This appears to only be an issue on Ubuntu/Debian
- if sudo service $OVSC stop; then
- echo "Stopped running controller"
- fi
- if [ -e /etc/init.d/$OVSC ]; then
- sudo update-rc.d $OVSC disable
- fi
- fi
-}
-
-function remove_ovs {
- pkgs=`dpkg --get-selections | grep openvswitch | awk '{ print $1;}'`
- echo "Removing existing Open vSwitch packages:"
- echo $pkgs
- if ! $remove $pkgs; then
- echo "Not all packages removed correctly"
- fi
- # For some reason this doesn't happen
- if scripts=`ls /etc/init.d/*openvswitch* 2>/dev/null`; then
- echo $scripts
- for s in $scripts; do
- s=$(basename $s)
- echo SCRIPT $s
- sudo service $s stop
- sudo rm -f /etc/init.d/$s
- sudo update-rc.d -f $s remove
- done
- fi
- echo "Done removing OVS"
-}
-
-function ivs {
- echo "Installing Indigo Virtual Switch..."
-
- IVS_SRC=$BUILD_DIR/ivs
-
- # Install dependencies
- $install gcc make
- if [ "$DIST" = "Fedora" -o "$DIST" = "RedHatEnterpriseServer" ]; then
- $install git pkgconfig libnl3-devel libcap-devel openssl-devel
- else
- $install git-core pkg-config libnl-3-dev libnl-route-3-dev \
- libnl-genl-3-dev
- fi
-
- # Install IVS from source
- cd $BUILD_DIR
- git clone git://github.com/floodlight/ivs $IVS_SRC
- cd $IVS_SRC
- git submodule update --init
- make
- sudo make install
-}
-
# Install RYU
function ryu {
echo "Installing RYU..."
@@ -480,222 +197,14 @@ function ryu {
sudo ln -s ./bin/ryu-manager /usr/local/bin/ryu-manager
}
-# Install NOX with tutorial files
-function nox {
- echo "Installing NOX w/tutorial files..."
-
- # Install NOX deps:
- $install autoconf automake g++ libtool python python-twisted \
- swig libssl-dev make
- if [ "$DIST" = "Debian" ]; then
- $install libboost1.35-dev
- elif [ "$DIST" = "Ubuntu" ]; then
- $install python-dev libboost-dev
- $install libboost-filesystem-dev
- $install libboost-test-dev
- fi
- # Install NOX optional deps:
- $install libsqlite3-dev python-simplejson
-
- # Fetch NOX destiny
- cd $BUILD_DIR/
- git clone https://github.com/noxrepo/nox-classic.git noxcore
- cd noxcore
- if ! git checkout -b destiny remotes/origin/destiny ; then
- echo "Did not check out a new destiny branch - assuming current branch is destiny"
- fi
-
- # Apply patches
- git checkout -b tutorial-destiny
- git am $MININET_DIR/mininet/util/nox-patches/*tutorial-port-nox-destiny*.patch
- if [ "$DIST" = "Ubuntu" ] && version_ge $RELEASE 12.04; then
- git am $MININET_DIR/mininet/util/nox-patches/*nox-ubuntu12-hacks.patch
- fi
+# Copy Configuration File
+function cp_config_file {
+ echo "Installing configuration files..."
+
+ sudo mkdir -p /etc/vcflow
- # Build
- ./boot.sh
- mkdir build
- cd build
- ../configure
- make -j3
- #make check
-
- # Add NOX_CORE_DIR env var:
- sed -i -e 's|# for examples$|&\nexport NOX_CORE_DIR=$BUILD_DIR/noxcore/build/src|' ~/.bashrc
-
- # To verify this install:
- #cd ~/noxcore/build/src
- #./nox_core -v -i ptcp:
-}
-
-# Install NOX Classic/Zaku for OpenFlow 1.3
-function nox13 {
- echo "Installing NOX w/tutorial files..."
-
- # Install NOX deps:
- $install autoconf automake g++ libtool python python-twisted \
- swig libssl-dev make
- if [ "$DIST" = "Debian" ]; then
- $install libboost1.35-dev
- elif [ "$DIST" = "Ubuntu" ]; then
- $install python-dev libboost-dev
- $install libboost-filesystem-dev
- $install libboost-test-dev
- fi
-
- # Fetch NOX destiny
- cd $BUILD_DIR/
- git clone https://github.com/CPqD/nox13oflib.git
- cd nox13oflib
-
- # Build
- ./boot.sh
- mkdir build
- cd build
- ../configure
- make -j3
- #make check
-
- # To verify this install:
- #cd ~/nox13oflib/build/src
- #./nox_core -v -i ptcp:
-}
-
-
-# "Install" POX
-function pox {
- echo "Installing POX into $BUILD_DIR/pox..."
- cd $BUILD_DIR
- git clone https://github.com/noxrepo/pox.git
-}
-
-# Install OFtest
-function oftest {
- echo "Installing oftest..."
-
- # Install deps:
- $install tcpdump python-scapy
-
- # Install oftest:
- cd $BUILD_DIR/
- git clone git://github.com/floodlight/oftest
-}
-
-# Install cbench
-function cbench {
- echo "Installing cbench..."
-
- if [ "$DIST" = "Fedora" -o "$DIST" = "RedHatEnterpriseServer" ]; then
- $install net-snmp-devel libpcap-devel libconfig-devel
- elif [ "$DIST" = "SUSE LINUX" ]; then
- $install net-snmp-devel libpcap-devel libconfig-devel
- else
- $install libsnmp-dev libpcap-dev libconfig-dev
- fi
- cd $BUILD_DIR/
- # was: git clone git://gitosis.stanford.edu/oflops.git
- # Use our own fork on github for now:
- git clone git://github.com/mininet/oflops
- cd oflops
- sh boot.sh || true # possible error in autoreconf, so run twice
- sh boot.sh
- ./configure --with-openflow-src-dir=$BUILD_DIR/openflow
- make
- sudo make install || true # make install fails; force past this
-}
-
-function vm_other {
- echo "Doing other Mininet VM setup tasks..."
-
- # Remove avahi-daemon, which may cause unwanted discovery packets to be
- # sent during tests, near link status changes:
- echo "Removing avahi-daemon"
- $remove avahi-daemon
-
- # was: Disable IPv6. Add to /etc/modprobe.d/blacklist:
- #echo "Attempting to disable IPv6"
- #if [ "$DIST" = "Ubuntu" ]; then
- # BLACKLIST=/etc/modprobe.d/blacklist.conf
- #else
- # BLACKLIST=/etc/modprobe.d/blacklist
- #fi
- #sudo sh -c "echo 'blacklist net-pf-10\nblacklist ipv6' >> $BLACKLIST"
- echo "Disabling IPv6"
- # Disable IPv6
- if ! grep 'disable_ipv6' /etc/sysctl.conf; then
- echo 'Disabling IPv6'
- echo '
-# Mininet: disable IPv6
-net.ipv6.conf.all.disable_ipv6 = 1
-net.ipv6.conf.default.disable_ipv6 = 1
-net.ipv6.conf.lo.disable_ipv6 = 1' | sudo tee -a /etc/sysctl.conf > /dev/null
- fi
- # Since the above doesn't disable neighbor discovery, also do this:
- if ! grep 'ipv6.disable' /etc/default/grub; then
- sudo sed -i -e \
- 's/GRUB_CMDLINE_LINUX_DEFAULT="/GRUB_CMDLINE_LINUX_DEFAULT="ipv6.disable=1 /' \
- /etc/default/grub
- sudo update-grub
- fi
- # Disabling IPv6 breaks X11 forwarding via ssh
- line='AddressFamily inet'
- file='/etc/ssh/sshd_config'
- echo "Adding $line to $file"
- if ! grep "$line" $file > /dev/null; then
- echo "$line" | sudo tee -a $file > /dev/null
- fi
-
- # Enable command auto completion using sudo; modify ~/.bashrc:
- sed -i -e 's|# for examples$|&\ncomplete -cf sudo|' ~/.bashrc
-
- # Install tcpdump, cmd-line packet dump tool. Also install gitk,
- # a graphical git history viewer.
- $install tcpdump gitk
-
- # Install common text editors
- $install vim nano emacs
-
- # Install NTP
- $install ntp
-
- # Install vconfig for VLAN example
- if [ "$DIST" = "Fedora" -o "$DIST" = "RedHatEnterpriseServer" ]; then
- $install vconfig
- else
- $install vlan
- fi
-
- # Set git to colorize everything.
- git config --global color.diff auto
- git config --global color.status auto
- git config --global color.branch auto
-
- # Reduce boot screen opt-out delay. Modify timeout in /boot/grub/menu.lst to 1:
- if [ "$DIST" = "Debian" ]; then
- sudo sed -i -e 's/^timeout.*$/timeout 1/' /boot/grub/menu.lst
- fi
-
- # Clean unneeded debs:
- rm -f ~/linux-headers-* ~/linux-image-*
-}
-
-# Script to copy built OVS kernel module to where modprobe will
-# find them automatically. Removes the need to keep an environment variable
-# for insmod usage, and works nicely with multiple kernel versions.
-#
-# The downside is that after each recompilation of OVS you'll need to
-# re-run this script. If you're using only one kernel version, then it may be
-# a good idea to use a symbolic link in place of the copy below.
-function modprobe {
- echo "Setting up modprobe for OVS kmod..."
- set +o nounset
- if [ -z "$OVS_KMODS" ]; then
- echo "OVS_KMODS not set. Aborting."
- else
- sudo cp $OVS_KMODS $DRIVERS_DIR
- sudo depmod -a ${KERNEL_NAME}
- fi
- set -o nounset
+ sudo cp -vp $BUILD_DIR/vcflow/config/vcflow_configuration_database.conf /etc/vcflow
+ sudo cp -vp $BUILD_DIR/vcflow/config/vcflow.conf /etc/vcflow
}
function all {
@@ -708,22 +217,11 @@ function all {
printf " install.sh -fnpv\n\n"
exit 3
fi
- echo "Installing all packages except for -eix (doxypy, ivs, nox-classic)..."
- kernel
- mn_deps
- # Skip mn_dev (doxypy/texlive/fonts/etc.) because it's huge
- # mn_dev
- of
- install_wireshark
- ovs
- # We may add ivs once it's more mature
- # ivs
- # NOX-classic is deprecated, but you can install it manually if desired.
- # nox
- pox
- oftest
- cbench
- echo "Enjoy Mininet!"
+ echo "Installing all packages..."
+ ryu
+ vcflow_deps
+ cp_config_file
+ echo "Enjoy VCFlow!"
}
# Restore disk space and remove sensitive files before shipping a VM.
@@ -732,31 +230,6 @@ function vm_clean {
sudo apt-get clean
sudo apt-get autoremove
sudo rm -rf /tmp/*
- sudo rm -rf openvswitch*.tar.gz
-
- # Remove sensistive files
- history -c # note this won't work if you have multiple bash sessions
- rm -f ~/.bash_history # need to clear in memory and remove on disk
- rm -f ~/.ssh/id_rsa* ~/.ssh/known_hosts
- sudo rm -f ~/.ssh/authorized_keys*
-
- # Remove SSH keys and regenerate on boot
- echo 'Removing SSH keys from /etc/ssh/'
- sudo rm -f /etc/ssh/*key*
- if ! grep mininet /etc/rc.local >& /dev/null; then
- sudo sed -i -e "s/exit 0//" /etc/rc.local
- echo '
-# mininet: regenerate ssh keys if we deleted them
-if ! stat -t /etc/ssh/*key* >/dev/null 2>&1; then
- /usr/sbin/dpkg-reconfigure openssh-server
-fi
-exit 0
-' | sudo tee -a /etc/rc.local > /dev/null
- fi
-
- # Remove Mininet files
- #sudo rm -f /lib/modules/python2.5/site-packages/mininet*
- #sudo rm -f /usr/bin/mnexec
# Clear optional dev script for SSH keychain load on boot
rm -f ~/.bash_profile
@@ -765,44 +238,24 @@ exit 0
git config --global user.name "None"
git config --global user.email "None"
- # Note: you can shrink the .vmdk in vmware using
- # vmware-vdiskmanager -k *.vmdk
- echo "Zeroing out disk blocks for efficient compaction..."
- time sudo dd if=/dev/zero of=/tmp/zero bs=1M || true
- sync ; sleep 1 ; sync ; sudo rm -f /tmp/zero
-
}
function usage {
- printf '\nUsage: %s [-abcdfhikmnprtvVwxy03]\n\n' $(basename $0) >&2
+ printf '\nUsage: %s [-acdfhy03]\n\n' $(basename $0) >&2
printf 'This install script attempts to install useful packages\n' >&2
- printf 'for Mininet. It should (hopefully) work on Ubuntu 11.10+\n' >&2
+ printf 'for VCFlow. It should (hopefully) work on Ubuntu 11.10+\n' >&2
printf 'If you run into trouble, try\n' >&2
printf 'installing one thing at a time, and looking at the \n' >&2
printf 'specific installation function in this script.\n\n' >&2
printf 'options:\n' >&2
printf -- ' -a: (default) install (A)ll packages - good luck!\n' >&2
- printf -- ' -b: install controller (B)enchmark (oflops)\n' >&2
- printf -- ' -c: (C)lean up after kernel install\n' >&2
+ printf -- ' -c: (C)opy configuration files\n' >&2
printf -- ' -d: (D)elete some sensitive files from a VM image\n' >&2
- printf -- ' -e: install Mininet d(E)veloper dependencies\n' >&2
printf -- ' -f: install Open(F)low\n' >&2
printf -- ' -h: print this (H)elp message\n' >&2
- printf -- ' -i: install (I)ndigo Virtual Switch\n' >&2
- printf -- ' -k: install new (K)ernel\n' >&2
- printf -- ' -m: install Open vSwitch kernel (M)odule from source dir\n' >&2
- printf -- ' -n: install Mini(N)et dependencies + core files\n' >&2
- printf -- ' -p: install (P)OX OpenFlow Controller\n' >&2
- printf -- ' -r: remove existing Open vSwitch packages\n' >&2
- printf -- ' -s <dir>: place dependency (S)ource/build trees in <dir>\n' >&2
- printf -- ' -t: complete o(T)her Mininet VM setup tasks\n' >&2
- printf -- ' -v: install Open (V)switch\n' >&2
- printf -- ' -V : install a particular version of Open (V)switch on Ubuntu\n' >&2
- printf -- ' -w: install OpenFlow (W)ireshark dissector\n' >&2
printf -- ' -y: install R(y)u Controller\n' >&2
- printf -- ' -x: install NO(X) Classic OpenFlow controller\n' >&2
printf -- ' -0: (default) -0[fx] installs OpenFlow 1.0 versions\n' >&2
printf -- ' -3: -3[fx] installs OpenFlow 1.3 versions\n' >&2
exit 2
@@ -814,39 +267,18 @@ if [ $# -eq 0 ]
then
all
else
- while getopts 'abcdefhikmnprs:tvV:wxy03' OPTION
+ while getopts 'acdfhy03' OPTION
do
case $OPTION in
a) all;;
- b) cbench;;
- c) kernel_clean;;
+ c) cp_config_file;;
d) vm_clean;;
- e) mn_dev;;
f) case $OF_VERSION in
1.0) of;;
1.3) of13;;
*) echo "Invalid OpenFlow version $OF_VERSION";;
esac;;
h) usage;;
- i) ivs;;
- k) kernel;;
- m) modprobe;;
- n) mn_deps;;
- p) pox;;
- r) remove_ovs;;
- s) mkdir -p $OPTARG; # ensure the directory is created
- BUILD_DIR="$( cd -P "$OPTARG" && pwd )"; # get the full path
- echo "Dependency installation directory: $BUILD_DIR";;
- t) vm_other;;
- v) ovs;;
- V) OVS_RELEASE=$OPTARG;
- ubuntuOvs;;
- w) install_wireshark;;
- x) case $OF_VERSION in
- 1.0) nox;;
- 1.3) nox13;;
- *) echo "Invalid OpenFlow version $OF_VERSION";;
- esac;;
y) ryu;;
0) OF_VERSION=1.0;;
3) OF_VERSION=1.3;;
diff --git a/vcflow.py b/vcflow.py
new file mode 100644
index 0000000..51df139
--- /dev/null
+++ b/vcflow.py
@@ -0,0 +1,1440 @@
+# -*- coding: cp1252 -*-
+from ryu.base import app_manager
+from ryu.controller import ofp_event
+from ryu.controller.handler import HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER
+from ryu.controller.handler import set_ev_cls
+from ryu.ofproto import ofproto_v1_0, ofproto_v1_3
+from ryu.ofproto import ofproto_v1_0_parser, ofproto_v1_3_parser
+from ryu.topology import switches
+from ryu.controller import handler, dpset
+from ryu.ofproto import ether
+from ryu.lib import mac, ofctl_v1_0, ofctl_v1_3, hub
+from ryu.lib.packet import ethernet, arp, packet, ipv4,icmp, tcp, udp, mpls, vlan, ether_types, llc
+from ryu.lib.ip import ipv4_to_bin, ipv4_to_str
+from ryu.lib.mac import haddr_to_bin
+from ryu.topology import event,api
+from ryu.topology.api import get_switch, get_link, get_host
+from ryu.utils import hex_array
+from time import time, sleep
+from threading import BoundedSemaphore
+from spc.spf import portas, calculo_dijkstra
+import sys
+import json
+import logging
+
+VCFLOW_CONFIGURATION_FILE = '/etc/vcflow/vcflow.conf'
+MAC_SRC_RENEWAL_INTERVAL = 60 #in seconds
+CONFIGURATION_DB_VERIFICATION_INTERVAL = 5 #in seconds
+DESC_REQUEST_INTERVAL = 100 #in seconds
+INTERFACES_INFO_FILE = '/tmp/.interfaces_info.json'
+INTERFACES_INFO_OO_FILE = '/tmp/.interfaces_info_object.json'
+LLDP_TAGGED_VERIFICATION_FILE = '/tmp/.verification_lldp_discovery.txt'
+IDENTIFICATION_DB_VERIFICATION_FILE = '/tmp/.identification_db_verification.txt'
+FIRST_LINK_ADD_INTERVAL = 40 #in seconds
+ECHO_REQUEST_INTERVAL = 1
+TIMEOUT_ECHO_REQUEST = 60.0
+
+with open(VCFLOW_CONFIGURATION_FILE) as json_data_file:
+ data = json.load(json_data_file)
+ CONFIGURATION_DB = data['configuration_database']
+ IDENTIFICATION_DB = data['identification_database']
+ FIRST_BACKBONE_ID = data['l2vpn_vlan_circuits']['first_transport_vlan_id_of_circuits']
+
+class VCFlow(app_manager.RyuApp):
+ OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION, ofproto_v1_3.OFP_VERSION]
+
+ _CONTEXTS = {
+ 'dpset': dpset.DPSet,
+ } # Declares the contexts this application wants to use
+
+ def __init__(self, *args, **kwargs):
+ super(VCFlow, self).__init__(*args, **kwargs)
+ self.dpset = kwargs['dpset']
+ self.obj_topologia={} # Dictionary with one entry per (unidirectional) link, in the format switch_src:{switch_dest:[port_src,port_dest]}, e.g. {1:{2: [2, 2]}, 2: {3: [3, 2]}, 3: {2: [2, 3]}}
+ self.s_obj_topologia = BoundedSemaphore()
+ self.obj_topologia_auxiliar=[] # List in which each position is a dictionary: {switch_src:{switch_dest:[port_src,port_dest, custo]}}, e.g. {1:{2: [2, 2, 1]}, 2: {3: [3, 2, 1]}, 3: {2: [2, 3, 1]}}. Used to keep history when there are multiple links between the same pair of switches
+ self.s_obj_topologia_auxiliar = BoundedSemaphore()
+ self.tabela_roteamento = None
+ self.s_tabela_roteamento = BoundedSemaphore()
+ self.mac_to_port = {} #ex.: {1: {'00:00:00:00:00:01': {'porta': 1, 'vlan_id': 10}, 'ce:cb:7d:05:9b:0d': {'porta': 2, 'vlan_id': 20}, 2: {'00:00:00:00:00:02': {'porta': 1, 'vlan_id': 20}, 'b6:fb:71:61:73:82': {'porta': 2, 'vlan_id': 30}}}
+ self.timestamp_novo_mac_src = {} # Dictionary of timestamps keyed by MAC
+ self.timestamp_antigo_mac_src = {} # Dictionary of timestamps keyed by MAC
+ self.intervalo_renovacao_mac_src = MAC_SRC_RENEWAL_INTERVAL
+
+ self.topology_api_app = self
+ self.switch_list = []
+ self.switches_in_topology = {}
+ self.switches_in_topology_file = {}
+ self.dpid_match_actions_dic = {}
+ self.dictionary_dpid_datapath = {} # Dictionary of the form {DPID:datapath}, used to proactively install rules in each direction of a circuit
+
+ self.arquivo_configuracao_l2vpn = CONFIGURATION_DB
+ self.arquivo_identificacao_l2vpn = IDENTIFICATION_DB
+ self.dicionario_l2vpn = [] #Each position in the list is one line of the configuration file, stored as a dictionary
+ self.dicionario_l2vpn_identificacao = [] #Each position in the list is a dictionary identifying a circuit (PE 1, ID PE 1, PE 2, ID PE 2, Backbone ID). Circuit information is read from the configuration file and the Backbone IDs are assigned sequentially starting from the value read from the configuration file
+ self.frequencia_verificacao_arquivo_configuracao = CONFIGURATION_DB_VERIFICATION_INTERVAL # Interval in seconds at which the function checks the configuration file for new configuration
+ self.time_to_desc_request = DESC_REQUEST_INTERVAL #in seconds
+ self.desc_request_counter = 0
+ self.interfaces_info = INTERFACES_INFO_FILE
+ self.interfaces_info_object = INTERFACES_INFO_OO_FILE
+ self.verification_lldp_discovery_file = LLDP_TAGGED_VERIFICATION_FILE
+
+ self.last_link_add = 0
+ self.begging_time = time()
+
+ self.logger = logging.getLogger('spf_calculation_application')
+ if not len(self.logger.handlers):
+ self._set_logger()
+
+ self.thread_lldp_discovery_check = hub.spawn(self.lldp_discovery_check)
+ self.thread_desc_stats = hub.spawn(self.send_desc_stats_request)
+ self.thread_inicializacao = hub.spawn(self.inicializacao)
+ self.thread_echo_request = hub.spawn(self.echo_request)
+
+ def _set_logger(self):
+ self.logger.setLevel(logging.INFO)
+ self.logger.propagate = False
+ hdlr = logging.StreamHandler()
+ fmt_str = '[VCFlow][%(levelname)s] %(funcName)s | %(created)f | %(asctime)s: %(message)s'
+ hdlr.setFormatter(logging.Formatter(fmt_str))
+ self.logger.addHandler(hdlr)
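+ # Illustrative sketch (hypothetical values): with the format string above, a log record
+ # emitted by link_add_handler would look roughly like:
+ # [VCFlow][INFO] link_add_handler | 1519300000.123456 | 2018-02-22 12:26:40,123: Link added. ...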
+
+ def inicializacao(self):
+ hub.spawn(self.verificacao_arquivo_configuracao, self.arquivo_configuracao_l2vpn, self.arquivo_identificacao_l2vpn, self.frequencia_verificacao_arquivo_configuracao) # Pass the callable and its arguments so hub.spawn runs the verification loop in its own green thread
+
+ def echo_request(self):
+ while True:
+ self.logger.debug('Sending ECHO REQUESTs for DPIDs: %s.', str(self.dictionary_dpid_datapath))
+ for dpid in self.dictionary_dpid_datapath.keys():
+ self.logger.debug('Sending ECHO REQUEST for DPID: %s.', str(dpid))
+ datapath = self.dictionary_dpid_datapath[dpid]
+ self.send_echo_request(datapath)
+
+ hub.sleep(ECHO_REQUEST_INTERVAL)
+
+
+ def verificacao_arquivo_configuracao(self, arquivo_configuracao, arquivo_identificacao, frequencia_verificacao_l2vpn=300, arquivo_watchdog_databases=IDENTIFICATION_DB_VERIFICATION_FILE):
+ #sleep(2) # Sleep to wait for the switches to connect to the controller
+ primeira_leitura = True
+ dicionario_l2vpn_inicio_verificacao = self.dicionario_l2vpn
+ while True:
+ if primeira_leitura:
+ sleep(self.frequencia_verificacao_arquivo_configuracao) # Sleep to wait for the switches to connect to the controller
+ primeira_leitura = False
+ ids_backbone = [FIRST_BACKBONE_ID - 1]
+
+ else:
+ arquivo = open(arquivo_identificacao, 'r')
+ linhas = arquivo.readlines()
+ arquivo.close()
+ ids_backbone = []
+ dicionario_l2vpn_identificacao = {}
+ for linha in linhas:
+ linha = linha.split(' ')
+ if '#' in linha[0]:
+ continue
+ else:
+ ids_backbone.append(int(linha[-1]))
+
+ if ids_backbone == []: # Case where the last verification found no valid entry
+ ids_backbone = [FIRST_BACKBONE_ID - 1] # The first backbone ID is 2 (FIRST_BACKBONE_ID)
+
+ arquivo = open(arquivo_configuracao, 'r')
+ #self.dicionario_l2vpn = []
+ #self.dicionario_l2vpn_identificacao = []
+ linhas = arquivo.readlines()
+ arquivo.close()
+ for linha in linhas:
+ dicionario_l2vpn_verificacao ={}
+ dicionario_l2vpn_identificacao = {}
+ linha = linha.split(' ')
+ #break_reading = None
+
+ if '#' in linha[0]:
+ continue
+
+ elif (len(linha) == 6):
+ break_reading = False
+ for posicao in range(len(linha)):
+ try:
+ if posicao == 0:
+ dicionario_l2vpn_verificacao['switch_entrada'] = int(linha[posicao])
+ dicionario_l2vpn_identificacao['pe_1'] = int(linha[posicao])
+ elif posicao == 1:
+ dicionario_l2vpn_verificacao['porta_entrada'] = self.interface_name_reading(dicionario_l2vpn_verificacao['switch_entrada'], linha[posicao])
+ if dicionario_l2vpn_verificacao['porta_entrada'] == 0:
+ break_reading = True
+ break
+ elif posicao == 2:
+ try:
+ dicionario_l2vpn_verificacao['vlan_id_entrada'] = int(linha[posicao])
+ dicionario_l2vpn_identificacao['id_pe_1'] = int(linha[posicao])
+ #dicionario_l2vpn_verificacao['vlan_id_entrada_range'] = False
+ #dicionario_l2vpn_identificacao['id_pe_1_range'] = False
+ except ValueError: # VLAN range case, where the value read is a string
+ #dicionario_l2vpn_verificacao['vlan_id_entrada_range'] = True
+ #dicionario_l2vpn_identificacao['id_pe_1_range'] = True
+ dicionario_l2vpn_verificacao['vlan_id_entrada'] = range(int(linha[posicao].split('-')[0]), (int(linha[posicao].split('-')[1])+1))
+ dicionario_l2vpn_identificacao['id_pe_1'] = range(int(linha[posicao].split('-')[0]), (int(linha[posicao].split('-')[1])+1))
+ elif posicao == 3:
+ dicionario_l2vpn_verificacao['switch_saida'] = int(linha[posicao])
+ dicionario_l2vpn_identificacao['pe_2'] = int(linha[posicao])
+ elif posicao == 4:
+ dicionario_l2vpn_verificacao['porta_saida'] = self.interface_name_reading(dicionario_l2vpn_verificacao['switch_saida'], linha[posicao])
+ if dicionario_l2vpn_verificacao['porta_saida'] == 0:
+ break_reading = True
+ break
+ elif posicao == 5:
+ try:
+ dicionario_l2vpn_verificacao['vlan_id_saida'] = int(linha[posicao].replace('\n', ''))
+ dicionario_l2vpn_identificacao['id_pe_2'] = int(linha[posicao].replace('\n', ''))
+ #dicionario_l2vpn_verificacao['vlan_id_saida_range'] = False
+ #dicionario_l2vpn_identificacao['id_pe_2_range'] = False
+ except ValueError: # VLAN range case, where the value read is a string
+ #dicionario_l2vpn_verificacao['vlan_id_saida_range'] = True
+ #dicionario_l2vpn_identificacao['id_pe_2_range'] = True
+ dicionario_l2vpn_verificacao['vlan_id_saida'] = range(int(linha[posicao].replace('\n','').split('-')[0]), (int(linha[posicao].replace('\n','').split('-')[1])+1))
+ dicionario_l2vpn_identificacao['id_pe_2'] = range(int(linha[posicao].split('-')[0]), (int(linha[posicao].split('-')[1])+1))
+
+ except ValueError:
+ self.logger.info('Value error, please verify the configuration file: %s', str(arquivo_configuracao))
+
+ if break_reading:
+ self.logger.info('Port names configured wrongly for circuit entry %s. Please verify the configuration file: %s', str(linha), str(arquivo_configuracao))
+ continue
+
+ if ( dicionario_l2vpn_verificacao in self.dicionario_l2vpn ) or ( dicionario_l2vpn_verificacao == {} ):
+ continue
+ else:
+ self.dicionario_l2vpn.append(dicionario_l2vpn_verificacao)
+ next_id_backbone = max(ids_backbone) + 1
+ ids_backbone.append(next_id_backbone)
+ dicionario_l2vpn_identificacao['id_backbone'] = next_id_backbone
+ self.dicionario_l2vpn_identificacao.append(dicionario_l2vpn_identificacao)
+
+ if self.dicionario_l2vpn == [] :
+ self.logger.info('No valid input set in the configuration file: %s', str(arquivo_configuracao))
+ arquivo = open(arquivo_watchdog_databases, 'w')
+ arquivo.write('0')
+ arquivo.close()
+ else:
+ arquivo = open(arquivo_watchdog_databases, 'w')
+ arquivo.write('1')
+ arquivo.close()
+
+ arquivo = open(arquivo_identificacao, 'w')
+ arquivo.write('#Formato: PE 1 | ID PE 1 | PE 2 | ID PE 2 | ID Backbone\n')
+ for circuito in self.dicionario_l2vpn_identificacao:
+ arquivo.write(str(circuito['pe_1']) + " " + str(circuito['id_pe_1']) + " " + str(circuito['pe_2']) + " " + str(circuito['id_pe_2']) + " " + str(circuito['id_backbone']) + '\n')
+
+ arquivo.close()
+
+ if str(self.dicionario_l2vpn) != str(dicionario_l2vpn_inicio_verificacao):
+ self.logger.info('Configuration Database has been UPDATED.')
+ else:
+ self.logger.info('Configuration Database has NOT been UPDATED.')
+
+ self.logger.info('Configuration file %s has the following valid inputs: %s.', str(arquivo_configuracao), str(self.dicionario_l2vpn))
+ self.logger.info('Identification file %s has the following valid inputs: %s', str(arquivo_identificacao), str(self.dicionario_l2vpn_identificacao))
+ self.logger.info('Sleeping %s seconds for next configuration file verification.', str(frequencia_verificacao_l2vpn))
+
+ hub.sleep(frequencia_verificacao_l2vpn)
+ return
+
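+ # Illustrative sketch (hypothetical values, not part of the original logic): a configuration
+ # database line such as "1 s1-eth2 100 3 s3-eth1 200" would be parsed by the loop above into
+ # dicionario_l2vpn_verificacao = {'switch_entrada': 1, 'porta_entrada': <port_no of s1-eth2>,
+ #                                 'vlan_id_entrada': 100, 'switch_saida': 3,
+ #                                 'porta_saida': <port_no of s3-eth1>, 'vlan_id_saida': 200}
+ # and the matching identification entry would receive the next free backbone VLAN ID, e.g.
+ # dicionario_l2vpn_identificacao = {'pe_1': 1, 'id_pe_1': 100, 'pe_2': 3, 'id_pe_2': 200, 'id_backbone': 2}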
+
+ def interface_name_reading(self, dpid, interface_name):
+ self.logger.debug('Beginning of interface name reading.')
+ #print "O switches in topology e: ", self.switches_in_topology
+ port_no = 0 # Port number 0 is not used by the switches, so it can be used as the error case
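+ # Illustrative sketch (hypothetical values): self.switches_in_topology is assumed here to map
+ # each DPID to a list whose first element is the switch description and whose remaining
+ # elements describe its interfaces, e.g.
+ # {1: [{'dp_desc': 'sw-core-1', ...}, {'name': 's1-eth1', 'port_no': 1, 'link_state': 'UP'}, ...]}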
+ #REMOVER PARA EMULACAO
+ """
+ interface_type = interface_name.split('0')[0]
+ self.logger.debug('Interface type is [Gigabit|TenGigabit]: %s.', str(interface_type))
+
+ if interface_type == 'g' or interface_type == 'G' or interface_type == 'gi' or interface_type == 'gig' or interface_type == 'Gig':
+ interface_name = interface_name.replace(interface_type, 'Gi')
+ self.logger.debug('Interface name modified to: %s.', str(interface_name))
+
+ elif interface_type == 't' or interface_type == 'T' or interface_type == 'te' or interface_type == 'ten' or interface_type == 'Ten':
+ interface_name = interface_name.replace(interface_type, 'Te')
+ self.logger.debug('Interface name modified to: %s.', str(interface_name))
+
+ elif interface_type != 'Gi' and interface_type != 'Te':
+ # Caso em que esta configurado errado
+ self.logger.debug('Interface type is different of Gi and Te. Port no is %s. Returning.', str(port_no))
+ return port_no
+ """
+ #FIM DA REMOCAO PARA EMULACAO
+
+ self.logger.debug('Dictionary self.switches_in_topology is: %s.', str(self.switches_in_topology))
+ name_read = False
+ if self.switches_in_topology.has_key(dpid):
+ self.logger.debug('Dictionary self.switches_in_topology has DPID: %s.', str(dpid))
+ for i in range(len(self.switches_in_topology[dpid])):
+ self.logger.debug('Interface evaluated from dictionary self.switches_in_topology is: %s.', str(self.switches_in_topology[dpid][i]))
+ try:
+ if interface_name == self.switches_in_topology[dpid][i]['name']:
+ port_no = int(self.switches_in_topology[dpid][i]['port_no'])
+ name_read = True
+ self.logger.debug('Interface name is: %s is equal to switches_in_topology[dpid][i][name]: %s, port no is: %s.', str(interface_name), str(self.switches_in_topology[dpid][i]['name']), port_no)
+
+ except Exception:
+ pass
+
+ if not name_read: # If the name was not read, name_read is False, so not name_read is True
+ self.logger.info('Interface name: %s is not registered on DPID: %s.', str(interface_name), str(dpid))
+
+ try:
+ self.logger.debug('Returning port_no: %s.', str(port_no))
+ return port_no
+ except Exception:
+ return
+
+ return
+
+
+ def send_desc_stats_request(self):
+ while True:
+ if self.desc_request_counter == 0:
+ self.logger.info('#1 L2VPN Ryu App: initiated.')
+ self.logger.info('Sleeping 15 seconds waiting for switches to connect.')
+ # Wait the initial 15 seconds so the switches can connect to the controller
+ hub.sleep(15)
+
+ else:
+ for datapath in self.dpset.dps.values():
+ ofp_parser = datapath.ofproto_parser
+
+ req = ofp_parser.OFPDescStatsRequest(datapath, 0)
+ datapath.send_msg(req)
+ self.logger.info('OFPDescStatsRequest sent to DPID %s.', str(datapath.id))
+
+ self.logger.info('Sleeping %s seconds for next OFPDescStatsRequest.', str(self.time_to_desc_request))
+ hub.sleep(self.time_to_desc_request)
+
+ self.desc_request_counter = self.desc_request_counter + 1
+
+ return
+
+ def lldp_discovery_check(self):
+ sleep(FIRST_LINK_ADD_INTERVAL) # Wait long enough for the first link_add to occur
+ interval_between_begging_and_first_link_add = self.last_link_add - self.begging_time
+
+ if interval_between_begging_and_first_link_add < 0: # Case where no link_add has occurred
+ self.lldp_discovery_verification(False)
+ self.logger.info('LLDP discovery is NOT working.')
+ else:
+ self.lldp_discovery_verification(True)
+ self.logger.info('LLDP discovery is working.')
+
+ return
+
+
+ def lldp_discovery_verification(self, behavior):
+ # Writes to a file that is used as a watchdog for the Ryu process. If the file contains 1, tagged LLDP discovery is working; if it contains 0, it is not.
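+ # Illustrative note (assumption, not part of the original logic): an external watchdog could
+ # periodically read self.verification_lldp_discovery_file and restart the Ryu process whenever
+ # the file contains '0'.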
+ arquivo = open(self.verification_lldp_discovery_file, 'w')
+ if behavior:
+ arquivo.write('1')
+ else:
+ arquivo.write('0')
+
+ arquivo.close()
+
+ return
+
+
+ def add_flow(self, datapath, priority, match, actions, buffer_id=None):
+ ofproto = datapath.ofproto
+ parser = datapath.ofproto_parser
+
+ try:
+ #Switches OpenFlow 1.3
+ inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
+ actions)]
+ except AttributeError:
+ #Switches OpenFlow 1.0
+ pass
+
+ if buffer_id:
+ try:
+ #Switches OpenFlow 1.3
+ mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
+ priority=priority, match=match,
+ instructions=inst, idle_timeout=60, hard_timeout=0)
+ self.logger.info('Adding flow for OF1.3, OFPFlowMod: %s.', str(mod))
+ except UnboundLocalError:
+ #Switches OpenFlow 1.0
+ mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
+ priority=priority, match=match,
+ actions=actions, idle_timeout=60, hard_timeout=0)
+ self.logger.info('Adding flow for OF1.0, OFPFlowMod: %s.', str(mod))
+
+ else:
+ try:
+ #Switches OpenFlow 1.3
+ mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
+ match=match, instructions=inst, idle_timeout=60, hard_timeout=0)
+ self.logger.info('Adding flow for OF1.3, OFPFlowMod: %s.', str(mod))
+ except UnboundLocalError:
+ #Switches OpenFlow 1.0
+ mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
+ match=match, actions=actions, idle_timeout=60, hard_timeout=0)
+ self.logger.info('Adding flow for OF1.0, OFPFlowMod: %s.', str(mod))
+ datapath.send_msg(mod)
+ self.logger.info('Flow added for DPID: %s ', str(datapath.id))
+ self.logger.info('Flow added with match: %s and actions %s', str(match), str(actions))
+
+ new_dic_match_actions = {'match': match, 'actions': actions}
+ continuacao = True
+ if self.dpid_match_actions_dic.has_key(datapath.id):
+ for dic_match_actions_comparison in self.dpid_match_actions_dic[datapath.id]:
+ if str(dic_match_actions_comparison) == str(new_dic_match_actions):
+ continuacao = False
+
+ if continuacao:
+ self.dpid_match_actions_dic[datapath.id].append(new_dic_match_actions)
+ self.logger.info('Match and Actions Dictionary for DPID: %s has been UPDATED: %s', str(datapath.id), str(self.dpid_match_actions_dic))
+
+ else:
+ self.dpid_match_actions_dic[datapath.id] = []
+ self.dpid_match_actions_dic[datapath.id].append(new_dic_match_actions)
+ self.logger.info('Match and Actions Dictionary for DPID: %s has been CREATED: %s', str(datapath.id), str(self.dpid_match_actions_dic))
+
+ return
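+ # Illustrative usage sketch (hypothetical values), assuming an OpenFlow 1.3 datapath:
+ # match = parser.OFPMatch(in_port=1, eth_src='00:00:00:00:00:01')
+ # actions = [parser.OFPActionOutput(2)]
+ # self.add_flow(datapath, 2, match, actions)
+ # For OpenFlow 1.0 the same call works, with the match built from dl_src/haddr_to_bin as in
+ # vlan_handler below.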
+
+
+ def mod_flow(self, datapath, priority, match, actions, command, cookie=1, out_port=None):
+ ofproto = datapath.ofproto
+ parser = datapath.ofproto_parser
+
+ try:
+ # Switches OpenFlow 1.3
+ mod = parser.OFPFlowMod(datapath=datapath, match=match, cookie=cookie, command=command, out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY, priority=priority)
+ except AttributeError:
+ # Switches OpenFlow 1.0
+ mod = parser.OFPFlowMod(datapath=datapath, match=match, cookie=cookie, command=command, actions=actions, priority=priority)
+
+ datapath.send_msg(mod)
+ self.logger.info('Flow deleted for DPID: %s with match: %s and actions %s', str(datapath.id), str(match), str(actions))
+
+ return
+
+
+ def packet_out(self, datapath, msg, in_port, actions):
+ # PACKET-OUT does not modify the VLAN tag, so the PACKET-OUT was removed
+ # It cannot be done that way because, depending on the number of hops, it would take too long to set up the circuit
+ ofp = datapath.ofproto
+ ofp_parser = datapath.ofproto_parser
+
+ data = None
+ if (msg.buffer_id == ofp.OFP_NO_BUFFER):
+ data = msg.data
+
+ """
+ #actions2 = [ofp_parser.OFPActionOutput(out_port),ofp_parser.OFPActionSetField(vlan_vid=vlan_vid_modified)]
+ actions2 = [ofp_parser.OFPActionOutput(ofp.OFPP_TABLE)]
+ try:
+ self.logger.debug('Actions 2 is: %s', str(actions2))
+ except Exception:
+ pass
+ """
+
+ out = ofp_parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port, actions=actions, data=data)
+ #out = ofp_parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port, actions=actions2, data=data)
+ datapath.send_msg(out)
+ self.logger.info('Packet-out sent to DPID: %s in_port: %s actions: %s', str(datapath.id), str(in_port), str(actions))
+
+ return
+
+ def llc_drop_handler(self, datapath, msg, in_port, mac_dst):
+ # Drop LLC packets to avoid problems when the CE switches connected to the endpoints have different VLAN IDs and use STP
+ ofp = datapath.ofproto
+ ofp_parser = datapath.ofproto_parser
+ dpid = datapath.id
+ match = ofp_parser.OFPMatch(in_port=in_port, eth_dst=mac_dst) # The mac_dst is specific to LLC packets
+ actions = []
+
+ self.add_flow(datapath, 2, match, actions)
+
+ return
+
+
+ def vlan_handler(self, datapath, vlan_id, in_port, mac_src, vlan_vid_modified_last_switch=0):
+ ofp = datapath.ofproto
+ ofp_parser = datapath.ofproto_parser
+ dpid = datapath.id
+ modificacao_vlan_vid = False
+ instalacao_proativa = False
+
+ #INICIO DO NOVO TRATAMENTO
+ self.lldp_discovery_verification(True) # Initializes the tagged LLDP discovery verification file as True (working). It is changed if an error occurs while computing the out_port.
+ switch_origem = dpid
+ #print "O switch origem e: ", switch_origem
+ switch_destino = 0
+ break_circuit_ident_loop = False
+ #out_port_found = False #Utilizado para garantir que o out_port e a modificacao de vlan ID nao sejam sobrescritos
+ for circuito_conf in self.dicionario_l2vpn:
+ for circuito_ident in self.dicionario_l2vpn_identificacao:
+ self.logger.info('Circuit from configuration database (circuit_conf) is: %s. Circuit from identification database (circuit_ident) is: %s.', str(circuito_conf), str(circuito_ident))
+ if (circuito_conf['switch_entrada'] == circuito_ident['pe_1']) and (circuito_conf['switch_saida'] == circuito_ident['pe_2']) and (circuito_conf['vlan_id_entrada'] == circuito_ident['id_pe_1']) and (circuito_conf['vlan_id_saida'] == circuito_ident['id_pe_2']):
+ self.logger.debug('circuit_conf == circuit_ident. Initiating handling...')
+ self.logger.debug('mac_to_port table is: %s.', str(self.mac_to_port))
+ for switch in self.mac_to_port.keys():
+ self.logger.debug('Switch from mac_to_port table is: %s.', str(switch))
+ if self.mac_to_port[switch].has_key(mac_src):
+ self.logger.debug('mac_to_port table for switch: %s has the MAC source: %s.', str(switch), str(mac_src))
+ if self.mac_to_port[switch][mac_src]['porta'] == circuito_conf['porta_entrada'] and switch == circuito_conf['switch_entrada'] and self.mac_to_port[switch][mac_src]['vlan_id'] == circuito_conf['vlan_id_entrada']:
+ #Identifica a direcao 1 => 2
+ self.logger.debug('Direction (1) => (2) identified.')
+ switch_destino = circuito_conf['switch_saida']
+ self.logger.debug('Destination switch is: %s.', str(switch_destino))
+ direcao = '1_to_2'
+
+ elif self.mac_to_port[switch][mac_src]['porta'] == circuito_conf['porta_saida'] and switch == circuito_conf['switch_saida'] and self.mac_to_port[switch][mac_src]['vlan_id'] == circuito_conf['vlan_id_saida']:
+ # Identifica a direcao 2 => 1
+ self.logger.debug('Direction (2) => (1) identified.')
+ switch_destino = circuito_conf['switch_entrada']
+ self.logger.debug('Destination switch is: %s.', str(switch_destino))
+ direcao = '2_to_1'
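+ # Illustrative sketch (hypothetical values): for a circuit entry with switch_entrada=1,
+ # porta_entrada=3, vlan_id_entrada=100, switch_saida=4, porta_saida=2, vlan_id_saida=200,
+ # a source MAC learned on switch 1, port 3, VLAN 100 matches the first branch above, so
+ # direcao = '1_to_2' and switch_destino = 4; the symmetric case selects '2_to_1'.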
+
+ continuacao = False
+ if switch_destino != 0:
+ self.logger.info('Destination switch is different from 0. Continuing handling. Destination switch: %s', str(switch_destino))
+ continuacao = True
+ else:
+ self.logger.info('Destination switch is equal to 0. Stopping handling and looking for other circuit.')
+ continue
+
+ if continuacao:
+ vlan_id_range_in_check = False
+ vlan_id_in_check = False
+ vlan_id_range_out_check = False
+ vlan_id_out_check = False
+ try:
+ if vlan_id in circuito_conf['vlan_id_entrada']:
+ vlan_id_range_in_check = True
+ except TypeError:
+ vlan_id_in_check = True
+ try:
+ if vlan_id in circuito_conf['vlan_id_saida']:
+ vlan_id_range_out_check = True
+ except TypeError:
+ vlan_id_out_check = True
+
+ if direcao == '1_to_2':
+ self.logger.info('Direction is (1) => (2).')
+ #if ((vlan_id == circuito_conf['vlan_id_entrada']) or (vlan_id in circuito_conf['vlan_id_entrada'])) and (in_port == circuito_conf['porta_entrada']) and (dpid == circuito_conf['switch_entrada']):
+ if (vlan_id_range_in_check or vlan_id_in_check) and (in_port == circuito_conf['porta_entrada']) and (dpid == circuito_conf['switch_entrada']):
+ #Caso de entrada pelo switch_entrada
+ self.logger.info('VLAN_ID of the packet is equal to input_VLAN_ID(id_pe_1) from configuration database and packet entering on the input switch of the circuit.')
+ try:
+ self.logger.info('Topology object is: %s.', str(self.obj_topologia))
+ self.logger.info('Routing table is: %s.', str(self.tabela_roteamento.tabela_roteamento_completa))
+ except AttributeError:
+ self.logger.info('ERROR of routing table. Routing table is None. Possibly switches are not interconnected in the topology.')
+ try:
+ out_port = int(self.tabela_roteamento.tabela_roteamento_completa[switch_origem][switch_destino][0].porta_saida)
+ self.logger.info('Out port is: %s.', str(out_port))
+ except AttributeError:
+ self.logger.info('ERROR calculating out_port. Possibly LLDP tagged topology discovery is not working. Exiting vlan_handler.')
+ self.lldp_discovery_verification(False)
+ return
+ except KeyError:
+ self.logger.info('ERROR calculating out_port. Possibly topology object is NOT correct and routing table is NOT full.')
+ return
+ vlan_vid_modified = int(circuito_ident['id_backbone'])
+ self.logger.info('VLAN ID to be modified is: %s.', str(vlan_vid_modified))
+ try:
+ if vlan_id in circuito_conf['vlan_id_entrada']: #Garante que e o caso de VLAN range
+ vlan_vid_modified_last_switch = vlan_id #Utilizada no caso de vlan range, no ultimo switch na direcao 1_to_2 o vlan_id de saida seja igual ao vlan_id de entrada
+ except Exception:
+ pass
+
+ modificacao_vlan_vid = True
+ instalacao_proativa = True
+ break_circuit_ident_loop = True
+ break
+
+ elif vlan_id == circuito_ident['id_backbone']: #Somente entra quando nao entrou no primeiro if
+ self.logger.info('VLAN_ID of the packet is equal to the backbone VLAN ID for the circuit.')
+ self.logger.info('Routing table is: %s.', str(self.tabela_roteamento.tabela_roteamento_completa))
+ self.logger.info('Topology object is: %s.', str(self.obj_topologia))
+ if switch_origem == switch_destino:
+ # Caso do ultimo switch na direcao 1_to_2
+ self.logger.info('Packet entering on the exiting switch of the circuit (last switch on the direction (1) => (2)). Destination switch: %s', str(switch_destino))
+ out_port = int(circuito_conf['porta_saida'])
+ self.logger.info('Out port is: %s.', str(out_port))
+ if vlan_vid_modified_last_switch != 0: #Entra no caso de VLAN range
+ vlan_vid_modified = vlan_vid_modified_last_switch
+ else:
+ vlan_vid_modified = int(circuito_conf['vlan_id_saida'])
+ self.logger.info('VLAN ID to be modified is: %s.', str(vlan_vid_modified))
+ modificacao_vlan_vid = True
+ break_circuit_ident_loop = True
+ break
+
+ else:
+ # Caso de transporte pelo backbone
+ self.logger.info('Packet entering on any switch of the direction (1) => (2) of the circuit.')
+ try:
+ out_port = int(self.tabela_roteamento.tabela_roteamento_completa[switch_origem][switch_destino][0].porta_saida)
+ self.logger.info('Out port is: %s.', str(out_port))
+ except AttributeError:
+ self.logger.info('ERROR calculating out_port. Possibly LLDP tagged topology discovery is not working. Exiting vlan_handler.')
+ self.lldp_discovery_verification(False)
+ return
+ except KeyError:
+ self.logger.info('ERROR calculating out_port. Possibly topology object is NOT correct and routing table is NOT full.')
+ return
+
+ modificacao_vlan_vid = False
+ break_circuit_ident_loop = True
+ break
+
+ elif direcao == '2_to_1':
+ self.logger.info('Direction is (2) => (1).')
+ #if ((vlan_id == circuito_conf['vlan_id_saida']) or (vlan_id in circuito_conf['vlan_id_saida'])) and (in_port == circuito_conf['porta_saida']) and (dpid == circuito_conf['switch_saida']):
+ if (vlan_id_range_out_check or vlan_id_out_check) and (in_port == circuito_conf['porta_saida']) and (dpid == circuito_conf['switch_saida']):
+ # Caso de entrada pelo switch_saida
+ self.logger.info('VLAN_ID of the packet is equal to output_VLAN_ID(id_pe_2) from configuration database and packet entering on the exiting switch of the circuit.')
+ try:
+ self.logger.info('Topology object is: %s.', str(self.obj_topologia))
+ self.logger.info('Routing table is: %s.', str(self.tabela_roteamento.tabela_roteamento_completa))
+ except AttributeError:
+ self.logger.info('ERROR of routing table. Routing table is None. Possibly switches are not interconnected in the topology.')
+ try:
+ out_port = int(self.tabela_roteamento.tabela_roteamento_completa[switch_origem][switch_destino][0].porta_saida)
+ self.logger.info('Out port is: %s.', str(out_port))
+ except AttributeError:
+ self.logger.info('ERROR calculating out_port. Exiting vlan_handler.')
+ self.lldp_discovery_verification(False)
+ return
+ except KeyError:
+ self.logger.info('ERROR calculating out_port. Possibly topology object is NOT correct and routing table is NOT full.')
+ return
+ vlan_vid_modified = int(circuito_ident['id_backbone'])
+ self.logger.info('VLAN ID to be modified is: %s.', str(vlan_vid_modified))
+ try:
+ if vlan_id in circuito_conf['vlan_id_entrada']: #Garante que e o caso de VLAN range
+ vlan_vid_modified_last_switch = vlan_id #Utilizada no caso de vlan range, no ultimo switch na direcao 1_to_2 o vlan_id de saida seja igual ao vlan_id de entrada
+ except Exception:
+ pass
+ modificacao_vlan_vid = True
+ instalacao_proativa = True
+ break_circuit_ident_loop = True
+ break
+
+ elif vlan_id == circuito_ident['id_backbone']: #So entra se nao tiver entrado no primeiro if
+ self.logger.info('VLAN_ID of the packet is equal to the backbone VLAN ID for the circuit.')
+ self.logger.info('Routing table is: %s.', str(self.tabela_roteamento.tabela_roteamento_completa))
+ self.logger.info('Topology object is: %s.', str(self.obj_topologia))
+ if switch_origem == switch_destino:
+ # Caso do ultimo switch na direcao 2_to_1
+ self.logger.info('Packet entering on the entering switch of the circuit (last switch on the direction (2) => (1)). Destination switch: %s', str(switch_destino))
+ out_port = int(circuito_conf['porta_entrada'])
+ self.logger.info('Out port is: %s.', str(out_port))
+ if vlan_vid_modified_last_switch != 0: #Entra no caso de VLAN range
+ vlan_vid_modified = vlan_vid_modified_last_switch
+ else:
+ vlan_vid_modified = int(circuito_conf['vlan_id_entrada'])
+ self.logger.info('VLAN ID to be modified is: %s.', str(vlan_vid_modified))
+ modificacao_vlan_vid = True
+ break_circuit_ident_loop = True
+ break
+
+ else:
+ # Caso de transporte pelo backbone
+ self.logger.info('Packet entering on any switch of the direction (2) => (1) of the circuit.')
+ try:
+ out_port = int(self.tabela_roteamento.tabela_roteamento_completa[switch_origem][switch_destino][0].porta_saida)
+ self.logger.info('Out port is: %s.', str(out_port))
+ except AttributeError:
+ self.logger.info('ERROR calculating out_port. Exiting vlan_handler.')
+ self.lldp_discovery_verification(False)
+ return
+ except KeyError:
+ self.logger.info('ERROR calculating out_port. Possibly topology object is NOT correct and routing table is NOT full.')
+ return
+ modificacao_vlan_vid = False
+ break_circuit_ident_loop = True
+ break
+
+ if break_circuit_ident_loop:
+ break # Break circuit configuration loop
+
+ self.logger.info('Source switch is: %s and destination switch is: %s.', str(switch_origem), str(switch_destino))
+ try:
+ #Colocar um espaco
+ #REMOVER PARA EMULACAO
+ """
+ try:
+ # Switches OpenFlow 1.0
+ match = ofp_parser.OFPMatch(in_port=in_port, dl_src=haddr_to_bin(mac_src), dl_vlan=vlan_id)
+ openflow_version = '1.0'
+ self.logger.info('Match for OF1.0: %s.', str(match))
+ except KeyError:
+ # Switches OpenFlow 1.3
+ match = ofp_parser.OFPMatch(in_port=in_port, eth_src=mac_src, vlan_vid=vlan_id)
+ openflow_version = '1.3'
+ self.logger.info('Match for OF1.3: %s.', str(match))
+ """
+ #FIM DA REMOCAO PARA EMULACAO
+ #ADICAO PARA EMULACAO | MODIFICADO PARA TESTE FIBRE
+ try:
+ # Switches OpenFlow 1.0
+ match = ofp_parser.OFPMatch(in_port=in_port, dl_src=haddr_to_bin(mac_src))
+ openflow_version = '1.0'
+ self.logger.debug('Match for OF1.0: %s.', str(match))
+ except KeyError:
+ # Switches OpenFlow 1.3
+ match = ofp_parser.OFPMatch(in_port=in_port, eth_src=mac_src)
+ openflow_version = '1.3'
+ self.logger.debug('Match for OF1.3: %s.', str(match))
+ #FIM DA ADICAO PARA EMULACAO | FIM DA MODIFICACAO PARA TESTE FIBRE
+
+ try:
+ self.logger.info('Match is: %s.', str(match))
+ self.logger.info('Out port is: %s.', str(out_port))
+ except UnboundLocalError:
+ self.logger.info('Unable to calculate out_port')
+ return
+
+ if modificacao_vlan_vid:
+ #REMOCAO PARA EMULACAO | MODIFICADO PARA TESTE FIBRE
+ """
+ if openflow_version == '1.0': # Teve de ser feito utilizando laco IF pois o campo dl_vlan no SetField nao reporta erro quando o datapath e OpenFlow 1.3
+ #Switches OpenFlow 1.0
+ actions = [ofp_parser.OFPActionVlanVid(vlan_vid=vlan_vid_modified),ofp_parser.OFPActionOutput(out_port)]
+ else:
+ #Switches OpenFlow 1.3 or higher
+ actions = [ofp_parser.OFPActionSetField(vlan_vid=vlan_vid_modified),ofp_parser.OFPActionOutput(out_port)]
+ """
+ #FIM DA REMOCAO PARA EMULACAO | FIM DA MODIFICACAO PARA TESTE FIBRE
+ #ADICAO PARA EMULACAO | MODIFICADO PARA TESTE FIBRE
+ actions = [ofp_parser.OFPActionOutput(out_port)]
+ #FIM DA ADICAO PARA EMULACAO | FIM DA MODIFICACAO PARA TESTE FIBRE
+ else:
+ actions = [ofp_parser.OFPActionOutput(out_port)]
+
+ self.logger.info('Actions is: %s.', str(actions))
+ # Prevents incorrect rules from being installed when the router misbehaves
+ if in_port == out_port:
+ self.logger.info('in_port: %s is equal to out_port: %s. Returning from vlan_handler.', str(in_port), str(out_port))
+ return
+
+ self.add_flow(datapath, 2, match, actions)
+ #self.add_flow(datapath, 1, match, actions2)
+
+ # PACKET-OUT does not modify the VLAN tag, so the PACKET-OUT was removed
+ #self.packet_out(datapath, msg, in_port, actions)
+
+ #except UnboundLocalError:
+ # self.logger.info('Unable to find the correct out_port. Possible error on the configuration database or mismatch on SetField Action on the previous switch.')
+
+ except Exception:
+ self.logger.info('ERROR dealing with VLAN ID: %s on in_port: %s', str(vlan_id), str(in_port))
+ #raise
+
+ #SPLIT HORIZON
+ for interface in self.switches_in_topology[dpid][1::]:
+ try:
+ if in_port != interface['port_no']:
+ drop_in_port = interface['port_no']
+ if modificacao_vlan_vid:
+ try:
+ # Switches OpenFlow 1.0
+ match = ofp_parser.OFPMatch(in_port=drop_in_port, dl_src=haddr_to_bin(mac_src), dl_vlan=vlan_vid_modified)
+ except KeyError:
+ # Switches OpenFlow 1.3
+ match = ofp_parser.OFPMatch(in_port=drop_in_port, eth_src=mac_src, vlan_vid=vlan_vid_modified)
+ else:
+ try:
+ # Switches OpenFlow 1.0
+ match = ofp_parser.OFPMatch(in_port=drop_in_port, dl_src=haddr_to_bin(mac_src), dl_vlan=vlan_id)
+ except KeyError:
+ # Switches OpenFlow 1.3
+ match = ofp_parser.OFPMatch(in_port=drop_in_port, eth_src=mac_src, vlan_vid=vlan_id)
+ actions = []
+ self.logger.info('Installing split horizon flows for DPID: %s, match: %s', str(dpid), str(match))
+ self.add_flow(datapath, 1, match, actions)
+ except Exception:
+ self.logger.info('ERROR dealing with split horizon for DPID: %s', str(dpid))
+ # END SPLIT HORIZON
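+ # Illustrative sketch (hypothetical values): if DPID 1 has interfaces with port_no 1, 2 and 3
+ # and the packet arrived on in_port 1, the loop above installs priority-1 drop flows (empty
+ # action list) matching the same source MAC/VLAN on ports 2 and 3, so traffic learned on one
+ # port is not re-processed when it re-enters through another port.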
+
+ if instalacao_proativa:
+ # Loop over the saida (hop) objects, installing the rule on all of them
+ #print "*** Realizando instalacao proativa. Sw ori: ", switch_origem, " Sw dest: ", switch_destino
+ #print "O objeto topologia e: ", self.obj_topologia
+ for salto in self.tabela_roteamento.tabela_roteamento_completa[switch_origem][switch_destino]:
+ #print "O salto analisado e: ", salto
+ if salto.num_salto != 0:
+ # Performs the proactive installation in direction 1_to_2 or 2_to_1 upon packet_in at the first switch of the circuit
+ next_dpid = salto.switch
+ #print "O next_dpid e: ", next_dpid, "e o tipo e: ", type(next_dpid)
+ next_vlan_id = vlan_vid_modified
+ next_datapath = self.dictionary_dpid_datapath[next_dpid]
+ next_in_port = self.obj_topologia[salto_anterior.switch][next_dpid].int_entrada_dst # Representa a in_port do next_dpid do objeto de topologia que cada entrada e um objeto do tipo portas
+ self.logger.info('Installing flow entries proactively for DPID: %s, VLAN ID: %s, in_port: %s.', str(next_dpid), str(next_vlan_id), str(next_in_port))
+ if salto.porta_saida == None and vlan_vid_modified_last_switch != 0: #Utilizada no caso de vlan range, no ultimo switch na direcao 1_to_2 o vlan_id de saida seja igual ao vlan_id de entrada
+ self.vlan_handler(next_datapath, next_vlan_id, next_in_port, mac_src, vlan_vid_modified_last_switch)
+ else:
+ self.vlan_handler(next_datapath, next_vlan_id, next_in_port, mac_src)
+
+ if salto.porta_saida == None: # Indicates the endpoint of the virtual circuit (used to account for the processing time of signaling messages)
+ self.logger.info('Endpoint of the virtual circuit, calculating flow_mod.')
+ salto_anterior = salto # When num_salto equals 0, salto_anterior will be the source switch
+
+ return
+
+
+ @set_ev_cls(ofp_event.EventOFPDescStatsReply, MAIN_DISPATCHER)
+ def desc_stats_reply_handler(self, ev):
+ body = ev.msg.body
+ dpid = ev.msg.datapath.id
+
+ desc_dictionary = {'mfr_desc': body.mfr_desc, 'hw_desc': body.hw_desc, 'sw_desc': body.sw_desc, 'serial_num': body.serial_num, 'dp_desc': body.dp_desc, 'dp_id': dpid}
+
+ self.logger.info('OFPDescStatsReply DPID: %s received dictionary: %s', str(ev.msg.datapath.id), str(desc_dictionary))
+
+ if self.switches_in_topology.has_key(ev.msg.datapath.id):
+ if str(self.switches_in_topology[ev.msg.datapath.id][0]) != str(desc_dictionary) :
+ self.switches_in_topology[ev.msg.datapath.id][0] = desc_dictionary
+ self.switches_in_topology_file[desc_dictionary['dp_desc']] = {'Description': desc_dictionary, 'Interfaces': self.switches_in_topology[dpid][1::]}
+ file_interfaces = open(self.interfaces_info, 'w')
+ json.dump(self.switches_in_topology, file_interfaces, sort_keys=True, indent=4, separators=(',',': '))
+ file_interfaces.close()
+ self.logger.info('Global Dictionary {DPID: [sw_desc, ]} for DPID: %s has been UPDATED: %s', str(ev.msg.datapath.id), str(self.switches_in_topology))
+ file_interfaces = open(self.interfaces_info_object, 'w')
+ json.dump(self.switches_in_topology_file, file_interfaces, sort_keys=True, indent=4, separators=(',',': '))
+ file_interfaces.close()
+
+ else:
+ self.logger.info('Global Dictionary {DPID: [sw_desc, ]} for DPID: %s has NOT been UPDATED: %s', str(ev.msg.datapath.id), str(self.switches_in_topology))
+ else:
+ self.switches_in_topology[ev.msg.datapath.id] = []
+ self.switches_in_topology[ev.msg.datapath.id].append(desc_dictionary)
+ self.logger.info('Global Dictionary {DPID: [sw_desc, ]} for DPID: %s has been CREATED: %s', str(ev.msg.datapath.id), str(self.switches_in_topology))
+
+ return
+
+
+ #@set_ev_cls(stplib.EventPacketIn, MAIN_DISPATCHER)
+ @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
+ def packet_in_handler(self, ev):
+ # Reference for the learning switch that learns the hosts' ports: https://osrg.github.io/ryu-book/en/html/switching_hub.html
+ msg = ev.msg
+ dp = msg.datapath
+ ofp = dp.ofproto
+ ofp_parser = dp.ofproto_parser
+ dpid = dp.id
+ self.logger.debug('Packet-in received.')
+
+ pkt = packet.Packet(msg.data)
+ pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
+ pkt_llc = pkt.get_protocol(llc.llc)
+ pkt_arp = pkt.get_protocol(arp.arp)
+ pkt_vlan = pkt.get_protocol(vlan.vlan)
+ pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
+
+ mac_dst = pkt_ethernet.dst
+ mac_src = pkt_ethernet.src
+
+ try:
+ # Switches OpenFlow 1.0
+ in_port = msg.in_port
+
+ except AttributeError:
+ # Switches OpenFlow 1.3
+ in_port = msg.match['in_port']
+
+ self.s_tabela_roteamento.acquire()
+
+ #MODIFICAR LINHA ABAIXO PARA EMULACAO (if)
+ if pkt_vlan and not pkt_llc:
+ #if pkt_vlan and pkt_vlan.ethertype != ether_types.ETH_TYPE_LLDP and not pkt_llc:
+ self.logger.info('VLAN packet received by DPID %s', str(dpid))
+ self.logger.info('VLAN ethertype is: %s', str(pkt_vlan.ethertype))
+
+ #ALTERADO PARA A EMULACAO (linha abaixo)
+ elif pkt_ethernet and pkt_ethernet.ethertype == ether_types.ETH_TYPE_LLDP:
+ #elif pkt_vlan and pkt_vlan.ethertype == ether_types.ETH_TYPE_LLDP:
+ self.logger.debug('LLDP packet received. Exiting PACKET_IN_HANDLER.')
+ self.logger.debug('Packet received by DPID: %s. MAC source is: %s. In_port is: %s. MAC destination is: %s.', str(dpid), str(mac_src), str(in_port), str(mac_dst))
+ self.s_tabela_roteamento.release()
+ return
+
+ #RETIRADO PARA TESTAR SE ACEITANDO OS PACOTES LLC O TRAFEGO E ENCAMINHADO A PARTIR DO SWITCH
+ elif pkt_llc:
+ self.logger.debug('LLC packet received. Analyzing flow to drop LLC and exiting PACKET_IN_HANDLER. Only flows that come from the endpoint AC are dropped.')
+ self.llc_drop_handler(dp, msg, in_port, mac_dst)
+ self.s_tabela_roteamento.release()
+ return
+
+ for p in pkt:
+ try:
+ self.logger.debug('Header type received is: %s.', str(p.protocol_name))
+
+ if p.protocol_name == 'ethernet':
+ self.logger.debug('ETHERNET header: %s.', str(p))
+ elif p.protocol_name == 'vlan':
+ self.logger.debug('VLAN (802.1q) header: %s.', str(p))
+ elif p.protocol_name == 'arp':
+ self.logger.debug('ARP header: %s.', str(p))
+ elif p.protocol_name == 'icmp':
+ self.logger.debug('ICMP header: %s.', str(p))
+
+ except Exception:
+ pass
+
+ self.logger.info('Packet received by DPID: %s. MAC source is: %s. In_port is: %s. MAC destination is: %s.', str(dpid), str(mac_src), str(in_port), str(mac_dst))
+
+ # Checks whether the packet was received on an endpoint. If not, no handling is performed. This allows the rule at the endpoint to expire and a new Packet-In to be received
+ pacote_recebido_endpoint_caracteristica_completa = False
+ for circuito_conf in self.dicionario_l2vpn:
+ if (circuito_conf['switch_entrada'] == dpid and circuito_conf['porta_entrada'] == in_port and circuito_conf['vlan_id_entrada'] == pkt_vlan.vid) or (circuito_conf['switch_saida'] == dpid and circuito_conf['porta_saida'] == in_port and circuito_conf['vlan_id_saida'] == pkt_vlan.vid ):
+ pacote_recebido_endpoint_caracteristica_completa = True
+
+ if not pacote_recebido_endpoint_caracteristica_completa: # Case where the packet was received on a switch that is not an endpoint
+ self.logger.info('Packet has not been received in an endpoint. Exiting packet_in_handling.')
+ self.s_tabela_roteamento.release()
+ return
+
+
+ self.timestamp_novo_mac_src[mac_src] = time()
+ continuacao = "sim" # Used to decide between learning and renewal of the source MAC
+ # It would be possible to check whether the source MAC is already bound to a switch
+ # The problem with that approach is that the host stays permanently tied to a switch: in the mac_to_port table the host MAC would always remain bound to the switch it was initially connected to
+ for switch_id in self.mac_to_port:
+ if self.timestamp_antigo_mac_src.has_key(mac_src):
+ if self.mac_to_port[switch_id].has_key(mac_src) and ( (self.timestamp_novo_mac_src[mac_src] - self.timestamp_antigo_mac_src[mac_src]) < self.intervalo_renovacao_mac_src ):
+ continuacao = "nao"
+ self.timestamp_antigo_mac_src[mac_src] = self.timestamp_novo_mac_src[mac_src]
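+ # Illustrative sketch (hypothetical value for MAC_SRC_RENEWAL_INTERVAL): if the interval is,
+ # say, 30 seconds and this MAC was last seen 10 seconds ago on some switch, continuacao is set
+ # to "nao" and the learning block below is skipped; otherwise the entry is (re)learned.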
+
+ # Learns the MAC address to avoid flooding next time
+ if (continuacao == "sim"):
+ if self.mac_to_port.has_key(dpid):
+ if (self.mac_to_port[dpid].has_key(mac_src)):
+ if (self.mac_to_port[dpid][mac_src]['porta'] != in_port):
+ self.mac_to_port[dpid][mac_src]['porta'] = in_port
+ self.mac_to_port[dpid][mac_src]['vlan_id'] = pkt_vlan.vid
+ self.logger.info('MAC_to_port table MAC: %s, DPID: %s, has been updated to port: %s. MAC_to_port UPDATED to: %s', str(mac_src), str(dpid), str(in_port), str(self.mac_to_port))
+ else:
+ for switch in self.mac_to_port.keys(): # Checks whether a MAC already in the table has moved to another switch; if so, the old entry is deleted
+ if self.mac_to_port[switch].has_key(mac_src):
+ del self.mac_to_port[switch][mac_src]
+ self.mac_to_port[dpid][mac_src] = {}
+ self.mac_to_port[dpid][mac_src]['porta'] = in_port
+ self.mac_to_port[dpid][mac_src]['vlan_id'] = pkt_vlan.vid
+ self.logger.info('MAC_to_port table MAC: %s, DPID: %s, has been created port: %s. MAC_to_port UPDATED to: %s', str(mac_src), str(dpid), str(in_port), str(self.mac_to_port))
+ else:
+ self.mac_to_port[dpid] = {}
+ self.mac_to_port[dpid][mac_src] = {}
+ self.mac_to_port[dpid][mac_src]['porta'] = in_port
+ self.mac_to_port[dpid][mac_src]['vlan_id'] = pkt_vlan.vid
+ self.logger.info('MAC_to_port table for DPID: %s, has been created MAC: %s, port: %s. MAC_to_port updated to: %s', str(dpid), str(mac_src), str(in_port), str(self.mac_to_port))
+
+ if pkt_vlan:
+ self.logger.info('VLAN tagged packet received: %s. Initiating handling.', str(pkt_vlan))
+ #self.vlan_handler(dp, msg, pkt_vlan, in_port, mac_src)
+ self.vlan_handler(dp, pkt_vlan.vid, in_port, mac_src)
+
+ self.s_tabela_roteamento.release()
+ return
+
+
+ def flow_entries_deletion_full(self):
+ for dpid in self.dictionary_dpid_datapath.keys():
+ datapath = self.dictionary_dpid_datapath[dpid]
+ ofp = datapath.ofproto
+
+ self.logger.debug('Match actions dictionary is: %s.', str(self.dpid_match_actions_dic))
+ if self.dpid_match_actions_dic.has_key(dpid):
+ self.logger.debug('List of match actions dictionaries for DPID: %s is: %s.', str(dpid), str(self.dpid_match_actions_dic[dpid]))
+ for actions_match_dic in self.dpid_match_actions_dic[dpid]:
+ actions = actions_match_dic['actions']
+ match = actions_match_dic['match']
+ self.mod_flow(datapath, 1, match, actions, ofp.OFPFC_DELETE)
+
+ else:
+ continue
+
+ self.dpid_match_actions_dic = {}
+ self.logger.info('Match and actions dictionary has been RESET: %s.', str(self.dpid_match_actions_dic))
+
+ return
+
+ @handler.set_ev_cls(event.EventLinkAdd)
+ def link_add_handler(self, ev):
+ self.last_link_add = time()
+ switch_src = ev.link.src.dpid
+ switch_dest = ev.link.dst.dpid
+ port_src = ev.link.src.port_no
+ port_src_original = port_src
+ port_src_name = ev.link.src.name
+ port_dest = ev.link.dst.port_no
+ port_dest_name = ev.link.dst.name
+ port_dest_original = port_dest
+ self.logger.info('Link added. SW src: %s, port_src: %s, port_src_name: %s | SW dst: %s, port_dest: %s, port_dest_name: %s', str(switch_src), str(port_src), str(port_src_name), str(switch_dest), str(port_dest), str(port_dest_name))
+ self.s_obj_topologia.acquire()
+ self.s_obj_topologia_auxiliar.acquire()
+ self.s_tabela_roteamento.acquire()
+
+ #RETIRADA PARA EMULACAO
+ """
+ #INICIAR O COMENTARIO PARA TIRAR DUAS INTERFACES AQUI
+ #O padrao é utilizar as subinterface .10 com encapsulation dot1q any, porém o ASR9K-CBPF não encaminha as mensagens LLDP nesse caso. Então é necessário criar uma outra subinterface .11 com encapsulation priority-tagged para tratar os pacotes LLDP. Assim, o tratamento abaixo é para quando uma subinterface .10 se conectar a uma .11, o controlador associe a interface .10.
+ port_src_name_int_subint = {'interface': port_src_name.split(".")[0], 'sub_interface': port_src_name.split(".")[1]}
+ port_dest_name_int_subint = {'interface': port_dest_name.split(".")[0], 'sub_interface': port_dest_name.split(".")[1]}
+
+ if port_src_name_int_subint['sub_interface'] == "10" or port_dest_name_int_subint['sub_interface'] == "10":
+ self.logger.info('Subinterface of port source or destination of link add is .10 (encapsulation dot1q any). Link NOT ADDED. Exiting link_add_handler.')
+ self.s_obj_topologia.release()
+ self.s_obj_topologia_auxiliar.release()
+ self.s_tabela_roteamento.release()
+ return
+
+ elif port_src_name_int_subint['sub_interface'] == "11" and port_dest_name_int_subint['sub_interface'] == "11":
+ self.logger.info('Both subinterfaces are .11 (encapsulation dot1q priority-tagged). Link priority tagged added at')
+ if self.switches_in_topology.has_key(switch_src):
+ self.logger.debug('self.switches_in_topology has switch_src: %s.', str(switch_src))
+ for port in self.switches_in_topology[switch_src]:
+ #print "Entrando no for para porta: ", port
+ try:
+ #print "Antes do if de comparacao de portas para port['name]: ", port['name'], "e port_src_name_int_subint['interface'] + .10: ", port_src_name_int_subint['interface'] + ".10"
+ if port['name'] == (port_src_name_int_subint['interface'] + ".10"):
+ # print "O port name e igual, modificando o port_src"
+ port_src = port['port_no']
+ #print "O novo port_src e: ", port_src
+ except Exception:
+ pass
+
+ if self.switches_in_topology.has_key(switch_dest):
+ #print "self.switches_in_topology possui switch_dest: ", switch_dest
+ for port in self.switches_in_topology[switch_dest]:
+ #print "Entrando no for para porta: ", port
+ try:
+ #print "Antes do if de comparacao de portas para port['name]: ", port['name'], "e port_src_name_int_subint[ 'interface'] + .10: ", port_dest_name_int_subint['interface'] + ".10"
+ if port['name'] == (port_dest_name_int_subint['interface'] + ".10"):
+ #print "O port name e igual, modificando o port_dest"
+ port_dest = port['port_no']
+ #print "O novo port_dest e: ", port_dest
+ except Exception:
+ pass
+ """
+ #FIM DA RETIRADA PARA EMULACAO
+ #FINALIZAR O COMENTARIO PARA TIRAR DUAS INTERFACES AQUI
+
+ # BEGIN HANDLING OF MULTIPLE LINKS BETWEEN TWO SWITCHES
+ self.obj_topologia_auxiliar.append({switch_src:{switch_dest: portas(port_src,port_dest)}})
+ self.logger.info('Auxiliary Topology object for link: %s, has been UPDATED. Auxiliary Topology object: %s', str({switch_src:{switch_dest: portas(port_src,port_dest)}}), str(self.obj_topologia_auxiliar))
+ # Builds the dicionario_sw_ori_sw_dest dictionary, which holds the input and output ports of the destination and source switches, respectively, so that link symmetry can be checked
+ dicionario_sw_ori_sw_dest = {'direto': [], 'inverso': []}
+ for posicao_enlace in range(len(self.obj_topologia_auxiliar)):
+ if self.obj_topologia_auxiliar[posicao_enlace].has_key(switch_src):
+ if self.obj_topologia_auxiliar[posicao_enlace][switch_src].has_key(switch_dest):
+ dicionario_sw_ori_sw_dest['direto'].append(self.obj_topologia_auxiliar[posicao_enlace][switch_src][switch_dest])
+
+ if self.obj_topologia_auxiliar[posicao_enlace].has_key(switch_dest):
+ if self.obj_topologia_auxiliar[posicao_enlace][switch_dest].has_key(switch_src):
+ dicionario_sw_ori_sw_dest['inverso'].append(self.obj_topologia_auxiliar[posicao_enlace][switch_dest][switch_src])
+ # Updates obj_topologia if the links are symmetric (in obj_topologia_auxiliar). While the network is converging and ports are not yet blocked, obj_topologia is updated multiple times
+ for enlace_direto in dicionario_sw_ori_sw_dest['direto']:
+ for enlace_inverso in dicionario_sw_ori_sw_dest['inverso']:
+ if enlace_direto.int_saida_src == enlace_inverso.int_entrada_dst and enlace_direto.int_entrada_dst == enlace_inverso.int_saida_src:
+ if self.obj_topologia.has_key(switch_src):
+ self.obj_topologia[switch_src][switch_dest] = enlace_direto
+ else:
+ self.obj_topologia[switch_src] = {}
+ self.obj_topologia[switch_src][switch_dest] = enlace_direto
+ if self.obj_topologia.has_key(switch_dest):
+ self.obj_topologia[switch_dest][switch_src] = enlace_inverso
+ else:
+ self.obj_topologia[switch_dest] = {}
+ self.obj_topologia[switch_dest][switch_src] = enlace_inverso
+
+ self.logger.info('Topology object for switch source: %s and switch destination: %s, has been UPDATED (multiple links between same switches). Topology object: %s', str(switch_src), str(switch_dest), str(self.obj_topologia))
+ # END OF HANDLING OF MULTIPLE LINKS BETWEEN TWO SWITCHES
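+ # Illustrative sketch (hypothetical values): if link_add events 1->2 (src port 2, dst port 2)
+ # and 2->1 (src port 2, dst port 2) have both been seen, the direct/inverse entries are
+ # symmetric, so obj_topologia receives both directions, e.g. {1: {2: [2, 2, 1]}, 2: {1: [2, 2, 1]}}.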
+
+ self.tabela_roteamento = calculo_dijkstra(self.obj_topologia)
+ self.logger.info('Routing table updated after link add at: %f : Routing table: %s', time(), str(self.tabela_roteamento))
+
+ if self.switches_in_topology.has_key(switch_src):
+ for i in range(len(self.switches_in_topology[switch_src])):
+ try:
+ if self.switches_in_topology[switch_src][i]['port_no'] == port_src:
+ self.switches_in_topology[switch_src][i]['link_state'] = 'UP'
+ except Exception:
+ pass
+
+ """ADICIONADO PARA VERIFICAR O FUNCIONAMENTO"""
+ """
+ if self.switches_in_topology.has_key(switch_src):
+ for i in range(len(self.switches_in_topology[switch_src])):
+ try:
+ if self.switches_in_topology[switch_src][i]['port_no'] == port_src:
+ self.switches_in_topology[switch_src][i]['link_state'] = 'UP'
+ except Exception:
+ pass
+ """
+ """FIM DO ADICIONADO PARA VERIFICACAO"""
+
+ self.flow_entries_deletion_full()
+
+ self.logger.info('Global Dictionary {DPID: [sw_desc, ]} for DPIDs: %s and %s has been UPDATED: %s', str(switch_src), str(switch_dest), str(self.switches_in_topology))
+
+ self.s_obj_topologia.release()
+ self.s_obj_topologia_auxiliar.release()
+ self.s_tabela_roteamento.release()
+ return
+
+
+ @handler.set_ev_cls(event.EventSwitchLeave)
+ def switch_leave_handler(self, ev):
+ try:
+ switch_leave = ev.switch.dp.id
+ except Exception: # Case where a framework problem generates a spurious switch_leave event
+ return
+ switches_sem_links = []
+ posicoes_para_delete = []
+ self.s_obj_topologia.acquire()
+ self.s_obj_topologia_auxiliar.acquire()
+ self.s_tabela_roteamento.acquire()
+ self.logger.info('DPID: %s has left the topology', str(switch_leave))
+
+ if self.obj_topologia.has_key(switch_leave): # Deletes all obj_topologia entries whose switch_src is switch_leave
+ del self.obj_topologia[switch_leave]
+
+ for posicao in range(len(self.obj_topologia_auxiliar)):
+ if self.obj_topologia_auxiliar[posicao].has_key(switch_leave): # Deletes all obj_topologia_auxiliar entries whose switch_src is switch_leave
+ posicoes_para_delete.append(posicao)
+
+ posicoes_para_delete.sort(reverse=True)
+
+ for posicao in posicoes_para_delete:
+ del self.obj_topologia_auxiliar[posicao]
+
+ if self.switches_in_topology.has_key(switch_leave):
+ del self.switches_in_topology[switch_leave]
+ file_interfaces = open(self.interfaces_info, 'w')
+ json.dump(self.switches_in_topology, file_interfaces, sort_keys=True, indent=4, separators=(',',': '))
+ file_interfaces.close()
+ self.logger.info('Global Dictionary {DPID: [sw_desc, ]} for DPID: %s has been UPDATED: %s', str(switch_leave), str(self.switches_in_topology))
+
+ # Deletes the entries in which the switch that left is a destination switch in the topology object
+ for port_leave in ev.switch.ports: # ev.switch.ports is a list of objects containing the switch id and the port that went down
+ #print "A porta do switch que saiu e", str(port_leave)
+ #print "A porta do switch que saiu e", port_leave.port_no
+ for sw_origem in self.obj_topologia: # Iterates over the lists with the destinations of the unidirectional links
+ if ( self.obj_topologia[sw_origem].has_key(switch_leave) ) and ( self.obj_topologia[sw_origem][switch_leave].int_entrada_dst == port_leave.port_no ):
+ del self.obj_topologia[sw_origem][switch_leave]
+
+ # Deletes topology object entries for switches that no longer have any link
+ for switch in self.obj_topologia:
+ if ( self.obj_topologia[switch] == {} ):
+ switches_sem_links.append(switch)
+
+ for switch in switches_sem_links:
+ del self.obj_topologia[switch]
+
+ #self.switches.remove(switch_leave)
+
+ #print "Os switches na topologia apos delecao de switch sao: ", self.switches
+
+ self.flow_entries_deletion_full()
+
+ self.logger.info('Topology Object updated after switch leave: %s', str(self.obj_topologia))
+
+ self.tabela_roteamento = calculo_dijkstra(self.obj_topologia)
+ self.logger.info('Routing table updated after switch leave: %s', str(self.tabela_roteamento))
+
+ del self.dictionary_dpid_datapath[switch_leave]
+ self.logger.info('List of dictionaries {DPID: datapath} has been updated: %s', str(self.dictionary_dpid_datapath))
+
+ self.s_obj_topologia.release()
+ self.s_obj_topologia_auxiliar.release()
+ self.s_tabela_roteamento.release()
+ return
+
+ @handler.set_ev_cls(event.EventLinkDelete)
+ def link_del_handler(self, ev):
+ switch_src = ev.link.src.dpid
+ switch_dest = ev.link.dst.dpid
+ port_src = ev.link.src.port_no
+ port_dest = ev.link.dst.port_no
+ port_src_original = port_src
+ port_src_name = ev.link.src.name
+ port_dest_name = ev.link.dst.name
+ port_dest_original = port_dest
+ switches_sem_links = []
+ self.s_obj_topologia.acquire()
+ self.s_obj_topologia_auxiliar.acquire()
+ self.s_tabela_roteamento.acquire()
+ self.logger.info('Link deletion event. DPID src: %s, port_src: %s to DPID dst: %s, port_dst: %s has been deleted.', str(switch_src), str(port_src), str(switch_dest), str(port_dest))
+
+ # RETIRADO PARA EMULACAO
+ """
+ port_src_name_int_subint = {'interface': port_src_name.split(".")[0], 'sub_interface': port_src_name.split(".")[1]}
+ port_dest_name_int_subint = {'interface': port_dest_name.split(".")[0], 'sub_interface': port_dest_name.split(".")[1]}
+
+ if port_src_name_int_subint['sub_interface'] == "10" or port_dest_name_int_subint['sub_interface'] == "10":
+ self.logger.info('Subinterface of port source or destination of link add is .10 (encapsulation dot1q any). Link NOT DELETED. Exiting link_del_handler')
+ self.s_obj_topologia.release()
+ self.s_obj_topologia_auxiliar.release()
+ self.s_tabela_roteamento.release()
+ return
+
+ elif port_src_name_int_subint['sub_interface'] == "11" and port_dest_name_int_subint['sub_interface'] == "11":
+ self.logger.debug('Both subinterfaces are .11 (encapsulation dot1q priority-tagged).')
+ if self.switches_in_topology.has_key(switch_src):
+ self.logger.debug('self.switches_in_topology has switch_src: %s.', str(switch_src))
+ for port in self.switches_in_topology[switch_src]:
+ #print "Entrando no for para porta: ", port
+ try:
+ #print "Antes do if de comparacao de portas para port['name]: ", port['name'], "e port_src_name_int _subint['interface'] + .10: ", port_src_name_int_subint['interface'] + ".10"
+ if port['name'] == (port_src_name_int_subint['interface'] + ".10"):
+ # print "O port name e igual, modificando o port_src"
+ port_src = port['port_no']
+ #print "O novo port_src e: ", port_src
+ except Exception:
+ pass
+
+ if self.switches_in_topology.has_key(switch_dest):
+ #print "self.switches_in_topology possui switch_dest: ", switch_dest
+ for port in self.switches_in_topology[switch_dest]:
+ #print "Entrando no for para porta: ", port
+ try:
+ #print "Antes do if de comparacao de portas para port['name]: ", port['name'], "e port_src_name_int _subint[ 'interface'] + .10: ", port_dest_name_int_subint['interface'] + ".10"
+ if port['name'] == (port_dest_name_int_subint['interface'] + ".10"):
+ #print "O port name e igual, modificando o port_dest"
+ port_dest = port['port_no']
+ #print "O novo port_dest e: ", port_dest
+ except Exception:
+ pass
+ """
+ #FIM DA RETIRADA PARA EMULACAO
+
+ if self.obj_topologia.has_key(switch_src):
+ self.logger.info('Topology object: %s has key switch source: %s.', str(self.obj_topologia), str(switch_src))
+ if self.obj_topologia[switch_src].has_key(switch_dest):
+ self.logger.info('Topology object for switch source: %s has key switch destination: %s.', str(self.obj_topologia[switch_src]), str(switch_dest))
+ if ( self.obj_topologia[switch_src][switch_dest].int_saida_src == port_src ):
+ self.logger.info('Topology object for switch source: %s and switch destination: %s, %s is equal to port_src: %s', str(switch_src), str(switch_dest), str(self.obj_topologia[switch_src][switch_dest]), str(port_src))
+ if ( self.obj_topologia[switch_src][switch_dest].int_entrada_dst == port_dest ):
+ self.logger.info('Topology object for switch source: %s and switch destination: %s, %s is equal to port_dest: %s', str(switch_src), str(switch_dest), str(self.obj_topologia[switch_src][switch_dest]), str(port_dest))
+ self.logger.info('Deleting topology object: %s.', str(self.obj_topologia[switch_src][switch_dest]))
+ del self.obj_topologia[switch_src][switch_dest]
+
+ # Since STP blocks interfaces in the TX direction only, when a unidirectional link failure is detected the link is removed in both directions so that the routing table can be recomputed. This does not prevent the active unidirectional link that was removed from being relearned on the next topology update, since all interfaces enter the LEARNING state
+ if self.obj_topologia.has_key(switch_dest):
+ self.logger.info('Topology object: %s has key switch destination: %s.', str(self.obj_topologia), str(switch_dest))
+ if self.obj_topologia[switch_dest].has_key(switch_src):
+ self.logger.info('Topology object for switch destination: %s has key switch source: %s.', str(self.obj_topologia[switch_dest]), str(switch_src))
+ if ( self.obj_topologia[switch_dest][switch_src].int_saida_src == port_dest ):
+ self.logger.info('Topology object for switch destination: %s and switch source: %s, %s is equal to port_dest: %s', str(switch_dest), str(switch_src), str(self.obj_topologia[switch_dest][switch_src]), str(port_dest))
+ if ( self.obj_topologia[switch_dest][switch_src].int_entrada_dst == port_src ):
+ self.logger.info('Topology object for switch destination: %s and switch source: %s, %s is equal to port_src: %s', str(switch_dest), str(switch_src), str(self.obj_topologia[switch_dest][switch_src]), str(port_src))
+ self.logger.info('Deleting topology object: %s.', str(self.obj_topologia[switch_dest][switch_src]))
+ del self.obj_topologia[switch_dest][switch_src]
+
+ # BEGIN HANDLING OF MULTIPLE LINKS BETWEEN TWO SWITCHES
+ delecao_enlace_auxiliar_direto = False
+ posicoes_a_serem_deletadas = []
+ # Determines the positions in obj_topologia_auxiliar that must be removed due to the link deletion
+ for posicao_enlace in range(len(self.obj_topologia_auxiliar)):
+ if self.obj_topologia_auxiliar[posicao_enlace].has_key(switch_src):
+ if self.obj_topologia_auxiliar[posicao_enlace][switch_src].has_key(switch_dest):
+ if str(self.obj_topologia_auxiliar[posicao_enlace][switch_src][switch_dest]) == str(portas(port_src, port_dest)):
+ posicoes_a_serem_deletadas.append(posicao_enlace)
+ delecao_enlace_auxiliar_direto = True
+ self.logger.info('Auxiliary Topology object for switch source: %s, port_src: %s, and switch destination: %s, port_dest: %s will be DELETED. Auxiliary Topology object: %s', str(switch_src), str(port_src), str(switch_dest), str(port_dest), str(self.obj_topologia_auxiliar))
+
+        # Delete the positions selected above from obj_topologia_auxiliar
+ for posicao_enlace in posicoes_a_serem_deletadas[::-1]:
+            self.logger.info('Auxiliary Topology Object: %s. Deleting object: %s.', str(self.obj_topologia_auxiliar), str(self.obj_topologia_auxiliar[posicao_enlace]))
+ del self.obj_topologia_auxiliar[posicao_enlace]
+        # Build the dictionary dicionario_sw_ori_sw_dest, which holds the ingress and egress ports of the destination and source switches, respectively, so that link symmetry can be checked
+ dicionario_sw_ori_sw_dest = {'direto': [], 'inverso': []}
+ for posicao_enlace in range(len(self.obj_topologia_auxiliar)):
+            if self.obj_topologia_auxiliar[posicao_enlace].has_key(switch_src) and delecao_enlace_auxiliar_direto: # delecao_enlace_auxiliar_direto guarantees that a deletion from the auxiliary topology actually happened
+ if self.obj_topologia_auxiliar[posicao_enlace][switch_src].has_key(switch_dest):
+ dicionario_sw_ori_sw_dest['direto'].append(self.obj_topologia_auxiliar[posicao_enlace][switch_src][switch_dest])
+
+ if self.obj_topologia_auxiliar[posicao_enlace].has_key(switch_dest) and delecao_enlace_auxiliar_direto:
+ if self.obj_topologia_auxiliar[posicao_enlace][switch_dest].has_key(switch_src):
+ dicionario_sw_ori_sw_dest['inverso'].append(self.obj_topologia_auxiliar[posicao_enlace][switch_dest][switch_src])
+        # Update obj_topologia if the links are symmetric (according to obj_topologia_auxiliar). While the network is still converging and ports are not yet blocked, obj_topologia is updated multiple times
+ for enlace_direto in dicionario_sw_ori_sw_dest['direto']:
+ for enlace_inverso in dicionario_sw_ori_sw_dest['inverso']:
+ if enlace_direto.int_saida_src == enlace_inverso.int_entrada_dst and enlace_direto.int_entrada_dst == enlace_inverso.int_saida_src:
+ if self.obj_topologia.has_key(switch_src):
+ self.obj_topologia[switch_src][switch_dest] = enlace_direto
+ else:
+ self.obj_topologia[switch_src] = {}
+ self.obj_topologia[switch_src][switch_dest] = enlace_direto
+ if self.obj_topologia.has_key(switch_dest):
+ self.obj_topologia[switch_dest][switch_src] = enlace_inverso
+ else:
+ self.obj_topologia[switch_dest] = {}
+ self.obj_topologia[switch_dest][switch_src] = enlace_inverso
+ self.logger.info('Topology object for switch source: %s and switch destination: %s, has been UPDATED (multiple links between same switches). Topology object: %s', str(switch_src), str(switch_dest), str(self.obj_topologia))
+        # END HANDLING OF MULTIPLE LINKS BETWEEN THE SAME TWO SWITCHES
+
+        # Delete the entries of the topology object that refer to switches without any remaining links
+ for switch in self.obj_topologia:
+ if ( self.obj_topologia[switch] == {} ):
+ switches_sem_links.append(switch)
+
+ for switch in switches_sem_links:
+ del self.obj_topologia[switch]
+
+ self.logger.info('Topology Object updated after link deletion: %s', str(self.obj_topologia))
+
+ self.tabela_roteamento = calculo_dijkstra(self.obj_topologia)
+        self.logger.info('Routing table updated after link deletion at %f. Routing table: %s', time(), str(self.tabela_roteamento))
+
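+        # Mark the affected ports as link_state DOWN in the global interface dictionary;
+        # the try/except skips the leading None placeholder stored at index 0 of each switch entry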
+ if self.switches_in_topology.has_key(switch_src):
+ for i in range(len(self.switches_in_topology[switch_src])):
+ try:
+ if self.switches_in_topology[switch_src][i]['port_no'] == port_src:
+ self.switches_in_topology[switch_src][i]['link_state'] = 'DOWN'
+ except Exception:
+ pass
+
+ if self.switches_in_topology.has_key(switch_dest):
+ for i in range(len(self.switches_in_topology[switch_dest])):
+ try:
+ if self.switches_in_topology[switch_dest][i]['port_no'] == port_dest:
+ self.switches_in_topology[switch_dest][i]['link_state'] = 'DOWN'
+ except Exception:
+ pass
+
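+        # flow_entries_deletion_full() is defined elsewhere in this module; it presumably removes the installed flow entries so the circuits can be re-provisioned over the recalculated paths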
+ self.flow_entries_deletion_full()
+
+ self.logger.info('Global Dictionary {DPID: [sw_desc, ]} for DPIDs: %s and %s has been UPDATED: %s', str(switch_src), str(switch_dest), str(self.switches_in_topology))
+
+ self.s_obj_topologia.release()
+ self.s_obj_topologia_auxiliar.release()
+ self.s_tabela_roteamento.release()
+ return
+
+
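+    # EventSwitchEnter handler: stores the switch's datapath in dictionary_dpid_datapath,
+    # records a description of each of its ports in switches_in_topology (index 0 is a None
+    # placeholder), and dumps the whole dictionary to the interfaces info file as JSON.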
+ @handler.set_ev_cls(event.EventSwitchEnter)
+ def switch_enter_handler(self, ev):
+ ports = []
+ switch_enter = ev.switch.dp.id
+
+        self.logger.info('Switch DPID: %s connected to the controller.', str(switch_enter))
+ self.dictionary_dpid_datapath[switch_enter] = ev.switch.dp
+ self.logger.info('List of dictionaries {DPID: datapath} updated: %s', str(self.dictionary_dpid_datapath))
+
+ for port in ev.switch.ports:
+ state = None
+ self.logger.debug('Switch port: %s', str(port))
+ if port._state == 0:
+ state = "ADMIN_UP"
+ elif port._state == 1:
+ state = "ADMIN_DOWN"
+ self.logger.debug('Port_no: %s, hw_addr: %s, name: %s, state: %s', str(port.port_no), str(port.hw_addr), str(port.name), str(state))
+ ports.append({'port_no': port.port_no, 'hw_addr': port.hw_addr, 'name': port.name, 'admin_state': state, 'link_state': None})
+
+ self.switches_in_topology[switch_enter] = [None]
+ for port in ports:
+ self.switches_in_topology[switch_enter].append(port)
+
+ self.logger.info('Global Dictionary {DPID: [sw_desc, ]} for DPID: %s has been CREATED: %s', str(switch_enter), str(self.switches_in_topology))
+        with open(self.interfaces_info, 'w') as file_interfaces:
+            json.dump(self.switches_in_topology, file_interfaces, sort_keys=True, indent=4, separators=(',',': '))
+
+ return
+
+
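+    # EventOFPSwitchFeatures handler: sets a receive timeout on the datapath socket
+    # (TIMEOUT_ECHO_REQUEST is assumed to be defined earlier in this module).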
+ @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
+ def switch_features_handler(self, ev):
+        # Used to avoid Broken Pipe errors when switches disconnect from the controller
+ datapath = ev.msg.datapath
+ datapath.socket.settimeout(TIMEOUT_ECHO_REQUEST)
+ return
+
+
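+    # EventOFPPortStatus handler: on MODIFY it refreshes the stored port description, on DELETE it
+    # removes the entry, and on ADD it appends a new one; the interfaces info file is then rewritten
+    # and, on MODIFY/DELETE, flow entries matching the affected ingress port are deleted from the switch.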
+ @handler.set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
+ def port_status_handler(self, ev):
+ msg = ev.msg
+ dp = msg.datapath
+ ofp = dp.ofproto
+ state = None
+
+ if msg.reason == ofp.OFPPR_ADD:
+ reason = 'ADD'
+ elif msg.reason == ofp.OFPPR_DELETE:
+ reason = 'DELETE'
+ elif msg.reason == ofp.OFPPR_MODIFY:
+ reason = 'MODIFY'
+ else:
+ reason = 'unknown'
+
+        self.logger.info('OFPPortStatus received: reason=%s desc=%s', reason, msg.desc)
+
+ if self.switches_in_topology.has_key(dp.id):
+ for i in range(len(self.switches_in_topology[dp.id])):
+ try:
+ if msg.desc.port_no == self.switches_in_topology[dp.id][i]['port_no'] and reason == 'MODIFY':
+ self.switches_in_topology[dp.id][i]['hw_addr'] = msg.desc.hw_addr
+ self.switches_in_topology[dp.id][i]['name'] = msg.desc.name
+ if msg.desc.state == 0:
+ state = "ADMIN_UP"
+
+ elif msg.desc.state == 1:
+ state = "ADMIN_DOWN"
+ self.switches_in_topology[dp.id][i]['admin_state'] = state
+
+ if msg.desc.port_no == self.switches_in_topology[dp.id][i]['port_no'] and reason == 'DELETE':
+ del self.switches_in_topology[dp.id][i]
+
+ except Exception:
+ pass
+
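+            # A newly reported port gets a fresh description entry appended to this switch's list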
+ if reason == 'ADD':
+ if msg.desc.state == 0:
+ state = "ADMIN_UP"
+
+ elif msg.desc.state == 1:
+ state = "ADMIN_DOWN"
+ self.switches_in_topology[dp.id].append({'port_no': msg.desc.port_no, 'hw_addr': msg.desc.hw_addr, 'name': msg.desc.name, 'admin_state': state, 'link_state': None})
+ self.logger.info('Global Dictionary {DPID: [sw_desc, ]} for DPID: %s has been UPDATED: %s', str(dp.id), str(self.switches_in_topology))
+            with open(self.interfaces_info, 'w') as file_interfaces:
+                json.dump(self.switches_in_topology, file_interfaces, sort_keys=True, indent=4, separators=(',',': '))
+
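+        # On MODIFY or DELETE, remove from the switch any flow entry whose match uses the affected port as in_port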
+ if reason == 'MODIFY' or reason == 'DELETE':
+ if self.dpid_match_actions_dic.has_key(dp.id):
+ self.logger.debug('DPID match actions dictionary for DPID: %s is: %s.', str(dp.id), str(self.dpid_match_actions_dic[dp.id]))
+
+ for i in range(len(self.dpid_match_actions_dic[dp.id])):
+ self.logger.debug('Object of the match actions dictionary is: %s.', str(self.dpid_match_actions_dic[dp.id][i]))
+ self.logger.debug('port_status_handler: Match of the object is: %s.', str(self.dpid_match_actions_dic[dp.id][i]['match']))
+ if str(self.dpid_match_actions_dic[dp.id][i]['match']['in_port']) == str(msg.desc.port_no):
+ self.logger.info('Match and Actions Dictionary for DPID: %s has a match for port: %s. Deleting flow.', str(dp.id), str(msg.desc.port_no))
+ match = self.dpid_match_actions_dic[dp.id][i]['match']
+ actions = self.dpid_match_actions_dic[dp.id][i]['actions']
+ self.mod_flow(dp, 1, match, actions, ofp.OFPFC_DELETE)
+ return
+
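+    # Sends an OFPEchoRequest carrying the given payload to the datapath;
+    # presumably invoked periodically elsewhere in this module to probe switch liveness.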
+ def send_echo_request(self, datapath, data="aG9nZQ=="):
+ ofp_parser = datapath.ofproto_parser
+
+ req = ofp_parser.OFPEchoRequest(datapath, data)
+ datapath.send_msg(req)
+ self.logger.debug('OFPEchoRequest has been sent to: %s', str(datapath.id))
+ return
+
+
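+    # Logs the payload of every OFPEchoReply, in any of the HANDSHAKE, CONFIG, or MAIN dispatcher states.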
+ @set_ev_cls(ofp_event.EventOFPEchoReply,
+ [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
+ def echo_reply_handler(self, ev):
+ self.logger.debug('OFPEchoReply received: data=%s',
+ hex_array(ev.msg.data))
+ return