Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion automated_tests/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@ behave_test/
/site
/env_tests
__pycache__/
doc_generator.py
doc_generator.py
/output
16 changes: 15 additions & 1 deletion automated_tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -71,4 +71,18 @@ Feature: pktvisor tests # features/pktvisor.feature:1
Took 0m2.312s


```
```

## Run behave using parallel process

You can use [behavex](https://github.com/hrcorval/behavex) to run the scenarios in parallel processes by simply running:

Examples:

> behavex -t @\<TAG\> --parallel-processes=2 --parallel-scheme=scenario

> behavex -t @\<TAG\> --parallel-processes=5 --parallel-scheme=feature

Running smoke tests:

> behavex -t @smoke --parallel-processes=20 --parallel-scheme=scenario
5 changes: 5 additions & 0 deletions automated_tests/features/cleanup.feature
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
Feature: cleanup env

@cleanup
Scenario: remove dummy iface
Then Remove dummy interface
17 changes: 7 additions & 10 deletions automated_tests/features/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,21 +6,18 @@


def before_scenario(context, scenario):
cleanup_container(PKTVISOR_CONTAINER_NAME)
context.containers_id = dict()
test_config.send_terminal_commands("modprobe -v dummy numdummies=1", sudo=True)
test_config.send_terminal_commands("ip link set dummy0 up", sudo=True)


def after_scenario(context, scenario):
cleanup_container(PKTVISOR_CONTAINER_NAME)
test_config.send_terminal_commands("rmmod dummy", sudo=True)
cleanup_container(PKTVISOR_CONTAINER_NAME, context.containers_id.keys())


def cleanup_container(name_prefix):
def cleanup_container(name_prefix, containers_id):
docker_client = docker.from_env()
containers = docker_client.containers.list(all=True)
for container in containers:
test_container = container.name.startswith(name_prefix)
if test_container is True:
container.stop()
container.remove()
for container_id in containers_id:
container = docker_client.containers.get(container_id)
container.stop()
container.remove()
69 changes: 60 additions & 9 deletions automated_tests/features/pktvisor.feature
Original file line number Diff line number Diff line change
@@ -1,77 +1,128 @@
Feature: pktvisor tests

@smoke
Scenario: pktvisor bootstrap
When run pktvisor instance on port available with user permission
Then the pktvisor container status must be running
And pktvisor API must be enabled
And 2 policies must be running


@smoke
Scenario: run multiple pktvisors instances using different ports
When run pktvisor instance on port available with user permission
And run pktvisor instance on port available with user permission
And run pktvisor instance on port available with user permission
Then all the pktvisor containers must be running
And 3 pktvisor's containers must be running
Then 3 pktvisor's containers must be running


@smoke
Scenario: run multiple pktvisors instances using the same port
When run pktvisor instance on port available with user permission
And run pktvisor instance on port unavailable with user permission
Then 1 pktvisor's containers must be running
And 1 pktvisor's containers must be exited


@smoke
Scenario: create a policy with all handlers using admin permission
Given that a pktvisor instance is running on port available with admin permission
When create a new policy with all handler(s)
Then 4 policies must be running
# 1 default policy, 2 resource policies, and 1 newly created policy


@smoke
Scenario: create a policy with net handler using admin permission
Given that a pktvisor instance is running on port available with admin permission
When create a new policy with net handler(s)
Then 4 policies must be running


@smoke
Scenario: create a policy with dhcp handler using admin permission
Given that a pktvisor instance is running on port available with admin permission
When create a new policy with dhcp handler(s)
Then 4 policies must be running


@smoke
Scenario: create a policy with dns handler using admin permission
Given that a pktvisor instance is running on port available with admin permission
When create a new policy with dns handler(s)
Then 4 policies must be running


@smoke
Scenario: create a policy with pcap stats handler using admin permission
Given that a pktvisor instance is running on port available with admin permission
When create a new policy with pcap_stats handler(s)
Then 4 policies must be running

Scenario: delete all policies using admin permission

@smoke
Scenario: delete the default policy using admin permission
Given that a pktvisor instance is running on port available with admin permission
When delete 2 policies
Then 2 policies must be running
When delete 1 non-resource policies
Then 0 policies must be running


@smoke
Scenario: delete all non-resource policies using admin permission
Given that a pktvisor instance is running on port available with admin permission
And create a new policy with all handler(s)
When delete 2 non-resource policies
Then 0 policies must be running


Scenario: delete 1 policy using admin permission
@smoke
Scenario: delete 1 non-resource policy using admin permission
Given that a pktvisor instance is running on port available with admin permission
When create a new policy with all handler(s)
And delete 1 policies
And delete 1 non-resource policies
Then 2 policies must be running


@smoke
Scenario: delete the default-resource policy using admin permission
Given that a pktvisor instance is running on port available with admin permission
When delete 1 resource policies
Then 1 policies must be running


@smoke
Scenario: delete all resource policies using admin permission
Given that a pktvisor instance is running on port available with admin permission
And create a new policy with all handler(s)
When delete 2 resource policies
Then 2 policies must be running


@smoke
Scenario: delete 1 resource policy using admin permission
Given that a pktvisor instance is running on port available with admin permission
When create a new policy with all handler(s)
And delete 1 resource policies
Then 3 policies must be running


@smoke
Scenario: create a policy using user permission
Given that a pktvisor instance is running on port available with user permission
When try to create a new policy with all handler(s)
Then status code returned on response must be 404
And 2 policies must be running


@smoke
Scenario: delete 1 policy using user permission
Given that a pktvisor instance is running on port available with user permission
When try to delete a policy
Then status code returned on response must be 404
And 2 policies must be running


@dev
@smoke
Scenario Outline: pktvisor metrics
When run pktvisor instance on port <status_port> with <role> permission
And run mocked data <file_name> for this network
Expand All @@ -84,4 +135,4 @@ Scenario Outline: pktvisor metrics
| available | user | dns_ipv4_udp.pcap | running |
| available | user | dns_ipv4_tcp.pcap | running |
| available | user | dhcp-flow.pcap | running |
| available | user | dns_udp_mixed_rcode.pcap | running |
| available | user | dns_udp_mixed_rcode.pcap | running |
45 changes: 19 additions & 26 deletions automated_tests/features/steps/pktvisor.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,11 @@
def run_pktvisor(context, status_port, role):
availability = {"available": True, "unavailable": False}

context.pkt_port = check_port_is_available(availability[status_port])
context.pkt_port = check_port_is_available(context.containers_id, availability[status_port])
context.container_id = run_pktvisor_container("ns1labs/pktvisor", context.pkt_port, role)
assert_that(context.container_id, not_(equal_to(None)), "Failed to provision pktvisor container")
if context.container_id not in context.containers_id.keys():
context.containers_id[context.container_id] = str(context.pkt_port)
event = threading.Event()
event.wait(1)

Expand All @@ -45,21 +47,10 @@ def check_pkt_status(context, pkt_status):
assert_that(status, equal_to(pkt_status), f"pktvisor container {context.container_id} failed with status:{status}")


@step("all the pktvisor containers must be {pkt_status}")
def check_pktvisors_status(context, pkt_status):
docker_client = docker.from_env()

containers = docker_client.containers.list(all=True)
for container in containers:
is_test_container = container.name.startswith(PKTVISOR_CONTAINER_NAME)
if is_test_container is True:
status = container.status
assert_that(status, equal_to(pkt_status), f"pktvisor container {container.id} failed with status:{status}")


@step("{amount_of_pktvisor} pktvisor's containers must be {pkt_status}")
def assert_amount_of_pkt_with_status(context, amount_of_pktvisor, pkt_status):
containers_with_expected_status = check_amount_of_pkt_with_status(amount_of_pktvisor, pkt_status)
containers_with_expected_status = check_amount_of_pkt_with_status(amount_of_pktvisor, pkt_status,
context.containers_id.keys())
assert_that(len(set(containers_with_expected_status)), equal_to(int(amount_of_pktvisor)),
f"Amount of pktvisor container with referred status failed")

Expand Down Expand Up @@ -120,6 +111,11 @@ def check_metrics(context):
assert_that(is_json_valid, equal_to(True), f"Wrong data generated for {network_file}_{endpoint.replace('/','_')}")


@step("Remove dummy interface")
def remove_mocked_interface(context):
    """Tear down the dummy network interface used for mocked traffic.

    Unloads the `dummy` kernel module via `rmmod`, removing the dummy0
    interface that before_scenario creates with `modprobe -v dummy`.

    :param context: behave context object (unused here; required by the
        step signature).
    """
    # sudo is required: unloading a kernel module is a privileged operation
    send_terminal_commands("rmmod dummy", sudo=True)


@threading_wait_until
def check_metrics_per_endpoint(endpoint, pkt_port, path_to_schema_file, event=None):
response = make_get_request(endpoint, pkt_port)
Expand Down Expand Up @@ -156,19 +152,17 @@ def run_pktvisor_container(container_image, port=10853, role="user", container_n


@threading_wait_until
def check_amount_of_pkt_with_status(amount_of_pktvisor, pkt_status, event=None):
def check_amount_of_pkt_with_status(amount_of_pktvisor, pkt_status, test_containers_id, event=None):
docker_client = docker.from_env()
containers = docker_client.containers.list(all=True)
containers_with_expected_status = list()
for container in containers:
is_test_container = container.name.startswith(PKTVISOR_CONTAINER_NAME)
if is_test_container is True:
status = container.status
if status == pkt_status:
containers_with_expected_status.append(container)
if len(set(containers_with_expected_status)) == int(amount_of_pktvisor):
event.set()
return containers_with_expected_status
for container_id in test_containers_id:
container = docker_client.containers.get(container_id)
status = container.status
if status == pkt_status:
containers_with_expected_status.append(container)
if len(set(containers_with_expected_status)) == int(amount_of_pktvisor):
event.set()
return containers_with_expected_status
return containers_with_expected_status


Expand Down Expand Up @@ -218,4 +212,3 @@ def validate_json(json_data, path_to_file):
return False

return True

24 changes: 18 additions & 6 deletions automated_tests/features/steps/policies.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import random
from behave import step
from utils import make_get_request, random_string, threading_wait_until
import re


@step("{amount_of_policies} policies {status_condition} be running")
Expand Down Expand Up @@ -41,13 +42,24 @@ def try_to_create_new_policy(context, handler):
create_new_policy(context, handler, pkt_port=context.pkt_port, expected_status_code=404)


@step("delete {amount_of_policies} policies")
def remove_policies(context, amount_of_policies):
@step("delete {amount_of_policies} {policy_type} policies")
def remove_policies(context, amount_of_policies, policy_type):
resources = list()
non_resources = list()
assert_that(policy_type, any_of(equal_to("resource"), equal_to("non-resource")), "Unexpected type of policy")
names_of_all_policies = make_get_request('policies', context.pkt_port).json().keys()
policies_to_remove = random.sample(names_of_all_policies, int(amount_of_policies))
for name in names_of_all_policies:
matching = re.match(r'^.+\-[a-zA-Z0-9]{16}\-resources$', name)
if matching:
resources.append(matching.group())
else:
assert_that(matching, equal_to(None))
non_resources.append(name)
policies_by_type = {"resource": resources, "non-resource": non_resources}
policies_to_remove = random.sample(policies_by_type[policy_type], int(amount_of_policies))
for policy in policies_to_remove:
remove_policy(policy, context.pkt_port)
response = get_policy(policy, 10853, 404)
response = get_policy(policy, context.pkt_port, 404)
assert_that(response.json(), has_key('error'), "Unexpected message for non existing policy")
assert_that(response.json(), has_value('policy does not exists'), "Unexpected message for non existing policy")

Expand All @@ -59,7 +71,7 @@ def try_to_delete_policies(context):
context.response = remove_policy(sample_policy, context.pkt_port, 404)


def assert_policy_creation(yaml_data, pkt_port=10853, expected_status_code=200): #todo arrumar status code
def assert_policy_creation(yaml_data, pkt_port=10853, expected_status_code=200):
"""

:param yaml_data: policy configurations
Expand All @@ -85,7 +97,7 @@ def get_policy(policy_name, pkt_port=10853, expected_status_code=200):
return make_get_request(endpoint, pkt_port, expected_status_code)


def remove_policy(policy_name, pkt_port=10853, expected_status_code=200): #todo arrumar status code
def remove_policy(policy_name, pkt_port=10853, expected_status_code=200):

"""
:param (str) policy_name: name of the policy to be fetched
Expand Down
Loading