LNST-project/lnst

Add support for Netperf


Historically we supported Netperf as a TestModule, but we moved to iperf3 when implementing lnst-next. It would be nice to have full support for Netperf again.

In addition to the current version of Netperf, I have a local patch that updates some parts of it to be more "lnst-next"-like, but it's not complete. I'm posting it here in case it's relevant in the future, or to serve as inspiration for anyone willing to pick this up. Feel free to start from scratch if the patch no longer applies or if there are better ideas about how to implement it.
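
For reference, the module drives netperf in omni mode and parses its KEY=value output. Roughly, the composed client command and the kind of output it consumes look like the following; the address, the numbers and the exact output selectors are only illustrative and depend on the netperf version:

    netperf -H 192.168.1.2 -f k -P 0 -t TCP_STREAM -- -k THROUGHPUT,LOCAL_CPU_UTIL

    THROUGHPUT=9416.23
    LOCAL_CPU_UTIL=3.16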

From 76ac0f7b9dd3a3d1e1b381526078c7880f082845 Mon Sep 17 00:00:00 2001
From: Ondrej Lichtner <olichtne@redhat.com>
Date: Wed, 17 Oct 2018 09:34:27 +0200
Subject: [PATCH] modernising Netperf - removing evaluation logic, focusing on
 just the measurement

Not sure if it works, needs to be revisited and probably refactored
before merging in.

Signed-off-by: Ondrej Lichtner <olichtne@redhat.com>
---
 lnst/Tests/Netperf.py | 192 ++++++------------------------------------
 1 file changed, 26 insertions(+), 166 deletions(-)

diff --git a/lnst/Tests/Netperf.py b/lnst/Tests/Netperf.py
index 31f54a1..2a0a9cb 100644
--- a/lnst/Tests/Netperf.py
+++ b/lnst/Tests/Netperf.py
@@ -16,14 +16,10 @@ class Netserver(BaseTestModule):
 
     def wait_on_interrupt(self):
         try:
-            handler = signal.getsignal(signal.SIGINT)
-            signal.signal(signal.SIGINT, signal.default_int_handler)
             while True:
                 time.sleep(1)
         except KeyboardInterrupt:
             pass
-        finally:
-            signal.signal(signal.SIGINT, handler)
 
     def run(self):
         if not is_installed("netserver"):
@@ -72,12 +68,6 @@ class Netperf(BaseTestModule):
     debug = IntParam(default=0)
     opts = StrParam()
 
-    max_deviation = Param()
-
-    threshold = Param()
-    threshold_deviation = Param()
-    threshold_interval = Param()
-
     def __init__(self, **kwargs):
         super(Netperf, self).__init__(**kwargs)
 
@@ -100,80 +90,11 @@ class Netperf(BaseTestModule):
             if self.params.cpu_util not in ["both", "local", "remote"]:
                 raise TestModuleError("cpu_util can be 'both', 'local' or 'remote'")
 
-
-
-        if "threshold_deviation" in self.params:
-            self._check_threshold_param(self.params.threshold_deviation,
-                                        "threshold_deviation")
-
-        else:
-            self.params.threshold_deviation = {"rate": 0.0,
-                                               "unit": "bps"}
-
-
-        if "threshold" in self.params:
-            self._check_threshold_param(self.params.threshold,
-                                        "threshold")
-
-            rate = self.params.threshold["rate"]
-            deviation = self.params.threshold_deviation["rate"]
-            self.params.threshold_interval = (rate - deviation,
-                                              rate + deviation)
-
-        if "max_deviation" in self.params:
-            if not isinstance(self.params.max_deviation, dict):
-                raise TestModuleError("max_deviation is expected to be dictionary")
-
-            if 'type' not in self.params.max_deviation:
-                raise TestModuleError("max_deviation 'type' has to be specified ('percent' or 'absolute')")
-
-            if self.params.max_deviation['type'] not in ['percent', 'absolute']:
-                raise TestModuleError("max_deviation 'type' can be 'percent' or 'absolute'")
-
-
-
-            if self.params.max_deviation['type'] is 'percent':
-                if 'value' not in self.params.max_deviation:
-                    raise TestModuleError("max_deviation 'value' has to be specified")
-
-                self.params.max_deviation['value'] = float(self.params.max_deviation['value'])
-
-            if self.params.max_deviation['type'] is 'absolute':
-                if not isinstance(self.params.max_deviation, dict):
-                    raise TestModuleError("max_deviation 'value' is expected to be dictionary for 'absolute' type")
-
-                self.params.max_deviation['value'] = self._parse_threshold(self.params.max_deviation['value'],
-                                            "max_deviation 'value'")
-
-
-    def _check_threshold_param(self, threshold, name):
-            if not isinstance(threshold, dict):
-                raise TestModuleError("%s is expected to be dictionary", name)
-
-            if 'rate' not in threshold:
-                raise TestModuleError("%s expects 'rate' key in dictionary", name)
-
-            threshold['rate'] = float(threshold['rate'])
-
-            if 'unit' not in threshold:
-                raise TestModuleError("%s expects 'unit' key in dictionary", name)
-
-            if self.params.testname in ["TCP_STREAM", "UDP_STREAM",
-                                        "SCTP_STREAM", "SCTP_STREAM_MANY"]:
-                if threshold['unit'] is not 'bps':
-                    raise TestModuleError("unit can be 'bps' for STREAMs")
-            else:
-                if threshold['unit'] is not ['tps']:
-                    raise TestModuleError("unit can be 'tps' for RRs")
-
-
     def _is_omni(self):
         return self.params.testname in self._omni_tests
 
     def _compose_cmd(self):
-        """
-        composes commands for netperf and netserver based on xml recipe
-        """
+        """composes the netperf client command"""
         cmd = "netperf -H %s -f k" % self.params.server
         if self._is_omni():
             # -P 0 disables banner header of output
@@ -260,8 +181,16 @@ class Netperf(BaseTestModule):
         return res_val
 
     def _parse_omni_output(self, output):
-        res_val = {}
+        res = {}
+        for line in output.split('\n'):
+            try:
+                key, val = line.split('=')
+                res[key] = float(val)
+            except ValueError:
+                continue
+        #return res
 
+        res_val = {}
         pattern_throughput = "THROUGHPUT=(\d+\.\d+)"
         throughput = re.search(pattern_throughput, output)
 
@@ -421,11 +350,21 @@ class Netperf(BaseTestModule):
 
         return pretty_rate
 
-    def _run_client(self, cmd):
-        logging.debug("running as client...")
+    def run(self):
+        self._res_data = {}
+        res_data = self._res_data
+
+        if not is_installed("netperf"):
+            res_data["msg"] = "Netperf is not installed on this machine!"
+            logging.error(res_data["msg"])
+            return False
+
+        cmd = self._compose_cmd()
+        logging.debug("compiled command: %s" % cmd)
+        logging.debug("running netperf client...")
 
-        res_data = {}
         res_data["testname"] = self.params.testname
+        res_data["cmd"] = cmd
 
         rv = 0
         results = []
@@ -435,6 +374,7 @@ class Netperf(BaseTestModule):
                 logging.info("Netperf starting run %d" % i)
             clients = []
             client_results = []
+
             for i in range(0, self.params.num_parallel):
                 clients.append(ShellProcess(cmd))
 
@@ -490,93 +430,13 @@ class Netperf(BaseTestModule):
         if rv != 0 and self.params.runs == 1:
             res_data["msg"] = "Could not get performance throughput!"
             logging.info(res_data["msg"])
-            return (False, res_data)
+            return False
         elif rv != 0 and self.params.runs > 1:
             res_data["msg"] = "At least one of the Netperf runs failed, "\
                               "check the logs and result data for more "\
                               "information."
             logging.info(res_data["msg"])
-            return (False, res_data)
-
-        res_val = False
-        res_data["msg"] = "Measured rate was %.2f +-%.2f %s" %\
-                                            (rate_pretty["rate"],
-                                             rate_dev_pretty["rate"],
-                                             rate_pretty["unit"])
-        if rate > 0.0:
-            res_val = True
-        else:
-            res_val = False
-            return (res_val, res_data)
-
-        if "max_deviation" in self.params:
-            if self.params.max_deviation["type"] == "percent":
-                percentual_deviation = (rate_deviation / rate) * 100
-                if percentual_deviation > self.params.max_deviation["value"]:
-                    res_val = False
-                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
-                                      "deviation than allowed (+-%.2f %%)" %\
-                                      (rate_pretty["rate"],
-                                       rate_dev_pretty["rate"],
-                                       rate_pretty["unit"],
-                                       self.params.max_deviation["value"])
-                    return (res_val, res_data)
-            elif self.params.max_deviation["type"] == "absolute":
-                if rate_deviation > self.params.max_deviation["value"]["rate"]:
-                    pretty_deviation = self._pretty_rate(self.params.max_deviation["value"]["rate"])
-                    res_val = False
-                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
-                                      "deviation than allowed (+-%.2f %s)" %\
-                                      (rate_pretty["rate"],
-                                       rate_dev_pretty["rate"],
-                                       rate_pretty["unit"],
-                                       pretty_deviation["rate"],
-                                       pretty_deviation["unit"])
-                    return (res_val, res_data)
-        if "threshold_interval" in self.params:
-            result_interval = (rate - rate_deviation,
-                               rate + rate_deviation)
-
-            threshold_pretty = self._pretty_rate(self.params.threshold["rate"])
-            threshold_dev_pretty = self._pretty_rate(self.params.threshold_deviation["rate"],
-                                                     unit = threshold_pretty["unit"])
-
-            if self.params.threshold_interval[0] > result_interval[1]:
-                res_val = False
-                res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
-                                  "than threshold %.2f +-%.2f %s" %\
-                                  (rate_pretty["rate"],
-                                   rate_dev_pretty["rate"],
-                                   rate_pretty["unit"],
-                                   threshold_pretty["rate"],
-                                   threshold_dev_pretty["rate"],
-                                   threshold_pretty["unit"])
-                return (res_val, res_data)
-            else:
-                res_val = True
-                res_data["msg"] = "Measured rate %.2f +-%.2f %s is higher "\
-                                  "than threshold %.2f +-%.2f %s" %\
-                                  (rate_pretty["rate"],
-                                   rate_dev_pretty["rate"],
-                                   rate_pretty["unit"],
-                                   threshold_pretty["rate"],
-                                   threshold_dev_pretty["rate"],
-                                   threshold_pretty["unit"])
-                return (res_val, res_data)
-        return (res_val, res_data)
-
-    def run(self):
-        cmd = self._compose_cmd()
-        logging.debug("compiled command: %s" % cmd)
-        if not is_installed("netperf"):
-            res_data = {}
-            res_data["msg"] = "Netperf is not installed on this machine!"
-            logging.error(res_data["msg"])
-            self._res_data = res_data
             return False
 
-        (rv, res_data) = self._run_client(cmd)
-        self._res_data = res_data
-        if rv is False:
-            return False
+        res_data["msg"] = "Measurement successful"
         return True
-- 
2.33.0
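
To give a sense of where this could end up, here's a rough sketch (not part of the patch) of how the modernized modules might be driven from an lnst-next Python recipe. The recipe scaffolding, host/device names and the job API are assumptions modeled on how the current test modules are used; only the Netperf/Netserver class and parameter names come from the patch above, and address configuration on the test interfaces is omitted for brevity:

    from lnst.Controller import HostReq, DeviceReq, BaseRecipe
    from lnst.Tests.Netperf import Netperf, Netserver

    class NetperfRecipe(BaseRecipe):
        # two hosts, each with one test interface on the same network segment
        host1 = HostReq()
        host1.eth0 = DeviceReq(label="net1")
        host2 = HostReq()
        host2.eth0 = DeviceReq(label="net1")

        def test(self):
            host1, host2 = self.matched.host1, self.matched.host2

            # start netserver in the background on the receiving host
            server_job = host1.run(Netserver(), bg=True)

            # run the netperf client against one of the receiver's addresses
            host2.run(Netperf(server=host1.eth0.ips[0],
                              testname="TCP_STREAM",
                              runs=3))

            server_job.kill()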