% !TeX root = ../thesis.tex
|
|
%----------------------------------------------------------------------------
|
|
\appendix
|
|
%----------------------------------------------------------------------------
|
|
\chapter*{\fuggelek}\addcontentsline{toc}{chapter}{\fuggelek}
|
|
\setcounter{chapter}{\appendixnumber}
|
|
%\setcounter{equation}{0} % a fofejezet-szamlalo az angol ABC 6. betuje (F) lesz
|
|
\numberwithin{equation}{section}
|
|
\numberwithin{figure}{section}
|
|
\numberwithin{lstlisting}{section}
|
|
%\numberwithin{tabular}{section}
|
|
|
|
\section{Knative Autoscaler napl\'o feldolgoz\'as\'at v\'egző k\'od}
|
|
\label{sec:log-analyze}
|
|
\begin{lstlisting}[label=code:log-analyze]
|
|
class LogAnalyzer(Analyzer):
    """Parse Knative Autoscaler log files (.txt) and collect the per-second
    averages of pod count and observed concurrency."""

    def __init__(self):
        super().__init__(typeof='.txt')
        self.concurrencypersec = []  # per-second average observed concurrency
        self.podpersec = []          # per-second average pod count
        # Measurement window; overwritten by readconfigdates().
        self.start = datetime.datetime.now()
        self.end = datetime.datetime.now()

    def listtodict(self, inlist: list) -> dict:
        """Turn a flat [k1, v1, k2, v2, ...] list into {k1: v1, k2: v2, ...}."""
        it = iter(inlist)
        return dict(zip(it, it))

    def processfile(
            self,
            fname,
            shouldprint: bool = False) -> dict:
        """Scan one autoscaler log file and group the PodCount /
        ObservedStableValue samples that fall inside [self.start, self.end]
        by their second-resolution timestamp.

        Returns a dict mapping datetime -> {'pod': [...], 'cc': [...]}.
        Files whose name contains 'date' are configuration files, not logs.
        """
        dictofsecs = {}
        if 'date' in fname:
            return {}
        with open(fname, 'r') as inputfile:
            line = inputfile.readline()
            while line:
                try:
                    linedict = json.loads(line)
                    try:
                        currdate = linedict['ts'].split('.')[0].replace('T', ' ')
                        dateformatted = datetime.datetime.strptime(
                            currdate, '%Y-%m-%d %H:%M:%S')
                        if self.start < dateformatted < self.end:
                            # The msg field is a "key=value key=value" string.
                            messagedict = self.listtodict(
                                re.split('[ =]', linedict['msg']))
                            messagedict['ts'] = dateformatted
                            if 'ObservedStableValue' in messagedict:
                                persec = dictofsecs.setdefault(
                                    messagedict['ts'], {'pod': [], 'cc': []})
                                persec['pod'].append(
                                    float(messagedict['PodCount']))
                                persec['cc'].append(
                                    float(messagedict['ObservedStableValue']))
                    except Exception as exception:
                        # Malformed record: report it but keep scanning.
                        print(exception)
                except json.JSONDecodeError:
                    continue  # not a JSON line; 'finally' still advances
                finally:
                    line = inputfile.readline()
        return dictofsecs

    def readconfigdates(self, directory='.'):
        """Read the measurement start/end timestamps from <directory>/dates.txt.

        Raises ValueError when fewer than two timestamps are present
        (instead of the bare IndexError the old code produced).
        """
        dates = []
        with open(directory + "/dates.txt", 'r') as inputfile:
            line = inputfile.readline().rstrip()
            while line:
                dates.append(datetime.datetime.strptime(
                    line, '%Y-%m-%d %H:%M:%S'))
                line = inputfile.readline().rstrip()
        if len(dates) < 2:
            raise ValueError('dates.txt must contain a start and an end date')
        self.start = dates[0]
        self.end = dates[1]

    def averagepersec(
            self,
            dictoftimes: dict,
            shouldprint: bool = False) -> None:
        """Average the collected samples of every second and append the
        results to self.podpersec / self.concurrencypersec."""
        for value in dictoftimes.values():
            avgpod = average(value['pod'])
            avgcc = average(value['cc'])
            self.podpersec.append(avgpod)
            self.concurrencypersec.append(avgcc)
            if shouldprint:
                print(avgpod)
                print(avgcc)

    def work(self, directory: str = '.') -> None:
        """Process every log file in *directory* and compute the averages."""
        files = super().getfiles(directory)
        self.readconfigdates(directory)
        filelines = {}
        for afile in files:
            filelines.update(self.processfile(afile))
        self.averagepersec(filelines, False)
|
|
|
|
\end{lstlisting}
|
|
|
|
\section{Folyamatos terhel\'est gener\'al\'o m\'er\'est v\'egző szkriptr\'eszlet Bash nyelven}
|
|
\begin{lstlisting}[label=code:bash-banchmark-for]
|
|
# Constant-load benchmark: repeat the same measurement ten times.
if [[ $* == *"--for"* ]]; then
    for num in 1 2 3 4 5 6 7 8 9 10; do
        echo -e "for $num\n"
        if $kubeless; then
            if [[ $* == *"--loadtest"* ]]; then
                # FIX: was "$function_firendly" (typo, always empty) -- now
                # matches the hey branch below.
                loadtest -k -H "Host: $function.kubeless" --rps $rps -c $connection -t $time -p "$function_friendly".body http://$kuberhost/"$function" >./data/"$function"."$num".txt
            else
                hey -c "$connection" -q $rps -z "$time" -m POST -o csv -host "$function.kubeless" -D "$function_friendly".body -T "application/json" http://$kuberhost/"$function" >./data/"$function"."$num".csv
            fi
        else
            if [[ $* == *"--loadtest"* ]]; then
                loadtest -k -H "Host: $function.default.example.com" --rps $rps -c $connection -t $time http://$kuberhost/ >./data/"$function"."$num".for.csv
            else
                hey -c "$connection" -q $rps -z "$time" -m POST -o csv -host "$function.default.example.com" http://$kuberhost/ >./data/"$function"."$num".for.csv
            fi
        fi
    done
fi
|
|
\end{lstlisting}
|
|
|
|
\clearpage
|
|
\section{Emelkedő terhel\'est megval\'os\'it\'o m\'er\'es Bash szkriptnyelven}
|
|
\begin{lstlisting}[label=code:bash-banchmark-climb]
|
|
# Climbing-load benchmark: multiply the base rps until climb_max is reached.
if [[ $* == *"--climb"* ]]; then
    while [[ $climb -lt $climb_max ]]; do
        climb_rps=$((rps * climb))
        echo -e "Rps: $climb_rps"
        if $kubeless; then
            if [[ $* == *"--loadtest"* ]]; then
                # FIX: was "$function_firendly" (typo, always empty) -- now
                # matches the hey branch below.
                loadtest -k -H "Host: $function.kubeless" --rps $climb_rps -c 1 -t $time -p "$function_friendly".body http://$kuberhost/"$function" >./data/"$function"."$climb_rps".climb.txt
            else
                # hey has no total-rps switch: $climb workers at $rps each
                # give the same $climb_rps aggregate rate.
                hey -c $climb -q $rps -z $time -m POST -o csv -host "$function.kubeless" -D "$function_friendly".body -T "application/json" http://$kuberhost/"$function" >./data/"$function"."$climb_rps".climb.csv
            fi
        else
            if [[ $* == *"--loadtest"* ]]; then
                loadtest -k -H "Host: $function.default.example.com" --rps $climb_rps -c 1 -t $time http://$kuberhost/ >./data/"$function"."$climb_rps".climb.txt
            else
                hey -c $climb -q $rps -z $time -m POST -o csv -host "$function.default.example.com" http://$kuberhost/ >./data/"$function"."$climb_rps".climb.csv
            fi
        fi
        climb=$((climb + 1))
    done
fi
|
|
\end{lstlisting}
|
|
|
|
\section{Jmeter kimenet\'et feldolgoz\'o k\'odr\'eszlet, Python nyelven}
|
|
\begin{lstlisting}[label=code:jmeter-analyze]
|
|
class JmeterAnalyzer(CsvAnalyzer):
    """Collect per-second latency samples from a JMeter CSV result file."""

    def __init__(self):
        super().__init__()
        self.responsepersec = {}  # wall-clock second -> [latency, ...]

    def processfile(
            self,
            fname,
            shouldprint: bool = False):
        """Group the Latency column of *fname* by wall-clock second.

        The timeStamp column holds epoch milliseconds; it is truncated to a
        one-second resolution '%c' string used as the grouping key.
        """
        with open(fname, 'r') as csvfile:
            reader = csv.reader(csvfile)
            header = next(reader)
            for record in reader:
                row = {field: cell.strip()
                       for field, cell in zip(header, record)}
                second = datetime.datetime.fromtimestamp(
                    int(row['timeStamp']) / 1000.0).strftime('%c')
                self.responsepersec.setdefault(second, []).append(
                    float(row['Latency']))

    def collectinfo(self, shouldprint: bool = False) -> None:
        """Hand every gathered sample list over to the shared walker."""
        self.walkresponsepersec(self.responsepersec, shouldprint)
|
|
\end{lstlisting}
|
|
|
|
\clearpage
|
|
\section{Hey kimenet\'et feldolgoz\'o k\'odr\'eszlet, Python nyelven}
|
|
\begin{lstlisting}[label=code:hey-analyze]
|
|
class HeyAnalyzer(CsvAnalyzer):
    """Collect per-second response-time samples from a hey CSV result file."""

    def __init__(self):
        super().__init__()

    def processfile(
            self,
            fname,
            shouldprint: bool = False):
        """Group the response-time column of *fname* by the integer part of
        the request offset (seconds since the benchmark started), then feed
        the groups to the shared walker.
        """
        with open(fname, 'r') as csvfile:
            reader = csv.reader(csvfile)
            header = next(reader)
            responsepersec = {}
            for record in reader:
                row = {field: cell.strip()
                       for field, cell in zip(header, record)}
                sec = int(row['offset'].split('.')[0])
                # FIX: the original only appended in the 'else' branch, so
                # the first sample of every second was silently dropped.
                responsepersec.setdefault(sec, []).append(
                    float(row['response-time']))
            self.walkresponsepersec(responsepersec, shouldprint)
|
|
|
|
\end{lstlisting}
|