Creating a repo for everything on website
This commit is contained in:
15
README.md
15
README.md
@@ -1,3 +1,14 @@
|
||||
# flow-hugo
|
||||
# FlowWithHalvo
|
||||
|
||||
Blog for tech/cert and random stuff
|
||||
|
||||
# Make new site
|
||||
`hugo new site <sitename>`
|
||||
|
||||
# Make new blog post
|
||||
`hugo new posts/<postname>.md`
|
||||
|
||||
# Compile site
|
||||
`hugo`
|
||||
|
||||
|
||||
Hugo site for flow with halvo, my security blog
|
||||
6
archetypes/default.md
Normal file
6
archetypes/default.md
Normal file
@@ -0,0 +1,6 @@
|
||||
---
|
||||
title: "{{ replace .Name "-" " " | title }}"
|
||||
date: {{ .Date }}
|
||||
draft: true
|
||||
---
|
||||
|
||||
93
config.toml
Normal file
93
config.toml
Normal file
@@ -0,0 +1,93 @@
|
||||
baseurl = "https://flow.halvo.me"
|
||||
languageCode = "en-us"
|
||||
title = "Flow With Halvo"
|
||||
# Add it only if you keep the theme in the `themes` directory.
|
||||
# Remove it if you use the theme as a remote Hugo Module.
|
||||
theme = "hugo-theme-terminal"
|
||||
paginate = 5
|
||||
|
||||
[params]
|
||||
custom_css = ["css/table.css"]
|
||||
# dir name of your main content (default is `content/posts`).
|
||||
# the list of set content will show up on your index page (baseurl).
|
||||
contentTypeName = "posts"
|
||||
|
||||
# ["orange", "blue", "red", "green", "pink"]
|
||||
themeColor = "pink"
|
||||
|
||||
# if you set this to 0, only submenu trigger will be visible
|
||||
showMenuItems = 0
|
||||
|
||||
# show selector to switch language
|
||||
showLanguageSelector = false
|
||||
|
||||
# set theme to full screen width
|
||||
fullWidthTheme = false
|
||||
|
||||
# center theme with default width
|
||||
centerTheme = true
|
||||
|
||||
# if your resource directory contains an image called `cover.(jpg|png|webp)`,
|
||||
# then the file will be used as a cover automatically.
|
||||
# With this option you don't have to put the `cover` param in a front-matter.
|
||||
autoCover = true
|
||||
|
||||
# set post to show the last updated
|
||||
# If you use git, you can set `enableGitInfo` to `true` and then post will automatically get the last updated
|
||||
showLastUpdated = false
|
||||
|
||||
# set a custom favicon (default is a `themeColor` square)
|
||||
# favicon = "favicon.ico"
|
||||
|
||||
# Provide a string as a prefix for the last update date. By default, it looks like this: 2020-xx-xx [Updated: 2020-xx-xx] :: Author
|
||||
# updatedDatePrefix = "Updated"
|
||||
|
||||
# set all headings to their default size (depending on browser settings)
|
||||
# oneHeadingSize = true # default
|
||||
|
||||
# whether to show a page's estimated reading time
|
||||
# readingTime = false # default
|
||||
|
||||
# whether to show a table of contents
|
||||
# can be overridden in a page's front-matter
|
||||
# Toc = false # default
|
||||
|
||||
# set title for the table of contents
|
||||
# can be overridden in a page's front-matter
|
||||
# TocTitle = "Table of Contents" # default
|
||||
|
||||
|
||||
[languages]
|
||||
[languages.en]
|
||||
languageName = "English"
|
||||
title = "Flow With Halvo"
|
||||
subtitle = "Basic Security Blog"
|
||||
owner = "Paul Halvorsen"
|
||||
keywords = ""
|
||||
copyright = ""
|
||||
menuMore = "Show more"
|
||||
readMore = "Read more"
|
||||
readOtherPosts = "Read other posts"
|
||||
newerPosts = "Newer posts"
|
||||
olderPosts = "Older posts"
|
||||
missingContentMessage = "Page not found..."
|
||||
missingBackButtonLabel = "Back to home page"
|
||||
|
||||
[languages.en.params.logo]
|
||||
logoText = "Flow With Halvo"
|
||||
logoHomeLink = "/"
|
||||
|
||||
[languages.en.menu]
|
||||
[[languages.en.menu.main]]
|
||||
identifier = "about"
|
||||
name = "About"
|
||||
url = "/about"
|
||||
[[languages.en.menu.main]]
|
||||
identifier = "showcase"
|
||||
name = "Showcase"
|
||||
url = "/showcase"
|
||||
[[languages.en.menu.main]]
|
||||
identifier = "personal"
|
||||
name = "Other Posts"
|
||||
url = "/pposts"
|
||||
|
||||
103
content/about/index.md
Normal file
103
content/about/index.md
Normal file
@@ -0,0 +1,103 @@
|
||||
# Summary
|
||||
|
||||
I'm a Software Engineer with over 11 years development and 15 years professional experience, with exposure to C, Python, PHP, JavaScript, Java, and C++ languages; various SQL databases; JQuery and Pytest frameworks; Docker containerization; and Rest API, JSON, XML, and nginx technologies.
|
||||
|
||||
# Work Experience
|
||||
|
||||
## Binary Defense
|
||||
|
||||
**Sr Software Engineer**: April 2022 - Present
|
||||
|
||||
- Python development using pyenv, pipenv, cython, docker build environment, gitlab pipelines, and static compilation
|
||||
- Develop security alarms for Windows, Linux (Debian and RedHat), and MacOS
|
||||
- Design and build containment for all platforms upon detected compromise
|
||||
|
||||
## Kyrus Tech
|
||||
|
||||
**Sr Software Engineer**: Nov 2020 - April 2022
|
||||
|
||||
- Perform test driven development for all tasks: C, Python/Pytest, Docker, GitLab CI/CD
|
||||
- Build covert communications and file transfers proxy: HTTPS, Apache Thrift, Rest API
|
||||
- Design compact router fingerprinting and vulnerability analysis: Android, HTTPS, TCP/IP, StreamCypher Encryption
|
||||
- Modify existing code to suppress logging from inside the Linux Kernel: various Linux Kernel versions, Ghidra
|
||||
|
||||
## Parsons
|
||||
|
||||
**Cyber Security Software Engineer**: Apr 2018 - Nov 2020
|
||||
|
||||
- Continue development of covert Windows application: C, C++, Python
|
||||
- Build modular solution for plug and play architecture
|
||||
- Design custom API for minimal data transfer to back-end
|
||||
- Encrypt storage and comms using AES shared key to maintain confidentiality and integrity
|
||||
- Build prototype back-end service for file storage and search: Java, Tomcat, Niagarafiles (NiFi), nginx, Hadoop, MySQL, LDAP, RBAC
|
||||
- Create API for uploading files via web interface or CLI
|
||||
- Track and maintain multi-level user access
|
||||
- Generate metadata for searching
|
||||
|
||||
## NSA
|
||||
|
||||
**Security Software Engineer**: Nov 2011 - Apr 2018
|
||||
|
||||
- RedTeam DevOps development of browser enumeration, manipulation, and exploitation: PHP, JavaScript, JQuery, CSS, Python, MySQL, Java, Apache, Tomcat, Linux, Windows, Chrome, Firefox, Safari, IE, Edge
|
||||
- Design Rest and JSON API to transfer data between targets, server, and UI
|
||||
- Deliver covert JavaScript to targets for enumeration and exploitation
|
||||
- Design front-end to provide a dynamic UI with real time target data and graphs and charts for in-depth data
|
||||
- Design MySQL database to hold and quickly query enumeration and exploitation data
|
||||
- Update PHP back-end for security and performance
|
||||
- Advise and develop vulnerability mitigation strategies for various military and government customers
|
||||
- Train and provide SOPs to NSA RedTeam operators for various tools
|
||||
|
||||
**Systems Engineer**: Sept 2009 - Nov 2011
|
||||
|
||||
- Deploy, maintain, and monitor 30+ systems with 130+ Red Hat Enterprise Linux (RHEL) servers: LDAP, DNS, Apache, NiFi, Hadoop, Apache, Puppet, DHCP, PXE
|
||||
- Develop and deploy monitoring, reporting, and issue correcting scripts: Python
|
||||
- Organize, train, and participate in team performing 24x7 call-in rotation
|
||||
- Responsible for 5+ domestic and foreign system deployments
|
||||
|
||||
## Salisbury University
|
||||
|
||||
**Software Developer**: Nov 2006 - May 2008
|
||||
|
||||
- Funded through the Wallops Flight Facility (NASA)
|
||||
- Provide simplified UI and scenario builder for the Satellite Tool Kit (STK): Managed C++
|
||||
- Design risk assessment scenarios for launch vehicles and UAVs over the DELMARVA peninsula
|
||||
- Collaborate with Geographic Information Science (GIS) for mapping
|
||||
|
||||
**Lab Administrator**: Sept 2007 - May 2009
|
||||
|
||||
- Support Math and CS departments at SU
|
||||
- Maintain the Linux labs on campus: dual boot OpenSUSE, WindowsXP, and OpenSUSE server
|
||||
- Perform backups, updates, user management (LDAP), disk quotas, and remote access
|
||||
|
||||
# Education
|
||||
|
||||
University of Maryland Baltimore Campus
|
||||
|
||||
: Masters in Computer Science; 2013. Thesis: "Stateless Detection of Malicious Traffic: Emphasis on User Privacy"
|
||||
|
||||
Salisbury University
|
||||
|
||||
: Bachelors in Computer Science; 2009. Magna Cum-Laude
|
||||
|
||||
Security+
|
||||
|
||||
: ID: COMP001021281239; Exp Date: 04/04/2024
|
||||
|
||||
Royal Military College (RMC Canada)
|
||||
|
||||
: Training in OpenBSD development and administration
|
||||
|
||||
# Miscellaneous
|
||||
|
||||
RedBlue Conference
|
||||
|
||||
: Presented combination web enumeration/exploitation tool
|
||||
|
||||
National Conference for Undergrad Research (NCUR)
|
||||
|
||||
: Presented development of STK scenario building and manipulation
|
||||
|
||||
SANS Courses
|
||||
|
||||
: Staying up-to-date on security research
|
||||
|
||||
18
content/posts/README.md
Normal file
18
content/posts/README.md
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
title: "README"
|
||||
date: 2019-08-01
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Security Blog
|
||||
|
||||
This blog is various summaries of minor research, reading, and independent learning regarding computer security.
|
||||
|
||||
Mostly this blog is to satisfy the requirements for my Security+ certificate.
|
||||
|
||||
# Cert ID
|
||||
|
||||
Security+ ID: COMP001021281239
|
||||
|
||||
Exp Date: 04/04/2024
|
||||
|
||||
85
content/posts/bad-malware-analysis-character-count.md
Normal file
85
content/posts/bad-malware-analysis-character-count.md
Normal file
@@ -0,0 +1,85 @@
|
||||
---
|
||||
title: "Bad Malware Analysis: Character Count"
|
||||
date: 2020-03-06
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
I'm thinking of doing a series on bad malware analysis. Hopefully it'll be fun and at least a little informative.
|
||||
|
||||
|
||||
Today's post consists of performing a string analysis on malware. Where most string analysis looks at the big picture, I thought I would take it a step further and look at individual characters. This approach is terrible, as you will soon see.
|
||||
|
||||
# Why Strings
|
||||
|
||||
If you've made it this far, I'm assuming you already have some basic knowledge of computers and maybe even looking at malware. As such, you may already know what string analysis is all about, but here is a quick crash course on strings.
|
||||
|
||||
Strings is a way to look at what readable characters are available in a binary and is low hanging fruit for a researcher. This is why a lot of researchers will start with this step to gain basic insight into a piece of malware.
|
||||
|
||||
Some of the strings spit out are function calls, both internal and library calls. This can inform the researcher on expected behaviors. In the case of very specific function calls, signatures (or at least an indicator) can be created for firewalls and anti-virus. Strings can also output messages put in by the developer, these can range from cryptic to debug statements (in the lucky cases). Again this can give the researcher information or the ability to create signatures/indicators.
|
||||
|
||||
In order for a signature to be created from strings, it needs to be very very specific in order to avoid false positives. As such this is rarely done and string analysis is usually reserved for giving a researcher a leg up before diving deeper into their analysis.
|
||||
|
||||
|
||||
Indicators are a little more practical for use with strings. The more indicators the more confident you can be that this is a piece of malware.
|
||||
|
||||
# Why Characters
|
||||
|
||||
Now for my terrible way of using strings ... character analysis.
|
||||
|
||||
How can we make analysing strings terrible? By breaking it down into individual characters. This can then be used for generating an indicator for a confidence value.
|
||||
|
||||
Cutting to the chase, if a piece of software has a lot of the following characters, it may be malware:
|
||||
|
||||
v j ; , 4 q 5 /
|
||||
|
||||
## Why Those Characters
|
||||
|
||||
How did I come to such a wild conclusion that v's and j's are a problem ... time for some terrible analysis.
|
||||
|
||||
## Where Are the Samples
|
||||
|
||||
To perform my analysis, I pulled down around 500 samples of malware from theZoo (https://thezoo.morirt.com/) and dasMalwerk (https://dasmalwerk.eu/). For samples of benign software I grabbed all of /bin on Fedora and 200 libraries from C:/Windows directory.
|
||||
|
||||
# How Was it Analysed
|
||||
|
||||
Next I wrote a python program to run strings, loop through each individual character, make them lowercase, then count. This was done for both malware and benign samples, then compared in two ways:
|
||||
1. Count the total number of characters in the malware samples and the total number in the benign. Then subtract the two. Sort and look
|
||||
2. Take the ratio of each character count to the file size for the malware and benign samples. Average that across all files, then subtract and compare. (don't worry I'll explain)
|
||||
|
||||
## Basic Count
|
||||
|
||||
The basic count is fairly self explanatory, just keep a running tally of characters and subtract. Here are the top ten characters most likely and least likely to be in malware:
|
||||
|
||||
" 5 j q 4 , 6 2 1 3
|
||||
|
||||
r i c o e _ h t $ a
|
||||
|
||||
This is terrible for many reasons, but specifically because it is un-weighted. So if there is a single piece of small malware that uses the number '5' very heavily (or small benign that uses the letter 'a'), but no others do, it could show up here.
|
||||
|
||||
I wanted to find a way to weigh the characters, such that a single sample couldn't skew all of the results.
|
||||
|
||||
## Ratio Analysis
|
||||
|
||||
Here's where it gets more complicated and I'll try to explain.
|
||||
1. Keep a running tally per malware sample (not a total for all samples)
|
||||
2. Calculate the ratio of file size to count: Letter Count / File Size
|
||||
3. Average the ratios: sum(ratio) / Number of Files
|
||||
4. Compare to the benign samples using steps 1-3
|
||||
|
||||
Hopefully that made sense. This was so even if one file had a ton of one character, it wouldn't skew the results. So what were the top 10 malware characters and top 10 benign:
|
||||
|
||||
v j ; d l , 4 q 5 /
|
||||
|
||||
_ s g r o $ f i a "
|
||||
|
||||
Obviously this gives pretty big differences: double quote went from being the worst offender to most benign. Using the ratio gives a much better analysis since it doesn't allow a single sample to skew the results.
|
||||
|
||||
# So What Now
|
||||
|
||||
Using the ratio is probably good on its own, so how did I come up with my character dirty list? I looked at the worst offenders of both ways to analyse the code and came up with the list:
|
||||
|
||||
v j ; , 4 q 5 /
|
||||
|
||||
More analysis needs to be done to determine the threshold for confidence. For now, the higher the ratio of those 8 characters to size of file, the more confident you can be it is Malware.
|
||||
|
||||
52
content/posts/bad-malware-analysis-hash-letter-counts.md
Normal file
52
content/posts/bad-malware-analysis-hash-letter-counts.md
Normal file
@@ -0,0 +1,52 @@
|
||||
---
|
||||
title: "Bad Malware Analysis: Hash Letter Counts"
|
||||
date: 2020-04-12
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
For this bad malware analysis, I thought I would continue the theme of counting letters ... that way I could use most of my old code :)
|
||||
|
||||
Today, I decided to hash each file using sha512. Hashing is supposed to be completely random, so this is almost a test of that as well. I used around 3000 malicious samples and 1800 benign, so let's get started.
|
||||
|
||||
# Why Hash, Why sha512
|
||||
|
||||
Hashing binaries is done all the time to verify downloads, check for changes, provide signatures, provide low hanging fruit for malware signatures, and many more purposes. It is so widely used, I was wondering if it was possible to use the hash itself as a flag to determine if this could be malware (beyond just a hash table).
|
||||
|
||||
In reality using a hash as a signature should not work. Hashes are meant to be random, thus there should be no discernible pattern, right? Lets see what I found out.
|
||||
|
||||
The reason I decided to do the letter count on hashes was for two reasons; 1) it pseudo tests the randomness of hashing and 2) I just thought it would be interesting.
|
||||
|
||||
The reason I decided sha512 is also two fold; 1) it's long, so it'll provide some of the most data and 2) sha in general is one of the most accepted hashing algorithms, so I went with that.
|
||||
|
||||
# What Was My Result
|
||||
|
||||
Surprising! There seems to be a pattern of what characters show up most in hashes for malware.
|
||||
|
||||
# What!
|
||||
|
||||
Yep, it appears that if you see around 3% more f's and 1% more 7's and 5's in your sha512 hash, then you might have some malware.
|
||||
|
||||
## That Can't be Right!
|
||||
|
||||
Hard to believe, but that is what it seems like: 'f, 7, and 5' show up more and 'e and 6' show up 1% less in malware.
|
||||
|
||||
# Ok, So How Was it Done
|
||||
|
||||
## Where are the Samples
|
||||
|
||||
Same as my string analysis, to perform my hash analysis, I pulled down around 500 samples of malware from [theZoo](https://thezoo.morirt.com/) and [dasMalwerk](https://dasmalwerk.eu/). For samples of benign software I grabbed all of /bin on Fedora and 200 libraries from C:/Windows directory.
|
||||
|
||||
## How was it Analysed
|
||||
|
||||
I modified my program from doing string checks to perform the hash analysis. Now, instead of running strings on each of the files it performs a sha512 hash. I then averaged the number of each character seen for each file. This means I counted the number of '1's seen for all malicious file hashes, then dividing by the total number of files.
|
||||
|
||||
This was done for all characters for each malicious and benign binaries. After that I subtracted the benign averages from the malicious averages and divided by the original value.
|
||||
|
||||
# Why?
|
||||
|
||||
So a difference of 1 - 2% is not that much, but 3% seems more significant. This shouldn't happen, all characters should show up about evenly. This can probably be accounted for with just the samples that I had chosen. Choose a different set of 1000 binaries and the results could be different.
|
||||
|
||||
If this wasn't bad malware analysis, I wouldn't stop here. I'd download some more samples and continue ... but well it is bad, so make a signature in your IDS for sha512, and if you see more f's in the hash, then you might have some malware.
|
||||
|
||||
42
content/posts/bad-malware-analysis-string-size-ratio.md
Normal file
42
content/posts/bad-malware-analysis-string-size-ratio.md
Normal file
@@ -0,0 +1,42 @@
|
||||
---
|
||||
title: "Bad Malware Analysis: String Count vs File Size"
|
||||
date: 2021-03-08T20:20:31Z
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
Next up in bad malware analysis is comparing the size of a file to the output of the command strings. The idea here is that malware may contain fewer strings per KB than benign binaries. This would make logical sense as many malware samples are packed, encrypted, and/or stored in the data section of the binary, to be extracted later. This is done to help obfuscate them from hash signatures.
|
||||
|
||||
# Samples
|
||||
|
||||
There are around 500 malware samples, coming from two sources: [theZoo](https://thezoo.morirt.com/) and [dasMalwerk](https://dasmalwerk.eu/). For samples of benign software I grabbed 200 libraries from C:/Windows directory.
|
||||
|
||||
# Calculations
|
||||
|
||||
Using python I wrote a quick script to count the number of strings returned (separated by a newline) and compared it to the size (in KB) to the file. I performed this using strings of min size 2, 3, 4, 5, and 6. Why those numbers ... because that is where I decided to stop. The average strings per KB was then calculated.
|
||||
|
||||
| String Min Len | Benign (Str/KB) | Mal (Str/KB) | % Diff |
|
||||
| --------------:| ---------------:| ------------:| --------:|
|
||||
| 2 | 51.54 | 51.70 | - 0.31 % |
|
||||
| 3 | 24.15 | 20.52 | 15.03 % |
|
||||
| 4 | 12.40 | 9.70 | 21.77 % |
|
||||
| 5 | 5.59 | 5.58 | 0.18 % |
|
||||
| 6 | 4.32 | 3.96 | 8.33 % |
|
||||
|
||||
# Results
|
||||
|
||||
The results are kinda in line with what I thought. Most of the malicious binaries have fewer strings per KB than the benign. Surprisingly, looking at a minimum string length of two and five, the benign and malicious binaries have about the same number of strings per KB. The string length of two makes sense as a lot of strings that small come down to random bytes in the binary looking like strings.
|
||||
|
||||
The five string length (and six is pretty close too) is kinda surprising tho. It may be able to be explained by debug messages or something similar. If this wasn't bad malware analysis I would look into it more. It could also be due to long strings in binaries being low occurrence anyway. Malicious code wants to give as little away as possible and benign code would probably just use external resources at that point.
|
||||
|
||||
It appears the sweet spot for comparing malicious to benign binaries is four. At this length there are around 22% more strings per KB in benign binaries than malicious.
|
||||
|
||||
Overall the results were in line with what I expected, however they were a lot closer than I thought they would be.
|
||||
|
||||
# Future Work
|
||||
|
||||
If this were not bad malware analysis I would continue to look at the individual strings for patterns ... oh wait that was in previous bad malware analysis.
|
||||
|
||||
It may be time to combine these bad ways of analyzing the strings and see if we can make meaningful predictions. I've always wanted to play around with neural nets in python, maybe there will be a way to use all my bad string analysis together to form a decent confidence value.
|
||||
|
||||
@@ -0,0 +1,62 @@
|
||||
---
|
||||
title: "Bad Password Analysis: Consecutive Character Patterns"
|
||||
date: 2020-09-16
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
Continuing from my Bad Malware Analysis, we now take a look at Bad Password Analysis. Mostly this is just for the fun of it, but we'll see if we can learn anything along the way.
|
||||
|
||||
In this Bad Password Analysis post, we'll look at consecutive character frequency. I've done analysis on two and three consecutive characters and compared it to a word frequency list generated from subtitle archives.
|
||||
|
||||
# Data
|
||||
|
||||
The passwords come from several leaks. These include honeynet, myspace, rockyou, hotmail, phpbb, and tuscl lists. All of these lists contain the count of how many times a password was used as well. Total there are 14,584,438 unique passwords.
|
||||
|
||||
For comparison, I'm using an English word frequency list generated from subtitles. There are 1,656,996 unique words.
|
||||
|
||||
I wrote a quick script to combine these into a single text file, to remove all duplicates and update all counts. This was the list used for all further analysis.
|
||||
|
||||
# Algorithm
|
||||
Everything is written in python.
|
||||
|
||||
A few decisions needed to be made before analyzing the data. The first thing I decided was to not worry about substitutions. So in my analysis @ does not equal a. This is a limitation, since it would provide a more accurate representation of characters in passwords. Second, all passwords and English words were set to lower case. This way patterns would be more apparent. If the goal is cracking, it's incredibly easy to just change cases.
|
||||
|
||||
The algorithm loops through each of the word lists grabbing the word/password and their frequency. The word is then split into individual characters and each character pair is counted by adding the frequency.
|
||||
|
||||
I.E. The algorithm will take the word 'PaSsAs' with a frequency of 20 and do the following.
|
||||
|
||||
passas -> [p,a,s,s,a,s]
|
||||
pa = 20
|
||||
as = 40
|
||||
ss = 20
|
||||
sa = 20
|
||||
|
||||
There is an option to turn off the use of frequency. I've analyzed this below as well.
|
||||
|
||||
Given a starting character, will this analysis allow us to predict what the next character will be?
|
||||
|
||||
# Analysis
|
||||
For the analysis, I looked at with and without frequency counts. Within that, I did an internal comparison of frequency of each character set seen as well as a comparison with the dictionary values.
|
||||
|
||||
## With Frequency
|
||||
With frequency taken into account, the top 100 password two character combinations only cover 11% of all combinations. This seems rather low (I know very technical), so intuition says, this is not a good way to predict the password. In addition the top combination is 's2' which only constitutes 0.15% of combinations.
|
||||
|
||||
Lets compare this to the dictionary words. The top 100 combinations cover a staggering 60% of all combinations. This would be a good predictor for what would be the next letter in English. The top combination in the dictionary data 'th' covers almost 3% of all combinations.
|
||||
|
||||
Comparing the two further we can see 10 character combinations shared in the top 100 password and dictionary characters. I was expecting this to be higher, but this could be due to character substitutions in passwords. Such as 'mo' is in the dictionary top 100 but not for passwords. However 'm0' is in the top 100 password list.
|
||||
|
||||
## Without Frequency
|
||||
Without taking into account the frequency of words and passwords doesn't change much. The top 100 passwords now account for 35% of all combinations, which seems like it could be a better predictor. But this weighs good unique passwords the same as common ones. Dictionary gets worse at only 45% of combinations accounted for in the top 100.
|
||||
|
||||
Without taking into account frequency, '08' becomes the top password combination at 0.79% and 'se' becomes the top dictionary combination at 1.13%.
|
||||
|
||||
Surprisingly, without taking frequency into account, we see less substitutions in the password data. This means we now see 64 out of 100 duplicates between the data. This is closer to what I would have expected. Most people tend to use dictionary words for their passwords, so it would make sense to see duplicates across the data.
|
||||
|
||||
# Conclusion
|
||||
This is probably not a good way to go about cracking passwords. Mostly this data simply shows to use dictionary word lists and substitution lists.
|
||||
|
||||
We could have done a few things better. One of which is look at common substitutions and see how that changes things. In many of the passwords, the standard alpha characters are replaced by numbers and symbols; such as @ or 4 for a, 5 or $ for s, and so on.
|
||||
|
||||
|
||||
81
content/posts/bad-password-analysis-dictionary-words.md
Normal file
81
content/posts/bad-password-analysis-dictionary-words.md
Normal file
@@ -0,0 +1,81 @@
|
||||
---
|
||||
title: "Bad Password Analysis Dictionary Words"
|
||||
date: 2021-03-11T18:55:01Z
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
For this episode of bad analysis, we are going to be looking at word frequency in passwords. Overall this isn't terrible analysis, but what makes it bad is I'm just looking for the first occurrence of a dictionary word in each password. This will miss *a lot* of words in passwords.
|
||||
|
||||
Additionally we will miss words because:
|
||||
1. Only the first dictionary word in each password is used
|
||||
2. Only American English words found in the Linux American English dictionary are used
|
||||
3. No common replacements are used for numbers or symbols (all of those are just blanked out)
|
||||
4. No common misspellings are corrected
|
||||
5. Plurals are considered unique words
|
||||
|
||||
We are missing a lot of words in these passwords, but that is why this is bad analysis.
|
||||
|
||||
# Data
|
||||
|
||||
The passwords come from several leaks. These include honey-net, MySpace, rockyou, hotmail, phpbb, and tuscl lists. All of these lists contain the count of how many times a password was used as well. Total there are 14,584,438 unique passwords.
|
||||
|
||||
This took forever to loop through, pulling out the words, then comparing them to the dictionary words. My code is only single threaded and doesn't use any additional efficiencies. It took around 15 hours to complete ... so if anything went wrong, I'm not running it again :) Maybe at some point I'll multi thread it and see if it can run a little faster.
|
||||
|
||||
I'm comparing the password list to the American English word list found on Linux. There may be a more complete list somewhere out there, but this worked for me.
|
||||
|
||||
# Results
|
||||
|
||||
## Raw Data
|
||||
|
||||
The words were extracted, counted, and sorted. There were 68,402 unique words, the top 10 words account for around 5% of total words seen, and there were 21,191 unique words only seen in their own password.
|
||||
|
||||
Here are the top 10 words used in the passwords (with the caveats above):
|
||||
|
||||
All percentages are approximate
|
||||
|
||||
| Word | Percentage |
|
||||
| ---- | ---------: |
|
||||
| love | 2.0 % |
|
||||
| baby | 0.7 % |
|
||||
| password | 0.4 % |
|
||||
| angel | 0.4 % |
|
||||
| ana | 0.4 % |
|
||||
| princess | 0.3 % |
|
||||
| sexy | 0.3 % |
|
||||
| girl | 0.2 % |
|
||||
| and | 0.2 % |
|
||||
| ito | 0.2 % |
|
||||
|
||||
## Additional Fun Stuff
|
||||
|
||||
How positive are people's passwords. Using a list of positive words found at [Positive List](https://gist.github.com/mkulakowski2/4289437) and a list of negative words found at [Negative List](https://gist.github.com/mkulakowski2/4289441), I've compared to our word frequency from our list.
|
||||
|
||||
Positive words were used 1,172,617 times and negative words were used 1,172,617 times. As an optimist at heart, this didn't surprise me too much. Let's take a closer look and look at the top 5 words in each category.
|
||||
|
||||
| Positive | Number | Negative | Number |
|
||||
| -------- | -------: | --------: | ------: |
|
||||
| love | 442,689 | f\*\*k | 53,969 |
|
||||
| angel | 98,154 | rocky | 41,655 |
|
||||
| sexy | 65,062 | mar | 38,915 |
|
||||
| sweet | 44,192 | bi\*\*h | 38,262 |
|
||||
| lover | 39,794 | crazy | 21,330 |
|
||||
|
||||
Looking at positive and negative occurrences has its own issues beyond just the word analysis. As you can see there are certain omissions that I would think would be in positive, like "baby." There are also inclusions in negative that I would not have made, such as "mar" which could just be March for someone's birthday. Better lists would need to be found or crafted, or entire passwords would need a language processor to determine if they are negative or positive.
|
||||
|
||||
# Conclusion
|
||||
|
||||
Not much to conclude here, mostly this was for fun. Don't use dictionary words in your password, it doesn't take long to loop through the dictionary, and if you do, try to use longer random words, rather than meaningful ones.
|
||||
|
||||
People tend to be more positive in their passwords which is nice to see.
|
||||
|
||||
This was a lot of fun to implement and I may come back to this to see if I can improve upon looking at words.
|
||||
|
||||
# Future Work
|
||||
|
||||
- Thread all the things, maybe it'll run faster.
|
||||
- Look for more than just the first word in each password
|
||||
- Replace numbers with common letters (like 4 becomes a and 3 becomes e)
|
||||
- Maybe look for plurals as the same
|
||||
|
||||
56
content/posts/exploring-enrollment-over-secure-transport.md
Normal file
56
content/posts/exploring-enrollment-over-secure-transport.md
Normal file
@@ -0,0 +1,56 @@
|
||||
---
|
||||
title: "Exploring Enrollment over Secure Transport"
|
||||
date: 2023-03-30
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
Currently one of my projects uses "pinned" certs to securely communicate back to a REST service. These are pinned to allow for truly secure authentication of the server, eliminating a rogue certificate authority (CA) to issue a fake cert and allow for man-in-the-middle (MITM) attacks. This is a huge hassle as the server and client need to stay in sync. This involves cutting a new release just to update certs and trying to get them deployed in the expiration/reissue window. [Enrollment over Secure Transport](https://www.rfc-editor.org/rfc/rfc7030.html) (EST) should provide a better way to issue certs from the server so the client just has to request the new ones and download them.
|
||||
|
||||
# What is EST
|
||||
|
||||
EST allows a client to authenticate to the EST server, which then delivers a client cert. This could be unique to the client or generic for all clients. Issued certificates can then be used to re-authenticate to the EST to get the updated cert. By having this re-authentication method, a client can automatically get the most up-to-date cert in a secure way. By not having it compiled into the binary (i.e. pinning) a new release is not needed to simply update the cert.
|
||||
|
||||
To do this, the client authenticates to the EST server, either via public/private key pair or username/password, and the client authenticates the server, either through the same public/private key challenge or external CA. Once authenticated, the EST server will issue the correct cert. All communication is over a TLS connection.
|
||||
|
||||
# Possible Setup
|
||||
|
||||
First, no to username/password. With username/password authentication, the client will be reliant on an external CA to authorize the server, which is what "pinning" was supposed to remove. So, if username/password is used, there is no real need for an EST server and the client can just connect directly to the server (for our use case).
|
||||
|
||||
So using pub/priv key pair seems to be the best way to move forward. The EST server will have public keys for each client that will be connecting, the clients in turn will either have their key built into the binary or distributed offline. These two methods are almost pure foils of each other, so a pro for one is a con for the other. So let's just look at my preference (spoiler alert) having the client keep track of their key.
|
||||
|
||||
Pros of giving the client their key to keep track of:
|
||||
|
||||
- Keeping the key separate makes it easier to revoke if compromised, just give them a new key and have them restart their software
|
||||
- By keeping it separate it should also mean that it'll be less likely to become compromised, since reverse engineering the binary won't be an issue
|
||||
- Once initial connection is established the key can be deleted and it's no longer on the client's machine
|
||||
|
||||
Cons of a separate key
|
||||
|
||||
- The client must keep track of the key, they could accidentally delete it and need it re-issued
|
||||
- It adds additional steps to installation as the key needs to be in place for anything to work
|
||||
|
||||
Being able to easily revoke and re-issue a private key is the deciding factor for me. This is the true solve to the problem of pinning. Building in the private key helps with the pinning issue as it doesn't need to be updated as frequently, but it really just delays the issue. Yes it's more work for the client to get everything setup, but a little inconvenience shouldn't get in the way of good security.
|
||||
|
||||
# Final Proposal
|
||||
|
||||
The final setup could look something like this:
|
||||
|
||||
1. Pub/Priv key pair is generated
|
||||
1. Pub key is put onto the EST which maintains a database of pub key to TLS cert relationships
|
||||
1. Pub/Priv key pair is given to the client to be put in place upon software installation
|
||||
|
||||
Once the software is installed it would:
|
||||
|
||||
1. Pull in the priv key
|
||||
1. Connect to the EST
|
||||
1. Establish trust through an encrypted challenge response
|
||||
1. EST issues a unique TLS cert to that client
|
||||
1. Client uses TLS cert to connect and authenticate to backend server
|
||||
1. When TLS cert expires, it can be used to re-auth with the EST and download the next TLS cert
|
||||
|
||||
# Conclusion
|
||||
|
||||
Using this method of authentication with a pub/priv key pair to an EST, then using the issued TLS cert for authentication is the best way to remove the need for pinned certificates and username/passwords. The private key is the primary way the client authenticates, since it uses that key pair to get the TLS cert. Using the TLS cert for authentication makes it so a client doesn't need to continuously update passwords. By having the private key separate from the binary, and the TLS cert for authentication, it becomes relatively simple to re-issue creds when a system is compromised.
|
||||
|
||||
34
content/posts/fast-flux-botnet-overview.md
Normal file
34
content/posts/fast-flux-botnet-overview.md
Normal file
@@ -0,0 +1,34 @@
|
||||
---
|
||||
title: "Fast Flux Botnet Overview"
|
||||
date: 2019-09-26
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
In this post we will explore a brief overview of the fast-flux (FF) technique used by botnets. [Here is my full paper](/security/FastFluxPaper.pdf) with more detail regarding what a botnet is and how FF works.
|
||||
|
||||
# Botnet Overview
|
||||
|
||||
Botnets are a major threat to all those connected to the Internet. They are used for distributing spam, hosts for malicious code, sending phishing attacks, and performing a variety of attacks, including denial of service (DOS). Many botnets will use DNS names to control or connect to the botnet. This would seemingly be easy to shut down — just block the particular domain — however, through a technique called fast-flux (FF), botnets are able to evade detection and mitigation.
|
||||
|
||||
# Fast Flux Overview
|
||||
|
||||
Fast-flux is the process of quickly changing the domain name or IP addresses associated with a domain in order to hide the bot-master, or command and control (CC), for the botnet. These fast changes are accomplished through two primary technologies, dynamic DNS (DynDNS) and round robin.
|
||||
|
||||
To quickly change the names associated with the botnet, fast-flux uses dynamic DNS (DynDNS). DynDNS's original purpose was for those individuals who did not have static IP addresses, allowing them to quickly update the name-address relationship as needed. Botnets will keep a list of names that they cycle through, bringing up new names either as needed or randomly. The bots will then have several locations (including built in lists) to check the new CC domain.
|
||||
|
||||
By quickly changing the domain associated with the bot-master, it is effectively impossible to use DNS names to setup a rule in a firewall to block connections to the bot-master. However a savvy admin would then check what IP is being contacted and block all those connections. Enter round robin.
|
||||
|
||||
Round robin was a technique developed for load balancing. Sites that see a large amount of traffic need to balance that traffic between several servers. In this way none get bogged down too much. Fast-flux botnets use this technique to hide their CC IP addresses. A botnet will setup a series of front-end proxies that are disposable. Bot-master's will use DynDNS to add and remove IP addresses associated with the domain and round-robin will rotate through them. This way the CC stays hidden and no firewall rules can be created to block on IP address.
|
||||
|
||||
In addition to DynDNS and round-robin, some botnets will be double-fluxed. In this technique a botnet will setup its own name servers and rotate through them as well. More detail is in the paper.
|
||||
|
||||
# Detection/Mitigation
|
||||
|
||||
There are two primary ways of detecting and mitigating fast-fluxing botnets that need to be used in conjunction. The first is to look at the time to live (TTL) for DNS entries to be cached. Fast-fluxing botnets tend to use very short TTL values compared to legitimate domains. The second is keeping a "FF Activity Index" or how often name-address relationships change. The "FF Activity Index" will hold both how often the IP address for a given domain changes and how often domains change for a single IP address. Even looking at these two indicators still yields a number of false positives. More details in the paper.
|
||||
|
||||
# Conclusion
|
||||
|
||||
Botnets are getting more sophisticated and more research is needed to detect these techniques. The best way to block these connections is to attempt to stop the CC directly. Most hide behind proxies and many use FF techniques to hide those. FF is an arms race between detection and ever more sophisticated ways of hiding activities.
|
||||
|
||||
51
content/posts/random-algorithm-analysis.md
Normal file
51
content/posts/random-algorithm-analysis.md
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
title: "Random Algorithm Analysis"
|
||||
date: 2020-04-17
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
After reading through "Silence on the Wire" by Michal Zalewski for the 8th time, I decided I wanted to try the random algorithm analysis he did in Chapter 10. He looked at the relationship between sequential numbers by graphing them in a 3D scatter plot. My idea was to see if any of the algorithms had been updated to make them more secure.
|
||||
|
||||
There was a problem with that however. I only own one computer and it's too low power to run VMs. So I was stuck with the python algorithm, shuf, urandom, and two online random number generators. This was a big limitation and I hope to update this whenever I get a new computer.
|
||||
|
||||
# The Importance
|
||||
|
||||
Random algorithms cannot be predictable for security reasons. All encryption algorithms use random digits to generate keys. If the keys are predictable, then encryption can be broken. In "Silence on the Wire" it showed some random algorithms having limited range or predictable patterns to reduce the search space. Luckily the new algorithms seem to be doing better.
|
||||
|
||||
# The Math
|
||||
|
||||
Using the math in "Silence on the Wire" to create the graphs allows me to compare more directly to Mr Zalewski's. Of course this ended up not really mattering, since I was so limited. For a better explanation see the book Chapter 10, but here is a quick run down. Using data samples S0, S1, S2, being a randomly generated sequence, then calculate the deltas and graph those.
|
||||
|
||||
D2 = S2 - S1
|
||||
D3 = S3 - S2
|
||||
.
|
||||
.
|
||||
.
|
||||
DN = SN - SN-1
|
||||
|
||||
Then we graph the deltas in a 3D scatter plot using the following points:
|
||||
|
||||
P2 = (D4, D5, D6)
|
||||
P3 = (D7, D8, D9)
|
||||
.
|
||||
.
|
||||
.
|
||||
|
||||
# The Samples
|
||||
|
||||
The data came from the following locations: JS Math, Python's numpy package, random.org, Bash shuf, and urandom. Here are the graphs that were produced ... don't get excited, they are all basically the same:
|
||||
|
||||
Unfortunately my blog server crashed, so I've lost the images for now; I'll add them in later. The long and short is they all look basically the same.
|
||||
|
||||
# Conclusion
|
||||
|
||||
Why are these all basically the same ... probably because they all use the same exact algorithm. I was hoping Python had its own built-in PRNG, but it appears to use whatever the host uses. The shuf command and urandom make sense that they are the same. Shuf is kinda just a wrapper around urandom to give the user more control.
|
||||
|
||||
I was also hoping that these couple of web sites would use their own, but it appears they just run on top of Linux servers using urandom.
|
||||
|
||||
The positive to this is it hopefully means more eyes on the random algorithm, making sure they are unpredictable. The problem is if a flaw is found in urandom, it will affect almost everything.
|
||||
|
||||
If anyone wants the code just message me ... it's not complicated.
|
||||
|
||||
524
content/posts/rsa-optimization.md
Normal file
524
content/posts/rsa-optimization.md
Normal file
@@ -0,0 +1,524 @@
|
||||
---
|
||||
title: "RSA Optimization"
|
||||
date: 2022-12-06
|
||||
draft: false
|
||||
---
|
||||
|
||||
# INTRODUCTION
|
||||
|
||||
RSA is a public key cryptosystem, which was named after the creators of
|
||||
the algorithm: Rivest, Shamir, and Adleman [@STALLINGS]. It is widely
|
||||
used for both confidentiality and authentication. The main advantage for
|
||||
using RSA is the keys are created in such a way that the public key is
|
||||
published and can be used to encrypt all messages to the owner of the
|
||||
public key. Unlike symmetric key schemes, RSA does not require sender
|
||||
and receiver to agree on a common key to encrypt and decrypt messages.
|
||||
To send an encrypted message, the encryptor only has to look up the
|
||||
public key of the intended recipient.
|
||||
|
||||
The RSA algorithm works as follows. First, a public key and private key
|
||||
pair are generated. The public key is published and can be used to
|
||||
encrypt messages and verify signatures of the owner of the public key.
|
||||
The corresponding private key is known only to the owner and can be used
|
||||
to decrypt messages encrypted with the owner's public key. The private
|
||||
key can also be used to sign messages. The public and private keys are
|
||||
generated by choosing two distinct large primes, `p` and `q`. Then `n`
|
||||
is computed by multiplying `p` and `q` together. Since `n` is the
|
||||
product of two primes, we can compute Euler's totient function by
|
||||
`φ(n)=(p-1)(q-1)`. Then, an integer `e` is chosen such that
|
||||
`1<e<φ(n)` and `gcd(e,φ(n))=1`, where gcd stands for
|
||||
greatest common divisor. Finally an integer `d` is computed such
|
||||
that `d=e^{-1}(mod φ(n))`. Thus, the public key is the modulus
|
||||
`n` and the encryption exponent `e`. The private key is the decryption
|
||||
exponent `d`.
|
||||
|
||||
RSA is criticized for the amount of time it takes to encrypt and decrypt
|
||||
messages. To encrypt a message, the sender uses the public key, `(n,e)`.
|
||||
The message is converted to an integer `m`, such that `0<m<n`. Then, the
|
||||
ciphertext message `c` is computed by `c=m^{e}(mod n)`. The message
|
||||
can be recovered by using the private key, `d`, and the formula
|
||||
`m=c^{d}(mod n)`. Currently to be considered secure, the size of `n`
|
||||
needs to be 1024 bits or more. This makes RSA computationally slow
|
||||
because arithmetic modulo large numbers is notoriously slow. For
|
||||
example, on small handheld devices, RSA decryption with 1024 bit modulus
|
||||
can take up to 40 seconds [@BONEH]. Also, RSA decryption can
|
||||
significantly reduce the number of requests a web server can handle. In
|
||||
addition, as computers become more powerful, 1024-bit RSA will not be
|
||||
considered secure. This means that in the future, the size of the
|
||||
modulus will increase. Therefore, we need to create methods we can use
|
||||
RSA in a practical fashion.
|
||||
|
||||
To improve the speed of RSA encryption and decryption, we propose to
|
||||
create an instruction set architecture that will be made solely for RSA.
|
||||
In this paper, we will talk strictly about using RSA for encrypting and
|
||||
decrypting messages. However the same instruction set architecture we
|
||||
propose in this paper can be used for signing and verifying messages
|
||||
with RSA.
|
||||
|
||||
# CHARACTERISTICS OF RSA
|
||||
|
||||
There are three areas that the RSA can be optimized: finding the
|
||||
encryption and decryption exponent, prime number generation, and
|
||||
encrypting and decrypting messages. In this section we will describe how
|
||||
finding the encryption and decryption exponent, prime number generation,
|
||||
and encrypting and decrypting the message is usually done, without
|
||||
specialized instructions.
|
||||
|
||||
## ENCRYPTION AND DECRYPTION EXPONENT
|
||||
|
||||
The current approach to verifying that the encryption exponent is
|
||||
coprime to the `φ(n)` is by using the Euclidean Algorithm. To find
|
||||
the decryption exponent, the Extended Euclidean Algorithm is used.
|
||||
Therefore, we will include these algorithms in our architecture and
|
||||
create instructions to accommodate the Euclidean Algorithm and Extended
|
||||
Euclidean Algorithm.
|
||||
|
||||
The Euclidean Algorithm can be executed using the following pseudocode
|
||||
[@STALLINGS]:
|
||||
|
||||
;; load phi of n (computed using Euler's totient function) and encryption exponent
|
||||
load r1, location_of_phi(n)
|
||||
load r2, location_of_encryption_exponent
|
||||
|
||||
loop while r2 not equal to zero
|
||||
|
||||
div t0, r1, r2 ;; t0 = r1 / r2
|
||||
mult t1, r2, t0 ;; t1 = r2 * t0
|
||||
sub t2, r1, t1 ;; t2 = r1 - t1
|
||||
|
||||
store r2, r1 ;; r1 = r2
|
||||
store t2, r2 ;; r2 = t2
|
||||
|
||||
;; end loop
|
||||
|
||||
return r1 ;; after the loop r2 is zero, so r1 holds the gcd of phi of n and the encryption exponent
|
||||
|
||||
|
||||
If the `gcd(φ(n), e) = 1`, then `e` is a valid encryption exponent
|
||||
for RSA, and we can now find the decryption exponent, `d`. If the
|
||||
`gcd(φ(n), e) ≠ 1`, then a new encryption exponent `e` needs to
|
||||
be chosen such that `1<e<φ(n)`. Then, the Euclidean Algorithm
|
||||
needs to be run again to verify that `gcd(φ(n), e) = 1`.
|
||||
Typically, to execute the Euclidean Algorithm, a divide, multiply, and
|
||||
subtract instruction would be needed in each iteration of the loop. In
|
||||
the design section, we will describe an instruction that will combine
|
||||
the divide, multiply, and subtract instruction into one instruction.
|
||||
|
||||
Once the encryption exponent, `e` is chosen, we need to find the
|
||||
decryption exponent `d`. The decryption exponent, `d` needs to satisfy
|
||||
the following formula: `d*e = 1 (mod φ(n))`. Another way of
|
||||
describing `d` is it is the multiplicative inverse of `e` modular
|
||||
`φ(n)`. The Extended Euclidean Algorithm can be executed using the
|
||||
following pseudocode [@CORMEN]:
|
||||
|
||||
;; load phi of n (computed using Euler's totient function) and encryption exponent
|
||||
load r1, location_of_phi(n)
|
||||
load r2, location_of_encryption_exponent
|
||||
|
||||
;; values that will be used and updated in the loop of the algorithm
|
||||
store 0, r3 ;; r3 = 0
|
||||
store 0, r4 ;; r4 = 0
|
||||
store 1, r5 ;; r5 = 1
|
||||
store 1, r6 ;; r6 = 1
|
||||
|
||||
loop while r2 not equal to zero
|
||||
|
||||
div t0, r1, r2 ;; t0 = r1/r2
|
||||
store r2, t1 ;; t1 = r2
|
||||
mult t2, r2, t0 ;; t2 = r2 * t0
|
||||
sub r2, r1, t2 ;; r2 = r1 - t2
|
||||
store t1, r1 ;; r1 = t1
|
||||
store r3, t3 ;; t3 = r3
|
||||
mult t4, t0, r3 ;; t4 = t0 * r3
|
||||
sub r3, r5, t4 ;; r3 = r5 - t4
|
||||
store t3, r5 ;; r5 = t3
|
||||
store r4, t5 ;; t5 = r4
|
||||
mult t6, t0, r4 ;; t6 = t0 * r4
|
||||
sub r4, r6, t6 ;; r4 = r6 - t6
|
||||
store t5, r6 ;; r6 = t5
|
||||
|
||||
;; end loop
|
||||
|
||||
return r5, r6 ;; r5 and r6 are e and d
|
||||
|
||||
Since we chose an `e` such that `gcd(φ(n), e) = 1`, there will
|
||||
always exist a `d` such that `ed = 1 (mod φ(n))`. The Extended
|
||||
Euclidean Algorithm returns the integer `d`. Typically to execute the
|
||||
Extended Euclidean Algorithm, one divide, six stores, three multiplies,
|
||||
and three subtracts are needed within the loop. In the design section,
|
||||
we will use two instructions that will combine some of the instructions
|
||||
used in the Extended Euclidean Algorithm to reduce the number of stalls
|
||||
within the loop.
|
||||
|
||||
## PRIME NUMBER GENERATION
|
||||
|
||||
The common approach to generating large primes in making encryption and
|
||||
decryption keys is to randomly select integers and test them for
|
||||
primality. Since for large numbers, being sure of primality can be an
|
||||
expensive operation, algorithms are used that find "probable primes."
|
||||
These algorithms test numbers for primality with a certainty over a
|
||||
desired threshold. The particular algorithm we will investigate is known
|
||||
as Rabin's test, whose high-level use in prime generators as
|
||||
follows[@BEAUCHEMIN]:
|
||||
|
||||
;;Generates a prime number of binary length l and certainty factor k
|
||||
;;The number will have a exp(4,-k) chance of being composite (non-prime)
|
||||
function genPrime (l, k):
|
||||
repeat
|
||||
n <- Randomly selected l-digit odd number
|
||||
until repeatRabin(n, k) = "prime"
|
||||
return n
|
||||
|
||||
function repeatRabin(n, k)
|
||||
from i <- 0 to k or until done
|
||||
done <- rabin(n) = "composite"
|
||||
end
|
||||
if done
|
||||
return "composite"
|
||||
else
|
||||
return "prime"
|
||||
|
||||
function rabin(n)
|
||||
s <- the number of trailing zeros in binary form of n-1
|
||||
t <- n-1 with trailing zeros removed
|
||||
a <- integer randomly selected between 2 and n-2
|
||||
x <- exp(a,t) mod n
|
||||
if x = 1 or x = n - 1
|
||||
return "prime"
|
||||
for r <- 1 ... s - 1
|
||||
x <- exp(x,2) mod n
|
||||
if x = 1 return "composite"
|
||||
if x = n - 1 return "prime"
|
||||
end for
|
||||
return "composite"
|
||||
|
||||
This technique will take a long amount of time for large values of `t`
|
||||
and `s`. For large values of `t`, exponentiation done in a typical
|
||||
manner would take an increasing number of iterations to calculate (as
|
||||
shown below). The instructions to calculate `x^{2} (mod n)` would be
|
||||
executed `s` times. These two factors indicate a heavy reliance on the
|
||||
ability of a system to calculate exponentiation.
|
||||
|
||||
## ENCRYPTION AND DECRYPTION
|
||||
|
||||
One aspect of RSA to improve upon is performing large exponentiation.
|
||||
Currently the implementation of exponentiation is performed by the
|
||||
compiler, which creates inconsistencies in speed between compilers and
|
||||
is generally not optimal. The general practice for compilers is to loop
|
||||
for the value in the exponent while accumulating the total.
|
||||
|
||||
Current pseudocode for how compilers handle exponentiation [@COHEN]:
|
||||
|
||||
function power (base, exponent)
|
||||
set result = 1
|
||||
loop from 1 to exponent do
|
||||
result = result * base
|
||||
return result
|
||||
|
||||
This technique is very slow, especially for large exponents. It will
|
||||
cause a lot of stalls as the processor waits for the results of the
|
||||
previous computation. It will also stall waiting for another multiplier
|
||||
to be available. We can lessen the number of stalls using a technique
|
||||
known as exponentiation by squaring. This technique is explained further
|
||||
in the design section.
|
||||
|
||||
# DESIGN
|
||||
|
||||
In this section, we will describe specialized instructions that will be
|
||||
used for prime number generation, computing the encryption and
|
||||
decryption exponent, and encrypting and decrypting a message.
|
||||
|
||||
## ENCRYPTION AND DECRYPTION EXPONENT
|
||||
|
||||
The issue with implementing the Euclidean Algorithm the traditional way
|
||||
is a divide, multiply, and subtract instructions are needed for each
|
||||
iteration of the loop. We noticed that the Euclidean Algorithm is
|
||||
finding a modulus in each iteration of the loop, thus we propose to use
|
||||
a modulus instruction for the Euclidean Algorithm, which is defined
|
||||
below.
|
||||
|
||||
mod x, y, z : x = z - ( (z/y) * y)
|
||||
|
||||
By using the modulus instruction, we have reduced the number of stalls
|
||||
in each loop. Unlike the traditional implementation, which requires both
|
||||
the multiply and subtraction instruction to stall until the instruction
|
||||
before it writes its value to memory, the modulus instruction has all
|
||||
the values immediately.
|
||||
|
||||
Therefore, we can implement the Euclidean Algorithm with the following
|
||||
pseudocode:
|
||||
|
||||
;; load phi of n (computed using Euler's totient function) and encryption exponent
|
||||
load r1, location_of_phi(n)
|
||||
load r2, location_of_encryption_exponent
|
||||
|
||||
loop while r2 not equal to zero
|
||||
|
||||
mod t0, r1, r2 ;; t0 = r2 - ( (r2/r1) * r1)
|
||||
|
||||
store r2, r1 ;;r1 = r2
|
||||
store t0, r2 ;;r2 = t0
|
||||
|
||||
;; end loop
|
||||
|
||||
return r1 ;; after the loop r2 is zero, so r1 holds the gcd of phi of n and the encryption exponent
|
||||
|
||||
|
||||
Using the mod instruction, we can eliminate the use of the divide,
|
||||
multiply, and subtract instructions. Also, we use only one temporary
|
||||
register, as opposed to three temporary registers. We will provide an
|
||||
analysis to the speedup that the modulus instruction gives the
|
||||
implementation of the Euclidean Algorithm in the justification and
|
||||
analysis section.
|
||||
|
||||
For the implementation of the Extended Euclidean Algorithm, we propose
|
||||
to use two specialized instructions: a modular instruction and a
|
||||
multiply-subtract instruction. The modular instruction combines the
|
||||
divide, multiply, and subtract instruction, and is defined the same as
|
||||
it is above, in the Euclidean Algorithm. The multiply-subtract
|
||||
instruction is defined as follows:
|
||||
|
||||
multsub w, x, y, z : w = x - (y * z)
|
||||
|
||||
Like in the Euclidean Algorithm, by using the modular instruction in the
|
||||
Extended Euclidean Algorithm, we are reducing the number of stalls in
|
||||
each loop iteration. We are further reducing the number of stalls by
|
||||
also using the multiply-subtract instruction. Thus, we can implement the
|
||||
Extended Euclidean Algorithm with the following pseudocode:
|
||||
|
||||
;; load phi of n (computed using Euler's totient function) and encryption exponent
|
||||
load r1, location_of_phi(n)
|
||||
load r2, location_of_encryption_exponent
|
||||
|
||||
store 0, r3 ;; r3 = 0
|
||||
store 0, r4 ;; r4 = 0
|
||||
store 1, r5 ;; r5 = 1
|
||||
store 1, r6 ;; r6 = 1
|
||||
|
||||
loop while r2 not equal to zero
|
||||
|
||||
div t0, r1, r2 ;; t0 = r1/r2
|
||||
store r2, t1 ;; t1 = r2
|
||||
mod r2, r1, r2 ;; r2 = r2 - ( (r2/r1) * r1)
|
||||
store t1, r1 ;; r1 = t1
|
||||
store r3, t2 ;; t2 = r3
|
||||
multsub r3, r5, t0, r3 ;; r3 = r5 - (t0 * r3)
|
||||
store t2, r5 ;; r5 = t2
|
||||
store r4, t3 ;; t3 = r4
|
||||
multsub r4, r6, t0, r4 ;; r4 = r6 - (t0 * r4)
|
||||
store t3, r6 ;; r6 = t3
|
||||
|
||||
;; end loop
|
||||
|
||||
return r5, r6 ;; r5 and r6 are e and d
|
||||
|
||||
Using the modular and multiply-subtract instructions, we have reduced
|
||||
the amount of instructions in the loop of the implementation of the
|
||||
Extended Euclidean Algorithm from 13 instructions to 10 instructions. We
|
||||
also reduce the temporary registers from five to three. We will provide
|
||||
an analysis to the speedup given to the Extended Euclidean Algorithm by
|
||||
using the modular instruction and the multiply-subtract instruction in
|
||||
the justification and analysis section.
|
||||
|
||||
## PRIME NUMBER GENERATION, ENCRYPTION, AND DECRYPTION
|
||||
|
||||
One issue already discussed in the previous section is that of stalls
|
||||
during large exponents. The way exponentiation is handled causes many
|
||||
stalls slowing down the program. One way to lessen the number of stalls
|
||||
is by using a technique called exponentiation by squaring. This
|
||||
technique uses the square of the result to accumulate the final solution
|
||||
instead of just the base. In this way it'll divide the number of
|
||||
multiplies in half, thus divide the number of stalls in half.
|
||||
|
||||
The pseudocode for how exponentiation by squaring will work [@COHEN]:
|
||||
|
||||
function power (base, exponent)
|
||||
set binaryExp = (n1, n2, n3, ... nm)
|
||||
;; where binaryExp is the binary representation of exponent and n1 is 1
|
||||
set result = base
|
||||
set singleResult = 1
|
||||
loop for x from 0 to (length of binaryExp - 1) do
|
||||
result = result * result
|
||||
if binaryExp[x] == 1
|
||||
singleResult = singleResult * base
|
||||
|
||||
result = result * singleResult
|
||||
return result
|
||||
|
||||
An example for calculating 3 to the 10th is as follows:
|
||||
|
||||
result := 3
|
||||
binaryExp := "1010"
|
||||
|
||||
Iteration for digit 2:
|
||||
result := result^{2} = 3^{2} = 9
|
||||
1010bin - Digit equals "0"
|
||||
|
||||
Iteration for digit 3:
|
||||
result := result^{2} = (3^{2})^{2} = 3^{4} = 81
|
||||
1010bin - Digit equals "1" --> result := result*3 = (3^{2})^{2}*3 = 3^{5} = 243
|
||||
|
||||
Iteration for digit 4:
|
||||
result := result^{2} = ((3^{2})^{2}*3)^{2} = 3^{10} = 59049
|
||||
1010bin - Digit equals "0"
|
||||
|
||||
return result
|
||||
|
||||
The idea is to implement this exponentiation by squares algorithm in the
|
||||
processor using a pow command. This pow command takes the destination
|
||||
register, the register containing the base and the register containing
|
||||
the exponent. This command then runs the loop using one multiplier to
|
||||
accumulate the square and one multiplier to accumulate the value when a
|
||||
1 is hit. The loop will grab one binary digit at a time from the
|
||||
exponent register. We will always run the result accumulator, then
|
||||
depending on the digit, may use the second accumulator. It will then run
|
||||
through one more multiplier to multiply the squares by the 1's
|
||||
multiplier.
|
||||
|
||||
# JUSTIFICATION AND ANALYSIS
|
||||
|
||||
In this section, we will describe how our specialized instructions will
|
||||
improve the performance of the RSA encryption.
|
||||
|
||||
## ENCRYPTION AND DECRYPTION EXPONENT
|
||||
|
||||
Using the modular instruction in the Euclidean Algorithm, we can reduce
|
||||
the number of stalls needed. Instead of needing to stall for the result
|
||||
of the divide and the result of the multiply within the loop, the
|
||||
modular instruction does not have to stall for any previous results. In
|
||||
the loop of the Euclidean Algorithm without the modular instruction, two
|
||||
of the five instructions need to stall to wait for a previous result.
|
||||
|
||||
CPI = Ideal CPI + The number of stalls per instruction
|
||||
|
||||
CPIold = 1 + 2/5
|
||||
CPIold = 1.4
|
||||
|
||||
CPInew = 1 (since no instructions in the loop need to stall for a result)
|
||||
|
||||
Thus the speedup for the Euclidean Algorithm in the loop is:
|
||||
Speedup = CPIold / CPI new
|
||||
Speedup = 1.4 / 1
|
||||
Speedup = 1.4
|
||||
|
||||
Thus, with respect to stalls, we have a speedup of 1.4 by using the
|
||||
modular instruction in the Euclidean Algorithm. Of course, the latency
|
||||
of the modular instruction is likely to be longer than the latency of
|
||||
the divide, multiply, and subtract instructions, since it is performing
|
||||
three calculations in one instruction. However, since we are building a
|
||||
system only for RSA, we can use specialized hardware to reduce the
|
||||
latency of the modular instruction to receive the speedup of 1.4. As
|
||||
mentioned in the design section, another advantage to using the modular
|
||||
instruction is we reduce the number of temporary registers from three to
|
||||
one.
|
||||
|
||||
We assume that the store instruction finishes in one clock cycle.
|
||||
Thus, in the Extended Euclidean Algorithm, three of the thirteen
|
||||
instructions in the loop need to stall for previous results. However, by
|
||||
using the modular and multiply-subtract instructions, no instruction
|
||||
needs to stall to wait for a previous result to be available.
|
||||
|
||||
CPI = Ideal CPI + The number of stalls per instruction
|
||||
|
||||
CPIold = 1 + 3/13
|
||||
CPIold = 1.23
|
||||
|
||||
CPInew = 1 (since no instructions in the loop need to stall for a result)
|
||||
|
||||
Thus the speedup for the Extended Euclidean Algorithm in the loop is:
|
||||
Speedup = CPIold / CPInew
|
||||
Speedup = 1.23 / 1
|
||||
Speedup = 1.23
|
||||
|
||||
Thus with respect to stalls, we have a speedup of 1.23 by using the
|
||||
modular and multiply-subtract instructions in the Extended Euclidean
|
||||
Algorithm. As mentioned above, it is likely that the latency of the
|
||||
modular and multiply-subtract instructions are higher than the
|
||||
traditional instructions. However, we are only building a system for RSA
|
||||
encryption and decryption, so we can build hardware that will reduce the
|
||||
latency of the modular and multiply-subtract instructions to receive the
|
||||
speedup of 1.23. Also, an advantage to using the modular and
|
||||
multiply-subtract instructions is we reduce the number of temporary
|
||||
registers needed from five to three.
|
||||
|
||||
## PRIME NUMBER GENERATION, ENCRYPTION, AND DECRYPTION
|
||||
|
||||
Using the pow command we can cut the stalls of large exponents in half.
|
||||
Since the algorithm breaks the exponent into a binary representation of
|
||||
itself and loops through the digits, the worst-case scenario is
|
||||
looping half the number of times. One example of this is if the exponent
|
||||
is eight which is 1000 in base two. Since the number of loops is divided
|
||||
in half, the number of multiplies are also divided in half. Even with a
|
||||
large number of ones in the base two representation of the exponent,
|
||||
this can use a separate multiplier, since it doesn't rely on the result
|
||||
of the squares.
|
||||
|
||||
Using the above pseudocode we can assume that approximately 1/4 of the
|
||||
instructions are multiplies, thus 1/4 will stall. Using that knowledge
|
||||
and the knowledge that the number of stalls is cut in half we can use
|
||||
the following equations to determine the overall speedup:
|
||||
|
||||
CPI = Ideal CPI + The number of stalls per instruction
|
||||
|
||||
CPI old = 1 + 1/4
|
||||
CPI old = 1.25
|
||||
|
||||
CPI new = 1 + (1/4)/2
|
||||
CPI new = 1 + 1/8
|
||||
CPI new = 1.125
|
||||
|
||||
Thus the speedup of this section is:
|
||||
Speedup = CPI old / CPI new
|
||||
Speedup = 1.25/1.125
|
||||
Speedup = 1.11
|
||||
|
||||
# CONCLUSIONS
|
||||
|
||||
In analyzing the typical algorithms used as a part of RSA, we have
|
||||
identified two primary bottlenecks in both encryption and decryption:
|
||||
modulus and exponentiation operations. We propose that because these two
|
||||
operations are so prevalent, they warrant specialized instructions. We
|
||||
also noticed that it is beneficial to use a multiply-subtract
|
||||
instruction to increase speedup while finding a decryption exponent.
|
||||
These specialized instructions improve performance by reducing the
|
||||
number of stalls, and therefore improving effective CPI. The process of
|
||||
finding encryption and decryption exponents benefits from the increased
|
||||
efficiency of modulus operations. Likewise, the processes of prime
|
||||
generation and encryption and decryption in general, both benefit from a
|
||||
faster exponentiation operation.
|
||||
|
||||
We find that by implementing these instructions, the whole procedure
|
||||
achieves an overall speedup, from specific speedups of 1.11 for all uses of
|
||||
exponentiation, 1.4 for finding an encryption exponent, and 1.23 for
|
||||
finding a decryption exponent. Thus, we met our goals for improving the
|
||||
performance of the RSA algorithm by creating a specialized instruction
|
||||
set architecture.
|
||||
|
||||
A further investigation we could make is to look at specific hardware
|
||||
that supports our instruction set architecture. To gain the maximum
|
||||
speedup, we need to use hardware that reduces the latency for all the
|
||||
specialized instructions to a negligible amount. Since we are creating a
|
||||
system with the sole task of encrypting and decrypting messages using
|
||||
RSA, we can create hardware that allows the specialized instructions to
|
||||
have a latency comparable to the traditional instructions.
|
||||
|
||||
# Bibliography
|
||||
|
||||
Beauchemin, Pierre, Brassard, Gilles, Crepeau, Claude, Goutier, Claude, and
|
||||
Pomerance, Carl The Generation of Random Numbers That Are Probably Prime
|
||||
|
||||
Bellare, M., Garay, J., and Rabin T. Fast Batch Verification for Modular
|
||||
Exponentiation and Digital Signatures. *Advances in Cryptology-
|
||||
Eurocrypt 98 Proceedings, Lecture Notes in Computer Science*, 1998.
|
||||
|
||||
Boneh, Dan and Shacham, Hovav. Fast Variants of RSA. *Appears in
|
||||
Cryptobytes*, 2002. Cohen, H., Frey, G. (editors): Handbook of elliptic
|
||||
and hyperelliptic curve cryptography. Discrete Math. Appl., (Chapman and
|
||||
Hall/CRC 2006).
|
||||
|
||||
Cormen, T., Leiserson, C., Rivest, R., and Stein, C. Introduction to
|
||||
Algorithms, Second Edition. MIT Press and McGraw-Hill, 2001.
|
||||
|
||||
Stallings, W., Cryptography and Network Security 2nd ed. (Prentice-Hall,
|
||||
1998)
|
||||
|
||||
81
content/posts/secure-coding-in-c-summations-concurrency.md
Normal file
81
content/posts/secure-coding-in-c-summations-concurrency.md
Normal file
@@ -0,0 +1,81 @@
|
||||
---
|
||||
title: "Concurrency: Summations of Secure Coding in C and C++"
|
||||
date: 2023-01-27
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
Continuing summarizing the themes in "Secure Coding in C and C++" by Robert C. Seacord, we will discuss concurrency. When code runs at the same time needing access to the same resources lots of issues can occur. These can range from the annoyance of getting incorrect data, to halting deadlocks, to vulnerabilities.
|
||||
|
||||
The tl;dr; use `mutex`'s. There are a lot of methods for controlling concurrency, but many use `mutex`'s in the background anyway. A `mutex` is the closest thing to guaranteed sequential access, without risking deadlocks.
|
||||
|
||||
## Importance
|
||||
|
||||
To quote Robert C. Seacord, "There is increasing evidence that the era of steadily improving single CPU performance is over. ... Consequently, single-threaded applications performance has largely stalled as additional cores provide little to no advantage for such applications"
|
||||
|
||||
In other words, the only real way to improve performance is through multi-threaded/multi-process applications, thus being able to handle concurrence is very important.
|
||||
|
||||
# The Big Issue
|
||||
|
||||
Race Conditions! That's the big issue: when two or more threads or processes attempt to access the same memory or files. The issue comes in when two writes happen concurrently, reads occur before writes, or reads occur during writes. This can lead to incorrect values being read, incorrect values being set, or corrupted memory. These types of flaws, and insufficient fixes for them, can cause vulnerabilities in the programs as well.
|
||||
|
||||
# How Do We Keep Memory Access Sane
|
||||
|
||||
So what is the fix? There are several possible ways to keep things in sync, but the number one way that will "always" work is a `mutex`. In fact most of the other "solutions" are just an abstracted `mutex`. We will go briefly over a couple solutions: global variables, `mutex`, and atomic operations.
|
||||
|
||||
## Shared/Global Variables
|
||||
|
||||
A simple solution, that is **NOT** robust is simply having a shared "lock" variable. A variable, we'll call `int lock`, which is a `1` when locked and `0` when unlocked, is accessible between threads. When a thread wants to access a memory location it simply checks that the variable is in the unlocked state, `0`, locks it by setting it to `1`, then accessing the memory location. At the end of its access, it simply sets the variable back to `0` to "unlock" the memory location.
|
||||
|
||||
Simple, but not robust. It suffers from three main flaws that a `mutex` solves. The first is a second thread could lock the memory location after the first thread checks that it's unlocked. Thread 1 `t1` checks the value of `lock` and sees that it's `0`, thread 2 `t2` then locks `lock`. Both `t1` and `t2` think they both hold the lock and both access the memory at the same time.
|
||||
|
||||
The second problem is, in order to implement this, any given thread would have to enter a `loop` `sleep` cycle to keep checking the lock value. This could use valuable CPU time.
|
||||
|
||||
The third issue is compiler optimization (future blog coming regarding that hot mess). When a compiler is optimizing a loop, it may only read a given variable once, if there is no indication that it will change. Since it's being changed in another thread in another part of the program, the compiler may cause the variable to never change, thus causing a *deadlock*. The other thing compilers like to do to optimize things, is rearrange the order of operations if it thinks it doesn't matter. This can lead to other forms of *read before writes* or *deadlocks*.
|
||||
|
||||
The third issue *can* be solved through compiler directives, but that still doesn't solve the first two issues.
|
||||
|
||||
## `mutex`
|
||||
|
||||
Fundamentally, a `mutex` isn't much different than a shared variable. The `mutex` itself is shared among all threads. The biggest difference is, it doesn't suffer from any of the three issues. The `threading` library handles things properly such that a "check" on the `mutex` and a "lock" happen atomically (meaning that nothing can happen in between). This handles the issue of reading the variable before another thread writes and the compiler trying to optimize things. `mutex`es also handle waiting a little differently and thus need less CPU to wait.
|
||||
|
||||
The only drawback to the `mutex` is that it can still cause a *deadlock* when not used properly. If a `mutex` isn't properly unlocked (either due to programmer error, or improper error handling) then the `mutex` might not be released, thus locking up other threads. It can also lock other threads if it keeps it open for a "long time" even if it will eventually close the `mutex`.
|
||||
|
||||
To solve the possible *deadlock* of not unlocking the `mutex`, `atomic` operations were added.
|
||||
|
||||
## Atomic Operations
|
||||
|
||||
Atomic operations attempt to solve the issue of forgetting to unlock the `mutex`. An atomic operation is a single function call that performs multiple actions on a single shared variable. These operations can be checking and setting (thus making them semi useful as a shared locking variable), swapping values, or writing values.
|
||||
|
||||
Atomic operations are very limited in their use case, since there are only so many built-in methods. If they work for your use case there really isn't much downside to using them. However since they are limited and use a `mutex` in the background anyway, a `mutex` with proper error handling and releasing is probably the best way to go.
|
||||
|
||||
## Other Solutions
|
||||
|
||||
Lock Guard:
|
||||
- C++ object that handles a `mutex`, useful for not having to worry about unlocking the `mutex`, only real downside is it's C++ only
|
||||
|
||||
Fences:
|
||||
- An attempt to tell the compiler not to re-arrange operations, can still lead to data races. Even if the compiler doesn't optimize things, just how and when the operations get scheduled on the CPU can mess with memory operations
|
||||
|
||||
Semaphore:
|
||||
- `mutex` with a counter. Can have good specific use cases, but just uses a `mutex` in the background. Unless needed, just use a `mutex`
|
||||
|
||||
# Obvious bias is Obvious
|
||||
|
||||
Just use a `mutex`. Most of the additional solutions either are simply a `mutex` in the background or cause other issues. A `mutex` will just work. Just be sure to properly unlock when done and possibly have timeouts in case another thread gets stuck.
|
||||
|
||||
With a `mutex` you have way more control over the code and way more flexibility in how it's used. An arbitrary amount of code can be put in between without having to finagle a use case into a limited number of function calls.
|
||||
|
||||
# Keep it Sane
|
||||
|
||||
There is one additional tip for concurrency, lock as little code as possible. By having as few operations as possible between a `mutex` lock and unlock it reduces possibilities of timeouts, deadlocks, and crashing. It also helps to reduce the possibility of forgetting to unlock. Do not surround an entire method (or methods) with locks, rather just the read and write operations.
|
||||
|
||||
## The Dreaded `GOTO`
|
||||
|
||||
When it comes to locking, `goto` is your friend. Have an error or exception inside a lock, `goto` the unlock. This also works for clearing memory, have an error `goto` the `free` and memory cleanup. Keep the `goto` sane by only jumping to within the current method.
|
||||
|
||||
# Conclusion
|
||||
|
||||
Just use a `mutex`, everything else is either more prone to errors, more limiting, or just uses a `mutex` in the background anyway. Keep things sane by locking as little code as possible. And always make sure to throw locks around accessing common memory space.
|
||||
|
||||
48
content/posts/secure-coding-in-c-summations-free-and-null.md
Normal file
48
content/posts/secure-coding-in-c-summations-free-and-null.md
Normal file
@@ -0,0 +1,48 @@
|
||||
---
|
||||
title: "Set to NULL After Free: Summations of Secure Coding in C and C++"
|
||||
date: 2022-08-17
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
Continuing the series of summarizing the themes in "Secure Coding in C and C++" by Robert C. Seacord, we will discuss freeing pointers. The title of this section is specifically about setting to `NULL` after calling free, but this post will cover a lot more than that. Here we will discuss the problems with forgetting to free, functions whose return value needs to be freed, and freeing without allocating/double free.
|
||||
|
||||
As for the title of this piece, some of the most common problems can be solved simply by setting pointers to `NULL` at declaration or after freeing.
|
||||
|
||||
This is written for an audience that has a broad overview of security concepts. Not much time is spent explaining each concept, and I encourage everyone to read the book.
|
||||
|
||||
# Always `free` When Done
|
||||
|
||||
First off lets discuss why `free` is important. Without freeing variables, best case scenario is you end up with leaked memory and worst case could introduce vulnerabilities.
|
||||
|
||||
## Memory Leaks
|
||||
|
||||
When non-pointer variables are declared, they are restricted to the scope in which they were created. The operating system will clear the memory at the end of the scope. For pointers however, allocated memory is not restricted by scope. So if a pointer is not cleared before the end of scope, that memory will still be held by the process. Depending on how large these allocations are, you could fill memory quite quickly. At best this will lead to crashing your own program (if the OS restricts memory), at worst you will crash the system.
|
||||
|
||||
One of the best ways to handle this is with `goto`'s. Yes, despite the hate for `goto` statements, this is a perfect use for them. Set an anchor at the end of the function, and have all clearing code there. Then during any error within the function, jump to the anchor using `goto`. The thing is, the `goto` must be within the same method and later in the logic. A `goto` statement should never leave the method scope and should never move earlier in the function.
|
||||
|
||||
Also by using the `goto` and anchor, it will prevent another possible vulnerability, use after free. This is also discussed in the next section.
|
||||
|
||||
## Vulnerabilities
|
||||
|
||||
The other problem with forgetting to call free is allowing an attacker to gain access to your memory space, which could cause sensitive data to be leaked. By exploiting other vulnerabilities an attacker could gain access to memory that was supposed to be freed. Another problem with forgetting to free is denial of service attacks. An attacker can specifically target the memory leak to overload the system.
|
||||
|
||||
Another vulnerability isn't forgetting to free, but forgetting that you did free. Use after free can be a big issue. If an attacker can fill the memory space that was previously freed, when the program uses the pointer again, instead of erroring out the vulnerable program will use the new data. This could result in code execution, depending on how the memory is used.
|
||||
|
||||
# Knowing When to `free`
|
||||
|
||||
When you as the developer call `calloc`, `malloc` or almost anything else with an `alloc` it's pretty clear that those need to be freed. You declared the pointer and created the memory. But there are other situations that are not as clear, when calling functions that allocate memory.
|
||||
|
||||
These functions could either be built-in functions like `strdup` or ones which you write yourself. Be sure to check and write documentation to be clear on if the memory needs to be freed. This type of allocation can be very easy to forget and could be an easy place to cause memory leaks. The best way to verify is to double-check all declared pointers to see if they need to be freed before finishing the method.
|
||||
|
||||
This is a perfect situation for a `goto` to an anchor at the end of the method. Then there only needs to be a single `free` preventing use after free and double free. It also then requires only a single return. This will prevent returning before freeing, reducing the risk of memory leaks.
|
||||
|
||||
# Knowing When NOT to `free`
|
||||
|
||||
Knowing when not to free is not as big of an issue as not freeing, but can still cause issues. Double frees, freeing before allocating, and freeing without allocating can cause your program to crash. It doesn't cause additional errors, but can make you vulnerable to denial of service.
|
||||
|
||||
# Conclusion
|
||||
|
||||
Freeing is vitally important to keeping your programs safe. All allocations need to be freed and it's best to free at the end of the method the pointer was allocated in. This will help prevent use after frees and forgetting to free. An anchor at the end of a method with `goto` is the best way to accomplish this.
|
||||
|
||||
@@ -0,0 +1,86 @@
|
||||
---
|
||||
title: "Always null Terminate (Part 2): Summations of Secure Coding in C and C++"
|
||||
date: 2022-08-13
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
Series on summarizing themes in "Secure Coding in C and C++" by Robert C. Seacord, part 2. Find part 1 here [Always null Terminate (Part 1)]({{<ref "secure-coding-in-c-summations-null-terminate.md">}}). We are currently going through this book in our work book club and there are a lot of good themes that seem to be threaded through the book. These are my notes, thoughts, and summaries on some of what I've read and our book club have discussed.
|
||||
|
||||
This is written for an audience that has a broad overview of security concepts. Not much time is spent explaining each concept, and I encourage everyone to read the book.
|
||||
|
||||
The first theme to discuss is always `null` terminating `char *` or `char array` buffers (unless you have a *very* specific reason for not). This is very important to help prevent buffer overflows, reading arbitrary memory, accessing 'inaccessible' memory. This is part 2 where we will discuss string cat and length. For a brief discussion on string copy see [part 1]({{<ref "secure-coding-in-c-summations-null-terminate.md">}}).
|
||||
|
||||
# Functions Needing null
|
||||
|
||||
One of the important reasons to `null` terminate is there are several very common functions that require `null` termination. Even some that you wouldn't necessarily think of. Without having `null` at the end of the buffer, it creates a situation where things could go wrong.
|
||||
|
||||
## String Cat
|
||||
|
||||
The next set of functions to look at are concatenating strings. These not only need to be `null` terminated, but they also need to be properly allocated. If they are not a concatenation could overwrite `null` terminators, and the resulting string could cause errors further in the code. Memory allocation will be discussed further in another post. First I'm going to throw a table at you, it gives a summary of string concat functions and how they handle some of the issues. We will discuss further after the table.
|
||||
|
||||
| Function | Buffer Overflow Protection | Guarantees Null Termination | May Truncate String | Allocates Dynamic Memory |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| strcat() | No | No | No | No |
|
||||
| strncat() | Yes | No | Yes | No |
|
||||
| strlcat() | Yes | Yes | Yes | No |
|
||||
| strcat\_s() | Yes | Yes | No | No |
|
||||
|
||||
Lets go over each function:
|
||||
|
||||
### strcat
|
||||
|
||||
```
|
||||
char *strcat(char *dest, char *src)
|
||||
```
|
||||
|
||||
This function is basic and needs careful programming. The destination **must** be at least the total length of both strings plus the `null` terminator. If it is smaller, it **will** overflow. It's also best to null the memory ahead of time, guaranteeing the last character is `null`. Proper memory allocation will be in a future post.
|
||||
|
||||
`strcat` copies the source until it hits the first `null` character, into destination, starting at the first `null` character. This means there are two things to watch out for:
|
||||
|
||||
1. This could lead to reading arbitrary memory
|
||||
1. Binary buffers may be corrupted since they can contain `null` characters within the string (use `memcpy` instead)
|
||||
|
||||
Arbitrary memory reads can be a problem since it could mean revealing data meant to be secret. Depending on where memory is allocated, sensitive data could be revealed to the user.
|
||||
|
||||
Be sure to set the last character to `null` after the `strcat` is completed.
|
||||
|
||||
### strncat
|
||||
|
||||
```
|
||||
strncat(char *dest, char *src, size_t src_len)
|
||||
```
|
||||
|
||||
`strncat` attempts to solve some of the issues with `strcat` but still requires careful programming. For one the `src` does not need to be `null` terminated as long as it is *at least as long as `src_len`*. `strncat` will copy `src_len` characters into the `dest`, or until it hits a `null` byte. In this case `dest` needs to be **at least** as long as the original string plus `src_len`. If it is not, it can still lead to buffer overflows.
|
||||
|
||||
In addition if `src` is not `null` terminated and `src_len` is longer than the length of `src` then `strncat` will still copy arbitrary memory.
|
||||
|
||||
`strncat` helps the developer watch for these issues but doesn't actually solve them.
|
||||
|
||||
|
||||
### strlcat
|
||||
|
||||
```
|
||||
size_t strlcat(char *dst, const char *src, size_t size)
|
||||
```
|
||||
|
||||
`strlcat` is pretty much identical to `strncat` so it has many of the same issues. Since `size` is the size of the destination, it is an improvement for two reasons:
|
||||
|
||||
1. Destination is guaranteed to be `null` terminated (so long as `size` correctly reflects the full size of the destination buffer).
|
||||
1. It returns the attempted length copied (so the length of source).
|
||||
|
||||
Point one is great so you don't need to worry as much about pre setting the memory of the destination, or setting the last byte after the copy.
|
||||
|
||||
Point two is good so you can compare `size` to the return value to see if the source was truncated.
|
||||
|
||||
## Sensing a Theme
|
||||
|
||||
There are two themes for string concatenating, one is **`null` terminate all character buffers**, the second is proper memory allocation. This will be discussed in a future post.
|
||||
|
||||
Every one of these functions require the source and destination to be `null` terminated. If they are not, or if there is a `null` in the middle, it will cause issues!
|
||||
|
||||
# Conclusion
|
||||
|
||||
`null` termination is important so that we don't accidentally read or write to arbitrary memory. This concludes the discussion on `null` termination, the next post will cover proper memory allocation.
|
||||
|
||||
@@ -0,0 +1,96 @@
|
||||
---
|
||||
title: "Always null Terminate: Summations of Secure Coding in C and C++"
|
||||
date: 2021-09-01
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
Welcome to the next series, summarizing themes in "Secure Coding in C and C++" by Robert C. Seacord. We are currently going through this book in our work book club and there are a lot of good themes that seem to be threaded through the book. These are my notes, thoughts, and summaries on some of what I've read and our book club have discussed.
|
||||
|
||||
This is written for an audience that has a broad overview of security concepts. Not much time is spent explaining each concept, and I encourage everyone to read the book.
|
||||
|
||||
The first theme to discuss is always `null` terminating `char *` or `char array` buffers (unless you have a *very* specific reason for not). This is very important to help prevent buffer overflows, reading arbitrary memory, accessing 'inaccessible' memory.
|
||||
|
||||
# Functions Needing null
|
||||
|
||||
One of the important reasons to `null` terminate is there are several very common functions that require `null` termination. Even some that you wouldn't necessarily think of. Without having `null` at the end of the buffer, it creates a situation where things could go wrong.
|
||||
|
||||
## String Copy
|
||||
|
||||
The first set of functions to look at are copying strings. These not only need to be `null` terminated, but they also need to be properly allocated. Memory allocation will be discussed further in another post. First I'm going to throw a table at you, it gives a summary of string copy functions and how they handle some of the issues. We will discuss further after the table.
|
||||
|
||||
| Function | Buffer Overflow Protection | Guarantees Null Termination | May Truncate String | Allocates Dynamic Memory |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| strcpy() | No | No | No | No |
|
||||
| strncpy() | Yes | No | Yes | No |
|
||||
| strlcpy() | Yes | Yes | Yes | No |
|
||||
| strdup() | Yes | Yes | No | Yes |
|
||||
|
||||
Lets go over each function:
|
||||
|
||||
### strcpy
|
||||
|
||||
```
|
||||
strcpy(char *dest, char *src)
|
||||
```
|
||||
|
||||
This function is super basic and needs a lot of careful programming. The destination **must** be at least the length of the source plus the `null` terminator. If it is smaller, it **will** overflow. If destination is the correct size, but not initialized, destination is not guaranteed to be `null` terminated. Proper memory allocation will be in a future post.
|
||||
|
||||
`strcpy` also copies the source until it hits the first `null` character. This means there are two things to watch out for:
|
||||
|
||||
1. This could lead to reading arbitrary memory
|
||||
1. Binary buffers may not copy the entire thing, since there could be null bytes inside of the buffer
|
||||
|
||||
Arbitrary memory reads can be a problem since it could mean revealing data meant to be secret. Depending on where memory is allocated, sensitive data could be revealed to the user.
|
||||
|
||||
### strncpy
|
||||
|
||||
```
|
||||
strncpy(char *dest, char *src, size_t dest_len)
|
||||
```
|
||||
|
||||
This supposedly solves some of the issues with `strcpy`, but it doesn't really. `strncpy` **is not considered a secure alternative**. Careful coding is still very necessary.
|
||||
|
||||
The problem with `strncpy` is it still doesn't verify anything. It still reads source until `null`, so the two issues above still apply. It also doesn't guarantee `null` termination in destination, just like `strcpy`.
|
||||
|
||||
The only thing it does is *helps* with buffer overflows. However, if the `dest_len` is larger than `dest`, it will still buffer overflow.
|
||||
|
||||
So `strncpy` can still read arbitrary memory and can still buffer overflow (though overflows are more difficult).
|
||||
|
||||
### strlcpy
|
||||
|
||||
```
|
||||
size_t strlcpy(char *dst, const char *src, size_t size)
|
||||
```
|
||||
|
||||
`strlcpy` is pretty much identical to `strncpy` so it has many of the same issues. Since `size` is the size of the destination, it is an improvement for two reasons:
|
||||
|
||||
1. Destination is guaranteed to be `null` terminated (so long as `size` correctly reflects the full size of the destination buffer).
|
||||
1. It returns the attempted length copied (so the length of source).
|
||||
|
||||
Point one is great so you don't need to worry as much about pre setting the memory of the destination, or setting the last byte after the copy.
|
||||
|
||||
Point two is good so you can compare `size` to the return value to see if the source was truncated.
|
||||
|
||||
### strdup
|
||||
|
||||
```
|
||||
char *strdup(const char *s);
|
||||
```
|
||||
|
||||
`strdup` is super basic, but because of that it's probably the hardest to mess up. It dynamically allocates the correct amount of memory and performs the copy, putting a `null` terminator at the end.
|
||||
|
||||
The only thing to note is that it reads until the `null` terminator.
|
||||
|
||||
One important thing to note, the returned value must be `free`'d
|
||||
|
||||
## Sensing a Theme
|
||||
|
||||
See the theme yet ... **`null` terminate all character buffers**
|
||||
|
||||
Every one of these functions require the source to be `null` terminated. If they are not, or if there is a `null` in the middle, it will cause issues!
|
||||
|
||||
# Conclusion
|
||||
|
||||
`null` terminating is very important to prevent accessing or writing to memory locations that should not be accessed. In this post we discussed copying strings. In the next post, we will continue this theme with concatenating strings.
|
||||
31
content/posts/stateless-detection-of-malicious-traffic.md
Normal file
31
content/posts/stateless-detection-of-malicious-traffic.md
Normal file
@@ -0,0 +1,31 @@
|
||||
---
|
||||
title: "Stateless Detection of Malicious Traffic"
|
||||
date: 2019-08-23
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
In order to allow flexibility in deployment location and to preserve user privacy we have performed research into stateless classification of network traffic. Because traffic does not always follow the same path through a network, by not worrying about state, we can deploy anywhere. We also use only one direction of traffic as replies could also follow a different path through the network. And by not requiring data within the packet, we can perform analysis on encrypted traffic as well.
|
||||
|
||||
Our research shows that it is possible to determine if traffic is malicious by using packets traveling in a single direction and without the data contained in the packet. Our research also shows that with the use of timing, time to live (TTL) value, source IPs, destination IPs, and ports, it is possible to determine if the traffic is malicious. Through our research we have shown it is possible to show, with some confidence, if traffic is malicious regardless of location, and while preserving user privacy.
|
||||
|
||||
This post serves as an introduction to my master's thesis of the same title. [Full paper for those interested.](/security/StatelessDetectionOfMaliciousTraffic.pdf)
|
||||
|
||||
# What Was Done
|
||||
|
||||
The system we developed for this research was an intrusion detection system (IDS), thus does not block any traffic. Most IDS's use specific signatures for traffic. These are inflexible and will only detect the specific attack. If the traffic is modified in any way, it will no longer be detected. Instead of signatures, our system looks at ongoing traffic patterns.
|
||||
|
||||
Signatures work great for intrusion prevention systems (IPS), since if you want to block traffic, you want to be sure it is malicious. However, malicious actors regularly change signatures of attacks in order to work around IPSs.
|
||||
|
||||
Our system differs since it uses patterns. Because of this, we cannot say for certain if traffic is malicious, but rather provide a confidence value. This does not work for an IDS, but will detect traffic even when a signature changes. Using this confidence value, a security researcher could investigate the traffic further. Determine if it is malicious and a signature if necessary.
|
||||
|
||||
We used three primary data points to determine if traffic was malicious: destination port, TTL, and packet frequency. To actually perform the classification, we used a software package called WEKA (an open source trainable algorithm) and focused on bayesnet classification.
|
||||
|
||||
# Conclusions
|
||||
|
||||
While performing the research, we observed that port only usage provided the least confidence. This isn't surprising, since it will only be useful for network scans. Packet frequency proved to be a better data point for classification. It appeared that benign traffic had a burst at the beginning, with fairly regular communication for the rest of a session. Malicious traffic would have a large burst of traffic followed by nothing, or very little traffic. TTL proved to be one of the best signatures. This is due to the fact that most benign traffic is to a few locations, which are usually physically close. TTL for malicious traffic is usually smaller, either due to further physical locations, as part of the attack, or for the attacker to gain further information about the victim network.
|
||||
|
||||
Frequency, TTL, and ports could each provide some level of confidence, but with their powers combine we can achieve a fairly high level of confidence, with a low false positive rate (see paper for full details).
|
||||
|
||||
Our research shows that it is possible to provide a level of confidence without requiring deep packet inspection and without keeping a copy of the traffic. It can be used to initiate further investigation on how traffic is malicious.
|
||||
38
content/pposts/mtg_quotes.md
Normal file
38
content/pposts/mtg_quotes.md
Normal file
@@ -0,0 +1,38 @@
|
||||
---
|
||||
title: "Magic the Gathering Quotes"
|
||||
date: 2021-03-12T18:48:12Z
|
||||
draft: false
|
||||
---
|
||||
|
||||
# Intro
|
||||
|
||||
These are just some quotes I pulled off of MTG cards a long long time ago (in no particular order)
|
||||
|
||||
# Quotes
|
||||
|
||||
- Your past is a map to where you will go
|
||||
- Destruction is the work of an afternoon, creation is the work of a lifetime
|
||||
- Close your ears to the voice of greed and you can turn a gift for one into a gift for many
|
||||
- Nature is the endless dance between life and death
|
||||
- Does your passion for power consume your soul
|
||||
- You have to live life to love life
|
||||
- The simplest way to plan ahead is merely to be ready for anything
|
||||
- Destiny, chance, fate, fortune - they're all just ways of claiming your success without claiming your failures
|
||||
- Precision is often more valuable than force
|
||||
- Experience is a good teacher, not a kind one
|
||||
- Every question has a proper answer, every soul has a proper place
|
||||
- It is the duty of the strong to oppose any who threaten the weak
|
||||
- The best way to teach is by example
|
||||
- The tiniest ripple tells a story ten fathoms deep
|
||||
- Some dreams should not come to be
|
||||
- Without order comes errors
|
||||
- A lie always returns; be careful how you catch it
|
||||
- Wisdom clears my eyes
|
||||
- Every truth holds the seed of betrayal
|
||||
- A finely crafted blade will never meet as many blows on the battlefield as it did on the anvil
|
||||
- The highway of fear is the shortest route to defeat
|
||||
- There is purity in all things, even hatred
|
||||
- Good strategists seize opportunities, great strategists make their own
|
||||
- Beauty stirs the memory like a sweet perfume excites the air
|
||||
- Webs of illusion unravel in the light of truth
|
||||
|
||||
13
old/config.toml.bak
Executable file
13
old/config.toml.bak
Executable file
@@ -0,0 +1,13 @@
|
||||
baseURL = "https://flow.halvo.me/"
|
||||
languageCode = "en-us"
|
||||
title = "Flow With Halvo"
|
||||
#theme = "hugo-coder"
|
||||
#theme = "ananke"
|
||||
#theme = "beautifulhugo"
|
||||
#theme = "tranquilpeak"
|
||||
#theme = "Binario"
|
||||
theme = "hugo-theme-terminal"
|
||||
|
||||
[params]
|
||||
custom_css = ["css/table.css"]
|
||||
themeColor = "pink"
|
||||
@@ -0,0 +1 @@
|
||||
.chroma{color:#f8f8f2;background-color:#272822}.chroma .err{color:#960050;background-color:#1e0010}.chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}.chroma .lntable{border-spacing:0;padding:0;margin:0;border:0;width:auto;overflow:auto;display:block}.chroma .hl{display:block;width:100%;background-color:#ffc}.chroma .lnt{margin-right:.4em;padding:0 .4em;color:#7f7f7f}.chroma .ln{margin-right:.4em;padding:0 .4em;color:#7f7f7f}.chroma .k{color:#66d9ef}.chroma .kc{color:#66d9ef}.chroma .kd{color:#66d9ef}.chroma .kn{color:#f92672}.chroma .kp{color:#66d9ef}.chroma .kr{color:#66d9ef}.chroma .kt{color:#66d9ef}.chroma .na{color:#a6e22e}.chroma .nc{color:#a6e22e}.chroma .no{color:#66d9ef}.chroma .nd{color:#a6e22e}.chroma .ne{color:#a6e22e}.chroma .nf{color:#a6e22e}.chroma .nx{color:#a6e22e}.chroma .nt{color:#f92672}.chroma .l{color:#ae81ff}.chroma .ld{color:#e6db74}.chroma .s{color:#e6db74}.chroma .sa{color:#e6db74}.chroma .sb{color:#e6db74}.chroma .sc{color:#e6db74}.chroma .dl{color:#e6db74}.chroma .sd{color:#e6db74}.chroma .s2{color:#e6db74}.chroma .se{color:#ae81ff}.chroma .sh{color:#e6db74}.chroma .si{color:#e6db74}.chroma .sx{color:#e6db74}.chroma .sr{color:#e6db74}.chroma .s1{color:#e6db74}.chroma .ss{color:#e6db74}.chroma .m{color:#ae81ff}.chroma .mb{color:#ae81ff}.chroma .mf{color:#ae81ff}.chroma .mh{color:#ae81ff}.chroma .mi{color:#ae81ff}.chroma .il{color:#ae81ff}.chroma .mo{color:#ae81ff}.chroma .o{color:#f92672}.chroma .ow{color:#f92672}.chroma .c{color:#75715e}.chroma .ch{color:#75715e}.chroma .cm{color:#75715e}.chroma .c1{color:#75715e}.chroma .cs{color:#75715e}.chroma .cp{color:#75715e}.chroma .cpf{color:#75715e}.chroma .gd{color:#f92672}.chroma .ge{font-style:italic}.chroma .gi{color:#a6e22e}.chroma .gs{font-weight:700}.chroma .gu{color:#75715e}
|
||||
@@ -0,0 +1 @@
|
||||
{"Target":"theme/css/syntax.min.css","MediaType":"text/css","Data":{}}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
|
||||
{"Target":"style.css","MediaType":"text/css","Data":{}}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
|
||||
{"Target":"sass/main.min.2e81bbed97b8b282c1aeb57488cc71c8d8c8ec559f3931531bd396bf31e0d4dd.css","MediaType":"text/css","Data":{"Integrity":"sha256-LoG77Ze4soLBrrV0iMxxyNjI7FWfOTFTG9OWvzHg1N0="}}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
|
||||
{"Target":"css/coder.min.ec198d25949ddd79a670b1ead43ca88e0bc2c1343266d0df0a9eeb7f3f207777.css","MediaType":"text/css","Data":{"Integrity":"sha256-7BmNJZSd3XmmcLHq1DyojgvCwTQyZtDfCp7rfz8gd3c="}}
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
|
||||
{"Target":"css/style.min.eac77496566fd7d5768fd650ddb0b2b181ca6a2d7c5fdd6fe6b8ba4bf47e566f.css","MediaType":"text/css","Data":{"Integrity":"sha256-6sd0llZv19V2j9ZQ3bCysYHKai18X91v5ri6S/R+Vm8="}}
|
||||
@@ -0,0 +1 @@
|
||||
{"Target":"theme/scss/style.min.css","MediaType":"text/css","Data":{}}
|
||||
16
static/css/table.css
Executable file
16
static/css/table.css
Executable file
@@ -0,0 +1,16 @@
|
||||
table {
|
||||
border-collapse: collapse;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
th, td {
|
||||
text-align: left;
|
||||
padding: 8px;
|
||||
}
|
||||
|
||||
th {
|
||||
background-color: #d2d2d2;
|
||||
}
|
||||
|
||||
tr:nth-child(even) {background-color: #d2d2d2;}
|
||||
|
||||
BIN
static/security/FastFluxPaper.pdf
Normal file
BIN
static/security/FastFluxPaper.pdf
Normal file
Binary file not shown.
BIN
static/security/StatelessDetectionOfMaliciousTraffic.pdf
Normal file
BIN
static/security/StatelessDetectionOfMaliciousTraffic.pdf
Normal file
Binary file not shown.
1
themes/hugo-theme-terminal
Submodule
1
themes/hugo-theme-terminal
Submodule
Submodule themes/hugo-theme-terminal added at 007d7f3df6
Reference in New Issue
Block a user