fully implemented linktype rawip
This commit is contained in:
parent 09c8a7438f
commit 4bfd00ac05
@@ -2,10 +2,10 @@
 // But at this point of development it seems like this overhead is unjust.
 extern crate serde_json;
-use std::fs::File;
 use byteorder::{ByteOrder, LittleEndian};
 use std::collections::HashMap;
 use std::fs;
+use std::fs::File;
 use std::io::prelude::*;
 //use std::thread::{spawn, JoinHandle};
 //use std::sync::mpsc::{channel, Receiver};
 
@@ -28,28 +28,31 @@ pub struct Config {
 #[derive(Debug)]
 pub struct FileInfo {
     pub encapsulation_type: u16,
     pub file_size: u64,
     pub metadata: std::fs::Metadata,
 }
 
 impl FileInfo {
     fn new(file: std::path::PathBuf, encapsulation_type: u16) -> FileInfo {
         FileInfo {
             encapsulation_type: encapsulation_type,
             file_size: fs::metadata(&file).unwrap().len(),
             metadata: fs::metadata(&file).unwrap(),
         }
     }
 }
 
 pub fn from_json_file() -> Option<Config> {
     let config_file = File::open("parser.json").expect("file should open read only");
     let json: serde_json::Value = serde_json::from_reader(config_file).unwrap();
     Some(Config {
         filter: json.get("filter").unwrap().as_str().unwrap().to_owned(),
-        regex_filter: json.get("regex_filter").unwrap().as_str().unwrap().to_owned(),
+        regex_filter: json
+            .get("regex_filter")
+            .unwrap()
+            .as_str()
+            .unwrap()
+            .to_owned(),
         insert_max: json.get("insert_max").unwrap().as_u64().unwrap() as usize,
         pcap_file: json.get("pcap_file").unwrap().as_str().unwrap().to_owned(),
         connection: format!(
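As a reading aid: from_json_file() expects a parser.json whose keys match the accessors above. A hypothetical minimal example, expressed with serde_json; the connection-related keys sit outside this hunk and are left out:

    // Hypothetical config values; only the keys visible in this hunk.
    let example = serde_json::json!({
        "filter": "tcp port 443",
        "regex_filter": "user=[a-z]+",
        "insert_max": 20000,
        "pcap_file": "capture.pcap"
    });
    assert_eq!(example["insert_max"].as_u64(), Some(20000));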
@@ -69,25 +72,27 @@ pub fn from_json_file() -> Option<Config> {
     })
 }
 
 /*
 File signature and encapsulation type from file
 See: https://www.tcpdump.org/linktypes.html
 */
 // Further: file.len() is included in metadata() but only shows up if called explicitly. Maybe this is not needed at all in the end
 // This would be needed for comparability over time. print metadata and you will see
-fn bytes_from_file( entry: std::path::PathBuf ) -> Result<([u8;4], u16, u16), std::io::Error> {
-    let mut magic_number: [u8;4] = [0;4];
-    let mut buffer: [u8;32] = [0;32];
+fn bytes_from_file(entry: std::path::PathBuf) -> Result<([u8; 4], u16, u16), std::io::Error> {
+    let mut magic_number: [u8; 4] = [0; 4];
+    let mut buffer: [u8; 32] = [0; 32];
     let mut _file = File::open(entry.to_owned())?;
     _file.read_exact(&mut buffer)?;
     magic_number.clone_from_slice(&buffer[0..4]);
     let enc_pcap: u16 = LittleEndian::read_u16(&buffer[20..22]);
     let enc_pcapng: u16 = LittleEndian::read_u16(&buffer[12..14]);
 
     Ok((magic_number, enc_pcap, enc_pcapng))
 }
 
-pub fn map_pcap_dir ( pcap_dir: &str ) -> Option<std::collections::HashMap<std::path::PathBuf, FileInfo>> {
+pub fn map_pcap_dir(
+    pcap_dir: &str,
+) -> Option<std::collections::HashMap<std::path::PathBuf, FileInfo>> {
     // Well, this conditional intermezzo seems to be best practice. See std::fs doc
     let mut pcap_map = HashMap::new();
     if let Ok(entries) = fs::read_dir(pcap_dir) {
@@ -95,10 +100,14 @@ pub fn map_pcap_dir ( pcap_dir: &str ) -> Option<std::collections::HashMap<std::
         if let Ok(entry) = entry {
             if let Ok(_file_type) = entry.file_type() {
                 if entry.metadata().unwrap().is_file() {
-                    let (magic_number, enc_pcap, enc_pcapng) = bytes_from_file(entry.path()).unwrap();
+                    let (magic_number, enc_pcap, enc_pcapng) =
+                        bytes_from_file(entry.path()).unwrap();
                     match magic_number {
-                        PCAPNG_SIGNATURE => pcap_map.insert(entry.path(), FileInfo::new(entry.path(), enc_pcapng) ),
-                        PCAP_SIGNATURE | PCAP_SIGNATURE_BE => pcap_map.insert(entry.path(), FileInfo::new(entry.path(), enc_pcap)), // TEST: Endianness for SIGNATURE_BE may be incorrect after introducing fn bytes_from_file()
+                        PCAPNG_SIGNATURE => pcap_map
+                            .insert(entry.path(), FileInfo::new(entry.path(), enc_pcapng)),
+                        PCAP_SIGNATURE | PCAP_SIGNATURE_BE => {
+                            pcap_map.insert(entry.path(), FileInfo::new(entry.path(), enc_pcap))
+                        } // TEST: Endianness for SIGNATURE_BE may be incorrect after introducing fn bytes_from_file()
                         _ => None,
                     };
                     // println!("{:?}", &entry.metadata().unwrap().modified());
@@ -111,5 +120,3 @@ pub fn map_pcap_dir ( pcap_dir: &str ) -> Option<std::collections::HashMap<std::
     }
     Some(pcap_map)
 }
-
-
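bytes_from_file() keys the match in map_pcap_dir() off the first four bytes of each file. A standalone sketch of that dispatch, assuming the commonly documented signatures (0xa1b2c3d4 for classic pcap, byte-swapped when written little-endian; 0x0a0d0d0a for a pcapng Section Header Block) rather than the constants defined elsewhere in this file:

    // Sketch only; signature values assumed from tcpdump.org, not read out of this commit.
    const PCAP_LE: [u8; 4] = [0xd4, 0xc3, 0xb2, 0xa1]; // classic pcap written little-endian
    const PCAP_BE: [u8; 4] = [0xa1, 0xb2, 0xc3, 0xd4]; // classic pcap written big-endian
    const PCAPNG: [u8; 4] = [0x0a, 0x0d, 0x0d, 0x0a]; // pcapng Section Header Block type

    fn capture_kind(magic: [u8; 4]) -> &'static str {
        match magic {
            PCAP_LE | PCAP_BE => "pcap",
            PCAPNG => "pcapng",
            _ => "unknown",
        }
    }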
src/main.rs (143 changed lines)
@@ -8,9 +8,7 @@ mod serializer;
 use tokio_postgres::types::ToSql;
 use tokio_postgres::{Error, NoTls};
-
-
 fn query_string(insert_max: &usize) -> String {
 
     let mut insert_template = String::with_capacity(insert_max * 8 + 43);
     insert_template.push_str("INSERT INTO json_dump (packet) Values ");
 
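query_string() assembles one multi-row INSERT with numbered placeholders; the generating loop lies outside this hunk, so the following is only a sketch of the presumed result, e.g. "INSERT INTO json_dump (packet) Values ($1), ($2), ($3)" for insert_max = 3:

    // Presumed shape of the builder; the real loop body is not shown in this diff.
    fn query_string_sketch(insert_max: usize) -> String {
        let mut s = String::from("INSERT INTO json_dump (packet) Values ");
        for i in 1..=insert_max {
            s.push_str(&format!("(${})", i));
            if i < insert_max {
                s.push_str(", ");
            }
        }
        s
    }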
@@ -30,11 +28,11 @@ async fn main() -> Result<(), Error> {
 
     // TODO: hash file metadata, so its state is comparable at times and can be written to a db table (e.g. after system crash)
     // This db table should include UUIDs so it can be joined effectively
-    let pcap_map = configure::map_pcap_dir( &config.pcap_dir ).unwrap();
+    let pcap_map = configure::map_pcap_dir(&config.pcap_dir).unwrap();
 
     println!("{:?}", pcap_map.iter());
 
     /* db connection */
     let (client, connection) = tokio_postgres::connect(&config.connection, NoTls).await?;
 
     tokio::spawn(async move {
@@ -54,75 +52,86 @@ async fn main() -> Result<(), Error> {
         .await?;
 
     /* device or file input */
 
     match config.is_device {
-        false => for (_pcap_file, _pcap_info) in pcap_map.iter() {
-            println!("{:?}",&_pcap_file);
+        false => {
+            for (_pcap_file, _pcap_info) in pcap_map.iter() {
+                println!("{:?}", &_pcap_file);
                 // TODO: Tuning vector capacity according to mean average & std dev of packet size
-            let v: Vec<parser::QryData> = parser::parse(&_pcap_file, &config.filter, &config.regex_filter);
+                let v: Vec<parser::QryData> =
+                    parser::parse(&_pcap_file, &config.filter, &config.regex_filter);
                 //let mut v = Vec::<parser::QryData>::with_capacity(35536);
                 //v.extend(parser::parse(&_pcap_file, &config.filter));
 
                 let packets_serialized = serializer::serialize_packets(v);
                 //let mut packets_serialized = Vec::<serde_json::Value>::with_capacity(35536);
                 //packets_serialized.extend(serializer::serialize_packets(v));
 
                 /* Query */
                 let chunk_count = packets_serialized.len() / config.insert_max;
                 let remainder: usize = packets_serialized.len() % config.insert_max;
                 let chunker = &packets_serialized.len() < &config.insert_max;
                 match chunker {
                     true => {
                         let insert_str = query_string(&packets_serialized.len());
                         let statement_false = client.prepare(&insert_str).await?;
                         client
                             .query_raw(
                                 &statement_false,
                                 packets_serialized.iter().map(|p| p as &dyn ToSql),
                             )
                             .await?;
                     }
 
                     false => {
                         let insert_str = query_string(&config.insert_max);
                         let statement = client.prepare(&insert_str).await?;
 
                         for _i in 0..chunk_count {
                             let (_input, _) = packets_serialized.split_at(config.insert_max);
                             client
-                                .query_raw(&statement, _input.to_vec().iter().map(|p| p as &dyn ToSql))
+                                .query_raw(
+                                    &statement,
+                                    _input.to_vec().iter().map(|p| p as &dyn ToSql),
+                                )
                                 .await?;
                         }
 
                         if remainder > 0 {
                             let rem_str = query_string(&remainder);
                             let statement_remainder = client.prepare(&rem_str).await?;
                             let (_garbage, _input) =
                                 packets_serialized.split_at(packets_serialized.len() - remainder);
                             client
                                 .query_raw(
                                     &statement_remainder,
                                     _input.to_vec().iter().map(|p| p as &dyn ToSql),
                                 )
                                 .await?;
                         }
                     }
                 }
             }
-        },
+        }
         true => {
             let insert_str = query_string(&config.insert_max);
             let statement = client.prepare(&insert_str).await?;
             loop {
-                let v: Vec<parser::QryData> = parser::parse_device(&config.device, &config.filter, &config.insert_max, &config.regex_filter);
+                let v: Vec<parser::QryData> = parser::parse_device(
+                    &config.device,
+                    &config.filter,
+                    &config.insert_max,
+                    &config.regex_filter,
+                );
                 let packets_serialized = serializer::serialize_packets(v);
                 client
                     .query_raw(
-                        &statement_false,
+                        &statement,
                         packets_serialized.iter().map(|p| p as &dyn ToSql),
                     )
                     .await?;
             }
-        },
+        }
     }
     Ok(())
 }
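The chunker arithmetic above, worked through with made-up sizes so the three statements (statement_false, statement, statement_remainder) line up with their batch counts:

    // Hypothetical sizes, purely to illustrate the division.
    let packets = 10_000usize;
    let insert_max = 3_000usize;
    let chunk_count = packets / insert_max; // 3 full insert_max-sized batches
    let remainder = packets % insert_max; // 1_000 rows for statement_remainder
    assert_eq!(chunk_count * insert_max + remainder, packets);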
 
@@ -4,29 +4,29 @@ extern crate eui48;
 mod packet_handler;
 use pcap::{Capture, Linktype};
 use regex::bytes::Regex;
-use std::str;
 use std::convert::TryInto;
+use std::str;
 //use std::thread::{spawn, JoinHandle};
 //use std::sync::mpsc::{channel, Receiver};
 
 /* protocol ids, LittleEndian */
 const ETH_P_IPV6: usize = 0xDD86;
 const ETH_P_IP: usize = 0x08;
 const TCP: usize = 0x06;
 const UDP: usize = 0x11;
 const ETH_P_ARP: usize = 0x0608;
 const ETH_P_RARP: usize = 0x3580;
+const ETHER_HDRLEN: usize = 14;
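Those ids are deliberately byte-swapped: the parser compares them against fields read as LittleEndian, so IPv6's wire EtherType 0x86DD turns up as 0xDD86:

    // Why ETH_P_IPV6 is 0xDD86: a little-endian u16 read of the wire bytes 86 dd.
    let wire: [u8; 2] = [0x86, 0xdd];
    assert_eq!(u16::from_le_bytes(wire), 0xdd86);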
 
 /*
 QryData could be written in the sense of QryData{ ... frame: .., packet: .., segment: .. }
 On the one hand, only the actual type of frame/packet/segment would be contained in the resulting struct.
 So, increased benefit in serialization/cpu time, could result in less data to be serialized, depending on layout.
 On the other hand, each datagram::type needs to implement traits which would need to be dynamically dispatched by returning any of these types per iso level from a single function each. The result would be a performance decrease.
 See: https://doc.rust-lang.org/book/ch10-02-traits.html#returning-types-that-implement-traits
 See: https://doc.rust-lang.org/book/ch17-02-trait-objects.html#trait-objects-perform-dynamic-dispatch
 Then again, parser logic would be fewer lines + more unified using the latter method. Maybe better optimizable as well? Maybe this is a nice tradeoff?
 TODO: Implement and benchmark dynamically dispatched packet data in conjunction with restructured QryData.
 */
 #[derive(Debug, Clone)]
 pub struct QryData {
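For the record, a minimal sketch of the trait-object layout that comment block weighs; the types here are hypothetical and not part of this commit:

    trait Layer: std::fmt::Debug {}

    #[derive(Debug)]
    struct TcpSegment;
    impl Layer for TcpSegment {}

    #[derive(Debug)]
    struct UdpSegment;
    impl Layer for UdpSegment {}

    // One function per ISO level could return a boxed trait object: the parser
    // match stays small, but every later use pays a vtable indirection.
    fn segment(protocol: usize) -> Option<Box<dyn Layer>> {
        match protocol {
            0x06 => Some(Box::new(TcpSegment)), // TCP
            0x11 => Some(Box::new(UdpSegment)), // UDP
            _ => None,
        }
    }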
@@ -42,115 +42,120 @@ pub struct QryData {
     pub reg_res: Option<String>,
 }
 
-fn init_qrydata( ) -> Result<QryData, core::fmt::Error> {
-    Ok(QryData {
-        id: 0,
-        time: 0.0,
-        data: None,
-        ether_header: None::<packet_handler::EtherHeader>,
-        ipv4_header: None::<packet_handler::IpV4Header>,
-        ipv6_header: None::<packet_handler::IpV6Header>,
-        tcp_header: None::<packet_handler::TcpHeader>,
-        udp_header: None::<packet_handler::UdpHeader>,
-        arp_header: None::<packet_handler::ArpHeader>,
-        reg_res: None::<String>,
-    })
-}
-
 #[allow(dead_code)]
-enum EncapsulationType { // pcap::Linktype::get_name() is unsafe.
+enum EncapsulationType {
+    // pcap::Linktype::get_name() is unsafe.
     EN10MB = 1, // See: https://docs.rs/pcap/0.7.0/src/pcap/lib.rs.html#247-261
     RAW = 101, // Would this be an issue?
 }
 
 impl QryData {
     // This is not cool!
-    // This will get modularized into subfunctions
-    fn encap_en10mb( packet_data: &[u8] ) -> Option<QryData> {
-        let mut pkg: QryData = init_qrydata().unwrap();
-        pkg.ether_header = Some(packet_handler::ethernet_handler(packet_data)).unwrap();
-        match pkg.ether_header.unwrap().ether_type as usize {
+    // Implementing object-oriented is slower by 3-10%. Variance is all over the place. It's awful but modular!
+    // Guess I'll do a rollback and do a different design
+
+    fn new() -> QryData {
+        QryData {
+            id: 0,
+            time: 0.0,
+            data: None,
+            ether_header: None::<packet_handler::EtherHeader>,
+            ipv4_header: None::<packet_handler::IpV4Header>,
+            ipv6_header: None::<packet_handler::IpV6Header>,
+            tcp_header: None::<packet_handler::TcpHeader>,
+            udp_header: None::<packet_handler::UdpHeader>,
+            arp_header: None::<packet_handler::ArpHeader>,
+            reg_res: None::<String>,
+        }
+    }
+
+    fn encap_en10mb(&mut self, packet_data: &[u8]) -> Result<(), core::fmt::Error> {
+        //let mut pkg: QryData = new().unwrap();
+        self.ether_header = Some(packet_handler::ethernet_handler(packet_data)).unwrap();
+        match self.ether_header.unwrap().ether_type as usize {
             ETH_P_IP => {
-                pkg.ipv4_header = Some(packet_handler::ip_handler(packet_data)).unwrap();
-                let protocol_type = pkg.ipv4_header.unwrap().ip_protocol.clone() as usize;
-                let l3_header_length = pkg.ipv4_header.unwrap().ip_ihl;
-                pkg.transport_layer(packet_data, protocol_type, l3_header_length).unwrap();
+                self.ipv4_header = Some(packet_handler::ip_handler(packet_data, ETHER_HDRLEN)).unwrap();
+                let protocol_type = self.ipv4_header.unwrap().ip_protocol as usize;
+                let l3_header_length = self.ipv4_header.unwrap().ip_ihl;
+                self.transport_layer(packet_data, protocol_type, l3_header_length, ETHER_HDRLEN)
+                    .unwrap();
             }
             ETH_P_IPV6 => {
-                pkg.ipv6_header = Some(packet_handler::ipv6_handler(packet_data)).unwrap();
-                let protocol_type = pkg.ipv6_header.unwrap().next_header.clone() as usize;
-                pkg.transport_layer(packet_data, protocol_type, 10).unwrap();
+                self.ipv6_header = Some(packet_handler::ipv6_handler(packet_data, ETHER_HDRLEN)).unwrap();
+                let protocol_type = self.ipv6_header.unwrap().next_header as usize;
+                self.transport_layer(packet_data, protocol_type, 10, ETHER_HDRLEN)
+                    .unwrap();
             }
             ETH_P_ARP | ETH_P_RARP => {
-                pkg.arp_header = Some(packet_handler::arp_handler(packet_data)).unwrap();
+                self.arp_header = Some(packet_handler::arp_handler(packet_data, ETHER_HDRLEN)).unwrap();
             }
             _ => println!("Network protocol not implemented"),
         }
-        Some(pkg)
+        Ok(())
     }
 
-    fn encap_raw ( packet_data: &[u8] ) -> Option<QryData> {
-        let mut pkg: QryData = init_qrydata().unwrap();
+    fn encap_raw(&mut self, packet_data: &[u8]) -> Result<(), core::fmt::Error> {
+        // let mut pkg: QryData = new().unwrap();
         let ip_version: usize = ((packet_data[0] & 0xf0) >> 4).try_into().unwrap();
+        //println!("{:?}", &ip_version);
         match ip_version {
             4 => {
-                pkg.ipv4_header = Some(packet_handler::ip_handler(packet_data)).unwrap();
-                let protocol_type = pkg.ipv4_header.unwrap().ip_protocol.clone() as usize;
-                let l3_header_length = pkg.ipv4_header.unwrap().ip_ihl;
-                pkg.transport_layer(packet_data, protocol_type, l3_header_length).unwrap();
+                //println!("v4");
+                self.ipv4_header = Some(packet_handler::ip_handler(packet_data, 0)).unwrap();
+                let protocol_type = self.ipv4_header.unwrap().ip_protocol as usize;
+                let l3_header_length = self.ipv4_header.unwrap().ip_ihl;
+                self.transport_layer(packet_data, protocol_type, l3_header_length, 0)
+                    .unwrap();
             }
             6 => {
-                pkg.ipv6_header = Some(packet_handler::ipv6_handler(packet_data)).unwrap();
-                let protocol_type = pkg.ipv6_header.unwrap().next_header.clone() as usize;
-                pkg.transport_layer(packet_data, protocol_type, 10).unwrap();
+                //println!("v6");
+                self.ipv6_header = Some(packet_handler::ipv6_handler(packet_data, 0)).unwrap();
+                let protocol_type = self.ipv6_header.unwrap().next_header as usize;
+                self.transport_layer(packet_data, protocol_type, 10, 0)
+                    .unwrap();
             }
-            _ => println!("Network Protocol not implemented")
+            _ => println!("Network Protocol not implemented"),
         }
-        Some(pkg)
+        Ok(())
     }
 
     // TODO: impl correct Err type and use in Result
-    fn transport_layer (&mut self, packet_data: &[u8], protocol_type: usize, l3_header_length: u32) -> Result<(), core::fmt::Error> {
+    fn transport_layer(
+        &mut self,
+        packet_data: &[u8],
+        protocol_type: usize,
+        l3_header_length: u32,
+        ether_hdrlen: usize,
+    ) -> Result<(), core::fmt::Error> {
         match protocol_type {
             TCP => {
-                self.tcp_header = Some(packet_handler::tcp_handler(
-                    l3_header_length,
-                    packet_data,
-                ))
-                .unwrap();
+                self.tcp_header =
+                    Some(packet_handler::tcp_handler(l3_header_length, packet_data, ether_hdrlen)).unwrap();
                 self.data = Some(packet_handler::payload_handler(
                     l3_header_length,
                     self.tcp_header.unwrap().data_offset,
                     packet_data,
-                )).unwrap();
+                    ether_hdrlen,
+                ))
+                .unwrap();
             }
             UDP => {
-                self.udp_header = Some(packet_handler::udp_handler(
-                    l3_header_length,
-                    packet_data,
-                ))
-                .unwrap();
+                self.udp_header =
+                    Some(packet_handler::udp_handler(l3_header_length, packet_data, ether_hdrlen)).unwrap();
                 self.data = Some(packet_handler::payload_handler(
                     l3_header_length,
                     7,
                     packet_data,
-                )).unwrap();
+                    ether_hdrlen,
+                ))
+                .unwrap();
             }
             _ => println!("Transport layer protocol not implemented"),
         }
         Ok(())
     }
 }
 
 /* Regex parse _complete_ package */
 fn flag_carnage(re: &Regex, payload: &[u8]) -> Option<String> {
     let mut flags: String = String::new();
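encap_raw() works because a LINKTYPE_RAW packet starts directly with the IP header, so the version number sits in the high nibble of byte 0:

    // Typical first byte of an IPv4 header: 0x45 = version 4, IHL 5.
    let first_byte: u8 = 0x45;
    assert_eq!((first_byte & 0xf0) >> 4, 4);
    // An IPv6 header carries 0x6 in the high nibble, e.g. 0x60.
    assert_eq!((0x60u8 & 0xf0) >> 4, 6);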
@@ -168,7 +173,7 @@ fn flag_carnage(re: &Regex, payload: &[u8]) -> Option<String> {
 }
 
 pub fn parse(parse_file: &std::path::Path, filter_str: &str, regex_filter: &str) -> Vec<QryData> {
-    let mut me: QryData = init_qrydata().unwrap();
+    //let mut me: QryData = QryData::new();
     let mut v: Vec<QryData> = Vec::new();
 
     let mut cap = Capture::from_file(parse_file).unwrap();
@@ -177,43 +182,58 @@ pub fn parse(parse_file: &std::path::Path, filter_str: &str, regex_filter: &str)
     println!("{:?}", &linktype);
     let re = Regex::new(regex_filter).unwrap();
     while let Ok(packet) = cap.next() {
+        let mut me = QryData::new();
         match linktype {
             // Syntax is clunky, but no num_derive + num_traits dependencies.
-            Linktype(1) => me = QryData::encap_en10mb(packet.data).unwrap(), // EN10MB
-            Linktype(101) => me = QryData::encap_raw(packet.data).unwrap(), // RAW
+            Linktype(1) => me.encap_en10mb(packet.data).unwrap(), // EN10MB
+            Linktype(12) => me.encap_raw(packet.data).unwrap(), // RAW
             _ => (),
         };
 
         me.time = (packet.header.ts.tv_usec as f64 / 1000000.0) + packet.header.ts.tv_sec as f64;
         //me.data = Some(packet.data.to_vec());
         me.reg_res = flag_carnage(&re, packet.data); // Regex overhead is between 4-9% --single threaded-- on complete packet [u8] data
-        v.push(me.clone());
+        //v.push(me.clone());
+        v.push(QryData {
+            id: 0,
+            time: me.time,
+            data: me.data,
+            ether_header: me.ether_header,
+            ipv4_header: me.ipv4_header,
+            ipv6_header: me.ipv6_header,
+            tcp_header: me.tcp_header,
+            udp_header: me.udp_header,
+            arp_header: me.arp_header,
+            reg_res: me.reg_res,
+        });
     }
     v
 }
 
 /* This could need some love */
-pub fn parse_device(parse_device: &str, filter_str: &str, insert_max: &usize, regex_filter: &str) -> Vec<QryData> {
-    let mut me: QryData = init_qrydata ( ).unwrap();
+pub fn parse_device(
+    parse_device: &str,
+    filter_str: &str,
+    insert_max: &usize,
+    regex_filter: &str,
+) -> Vec<QryData> {
+    //let mut me: QryData = QryData::new ( );
     let mut v: Vec<QryData> = Vec::new();
     let mut cap = Capture::from_device(parse_device).unwrap().open().unwrap();
     Capture::filter(&mut cap, &filter_str).unwrap();
     let linktype = cap.get_datalink();
     let re = Regex::new(regex_filter).unwrap();
     'parse: while let Ok(packet) = cap.next() {
+        let mut me = QryData::new();
         match linktype {
-            Linktype(1) => me = QryData::encap_en10mb(packet.data).unwrap(),
-            Linktype(101) => me = QryData::encap_raw(packet.data).unwrap(),
+            Linktype(1) => me.encap_en10mb(packet.data).unwrap(),
+            Linktype(101) => me.encap_raw(packet.data).unwrap(),
             _ => (),
-        }
+        };
 
         me.time = (packet.header.ts.tv_usec as f64 / 1000000.0) + packet.header.ts.tv_sec as f64;
         me.reg_res = flag_carnage(&re, packet.data);
 
         v.push(me.clone());
 
         if &v.len() >= insert_max {
             break 'parse;
         }
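Note the asymmetry: parse() now matches Linktype(12) for raw IP while parse_device() keeps Linktype(101). LINKTYPE_RAW is registered as 101 in the file format, but the DLT_RAW value live captures report is 12 on most platforms, which would explain the split. A helper covering both, as an assumption on top of this commit:

    // Assumed equivalence per tcpdump.org/linktypes.html (LINKTYPE_RAW = 101)
    // and the conventional DLT_RAW = 12; not part of this commit.
    fn is_raw_ip(linktype: pcap::Linktype) -> bool {
        matches!(linktype, pcap::Linktype(12) | pcap::Linktype(101))
    }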
@@ -1,7 +1,7 @@
 extern crate bitfield;
 extern crate byteorder;
 extern crate eui48;
-extern crate serde;
+pub extern crate serde;
 use bitfield::bitfield;
 use byteorder::{BigEndian, ByteOrder, LittleEndian};
 use eui48::{Eui48, MacAddress};
@@ -13,14 +13,13 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 /* ethernet */
 const ETH_ALEN: usize = 6;
 const ETH_TLEN: usize = 2;
-const ETHER_HDRLEN: usize = 14;
+//const ETHER_HDRLEN: usize = 14;
 
-#[derive(Debug, Clone, Copy,Serialize, Deserialize)]
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
 pub struct EtherHeader {
     pub ether_dhost: MacAddress,
     pub ether_shost: MacAddress,
     pub ether_type: i32,
 }
 
 // TODO: implement optional ethernet vlan shim header fields
@@ -89,9 +88,9 @@ impl<T: AsRef<[u8]> + AsMut<[u8]>> BitfieldIpV4Header<T> {
     }
 }
 
-pub fn ip_handler(packet_data: &[u8]) -> Option<IpV4Header> {
+pub fn ip_handler(packet_data: &[u8], ether_hdrlen: usize) -> Option<IpV4Header> {
     let mut raw_hdr: [u8; 20] = [0; 20];
-    raw_hdr.copy_from_slice(&packet_data[ETHER_HDRLEN..ETHER_HDRLEN + 20]);
+    raw_hdr.copy_from_slice(&packet_data[ether_hdrlen..ether_hdrlen + 20]);
     let ip_header = BitfieldIpV4Header(raw_hdr);
 
     Some(IpV4Header {
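Threading ether_hdrlen through the handlers is what makes the raw linktype work: the same slice math now serves both encapsulations.

    // Offsets after this change: EN10MB passes ETHER_HDRLEN (14), RAW passes 0.
    let ether_hdrlen = 14usize;
    assert_eq!(ether_hdrlen..ether_hdrlen + 20, 14..34); // IPv4 header behind Ethernet
    let raw = 0..20; // with LINKTYPE_RAW the IPv4 header is the first 20 bytes
    assert_eq!(raw.len(), 20);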
@@ -125,9 +124,9 @@ pub struct IpV6Header {
     pub destination_address: IpAddr,
 }
 
-pub fn ipv6_handler(packet_data: &[u8]) -> Option<IpV6Header> {
+pub fn ipv6_handler(packet_data: &[u8], ether_hdrlen: usize) -> Option<IpV6Header> {
     let mut raw_hdr: [u8; 40] = [0; 40];
-    raw_hdr.copy_from_slice(&packet_data[ETHER_HDRLEN..ETHER_HDRLEN + 40]);
+    raw_hdr.copy_from_slice(&packet_data[ether_hdrlen..ether_hdrlen + 40]);
 
     Some(IpV6Header {
         version: (&raw_hdr[0] & 0xf0) >> 4,
@@ -187,10 +186,10 @@ bitfield! {
     get_urgent_pointer, _: 159,144;
 }
 
-pub fn tcp_handler(ip_hlen: u32, packet_data: &[u8]) -> Option<TcpHeader> {
+pub fn tcp_handler(ip_hlen: u32, packet_data: &[u8], ether_hdrlen: usize) -> Option<TcpHeader> {
     let mut raw_hdr: [u8; 20] = [0; 20];
     raw_hdr.copy_from_slice(
-        &packet_data[ETHER_HDRLEN + ip_hlen as usize * 4..ETHER_HDRLEN + ip_hlen as usize * 4 + 20],
+        &packet_data[ether_hdrlen + ip_hlen as usize * 4..ether_hdrlen + ip_hlen as usize * 4 + 20],
     );
     let tcp_header = BitfieldTcpHeader(raw_hdr);
 
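The slice bounds above, worked through for a minimal IPv4 header (IHL = 5 words):

    let ip_hlen = 5usize; // IHL counted in 32-bit words
    assert_eq!(14 + ip_hlen * 4, 34); // EN10MB: TCP header occupies bytes 34..54
    assert_eq!(ip_hlen * 4, 20); // RAW: TCP header occupies bytes 20..40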
@@ -221,40 +220,38 @@ pub fn tcp_handler(ip_hlen: u32, packet_data: &[u8]) -> Option<TcpHeader> {
 pub struct ArpHeader {
     pub htype: u16,
     pub ptype: u16,
     pub hlen: u8,
     pub plen: u8,
     pub oper: u16,
     pub sha: MacAddress,
     pub spa: IpAddr,
     pub tha: MacAddress,
     pub tpa: IpAddr,
 }
 
-pub fn arp_handler(packet_data: &[u8]) -> Option<ArpHeader> {
+pub fn arp_handler(packet_data: &[u8], ether_hdrlen: usize) -> Option<ArpHeader> {
     let mut raw_hdr: [u8; 28] = [0; 28];
-    raw_hdr.copy_from_slice(
-        &packet_data[ETHER_HDRLEN .. ETHER_HDRLEN + 28]
-    );
+    raw_hdr.copy_from_slice(&packet_data[ether_hdrlen..ether_hdrlen + 28]);
 
     let mut _sha: [u8; 6] = [0; 6];
     let mut _tha: [u8; 6] = [0; 6];
 
     _sha.copy_from_slice(&raw_hdr[8..14]);
     _tha.copy_from_slice(&raw_hdr[18..24]);
 
-    Some(ArpHeader{
+    Some(ArpHeader {
         htype: BigEndian::read_u16(&raw_hdr[0..2]),
         ptype: BigEndian::read_u16(&raw_hdr[2..4]),
         hlen: raw_hdr[4],
         plen: raw_hdr[5],
         oper: BigEndian::read_u16(&raw_hdr[6..8]),
         sha: MacAddress::new(_sha as Eui48),
         spa: IpAddr::V4(Ipv4Addr::from(BigEndian::read_u32(&raw_hdr[14..18]))),
-        tha: MacAddress::new( _tha as Eui48 ),
+        tha: MacAddress::new(_tha as Eui48),
         tpa: IpAddr::V4(Ipv4Addr::from(BigEndian::read_u32(&raw_hdr[24..28]))),
     })
 }
 
 /* udp */
 #[derive(Debug, Copy, Clone, Serialize, Deserialize)]
 pub struct UdpHeader {
@@ -264,10 +261,10 @@ pub struct UdpHeader {
     pub checksum: u16,
 }
 
-pub fn udp_handler(ip_hlen: u32, packet_data: &[u8]) -> Option<UdpHeader> {
+pub fn udp_handler(ip_hlen: u32, packet_data: &[u8], ether_hdrlen: usize) -> Option<UdpHeader> {
     let mut raw_hdr: [u8; 8] = [0; 8];
     raw_hdr.copy_from_slice(
-        &packet_data[ETHER_HDRLEN + ip_hlen as usize * 4..ETHER_HDRLEN + ip_hlen as usize * 4 + 8],
+        &packet_data[ether_hdrlen + ip_hlen as usize * 4..ether_hdrlen + ip_hlen as usize * 4 + 8],
     );
 
     Some(UdpHeader {
@@ -279,8 +276,8 @@ pub fn udp_handler(ip_hlen: u32, packet_data: &[u8]) -> Option<UdpHeader> {
 }
 
 /* payload */
-pub fn payload_handler(ip_hlen: u32, data_offset: u32, packet_data: &[u8]) -> Option<Vec<u8>> {
+pub fn payload_handler(ip_hlen: u32, data_offset: u32, packet_data: &[u8], ether_hdrlen: usize) -> Option<Vec<u8>> {
     let (_head, tail) =
-        packet_data.split_at(ETHER_HDRLEN + ip_hlen as usize * 4 + data_offset as usize * 4);
+        packet_data.split_at(ether_hdrlen + ip_hlen as usize * 4 + data_offset as usize * 4);
     Some(tail.to_vec())
 }
@@ -27,29 +27,35 @@ impl Serialize for parser::QryData {
 }
 
 pub fn serialize_packets(v: Vec<parser::QryData>) -> Vec<serde_json::Value> {
     /* rayon parallelized */
     // TODO: Benchmark. As far as I tested, this reaps no benefit.
-    let packets_serialized = v.par_iter().map(|x| serde_json::to_value(x).unwrap()).collect();
+    let packets_serialized = v
+        .par_iter()
+        .map(|x| serde_json::to_value(x).unwrap())
+        .collect();
     //let packets_serialized: Vec<serde_json::Value> = v.par_iter().map(|x| json!(x)).collect();
 
     packets_serialized
 }
 
 // This is way faster than serialize_packets() but I can not figure out how to properly select parts from the resulting json structure as an sql query
 #[allow(dead_code)]
 pub fn serialize_packets_as_string(v: Vec<parser::QryData>) -> Vec<serde_json::Value> {
     let mut packets_serialized: Vec<serde_json::Value> = Vec::with_capacity(v.len() * 2);
-    for packet in v.iter() {
-        packets_serialized.push(serde_json::Value::String(serde_json::to_string(&packet).unwrap()));
-    }
+    for packet in v.iter() {
+        packets_serialized.push(serde_json::Value::String(
+            serde_json::to_string(&packet).unwrap(),
+        ));
+    }
 
     packets_serialized
 }
 
 #[allow(dead_code)]
-fn smallest_prime_divisor(remainder: usize ) -> usize {
-    let smallest_divisor: usize = (2..(remainder/2)).into_par_iter().find_first(|x| remainder % x == 0).unwrap();
+fn smallest_prime_divisor(remainder: usize) -> usize {
+    let smallest_divisor: usize = (2..(remainder / 2))
+        .into_par_iter()
+        .find_first(|x| remainder % x == 0)
+        .unwrap();
     smallest_divisor
 }
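The rayon pattern used by serialize_packets(), shown on toy data (assumes rayon's prelude, which this file already pulls in for par_iter):

    use rayon::prelude::*;
    let squares: Vec<i32> = (1..5).into_par_iter().map(|x| x * x).collect();
    assert_eq!(squares, vec![1, 4, 9, 16]);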