implemented regex parser to stdout

parent 2f3d08b21d
commit 8e553bee9e

 src/main.rs | 99
@@ -1,13 +1,13 @@
-extern crate tokio_postgres;
 extern crate serde_json;
 extern crate tokio;
+extern crate tokio_postgres;
 use rayon::prelude::*;
-use serde::ser::{Serialize, Serializer, SerializeStruct};
-use std::fs::File;
+use serde::ser::{Serialize, SerializeStruct, Serializer};
 use serde_json::json;
+use std::fs::File;
 mod parser;
 use tokio_postgres::types::ToSql;
-use tokio_postgres::{NoTls, Error};
+use tokio_postgres::{Error, NoTls};
 
 impl Serialize for parser::QryData {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@@ -44,7 +44,9 @@ fn serialize_packets ( v: Vec<parser::QryData> ) -> Vec<serde_json::Value> {
 fn query_string(insert_max: &usize) -> String {
     let mut insert_template: String = "INSERT INTO json_dump (packet) Values ".to_owned();
 
-    for insert in 0..insert_max-1 { insert_template.push_str( &(format!("(${}), ", insert+1)) );}
+    for insert in 0..insert_max - 1 {
+        insert_template.push_str(&(format!("(${}), ", insert + 1)));
+    }
     insert_template.push_str(&(format!("(${})", insert_max)));
 
     insert_template
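Note (illustration, not part of the commit): to see what query_string() actually builds, the standalone sketch below reproduces the reformatted function and checks its output for three placeholders; only main() is new here.

    fn query_string(insert_max: &usize) -> String {
        let mut insert_template: String = "INSERT INTO json_dump (packet) Values ".to_owned();
        for insert in 0..insert_max - 1 {
            insert_template.push_str(&(format!("(${}), ", insert + 1)));
        }
        insert_template.push_str(&(format!("(${})", insert_max)));
        insert_template
    }

    fn main() {
        // Three packets -> three numbered placeholders.
        assert_eq!(
            query_string(&3),
            "INSERT INTO json_dump (packet) Values ($1), ($2), ($3)"
        );
        // Caveat: insert_max == 0 would underflow `insert_max - 1` (usize) in the loop bound.
    }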
@@ -52,7 +54,6 @@ fn query_string ( insert_max: &usize ) -> String {
 
 #[tokio::main(core_threads = 4)] // By default, tokio_postgres uses the tokio crate as its runtime.
 async fn main() -> Result<(), Error> {
-
     /* Init values from file */
     let file = File::open("parser.json").expect("file should open read only");
 
@@ -60,17 +61,27 @@ async fn main() -> Result<(), Error> {
     let filter = json.get("filter").unwrap().as_str().unwrap();
     let insert_max = json.get("insert_max").unwrap().as_u64().unwrap() as usize;
     let pcap_file = json.get("pcap_file").unwrap().as_str().unwrap();
-    let host = ["host=", json.get("database_host").unwrap().as_str().unwrap()].join("");
-    let user = ["user=", json.get("database_user").unwrap().as_str().unwrap()].join("");
-    let password = ["password=", json.get("database_password").unwrap().as_str().unwrap()].join("");
+    let host = [
+        "host=",
+        json.get("database_host").unwrap().as_str().unwrap(),
+    ]
+    .join("");
+    let user = [
+        "user=",
+        json.get("database_user").unwrap().as_str().unwrap(),
+    ]
+    .join("");
+    let password = [
+        "password=",
+        json.get("database_password").unwrap().as_str().unwrap(),
+    ]
+    .join("");
     let connection = [host, user, password].join(" ");
     let device = json.get("parse_device").unwrap().as_str().unwrap();
     let is_device = json.get("from_device").unwrap().as_bool().unwrap();
 
-
     /* db connection */
-    let (client, connection) =
-        tokio_postgres::connect(&connection, NoTls).await?;
+    let (client, connection) = tokio_postgres::connect(&connection, NoTls).await?;
 
     tokio::spawn(async move {
         if let Err(e) = connection.await {
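Note (sketch, not part of the commit): main() reads parser.json and joins the host/user/password fragments into a libpq-style connection string. The key names below come from the diff; the values are invented for illustration.

    use serde_json::json;

    fn main() {
        // Hypothetical parser.json contents; only the database_* keys are used here.
        let json = json!({
            "database_host": "localhost",
            "database_user": "postgres",
            "database_password": "password"
        });
        let host = ["host=", json.get("database_host").unwrap().as_str().unwrap()].join("");
        let user = ["user=", json.get("database_user").unwrap().as_str().unwrap()].join("");
        let password = ["password=", json.get("database_password").unwrap().as_str().unwrap()].join("");
        // Prints: host=localhost user=postgres password=password
        println!("{}", [host, user, password].join(" "));
    }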
@@ -78,12 +89,18 @@ async fn main() -> Result<(), Error> {
         }
     });
 
-    client.execute("DROP TABLE IF EXISTS json_dump", &[]).await?;
-    client.execute("CREATE TABLE json_dump ( ID serial NOT NULL PRIMARY KEY, packet json NOT NULL)", &[]).await?;
+    client
+        .execute("DROP TABLE IF EXISTS json_dump", &[])
+        .await?;
+    client
+        .execute(
+            "CREATE TABLE json_dump ( ID serial NOT NULL PRIMARY KEY, packet json NOT NULL)",
+            &[],
+        )
+        .await?;
 
     /* device or file input */
     if false == is_device {
-
         let v: Vec<parser::QryData> = parser::parse(&pcap_file, &filter);
         let packets_serialized = serialize_packets(v);
 
@@ -96,18 +113,23 @@ async fn main() -> Result<(), Error> {
             true => {
                 let insert_str = query_string(&packets_serialized.len());
                 let statement_false = client.prepare(&insert_str).await?;
-                client.query_raw(&statement_false, packets_serialized.iter().map(|p| p as &dyn ToSql)).await?;
-            },
+                client
+                    .query_raw(
+                        &statement_false,
+                        packets_serialized.iter().map(|p| p as &dyn ToSql),
+                    )
+                    .await?;
+            }
 
             false => {
-
                 let insert_str = query_string(&insert_max);
                 let statement = client.prepare(&insert_str).await?;
 
-
                 for _i in 0..chunk_count {
                     let (_input, _) = packets_serialized.split_at(insert_max);
-                    client.query_raw(&statement, _input.to_vec().iter().map(|p| p as &dyn ToSql)).await?;
+                    client
+                        .query_raw(&statement, _input.to_vec().iter().map(|p| p as &dyn ToSql))
+                        .await?;
                 }
 
                 println!("Packets, total:{:?}", packets_serialized.len());
@@ -117,32 +139,40 @@ async fn main() -> Result<(), Error> {
                 if remainder > 0 {
                     let rem_str = query_string(&remainder);
                     let statement_remainder = client.prepare(&rem_str).await?;
-                    let (_garbage, _input) =packets_serialized.split_at(packets_serialized.len()-remainder);
-                    client.query_raw(&statement_remainder, _input.to_vec().iter().map(|p| p as &dyn ToSql),).await?;
+                    let (_garbage, _input) =
+                        packets_serialized.split_at(packets_serialized.len() - remainder);
+                    client
+                        .query_raw(
+                            &statement_remainder,
+                            _input.to_vec().iter().map(|p| p as &dyn ToSql),
+                        )
+                        .await?;
                 }
             }
         }
-
     } else {
-
         let insert_str = query_string(&insert_max);
         let statement = client.prepare(&insert_str).await?;
         loop {
             let v: Vec<parser::QryData> = parser::parse_device(&device, &filter, &insert_max);
             let packets_serialized = serialize_packets(v);
-            client.query_raw(&statement, packets_serialized.iter().map(|p| p as &dyn ToSql),).await?;
+            client
+                .query_raw(
+                    &statement,
+                    packets_serialized.iter().map(|p| p as &dyn ToSql),
+                )
+                .await?;
         }
     }
 
-
     Ok(())
-
 }
 
 #[test]
 fn test_insert_json() {
     use serde_json::json;
-    let mut client = Client::connect("host=localhost user=postgres password=password", NoTls).unwrap();
+    let mut client =
+        Client::connect("host=localhost user=postgres password=password", NoTls).unwrap();
     let john = json!({
         "name": "John Doe",
         "age": 43,
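Note (sketch, not part of the commit): the two hunks above insert packets in insert_max-sized batches plus one remainder batch. The diff does not show how chunk_count and remainder are computed; the sketch below assumes the usual len / insert_max and len % insert_max split, and uses chunks() purely to illustrate the batching that the split_at() loop performs.

    fn main() {
        let packets_serialized: Vec<u32> = (0..23).collect(); // stand-in for the serialized packets
        let insert_max = 10;

        // Assumed derivation (not shown in the hunks): full batches and the leftover tail.
        let chunk_count = packets_serialized.len() / insert_max; // 2
        let remainder = packets_serialized.len() % insert_max; // 3
        assert_eq!((chunk_count, remainder), (2, 3));

        // Each chunk would map to one prepared statement with chunk.len() placeholders,
        // as built by query_string().
        for chunk in packets_serialized.chunks(insert_max) {
            println!("batch of {} rows", chunk.len());
        }
    }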
@@ -153,9 +183,12 @@ use serde_json::json;
         "empty": []
     });
 
-    client.execute("DROP TABLE IF EXISTS json_dump", &[]).unwrap();
-    client.execute("CREATE TABLE json_dump ( ID serial NOT NULL PRIMARY KEY, data json NOT NULL)", &[]);
+    client
+        .execute("DROP TABLE IF EXISTS json_dump", &[])
+        .unwrap();
+    client.execute(
+        "CREATE TABLE json_dump ( ID serial NOT NULL PRIMARY KEY, data json NOT NULL)",
+        &[],
+    );
     client.query("INSERT INTO json_dump ( data ) VALUES ($1)", &[&john]);
 }
-
-
@@ -1,19 +1,19 @@
-extern crate byteorder;
 extern crate bitfield;
+extern crate byteorder;
 extern crate eui48;
 mod packet_handler;
-use pcap::Capture;
 use eui48::MacAddress;
+use pcap::Capture;
 //use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
 use std::str;
+use regex::bytes::Regex;
+use regex::bytes::Match;
 
-
 /* protocol ids, LittleEndian */
 const ETH_P_IPV6: usize = 0xDD86;
 const ETH_P_IP: usize = 0x08;
 const TCP: usize = 0x06;
 
-
 fn build_ether() -> packet_handler::EtherHeader {
     packet_handler::EtherHeader {
         ether_dhost: (MacAddress::new([0; 6])).to_hex_string(),
@@ -22,7 +22,6 @@ fn build_ether () -> packet_handler::EtherHeader {
     }
 }
 
-
 // TODO: wrap packet_handler types inside Option<T>
 #[derive(Debug, Clone)]
 pub struct QryData {
@@ -35,6 +34,17 @@ pub struct QryData{
     pub tcp_header: Option<packet_handler::TcpHeader>,
 }
 
+fn flag_carnage( re: &Regex, payload: &[u8]) -> Option<String> {
+    //let _payload: [u8] = payload.copy_from_slice(&payload);
+    for mat in re.find_iter(payload){
+        //println!("{:?}", mat.as_bytes().to_owned().as_string());
+        println!("{:?}", std::str::from_utf8(mat.as_bytes()));
+    }
+
+    Some("test".to_owned())
+}
+
+
 pub fn parse(parse_file: &str, filter_str: &str) -> Vec<QryData> {
     let ether_init = build_ether();
 
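Note (sketch, not part of the commit): flag_carnage() above scans raw packet bytes with regex::bytes and prints any matches to stdout, which is what the commit message refers to. The standalone version below uses the documented [[:punct:]] ASCII-class spelling; the committed pattern writes it as [[::punct::]].

    use regex::bytes::Regex;

    fn main() {
        // Stand-in payload; the real input is packet.data from the pcap capture.
        let payload: &[u8] = b"GET / HTTP/1.1\r\nReferer: https://example.org/a\r\n";
        let re = Regex::new(r"(?:http|https):[[:punct:]]?").unwrap();
        for mat in re.find_iter(payload) {
            // Packet bytes are not guaranteed to be UTF-8, hence the Result in the output.
            println!("{:?}", std::str::from_utf8(mat.as_bytes()));
        }
    }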
@@ -46,37 +56,55 @@ pub fn parse (parse_file: &str, filter_str: &str) -> Vec<QryData> {
         ipv4_header: None::<packet_handler::IpV4Header>,
         ipv6_header: None::<packet_handler::IpV6Header>,
         tcp_header: None::<packet_handler::TcpHeader>,
-
     };
     let mut v: Vec<QryData> = Vec::new();
 
     let mut cap = Capture::from_file(parse_file).unwrap();
     Capture::filter(&mut cap, &filter_str).unwrap();
-
+    let re = Regex::new(r"(?:http|https):[[::punct::]]?").unwrap();
     while let Ok(packet) = cap.next() {
         me.time = (packet.header.ts.tv_usec as f64 / 1000000.0) + packet.header.ts.tv_sec as f64;
         me.data = Some(packet.data.to_vec());
-
+        flag_carnage( &re, packet.data);
         me.ether_header = packet_handler::ethernet_handler(packet.data);
         if ETH_P_IP == me.ether_header.ether_type as usize {
             me.ipv6_header = None::<packet_handler::IpV6Header>;
             me.ipv4_header = Some(packet_handler::ip_handler(packet.data)).unwrap();
             if TCP == me.ipv4_header.unwrap().ip_protocol as usize {
-                me.tcp_header = Some(packet_handler::tcp_handler( me.ipv4_header.unwrap().ip_ihl, packet.data )).unwrap();
-                me.data= packet_handler::payload_handler( me.ipv4_header.unwrap().ip_ihl, me.tcp_header.unwrap().data_offset, packet.data);
+                me.tcp_header = Some(packet_handler::tcp_handler(
+                    me.ipv4_header.unwrap().ip_ihl,
+                    packet.data,
+                ))
+                .unwrap();
+                me.data = packet_handler::payload_handler(
+                    me.ipv4_header.unwrap().ip_ihl,
+                    me.tcp_header.unwrap().data_offset,
+                    packet.data,
+                );
             }
-
         }
         if ETH_P_IPV6 == me.ether_header.ether_type as usize {
             me.ipv4_header = None::<packet_handler::IpV4Header>;
             me.ipv6_header = Some(packet_handler::ipv6_handler(packet.data)).unwrap();
             if TCP == me.ipv6_header.unwrap().next_header as usize {
                 me.tcp_header = Some(packet_handler::tcp_handler(10, packet.data)).unwrap();
-                me.data = packet_handler::payload_handler( 10, me.tcp_header.unwrap().data_offset, packet.data);
+                me.data = packet_handler::payload_handler(
+                    10,
+                    me.tcp_header.unwrap().data_offset,
+                    packet.data,
+                );
             }
         }
 
-        v.push(QryData{id:0, time:me.time, data: me.data, ether_header:me.ether_header, ipv4_header: me.ipv4_header, ipv6_header: me.ipv6_header, tcp_header: me.tcp_header});
+        v.push(QryData {
+            id: 0,
+            time: me.time,
+            data: me.data,
+            ether_header: me.ether_header,
+            ipv4_header: me.ipv4_header,
+            ipv6_header: me.ipv6_header,
+            tcp_header: me.tcp_header,
+        });
     }
     v
 }
@@ -97,6 +125,7 @@ pub fn parse_device (parse_device: &str, filter_str: &str, insert_max: &usize) -
     let mut cap = Capture::from_device(parse_device).unwrap().open().unwrap();
     Capture::filter(&mut cap, &filter_str).unwrap();
 
+    let re = Regex::new(r"(?:http|https):[[::punct::]]").unwrap();
     'parse: while let Ok(packet) = cap.next() {
         me.time = (packet.header.ts.tv_usec as f64 / 1000000.0) + packet.header.ts.tv_sec as f64;
         me.data = Some(packet.data.to_vec());
@@ -105,22 +134,40 @@ pub fn parse_device (parse_device: &str, filter_str: &str, insert_max: &usize) -
             me.ipv6_header = None::<packet_handler::IpV6Header>;
             me.ipv4_header = Some(packet_handler::ip_handler(packet.data)).unwrap();
             if TCP == me.ipv4_header.unwrap().ip_protocol as usize {
-                me.tcp_header = Some(packet_handler::tcp_handler( me.ipv4_header.unwrap().ip_ihl, packet.data )).unwrap();
-                me.data= packet_handler::payload_handler( me.ipv4_header.unwrap().ip_ihl, me.tcp_header.unwrap().data_offset, packet.data);
+                me.tcp_header = Some(packet_handler::tcp_handler(
+                    me.ipv4_header.unwrap().ip_ihl,
+                    packet.data,
+                ))
+                .unwrap();
+                me.data = packet_handler::payload_handler(
+                    me.ipv4_header.unwrap().ip_ihl,
+                    me.tcp_header.unwrap().data_offset,
+                    packet.data,
+                );
             }
-
         }
         if ETH_P_IPV6 == me.ether_header.ether_type as usize {
             me.ipv4_header = None::<packet_handler::IpV4Header>;
             me.ipv6_header = Some(packet_handler::ipv6_handler(packet.data)).unwrap();
             if TCP == me.ipv6_header.unwrap().next_header as usize {
                 me.tcp_header = Some(packet_handler::tcp_handler(10, packet.data)).unwrap();
-                me.data = packet_handler::payload_handler( 10, me.tcp_header.unwrap().data_offset, packet.data);
+                me.data = packet_handler::payload_handler(
+                    10,
+                    me.tcp_header.unwrap().data_offset,
+                    packet.data,
+                );
             }
         }
 
-        v.push(QryData{id:0, time:me.time, data: me.data, ether_header:me.ether_header, ipv4_header: me.ipv4_header, ipv6_header: me.ipv6_header, tcp_header: me.tcp_header});
+        v.push(QryData {
+            id: 0,
+            time: me.time,
+            data: me.data,
+            ether_header: me.ether_header,
+            ipv4_header: me.ipv4_header,
+            ipv6_header: me.ipv6_header,
+            tcp_header: me.tcp_header,
+        });
-
 
         if &v.len() >= insert_max {
             break 'parse;
@@ -128,4 +175,3 @@ pub fn parse_device (parse_device: &str, filter_str: &str, insert_max: &usize) -
     }
     v
 }
-
@@ -1,19 +1,18 @@
-extern crate eui48;
-extern crate byteorder;
 extern crate bitfield;
+extern crate byteorder;
+extern crate eui48;
 extern crate serde;
-use byteorder::{ByteOrder, BigEndian, LittleEndian};
-use eui48::{MacAddress, Eui48};
+use bitfield::bitfield;
+use byteorder::{BigEndian, ByteOrder, LittleEndian};
+use eui48::{Eui48, MacAddress};
+use serde::{Deserialize, Serialize};
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
-use bitfield::{bitfield};
-use serde::{Serialize, Deserialize};
 
 /* ethernet */
 const ETH_ALEN: usize = 6;
 const ETH_TLEN: usize = 2;
 const ETHER_HDRLEN: usize = 14;
 
-
 #[derive(Debug, Clone)]
 pub struct EtherHeader {
     // pub ether_dhost: MacAddress,
@@ -58,7 +57,6 @@ pub struct IpV4Header {
     pub ip_header_checksum: u32,
     pub ip_source_address: IpAddr,
     pub ip_destination_address: IpAddr,
-
 }
 
 bitfield! {
@@ -137,11 +135,9 @@ pub fn ipv6_handler ( packet_data: &[u8] ) -> Option<IpV6Header> {
     _tail.copy_from_slice(raw_hdr);
     //let mut rdr = Cursor::new(_tail);
 
-
-
     Some(IpV6Header {
         version: (&raw_hdr[0] & 0xf0) >> 4,
-        traffic_class: ((&raw_hdr[0] & 0x0f) >> 4)| ((&raw_hdr[1] & 0xf0 <<4)) ,
+        traffic_class: ((&raw_hdr[0] & 0x0f) >> 4) | (&raw_hdr[1] & 0xf0 << 4),
         flow_label: BigEndian::read_u32(&[0x00, (&_tail[1] & 0x0f), _tail[2], _tail[3]]),
         payload_length: BigEndian::read_u16(&[_tail[4], _tail[5]]),
         next_header: _tail[6],
@@ -221,7 +217,6 @@ pub struct TcpHeader {
     pub urgent_pointer: u32,
 }
 
-
 bitfield! {
     struct BitfieldTcpHeader ( MSB0 [u8] );
     u32;
@@ -272,8 +267,6 @@ pub fn tcp_handler ( ip_hlen: u32, packet_data: &[u8] ) -> Option<TcpHeader> {
         checksum: tcp_header.get_checksum(),
         urgent_pointer: tcp_header.get_urgent_pointer(),
     })
-
-
 }
 
 /* arp */
@@ -324,11 +317,11 @@ pub fn arp_handler ( packet_data: &[u8] ) -> Option<ArpHeader> {
     _tail.copy_from_slice(raw_hdr);
 
     let arp_header = BitfieldArpHeader(_tail);
-    let _sha: [u8;6] = [0;6]; let _tha: [u8;6] = [0;6];
+    let _sha: [u8; 6] = [0; 6];
+    let _tha: [u8; 6] = [0; 6];
     _tail[8..13].copy_from_slice(&_sha);
     _tail[18..23].copy_from_slice(&_tha);
 
-
     Some(ArpHeader {
         htype: arp_header.get_htype(),
         ptype: arp_header.get_ptype(),
@@ -368,6 +361,7 @@ pub fn udp_handler ( ip_hlen: u32, packet_data: &[u8] ) -> Option<UdpHeader> {
 
 /* payload */
 pub fn payload_handler(ip_hlen: u32, data_offset: u32, packet_data: &[u8]) -> Option<Vec<u8>> {
-    let (_head, tail)= packet_data.split_at(ETHER_HDRLEN+ip_hlen as usize * 4+data_offset as usize * 4);
+    let (_head, tail) =
+        packet_data.split_at(ETHER_HDRLEN + ip_hlen as usize * 4 + data_offset as usize * 4);
     Some(tail.to_vec())
 }
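Note (worked example, not part of the commit): payload_handler() skips the Ethernet header plus the IPv4 IHL and the TCP data offset, both given in 32-bit words.

    const ETHER_HDRLEN: usize = 14;

    fn payload_offset(ip_hlen: u32, data_offset: u32) -> usize {
        ETHER_HDRLEN + ip_hlen as usize * 4 + data_offset as usize * 4
    }

    fn main() {
        // Minimal IPv4 header (IHL = 5) and minimal TCP header (data offset = 5):
        // 14 + 20 + 20 = 54, so the TCP payload starts at byte 54 of the frame.
        assert_eq!(payload_offset(5, 5), 54);
    }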
@@ -0,0 +1,22 @@
+//extern crate rayon;
+//extern crate regex;
+//use regex::Regex;
+//
+//struct Regex {
+// string: &'static str,
+// regex: ::regex::bytes::Regex,
+//}
+//
+//impl Regex {
+// fn new (string: &'static str) ->Regex {
+// Regex{
+// string: string,
+// regex: ::regex::bytes::Regex::new(string).unwrap(),
+// }
+// }
+//
+//}
+//
+//
+//
+//pub fn parse_regex ( reg_str: &str,
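Note (sketch, not part of the commit): uncommented, the draft wrapper in the new file above would pair a pattern string with its compiled regex::bytes::Regex, roughly as below. The parse_regex signature is left incomplete in the commit and is not reconstructed here.

    extern crate regex;

    struct Regex {
        string: &'static str,
        regex: ::regex::bytes::Regex,
    }

    impl Regex {
        fn new(string: &'static str) -> Regex {
            Regex {
                string,
                regex: ::regex::bytes::Regex::new(string).unwrap(),
            }
        }
    }

    fn main() {
        let re = Regex::new(r"https?:");
        assert!(re.regex.is_match(b"https://example.org/"));
        assert_eq!(re.string, "https?:");
    }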