fully implemented linktype rawip

This commit is contained in:
gurkenhabicht 2020-06-13 03:17:13 +02:00
parent 09c8a7438f
commit 4bfd00ac05
5 changed files with 297 additions and 258 deletions

View File

@ -2,10 +2,10 @@
// But at this point of development it seems like this overhead is unjust.
extern crate serde_json;
use std::fs::File;
use byteorder::{ByteOrder, LittleEndian};
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
//use std::thread::{spawn, JoinHandle};
//use std::sync::mpsc::{channel, Receiver};
@ -42,14 +42,17 @@ impl FileInfo {
}
}
pub fn from_json_file() -> Option<Config> {
let config_file = File::open("parser.json").expect("file should open read only");
let json: serde_json::Value = serde_json::from_reader(config_file).unwrap();
Some(Config {
filter: json.get("filter").unwrap().as_str().unwrap().to_owned(),
regex_filter: json.get("regex_filter").unwrap().as_str().unwrap().to_owned(),
regex_filter: json
.get("regex_filter")
.unwrap()
.as_str()
.unwrap()
.to_owned(),
insert_max: json.get("insert_max").unwrap().as_u64().unwrap() as usize,
pcap_file: json.get("pcap_file").unwrap().as_str().unwrap().to_owned(),
connection: format!(
@ -75,9 +78,9 @@ File signature and encapsulation type from file
*/
// Further: file.len() is included in metadata() but only shows up if called explicitly. Maybe this is not needed at all in the end.
// This would be needed for comparability over time; print the metadata and you will see.
fn bytes_from_file( entry: std::path::PathBuf ) -> Result<([u8;4], u16, u16), std::io::Error> {
let mut magic_number: [u8;4] = [0;4];
let mut buffer: [u8;32] = [0;32];
fn bytes_from_file(entry: std::path::PathBuf) -> Result<([u8; 4], u16, u16), std::io::Error> {
let mut magic_number: [u8; 4] = [0; 4];
let mut buffer: [u8; 32] = [0; 32];
let mut _file = File::open(entry.to_owned())?;
_file.read_exact(&mut buffer)?;
magic_number.clone_from_slice(&buffer[0..4]);
@ -87,7 +90,9 @@ fn bytes_from_file( entry: std::path::PathBuf ) -> Result<([u8;4], u16, u16), st
Ok((magic_number, enc_pcap, enc_pcapng))
}
pub fn map_pcap_dir ( pcap_dir: &str ) -> Option<std::collections::HashMap<std::path::PathBuf, FileInfo>> {
pub fn map_pcap_dir(
pcap_dir: &str,
) -> Option<std::collections::HashMap<std::path::PathBuf, FileInfo>> {
// Well, this conditional intermezzo seems to be best practice. See std::fs doc
let mut pcap_map = HashMap::new();
if let Ok(entries) = fs::read_dir(pcap_dir) {
@ -95,10 +100,14 @@ pub fn map_pcap_dir ( pcap_dir: &str ) -> Option<std::collections::HashMap<std::
if let Ok(entry) = entry {
if let Ok(_file_type) = entry.file_type() {
if entry.metadata().unwrap().is_file() {
let (magic_number, enc_pcap, enc_pcapng) = bytes_from_file(entry.path()).unwrap();
let (magic_number, enc_pcap, enc_pcapng) =
bytes_from_file(entry.path()).unwrap();
match magic_number {
PCAPNG_SIGNATURE => pcap_map.insert(entry.path(), FileInfo::new(entry.path(), enc_pcapng) ),
PCAP_SIGNATURE | PCAP_SIGNATURE_BE => pcap_map.insert(entry.path(), FileInfo::new(entry.path(), enc_pcap)), // TEST: Endianness for SIGNATURE_BE may be incorrect after introducing fn bytes_from_file()
PCAPNG_SIGNATURE => pcap_map
.insert(entry.path(), FileInfo::new(entry.path(), enc_pcapng)),
PCAP_SIGNATURE | PCAP_SIGNATURE_BE => {
pcap_map.insert(entry.path(), FileInfo::new(entry.path(), enc_pcap))
} // TEST: Endianness for SIGNATURE_BE may be incorrect after introducing fn bytes_from_file()
_ => None,
};
// println!("{:?}", &entry.metadata().unwrap().modified());
@ -111,5 +120,3 @@ pub fn map_pcap_dir ( pcap_dir: &str ) -> Option<std::collections::HashMap<std::
}
Some(pcap_map)
}
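For reference, a minimal standalone sketch of the signature check that bytes_from_file() and map_pcap_dir() perform. The byte values below are the standard pcap and pcapng magic numbers and are assumptions in this sketch; the crate's actual PCAP_SIGNATURE / PCAP_SIGNATURE_BE / PCAPNG_SIGNATURE constants are defined elsewhere in this file.

use std::fs::File;
use std::io::Read;

// Assumed signature values (standard formats), not the crate's constants.
const PCAP_LE: [u8; 4] = [0xd4, 0xc3, 0xb2, 0xa1]; // classic pcap, little-endian
const PCAP_BE: [u8; 4] = [0xa1, 0xb2, 0xc3, 0xd4]; // classic pcap, big-endian
const PCAPNG: [u8; 4] = [0x0a, 0x0d, 0x0d, 0x0a]; // pcapng Section Header Block type

fn classify(path: &std::path::Path) -> std::io::Result<&'static str> {
    // Read only the first four bytes and match them against known signatures.
    let mut magic = [0u8; 4];
    File::open(path)?.read_exact(&mut magic)?;
    Ok(match magic {
        PCAP_LE | PCAP_BE => "pcap",
        PCAPNG => "pcapng",
        _ => "unknown",
    })
}

fn main() -> std::io::Result<()> {
    println!("{}", classify(std::path::Path::new("dump.pcap"))?);
    Ok(())
}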

View File

@ -8,9 +8,7 @@ mod serializer;
use tokio_postgres::types::ToSql;
use tokio_postgres::{Error, NoTls};
fn query_string(insert_max: &usize) -> String {
let mut insert_template = String::with_capacity(insert_max * 8 + 43);
insert_template.push_str("INSERT INTO json_dump (packet) Values ");
@ -30,7 +28,7 @@ async fn main() -> Result<(), Error> {
// TODO: hash file metadata, so its state is comparable over time and can be written to a db table (e.g. after a system crash)
// This db table should include UUIDs so it can be joined effectively
let pcap_map = configure::map_pcap_dir( &config.pcap_dir ).unwrap();
let pcap_map = configure::map_pcap_dir(&config.pcap_dir).unwrap();
println!("{:?}", pcap_map.iter());
@ -56,10 +54,12 @@ async fn main() -> Result<(), Error> {
/* device or file input */
match config.is_device {
false => for (_pcap_file, _pcap_info) in pcap_map.iter() {
println!("{:?}",&_pcap_file);
false => {
for (_pcap_file, _pcap_info) in pcap_map.iter() {
println!("{:?}", &_pcap_file);
// TODO: Tune vector capacity according to the mean & std dev of packet size
let v: Vec<parser::QryData> = parser::parse(&_pcap_file, &config.filter, &config.regex_filter);
let v: Vec<parser::QryData> =
parser::parse(&_pcap_file, &config.filter, &config.regex_filter);
//let mut v = Vec::<parser::QryData>::with_capacity(35536);
//v.extend(parser::parse(&_pcap_file, &config.filter));
@ -90,7 +90,10 @@ async fn main() -> Result<(), Error> {
for _i in 0..chunk_count {
let (_input, _) = packets_serialized.split_at(config.insert_max);
client
.query_raw(&statement, _input.to_vec().iter().map(|p| p as &dyn ToSql))
.query_raw(
&statement,
_input.to_vec().iter().map(|p| p as &dyn ToSql),
)
.await?;
}
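The chunk loop above presumably boils down to simple integer arithmetic: chunk_count full batches of insert_max rows each, with the leftover rows handled by a separately sized statement. A hedged sketch of that split; the names here are illustrative, not the crate's:

fn chunk_plan(total: usize, insert_max: usize) -> (usize, usize) {
    // (number of full insert_max-sized chunks, remaining rows)
    (total / insert_max, total % insert_max)
}

fn main() {
    assert_eq!(chunk_plan(35_536, 10_000), (3, 5_536));
    assert_eq!(chunk_plan(9_999, 10_000), (0, 9_999));
    println!("ok");
}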
@ -108,12 +111,18 @@ async fn main() -> Result<(), Error> {
}
}
}
},
}
}
true => {
let insert_str = query_string(&config.insert_max);
let statement = client.prepare(&insert_str).await?;
loop {
let v: Vec<parser::QryData> = parser::parse_device(&config.device, &config.filter, &config.insert_max, &config.regex_filter);
let v: Vec<parser::QryData> = parser::parse_device(
&config.device,
&config.filter,
&config.insert_max,
&config.regex_filter,
);
let packets_serialized = serializer::serialize_packets(v);
client
.query_raw(
@ -122,7 +131,7 @@ async fn main() -> Result<(), Error> {
)
.await?;
}
},
}
}
Ok(())
}
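For context, a hedged sketch of the placeholder string query_string() builds for the prepared statement: one positional parameter per packet, so a single statement can insert insert_max JSON values at once. Only the first two lines of the real function are visible above, so the loop body below is an assumption about the rest.

fn query_string(insert_max: usize) -> String {
    // The real function pre-allocates with String::with_capacity(insert_max * 8 + 43).
    let mut insert_template = String::from("INSERT INTO json_dump (packet) Values ");
    for i in 1..=insert_max {
        // Assumed placeholder layout: ($1), ($2), ..., ($insert_max)
        insert_template.push_str(&format!("(${})", i));
        if i < insert_max {
            insert_template.push_str(", ");
        }
    }
    insert_template
}

fn main() {
    // e.g. "INSERT INTO json_dump (packet) Values ($1), ($2), ($3)"
    println!("{}", query_string(3));
}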

View File

@ -4,12 +4,11 @@ extern crate eui48;
mod packet_handler;
use pcap::{Capture, Linktype};
use regex::bytes::Regex;
use std::str;
use std::convert::TryInto;
use std::str;
//use std::thread::{spawn, JoinHandle};
//use std::sync::mpsc::{channel, Receiver};
/* protocol ids, LittleEndian */
const ETH_P_IPV6: usize = 0xDD86;
const ETH_P_IP: usize = 0x08;
@ -17,6 +16,7 @@ const TCP: usize = 0x06;
const UDP: usize = 0x11;
const ETH_P_ARP: usize = 0x0608;
const ETH_P_RARP: usize = 0x3580;
const ETHER_HDRLEN: usize = 14;
/*
QryData could be written in the sense of QryData{ ... frame: .., packet: .., segment:.. }
@ -42,8 +42,20 @@ pub struct QryData {
pub reg_res: Option<String>,
}
fn init_qrydata( ) -> Result<QryData, core::fmt::Error> {
Ok(QryData {
#[allow(dead_code)]
enum EncapsulationType {
// pcap::Linktype::get_name() is unsafe.
EN10MB = 1, // See: https://docs.rs/pcap/0.7.0/src/pcap/lib.rs.html#247-261
RAW = 101, // Would this be an issue?
}
impl QryData {
// This is not cool!
// Implementing it object-oriented is slower by 3-10%. Variance is all over the place. It's awful but modular!
// Guess I'll do a rollback and try a different design.
fn new() -> QryData {
QryData {
id: 0,
time: 0.0,
data: None,
@ -54,103 +66,96 @@ fn init_qrydata( ) -> Result<QryData, core::fmt::Error> {
udp_header: None::<packet_handler::UdpHeader>,
arp_header: None::<packet_handler::ArpHeader>,
reg_res: None::<String>,
})
}
}
}
#[allow(dead_code)]
enum EncapsulationType { // pcap::Linktype::get_name() is unsafe.
EN10MB = 1, // See: https://docs.rs/pcap/0.7.0/src/pcap/lib.rs.html#247-261
RAW = 101, // Would this be an issue?
}
impl QryData {
// This is not cool!
// This will get modularized into subfunctions
fn encap_en10mb( packet_data: &[u8] ) -> Option<QryData> {
let mut pkg: QryData = init_qrydata().unwrap();
pkg.ether_header = Some(packet_handler::ethernet_handler(packet_data)).unwrap();
match pkg.ether_header.unwrap().ether_type as usize {
fn encap_en10mb(&mut self, packet_data: &[u8]) -> Result<(), core::fmt::Error> {
//let mut pkg: QryData = new().unwrap();
self.ether_header = Some(packet_handler::ethernet_handler(packet_data)).unwrap();
match self.ether_header.unwrap().ether_type as usize {
ETH_P_IP => {
pkg.ipv4_header = Some(packet_handler::ip_handler(packet_data)).unwrap();
let protocol_type = pkg.ipv4_header.unwrap().ip_protocol.clone() as usize;
let l3_header_length = pkg.ipv4_header.unwrap().ip_ihl;
pkg.transport_layer(packet_data, protocol_type, l3_header_length).unwrap();
self.ipv4_header = Some(packet_handler::ip_handler(packet_data, ETHER_HDRLEN)).unwrap();
let protocol_type = self.ipv4_header.unwrap().ip_protocol as usize;
let l3_header_length = self.ipv4_header.unwrap().ip_ihl;
self.transport_layer(packet_data, protocol_type, l3_header_length, ETHER_HDRLEN)
.unwrap();
}
ETH_P_IPV6 => {
pkg.ipv6_header = Some(packet_handler::ipv6_handler(packet_data)).unwrap();
let protocol_type = pkg.ipv6_header.unwrap().next_header.clone() as usize;
pkg.transport_layer(packet_data, protocol_type, 10).unwrap();
self.ipv6_header = Some(packet_handler::ipv6_handler(packet_data, ETHER_HDRLEN)).unwrap();
let protocol_type = self.ipv6_header.unwrap().next_header as usize;
self.transport_layer(packet_data, protocol_type, 10, ETHER_HDRLEN)
.unwrap();
}
ETH_P_ARP | ETH_P_RARP => {
pkg.arp_header = Some(packet_handler::arp_handler(packet_data)).unwrap();
self.arp_header = Some(packet_handler::arp_handler(packet_data, ETHER_HDRLEN)).unwrap();
}
_ => println!("Network protocol not implemented"),
}
Some(pkg)
Ok(())
}
fn encap_raw ( packet_data: &[u8] ) -> Option<QryData> {
let mut pkg: QryData = init_qrydata().unwrap();
fn encap_raw(&mut self, packet_data: &[u8]) -> Result<(), core::fmt::Error> {
// let mut pkg: QryData = new().unwrap();
let ip_version: usize = ((packet_data[0] & 0xf0) >> 4).try_into().unwrap();
//println!("{:?}", &ip_version);
match ip_version {
4 => {
pkg.ipv4_header = Some(packet_handler::ip_handler(packet_data)).unwrap();
let protocol_type = pkg.ipv4_header.unwrap().ip_protocol.clone() as usize;
let l3_header_length = pkg.ipv4_header.unwrap().ip_ihl;
pkg.transport_layer(packet_data, protocol_type, l3_header_length).unwrap();
//println!("v4");
self.ipv4_header = Some(packet_handler::ip_handler(packet_data, 0)).unwrap();
let protocol_type = self.ipv4_header.unwrap().ip_protocol as usize;
let l3_header_length = self.ipv4_header.unwrap().ip_ihl;
self.transport_layer(packet_data, protocol_type, l3_header_length, 0)
.unwrap();
}
6 => {
pkg.ipv6_header = Some(packet_handler::ipv6_handler(packet_data)).unwrap();
let protocol_type = pkg.ipv6_header.unwrap().next_header.clone() as usize;
pkg.transport_layer(packet_data, protocol_type, 10).unwrap();
//println!("v6");
self.ipv6_header = Some(packet_handler::ipv6_handler(packet_data, 0)).unwrap();
let protocol_type = self.ipv6_header.unwrap().next_header as usize;
self.transport_layer(packet_data, protocol_type, 10, 0)
.unwrap();
}
_ => println!("Network Protocol not implemented")
_ => println!("Network Protocol not implemented"),
}
Some(pkg)
Ok(())
}
// TODO: impl correct Err type and use in Result
fn transport_layer (&mut self, packet_data: &[u8], protocol_type: usize, l3_header_length: u32) -> Result<(), core::fmt::Error> {
fn transport_layer(
&mut self,
packet_data: &[u8],
protocol_type: usize,
l3_header_length: u32,
ether_hdrlen: usize,
) -> Result<(), core::fmt::Error> {
match protocol_type {
TCP => {
self.tcp_header = Some(packet_handler::tcp_handler(
l3_header_length,
packet_data,
))
.unwrap();
self.tcp_header =
Some(packet_handler::tcp_handler(l3_header_length, packet_data, ether_hdrlen)).unwrap();
self.data = Some(packet_handler::payload_handler(
l3_header_length,
self.tcp_header.unwrap().data_offset,
packet_data,
)).unwrap();
}
UDP => {
self.udp_header = Some(packet_handler::udp_handler(
l3_header_length,
packet_data,
ether_hdrlen
))
.unwrap();
}
UDP => {
self.udp_header =
Some(packet_handler::udp_handler(l3_header_length, packet_data, ether_hdrlen)).unwrap();
self.data = Some(packet_handler::payload_handler(
l3_header_length,
7,
packet_data,
)).unwrap();
ether_hdrlen
))
.unwrap();
}
_ => println!("Transport layer protocol not implemented"),
}
Ok(())
}
}
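A small standalone illustration of what encap_raw() relies on: in a raw-IP capture there is no Ethernet header, so the first byte already belongs to the IP header, its high nibble selects IPv4 or IPv6, and the handlers are then called with an ether_hdrlen of 0 instead of ETHER_HDRLEN.

fn ip_version(packet_data: &[u8]) -> u8 {
    // High nibble of the first IP byte: 4 for IPv4, 6 for IPv6.
    (packet_data[0] & 0xf0) >> 4
}

fn main() {
    let v4_raw = [0x45u8, 0x00, 0x00, 0x28]; // 0x45: version 4, IHL 5
    let v6_raw = [0x60u8, 0x00, 0x00, 0x00]; // 0x60: version 6
    assert_eq!(ip_version(&v4_raw), 4);
    assert_eq!(ip_version(&v6_raw), 6);
    println!("ok");
}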
/* Regex parse _complete_ package */
fn flag_carnage(re: &Regex, payload: &[u8]) -> Option<String> {
let mut flags: String = String::new();
@ -168,7 +173,7 @@ fn flag_carnage(re: &Regex, payload: &[u8]) -> Option<String> {
}
pub fn parse(parse_file: &std::path::Path, filter_str: &str, regex_filter: &str) -> Vec<QryData> {
let mut me: QryData = init_qrydata().unwrap();
//let mut me: QryData = QryData::new();
let mut v: Vec<QryData> = Vec::new();
let mut cap = Capture::from_file(parse_file).unwrap();
@ -177,37 +182,52 @@ pub fn parse(parse_file: &std::path::Path, filter_str: &str, regex_filter: &str)
println!("{:?}", &linktype);
let re = Regex::new(regex_filter).unwrap();
while let Ok(packet) = cap.next() {
let mut me = QryData::new();
match linktype {
// Syntax is clunky, but no num_derive + num_traits dependencies.
Linktype(1) => me = QryData::encap_en10mb(packet.data).unwrap(), // EN10MB
Linktype(101) => me = QryData::encap_raw(packet.data).unwrap(), // RAW
Linktype(1) => me.encap_en10mb(packet.data).unwrap(), //me = QryData::encap_en10mb(packet.data).unwrap(), // EN10MB
Linktype(12) => me.encap_raw(packet.data).unwrap(), //me = QryData::encap_raw(packet.data).unwrap(), // RAW
_ => (),
};
me.time = (packet.header.ts.tv_usec as f64 / 1000000.0) + packet.header.ts.tv_sec as f64;
//me.data = Some(packet.data.to_vec());
me.reg_res = flag_carnage(&re, packet.data); // Regex overhead is between 4-9% --single threaded-- on complete packet [u8] data
v.push(me.clone());
//v.push(me.clone());
v.push(QryData {
id: 0,
time: me.time,
data: me.data,
ether_header: me.ether_header,
ipv4_header: me.ipv4_header,
ipv6_header: me.ipv6_header,
tcp_header: me.tcp_header,
udp_header: me.udp_header,
arp_header: me.arp_header,
reg_res: me.reg_res,
});
}
v
}
/* This could need some love */
pub fn parse_device(parse_device: &str, filter_str: &str, insert_max: &usize, regex_filter: &str) -> Vec<QryData> {
let mut me: QryData = init_qrydata ( ).unwrap();
pub fn parse_device(
parse_device: &str,
filter_str: &str,
insert_max: &usize,
regex_filter: &str,
) -> Vec<QryData> {
//let mut me: QryData = QryData::new ( );
let mut v: Vec<QryData> = Vec::new();
let mut cap = Capture::from_device(parse_device).unwrap().open().unwrap();
Capture::filter(&mut cap, &filter_str).unwrap();
let linktype = cap.get_datalink();
let re = Regex::new(regex_filter).unwrap();
'parse: while let Ok(packet) = cap.next() {
let mut me = QryData::new();
match linktype {
Linktype(1) => me = QryData::encap_en10mb(packet.data).unwrap(),
Linktype(101) => me = QryData::encap_raw(packet.data).unwrap(),
Linktype(1) => me.encap_en10mb(packet.data).unwrap(), //me = QryData::encap_en10mb(packet.data).unwrap(),
Linktype(101) => me.encap_raw(packet.data).unwrap(), //me = QryData::encap_raw(packet.data).unwrap(),
_ => (),
}
};
me.time = (packet.header.ts.tv_usec as f64 / 1000000.0) + packet.header.ts.tv_sec as f64;
me.reg_res = flag_carnage(&re, packet.data);
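Sketch of the datalink dispatch that parse() and parse_device() perform, using plain integers instead of pcap::Linktype so the example stays self-contained; the values mirror the EncapsulationType enum above (EN10MB = 1, RAW = 101).

const ETHER_HDRLEN: usize = 14;

fn l3_offset(linktype: i32) -> Option<usize> {
    match linktype {
        1 => Some(ETHER_HDRLEN), // EN10MB: Ethernet framing, L3 starts at byte 14
        101 => Some(0),          // RAW: the packet starts with the IP header itself
        _ => None,               // other linktypes are skipped, like the `_ => ()` arm above
    }
}

fn main() {
    assert_eq!(l3_offset(1), Some(14));
    assert_eq!(l3_offset(101), Some(0));
    assert_eq!(l3_offset(113), None); // e.g. LINKTYPE_LINUX_SLL is not handled
    println!("ok");
}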

View File

@ -1,7 +1,7 @@
extern crate bitfield;
extern crate byteorder;
extern crate eui48;
extern crate serde;
pub extern crate serde;
use bitfield::bitfield;
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use eui48::{Eui48, MacAddress};
@ -13,10 +13,9 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
/* ethernet */
const ETH_ALEN: usize = 6;
const ETH_TLEN: usize = 2;
const ETHER_HDRLEN: usize = 14;
//const ETHER_HDRLEN: usize = 14;
#[derive(Debug, Clone, Copy,Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct EtherHeader {
pub ether_dhost: MacAddress,
pub ether_shost: MacAddress,
@ -89,9 +88,9 @@ impl<T: AsRef<[u8]> + AsMut<[u8]>> BitfieldIpV4Header<T> {
}
}
pub fn ip_handler(packet_data: &[u8]) -> Option<IpV4Header> {
pub fn ip_handler(packet_data: &[u8], ether_hdrlen: usize) -> Option<IpV4Header> {
let mut raw_hdr: [u8; 20] = [0; 20];
raw_hdr.copy_from_slice(&packet_data[ETHER_HDRLEN..ETHER_HDRLEN + 20]);
raw_hdr.copy_from_slice(&packet_data[ether_hdrlen..ether_hdrlen + 20]);
let ip_header = BitfieldIpV4Header(raw_hdr);
Some(IpV4Header {
@ -125,9 +124,9 @@ pub struct IpV6Header {
pub destination_address: IpAddr,
}
pub fn ipv6_handler(packet_data: &[u8]) -> Option<IpV6Header> {
pub fn ipv6_handler(packet_data: &[u8], ether_hdrlen: usize) -> Option<IpV6Header> {
let mut raw_hdr: [u8; 40] = [0; 40];
raw_hdr.copy_from_slice(&packet_data[ETHER_HDRLEN..ETHER_HDRLEN + 40]);
raw_hdr.copy_from_slice(&packet_data[ether_hdrlen..ether_hdrlen + 40]);
Some(IpV6Header {
version: (&raw_hdr[0] & 0xf0) >> 4,
@ -187,10 +186,10 @@ bitfield! {
get_urgent_pointer, _: 159,144;
}
pub fn tcp_handler(ip_hlen: u32, packet_data: &[u8]) -> Option<TcpHeader> {
pub fn tcp_handler(ip_hlen: u32, packet_data: &[u8], ether_hdrlen: usize) -> Option<TcpHeader> {
let mut raw_hdr: [u8; 20] = [0; 20];
raw_hdr.copy_from_slice(
&packet_data[ETHER_HDRLEN + ip_hlen as usize * 4..ETHER_HDRLEN + ip_hlen as usize * 4 + 20],
&packet_data[ether_hdrlen + ip_hlen as usize * 4..ether_hdrlen + ip_hlen as usize * 4 + 20],
);
let tcp_header = BitfieldTcpHeader(raw_hdr);
@ -230,11 +229,9 @@ pub struct ArpHeader {
pub tpa: IpAddr,
}
pub fn arp_handler(packet_data: &[u8]) -> Option<ArpHeader> {
pub fn arp_handler(packet_data: &[u8], ether_hdrlen: usize) -> Option<ArpHeader> {
let mut raw_hdr: [u8; 28] = [0; 28];
raw_hdr.copy_from_slice(
&packet_data[ETHER_HDRLEN .. ETHER_HDRLEN + 28]
);
raw_hdr.copy_from_slice(&packet_data[ether_hdrlen..ether_hdrlen + 28]);
let mut _sha: [u8; 6] = [0; 6];
let mut _tha: [u8; 6] = [0; 6];
@ -242,7 +239,7 @@ pub fn arp_handler(packet_data: &[u8]) -> Option<ArpHeader> {
_sha.copy_from_slice(&raw_hdr[8..14]);
_tha.copy_from_slice(&raw_hdr[18..24]);
Some(ArpHeader{
Some(ArpHeader {
htype: BigEndian::read_u16(&raw_hdr[0..2]),
ptype: BigEndian::read_u16(&raw_hdr[2..4]),
hlen: raw_hdr[4],
@ -250,7 +247,7 @@ pub fn arp_handler(packet_data: &[u8]) -> Option<ArpHeader> {
oper: BigEndian::read_u16(&raw_hdr[6..8]),
sha: MacAddress::new(_sha as Eui48),
spa: IpAddr::V4(Ipv4Addr::from(BigEndian::read_u32(&raw_hdr[14..18]))),
tha: MacAddress::new( _tha as Eui48 ),
tha: MacAddress::new(_tha as Eui48),
tpa: IpAddr::V4(Ipv4Addr::from(BigEndian::read_u32(&raw_hdr[24..28]))),
})
}
@ -264,10 +261,10 @@ pub struct UdpHeader {
pub checksum: u16,
}
pub fn udp_handler(ip_hlen: u32, packet_data: &[u8]) -> Option<UdpHeader> {
pub fn udp_handler(ip_hlen: u32, packet_data: &[u8], ether_hdrlen: usize) -> Option<UdpHeader> {
let mut raw_hdr: [u8; 8] = [0; 8];
raw_hdr.copy_from_slice(
&packet_data[ETHER_HDRLEN + ip_hlen as usize * 4..ETHER_HDRLEN + ip_hlen as usize * 4 + 8],
&packet_data[ether_hdrlen + ip_hlen as usize * 4..ether_hdrlen + ip_hlen as usize * 4 + 8],
);
Some(UdpHeader {
@ -279,8 +276,8 @@ pub fn udp_handler(ip_hlen: u32, packet_data: &[u8]) -> Option<UdpHeader> {
}
/* payload */
pub fn payload_handler(ip_hlen: u32, data_offset: u32, packet_data: &[u8]) -> Option<Vec<u8>> {
pub fn payload_handler(ip_hlen: u32, data_offset: u32, packet_data: &[u8], ether_hdrlen: usize) -> Option<Vec<u8>> {
let (_head, tail) =
packet_data.split_at(ETHER_HDRLEN + ip_hlen as usize * 4 + data_offset as usize * 4);
packet_data.split_at(ether_hdrlen + ip_hlen as usize * 4 + data_offset as usize * 4);
Some(tail.to_vec())
}
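The common thread of the changes above is that every handler now takes ether_hdrlen as a parameter instead of the old ETHER_HDRLEN constant, so the same code serves Ethernet (offset 14) and raw-IP (offset 0) captures. A minimal sketch of the shared slice arithmetic; this helper is hypothetical, not part of packet_handler.

fn tcp_header_range(ether_hdrlen: usize, ip_ihl: u32) -> std::ops::Range<usize> {
    // Link-layer header length plus the IPv4 IHL in 32-bit words gives the
    // start of the transport header; tcp_handler copies a fixed 20-byte slice.
    let start = ether_hdrlen + ip_ihl as usize * 4;
    start..start + 20
}

fn main() {
    assert_eq!(tcp_header_range(14, 5), 34..54); // EN10MB frame, minimal IPv4 header
    assert_eq!(tcp_header_range(0, 5), 20..40);  // raw IP, same IPv4 header
    println!("ok");
}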

View File

@ -27,29 +27,35 @@ impl Serialize for parser::QryData {
}
pub fn serialize_packets(v: Vec<parser::QryData>) -> Vec<serde_json::Value> {
/* rayon parallelized */
// TODO: Benchmark. As far as I tested, this reaps no benefit.
let packets_serialized = v.par_iter().map(|x| serde_json::to_value(x).unwrap()).collect();
let packets_serialized = v
.par_iter()
.map(|x| serde_json::to_value(x).unwrap())
.collect();
//let packets_serialized: Vec<serde_json::Value> = v.par_iter().map(|x| json!(x)).collect();
packets_serialized
}
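For reference, a hedged sketch of the rayon pattern serialize_packets() uses: par_iter() maps each record to a serde_json::Value in parallel and collect() gathers the results. The struct here is a toy stand-in, not the crate's QryData.

use rayon::prelude::*;
use serde::Serialize;

#[derive(Serialize)]
struct Row {
    id: u64,
    time: f64,
}

fn serialize_rows(rows: &[Row]) -> Vec<serde_json::Value> {
    // Each element is serialized independently, so the map parallelizes cleanly.
    rows.par_iter()
        .map(|r| serde_json::to_value(r).unwrap())
        .collect()
}

fn main() {
    let rows = vec![Row { id: 1, time: 0.5 }, Row { id: 2, time: 1.5 }];
    println!("{:?}", serialize_rows(&rows));
}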
// This is way faster than serialize_packets(), but I cannot figure out how to properly select parts of the resulting JSON structure in an SQL query
#[allow(dead_code)]
pub fn serialize_packets_as_string(v: Vec<parser::QryData>) -> Vec<serde_json::Value> {
let mut packets_serialized: Vec<serde_json::Value> = Vec::with_capacity(v.len() * 2);
for packet in v.iter() {
packets_serialized.push(serde_json::Value::String(serde_json::to_string(&packet).unwrap()));
packets_serialized.push(serde_json::Value::String(
serde_json::to_string(&packet).unwrap(),
));
}
packets_serialized
}
#[allow(dead_code)]
fn smallest_prime_divisor(remainder: usize ) -> usize {
let smallest_divisor: usize = (2..(remainder/2)).into_par_iter().find_first(|x| remainder % x == 0).unwrap();
fn smallest_prime_divisor(remainder: usize) -> usize {
let smallest_divisor: usize = (2..(remainder / 2))
.into_par_iter()
.find_first(|x| remainder % x == 0)
.unwrap();
smallest_divisor
}
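A sequential sketch of the search smallest_prime_divisor() performs; the function above parallelizes it with rayon's into_par_iter().find_first(). Returning Option makes the prime-input case explicit, where the unwrap() above would panic because no divisor exists in the range.

fn smallest_divisor(remainder: usize) -> Option<usize> {
    // First value in 2..remainder/2 that divides remainder evenly (necessarily prime).
    (2..remainder / 2).find(|x| remainder % x == 0)
}

fn main() {
    assert_eq!(smallest_divisor(35_536), Some(2));
    assert_eq!(smallest_divisor(35), Some(5));
    assert_eq!(smallest_divisor(13), None); // prime input: the unwrap() above would panic here
    println!("ok");
}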