From eab3f008c9fb7b4ea7a7d7b938791fb99e627932 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rom=C3=A9o=20Phang?=
Date: Thu, 16 May 2024 20:27:14 +0200
Subject: [PATCH] feat: add SSL_read_ex/SSL_write_ex support

---
 src/ssl_sniffer/README.md                 |  3 +-
 src/ssl_sniffer/ebpf/loader.c             |  8 ++-
 src/ssl_sniffer/ebpf/main.bpf.c           | 81 +++++++++++++++++------
 src/ssl_sniffer/include/ebpf/definition.h |  2 +-
 src/ssl_sniffer/include/ebpf/struct_bpf.h |  4 +-
 5 files changed, 70 insertions(+), 28 deletions(-)

diff --git a/src/ssl_sniffer/README.md b/src/ssl_sniffer/README.md
index c505d22..8e28be5 100644
--- a/src/ssl_sniffer/README.md
+++ b/src/ssl_sniffer/README.md
@@ -15,6 +15,7 @@ It supports out-of-the-box the following applications:
 - `curl`
 - `wget`
 - `nginx` (not all versions, some have inbuilt SSL/TLS support)
+- `php`, `python` (tested so far) & many more...
 
 ## Usage
 
@@ -72,4 +73,4 @@ A `bpftrace_demo.sh` script is provided to try to sniff any encrypted SSL/TLS tr
 As we don't really know how big the SSL/TLS traffic is, going for size defined structures is not a good idea as we may either waste space for small packets or truncate big packets. To avoid this, `ssl_sniffer` uses a ring buffer and transfers the data from the kernel to the user space through chunks of constant size. These chunks are then reassembled in the user space to get the full packet (TODO).
 
 > [!NOTE]
-> This is an implementation of [Google's Chunking Trick](https://lpc.events/event/11/contributions/938/attachments/909/1788/BPF_Security_Google.pdf) at the [Linux Plumbers Conference 2021](https://lpc.events/2021/).
\ No newline at end of file
+> This contains an implementation of [Google's Chunking Trick](https://lpc.events/event/11/contributions/938/attachments/909/1788/BPF_Security_Google.pdf), presented at the [Linux Plumbers Conference 2021](https://lpc.events/2021/).
\ No newline at end of file
diff --git a/src/ssl_sniffer/ebpf/loader.c b/src/ssl_sniffer/ebpf/loader.c
index a19a915..dca17da 100644
--- a/src/ssl_sniffer/ebpf/loader.c
+++ b/src/ssl_sniffer/ebpf/loader.c
@@ -102,6 +102,10 @@ int ssl_attach_openssl(char *program_path)
     __ATTACH_UPROBE(program_path, "SSL_write", probe_ssl_write_return, true);
     __ATTACH_UPROBE(program_path, "SSL_read", probe_ssl_rw_enter, false);
     __ATTACH_UPROBE(program_path, "SSL_read", probe_ssl_read_return, true);
+    __ATTACH_UPROBE(program_path, "SSL_read_ex", probe_ex_ssl_rw_enter, false);
+    __ATTACH_UPROBE(program_path, "SSL_read_ex", probe_ssl_read_return, true);
+    __ATTACH_UPROBE(program_path, "SSL_write_ex", probe_ex_ssl_rw_enter, false);
+    __ATTACH_UPROBE(program_path, "SSL_write_ex", probe_ssl_write_return, true);
 
     return 0;
 }
@@ -139,7 +143,7 @@ int ssl_attach_nss(char *program_path)
     return 0;
 }
 
-static void log_event(struct data_event *event)
+static void log_event(struct chunk_event *event)
 {
     char *op = event->op == 1 ? 
"SSL_OP_READ" : "SSL_OP_WRITE"; fprintf(stdout, "--------------------------------------------------\n"); @@ -153,7 +157,7 @@ static void log_event(struct data_event *event) static int handle_event(void *ctx, void *data, size_t len) { - struct data_event *event = (struct data_event *)data; + struct chunk_event *event = (struct chunk_event *)data; log_event(event); return 0; } diff --git a/src/ssl_sniffer/ebpf/main.bpf.c b/src/ssl_sniffer/ebpf/main.bpf.c index 4af5877..a424244 100644 --- a/src/ssl_sniffer/ebpf/main.bpf.c +++ b/src/ssl_sniffer/ebpf/main.bpf.c @@ -51,36 +51,36 @@ struct } chunk_processing_map SEC(".maps"); // Lookup maps +struct rw_event +{ + u64 buff; + u64 len; +}; + struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, u64); - __type(value, u64); // buffer address + __type(value, struct rw_event); __uint(max_entries, 1024); } ptr_ssl_rw_buff SEC(".maps"); SEC("uprobe") int recursive_chunks(struct pt_regs *ctx) { - u64 key = bpf_get_current_pid_tgid(); // PID + SMP processor ID (that should be unique enough) + u64 ptid = bpf_get_current_pid_tgid(); + u64 key = ptid; // PID + SMP processor ID (that should be unique enough) key = (key << 32) | bpf_get_smp_processor_id(); struct chunk_processing *cp = bpf_map_lookup_elem(&chunk_processing_map, &key); if (!cp) return 0; - // do we hit limits of tail calls? - if (cp->loop_count > 30) - { - bpf_printk("recursive_chunks: Hit tail call limit\n"); - bpf_map_delete_elem(&chunk_processing_map, &key); - return 0; - } - // Reserve rb - struct data_event *event = bpf_ringbuf_reserve(&rb, sizeof(struct data_event), 0); + struct chunk_event *event = bpf_ringbuf_reserve(&rb, sizeof(struct chunk_event), 0); if (!event) { bpf_printk("recursive_chunks: Failed to reserve ringbuf\n"); + bpf_map_delete_elem(&ptr_ssl_rw_buff, &ptid); bpf_map_delete_elem(&chunk_processing_map, &key); return 0; } @@ -105,12 +105,22 @@ int recursive_chunks(struct pt_regs *ctx) // Submit the event bpf_ringbuf_submit(event, 0); + // did we hit limits of tail calls? + if (cp->loop_count >= 32) + { + bpf_printk("recursive_chunks: Hit tail call limit\n"); + bpf_map_delete_elem(&ptr_ssl_rw_buff, &ptid); + bpf_map_delete_elem(&chunk_processing_map, &key); + return 0; + } + if (cp->len_left != 0) { bpf_tail_call(ctx, &tailcall_map, REC_CHUNK_RB_PROG); } else { + bpf_map_delete_elem(&ptr_ssl_rw_buff, &ptid); // TODO: wtf not working? we have a memleak here bpf_map_delete_elem(&chunk_processing_map, &key); } return 0; @@ -120,27 +130,36 @@ static __always_inline int handle_rw_exit(struct pt_regs *ctx, int is_write) { u64 pid_tgid = bpf_get_current_pid_tgid(); u32 pid = pid_tgid >> 32; - u64 *buf = bpf_map_lookup_elem(&ptr_ssl_rw_buff, &pid_tgid); - if (!buf) + struct rw_event *event = bpf_map_lookup_elem(&ptr_ssl_rw_buff, &pid_tgid); + if (!event) return 0; + size_t len_struct = 0; + bpf_probe_read_user(&len_struct, sizeof(size_t), (void *)(event->len)); + int resp = PT_REGS_RC_CORE(ctx); if (resp <= 0) return 0; + if (len_struct == 0) + { + len_struct = resp; + } + + u64 *buf = &event->buff; // Create the chunk processing struct struct chunk_processing cp = {0}; cp.loop_count = 0; cp.pid = pid; cp.ts = bpf_ktime_get_ns(); cp.op = is_write ? 
SSL_OP_WRITE : SSL_OP_READ; - cp.len_left = resp; + cp.len_left = len_struct; cp.buffer = *buf; cp.offset = 0; bpf_get_current_comm(&cp.comm, sizeof(cp.comm)); // Store the chunk processing struct - u64 key = bpf_get_current_pid_tgid(); // PID + SMP processor ID + u64 key = pid_tgid; key = (key << 32) | bpf_get_smp_processor_id(); bpf_map_update_elem(&chunk_processing_map, &key, &cp, 0); @@ -148,22 +167,33 @@ static __always_inline int handle_rw_exit(struct pt_regs *ctx, int is_write) return 0; } -SEC("uprobe/fd_attach_ssl") -int probe_fd_attach_ssl(struct pt_regs *ctx) +SEC("uprobe/ssl_rw_enter") +int probe_ssl_rw_enter(struct pt_regs *ctx) { - bpf_printk("fd_attach_ssl\n"); + u64 buf = PT_REGS_PARM2_CORE(ctx); + if (!buf) + return 0; + + u64 pid_tgid = bpf_get_current_pid_tgid(); + struct rw_event rw = {0}; + rw.buff = buf; + bpf_map_update_elem(&ptr_ssl_rw_buff, &pid_tgid, &rw, 0); return 0; } -SEC("uprobe/ssl_rw_enter") -int probe_ssl_rw_enter(struct pt_regs *ctx) +SEC("uprobe/ex_ssl_rw_enter") +int probe_ex_ssl_rw_enter(struct pt_regs *ctx) { u64 buf = PT_REGS_PARM2_CORE(ctx); - if (!buf) + u64 len = PT_REGS_PARM4_CORE(ctx); + if (!buf || !len) return 0; u64 pid_tgid = bpf_get_current_pid_tgid(); - bpf_map_update_elem(&ptr_ssl_rw_buff, &pid_tgid, &buf, 0); + struct rw_event rw = {0}; + rw.buff = buf; + rw.len = len; + bpf_map_update_elem(&ptr_ssl_rw_buff, &pid_tgid, &rw, 0); return 0; } @@ -179,4 +209,11 @@ int probe_ssl_write_return(struct pt_regs *ctx) return (handle_rw_exit(ctx, 1)); } +SEC("uprobe/fd_attach_ssl") +int probe_fd_attach_ssl(struct pt_regs *ctx) +{ + bpf_printk("fd_attach_ssl\n"); + return 0; +} + char _license[] SEC("license") = "GPL"; \ No newline at end of file diff --git a/src/ssl_sniffer/include/ebpf/definition.h b/src/ssl_sniffer/include/ebpf/definition.h index a59ee4e..4bc6660 100644 --- a/src/ssl_sniffer/include/ebpf/definition.h +++ b/src/ssl_sniffer/include/ebpf/definition.h @@ -12,7 +12,7 @@ * Chunk map */ -#define BUFFER_ENTRY_SIZE 256*1024*6 +#define BUFFER_ENTRY_SIZE 256*1024*2 /* * Tailcalls diff --git a/src/ssl_sniffer/include/ebpf/struct_bpf.h b/src/ssl_sniffer/include/ebpf/struct_bpf.h index 25c3bbb..d34f723 100644 --- a/src/ssl_sniffer/include/ebpf/struct_bpf.h +++ b/src/ssl_sniffer/include/ebpf/struct_bpf.h @@ -18,10 +18,10 @@ typedef enum { * @param len Length of data * @param data Data */ -struct data_event { - __u32 pid; +struct chunk_event { __u64 key; int part; + __u32 pid; __u64 ts; char comm[TASK_COMM_LEN]; ssl_op_t op;
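
A note on why the new `_ex` entry probe also captures the fourth argument and why `handle_rw_exit` reads the length through a pointer: unlike `SSL_read`/`SSL_write`, the `_ex` variants (OpenSSL 1.1.1+) return a 1/0 success flag and report the transferred length via a `size_t` out-parameter. A minimal caller-side sketch, for reference only and not part of this patch:

```c
#include <openssl/ssl.h>

/* Reference only: SSL_read_ex/SSL_write_ex return 1 on success and 0 on
 * failure; the byte count comes back through the size_t out-parameter
 * (the 4th argument), which is what probe_ex_ssl_rw_enter stores in
 * rw_event.len and handle_rw_exit dereferences at return time. */
static int echo_once(SSL *ssl)
{
    char buf[4096];
    size_t readbytes = 0, written = 0;

    if (SSL_read_ex(ssl, buf, sizeof(buf), &readbytes) != 1)
        return -1;                 /* 0 means failure, not "0 bytes" */
    if (SSL_write_ex(ssl, buf, readbytes, &written) != 1)
        return -1;
    return (int)written;           /* length taken from *written, not the return code */
}
```

This is also why the return probes can stay shared with the plain variants: a positive return value still indicates success, and when no length pointer was stored at entry (plain `SSL_read`/`SSL_write`), `handle_rw_exit` falls back to the return value as the length.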
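
Separately, the README still lists user-space reassembly of the chunks as a TODO. Purely as an illustration (not part of this patch, and resting on assumptions the patch does not guarantee: that chunks of one call arrive in order, share the same `key`, and start at `part == 0`), `handle_event` could feed a helper like the one below with the fields it already has in `struct chunk_event`:

```c
#include <stdio.h>
#include <string.h>

#define MAX_PACKET_SZ (256 * 1024)

/* Accumulates chunks of a single SSL_read/SSL_write call and flushes the
 * previous call's data whenever a new key (or a part == 0 chunk) shows up. */
struct reassembly {
    unsigned long long key;   /* key of the call currently being rebuilt */
    size_t filled;            /* bytes accumulated so far */
    char buf[MAX_PACKET_SZ];
};

static struct reassembly cur;

static void on_chunk(unsigned long long key, int part, const void *data, size_t len)
{
    if (part == 0 || key != cur.key) {
        if (cur.filled)
            fwrite(cur.buf, 1, cur.filled, stdout);   /* hand off the finished packet */
        cur.key = key;
        cur.filled = 0;
    }
    if (len > sizeof(cur.buf) - cur.filled)
        len = sizeof(cur.buf) - cur.filled;           /* truncate rather than overflow */
    memcpy(cur.buf + cur.filled, data, len);
    cur.filled += len;
}
```

A real implementation would likely key a small hash map by `key` instead of tracking a single in-flight call, since chunks produced on different CPUs can interleave in the ring buffer.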