feat: added SSL_.._ex supports
rphang committed May 16, 2024
1 parent 4bed713 commit eab3f00
Showing 5 changed files with 70 additions and 28 deletions.
3 changes: 2 additions & 1 deletion src/ssl_sniffer/README.md
@@ -15,6 +15,7 @@ It supports out-of-the-box the following applications:
- `curl`
- `wget`
- `nginx` (not all versions, some have inbuilt SSL/TLS support)
- `php`, `python` (tested so far)
& many more...

## Usage
@@ -72,4 +73,4 @@ A `bpftrace_demo.sh` script is provided to try to sniff any encrypted SSL/TLS tr
As we don't know in advance how large the SSL/TLS payloads are, fixed-size structures are a poor fit: they either waste space on small packets or truncate large ones. To avoid this, `ssl_sniffer` uses a ring buffer and transfers the data from kernel to user space in chunks of constant size. These chunks are then reassembled in user space to recover the full packet (TODO).

> [!NOTE]
> This is an implementation of [Google's Chunking Trick](https://lpc.events/event/11/contributions/938/attachments/909/1788/BPF_Security_Google.pdf) at the [Linux Plumbers Conference 2021](https://lpc.events/2021/).
> This contains an implementation of [Google's Chunking Trick](https://lpc.events/event/11/contributions/938/attachments/909/1788/BPF_Security_Google.pdf) at the [Linux Plumbers Conference 2021](https://lpc.events/2021/).
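
To make the trick concrete, here is a minimal illustrative sketch (the struct name, field names, and `MAX_CHUNK_SIZE` are assumptions for explanation, not `ssl_sniffer`'s exact layout): every ring-buffer record is a constant-size chunk carrying a `(key, part)` pair so user space can stitch a large buffer back together.

```c
/* Illustrative sketch only: struct/field names and MAX_CHUNK_SIZE are
 * assumptions made for this explanation, not ssl_sniffer's definitions. */
#include <stdint.h>

#define MAX_CHUNK_SIZE 4096

struct chunk {
    uint64_t key;                  /* identical for every chunk of one SSL_read()/SSL_write() call */
    uint32_t part;                 /* 0, 1, 2, ... position of this chunk within the call */
    uint32_t data_len;             /* bytes actually used in data[] */
    uint8_t  data[MAX_CHUNK_SIZE]; /* constant-size payload slot filled kernel-side */
};

/* A 10 KiB SSL_write() is emitted as ceil(10240 / 4096) = 3 such records;
 * user space concatenates them by (key, part) to recover the full payload. */
```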
8 changes: 6 additions & 2 deletions src/ssl_sniffer/ebpf/loader.c
@@ -102,6 +102,10 @@ int ssl_attach_openssl(char *program_path)
__ATTACH_UPROBE(program_path, "SSL_write", probe_ssl_write_return, true);
__ATTACH_UPROBE(program_path, "SSL_read", probe_ssl_rw_enter, false);
__ATTACH_UPROBE(program_path, "SSL_read", probe_ssl_read_return, true);
__ATTACH_UPROBE(program_path, "SSL_read_ex", probe_ex_ssl_rw_enter, false);
__ATTACH_UPROBE(program_path, "SSL_read_ex", probe_ssl_read_return, true);
__ATTACH_UPROBE(program_path, "SSL_write_ex", probe_ex_ssl_rw_enter, false);
__ATTACH_UPROBE(program_path, "SSL_write_ex", probe_ssl_write_return, true);
return 0;
}
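
For reference, the OpenSSL prototypes behind this attach scheme (standard OpenSSL 1.1.1+ API): the `_ex` variants take the buffer as the second argument, like the classic calls, but report the transferred byte count through a fourth out-pointer instead of the return value, which is why they need their own entry probe (`probe_ex_ssl_rw_enter`) while the return probes can be shared.

```c
/* OpenSSL I/O prototypes (openssl/ssl.h), shown for context. */
int SSL_read(SSL *ssl, void *buf, int num);                               /* returns bytes read, or <= 0  */
int SSL_write(SSL *ssl, const void *buf, int num);                        /* returns bytes written, or <= 0 */
int SSL_read_ex(SSL *ssl, void *buf, size_t num, size_t *readbytes);      /* returns 1/0; count in *readbytes */
int SSL_write_ex(SSL *ssl, const void *buf, size_t num, size_t *written); /* returns 1/0; count in *written */
```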

@@ -139,7 +143,7 @@ int ssl_attach_nss(char *program_path)
return 0;
}

static void log_event(struct data_event *event)
static void log_event(struct chunk_event *event)
{
char *op = event->op == 1 ? "SSL_OP_READ" : "SSL_OP_WRITE";
fprintf(stdout, "--------------------------------------------------\n");
@@ -153,7 +157,7 @@ static void log_event(struct data_event *event)

static int handle_event(void *ctx, void *data, size_t len)
{
struct data_event *event = (struct data_event *)data;
struct chunk_event *event = (struct chunk_event *)data;
log_event(event);
return 0;
}
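
Reassembly of the chunks is still marked as a TODO in the README; below is a minimal sketch of what `handle_event` could feed into. It assumes `chunk_event` exposes `data_len` and `data[]` members in addition to the `key`/`part` fields visible in this diff, and the table sizes are arbitrary.

```c
/* Sketch only: not ssl_sniffer's implementation. Assumes chunk_event has
 * data_len and data[] members besides the key/part fields in this commit. */
#include <stdint.h>
#include <string.h>

#define MAX_STREAMS  64
#define MAX_PAYLOAD  (256 * 1024)   /* arbitrary per-call payload cap */

struct stream {
    uint64_t key;                   /* chunk_event->key this buffer belongs to */
    size_t   used;                  /* bytes accumulated so far */
    uint8_t  data[MAX_PAYLOAD];
};

static struct stream streams[MAX_STREAMS];

static void accumulate_chunk(uint64_t key, uint32_t part,
                             const uint8_t *payload, size_t payload_len)
{
    struct stream *s = &streams[key % MAX_STREAMS];
    if (s->key != key || part == 0) {   /* new SSL call: reset the slot */
        s->key = key;
        s->used = 0;
    }
    if (s->used + payload_len <= MAX_PAYLOAD) {
        memcpy(s->data + s->used, payload, payload_len);
        s->used += payload_len;
    }
    /* When the final part arrives (short chunk or an explicit end marker),
     * hand s->data / s->used to log_event-style output and reset s->used. */
}
```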
81 changes: 59 additions & 22 deletions src/ssl_sniffer/ebpf/main.bpf.c
@@ -51,36 +51,36 @@ struct
} chunk_processing_map SEC(".maps");

// Lookup maps
struct rw_event
{
u64 buff;
u64 len;
};

struct
{
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u64);
__type(value, u64); // buffer address
__type(value, struct rw_event);
__uint(max_entries, 1024);
} ptr_ssl_rw_buff SEC(".maps");

SEC("uprobe")
int recursive_chunks(struct pt_regs *ctx)
{
u64 key = bpf_get_current_pid_tgid(); // PID + SMP processor ID (that should be unique enough)
u64 ptid = bpf_get_current_pid_tgid();
u64 key = ptid; // PID + SMP processor ID (that should be unique enough)
key = (key << 32) | bpf_get_smp_processor_id();
struct chunk_processing *cp = bpf_map_lookup_elem(&chunk_processing_map, &key);
if (!cp)
return 0;

// do we hit limits of tail calls?
if (cp->loop_count > 30)
{
bpf_printk("recursive_chunks: Hit tail call limit\n");
bpf_map_delete_elem(&chunk_processing_map, &key);
return 0;
}

// Reserve rb
struct data_event *event = bpf_ringbuf_reserve(&rb, sizeof(struct data_event), 0);
struct chunk_event *event = bpf_ringbuf_reserve(&rb, sizeof(struct chunk_event), 0);
if (!event)
{
bpf_printk("recursive_chunks: Failed to reserve ringbuf\n");
bpf_map_delete_elem(&ptr_ssl_rw_buff, &ptid);
bpf_map_delete_elem(&chunk_processing_map, &key);
return 0;
}
@@ -105,12 +105,22 @@ int recursive_chunks(struct pt_regs *ctx)
// Submit the event
bpf_ringbuf_submit(event, 0);

// did we hit limits of tail calls?
if (cp->loop_count >= 32)
{
bpf_printk("recursive_chunks: Hit tail call limit\n");
bpf_map_delete_elem(&ptr_ssl_rw_buff, &ptid);
bpf_map_delete_elem(&chunk_processing_map, &key);
return 0;
}

if (cp->len_left != 0)
{
bpf_tail_call(ctx, &tailcall_map, REC_CHUNK_RB_PROG);
}
else
{
bpf_map_delete_elem(&ptr_ssl_rw_buff, &ptid); // TODO: this delete does not appear to take effect, leaking the entry
bpf_map_delete_elem(&chunk_processing_map, &key);
}
return 0;
@@ -120,50 +130,70 @@ static __always_inline int handle_rw_exit(struct pt_regs *ctx, int is_write)
{
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u64 *buf = bpf_map_lookup_elem(&ptr_ssl_rw_buff, &pid_tgid);
if (!buf)
struct rw_event *event = bpf_map_lookup_elem(&ptr_ssl_rw_buff, &pid_tgid);
if (!event)
return 0;

size_t len_struct = 0;
bpf_probe_read_user(&len_struct, sizeof(size_t), (void *)(event->len));

int resp = PT_REGS_RC_CORE(ctx);
if (resp <= 0)
return 0;

if (len_struct == 0)
{
len_struct = resp;
}

u64 *buf = &event->buff;
// Create the chunk processing struct
struct chunk_processing cp = {0};
cp.loop_count = 0;
cp.pid = pid;
cp.ts = bpf_ktime_get_ns();
cp.op = is_write ? SSL_OP_WRITE : SSL_OP_READ;
cp.len_left = resp;
cp.len_left = len_struct;
cp.buffer = *buf;
cp.offset = 0;
bpf_get_current_comm(&cp.comm, sizeof(cp.comm));

// Store the chunk processing struct
u64 key = bpf_get_current_pid_tgid(); // PID + SMP processor ID
u64 key = pid_tgid;
key = (key << 32) | bpf_get_smp_processor_id();
bpf_map_update_elem(&chunk_processing_map, &key, &cp, 0);

bpf_tail_call(ctx, &tailcall_map, REC_CHUNK_RB_PROG);
return 0;
}

SEC("uprobe/fd_attach_ssl")
int probe_fd_attach_ssl(struct pt_regs *ctx)
SEC("uprobe/ssl_rw_enter")
int probe_ssl_rw_enter(struct pt_regs *ctx)
{
bpf_printk("fd_attach_ssl\n");
u64 buf = PT_REGS_PARM2_CORE(ctx);
if (!buf)
return 0;

u64 pid_tgid = bpf_get_current_pid_tgid();
struct rw_event rw = {0};
rw.buff = buf;
bpf_map_update_elem(&ptr_ssl_rw_buff, &pid_tgid, &rw, 0);
return 0;
}

SEC("uprobe/ssl_rw_enter")
int probe_ssl_rw_enter(struct pt_regs *ctx)
SEC("uprobe/ex_ssl_rw_enter")
int probe_ex_ssl_rw_enter(struct pt_regs *ctx)
{
u64 buf = PT_REGS_PARM2_CORE(ctx);
if (!buf)
u64 len = PT_REGS_PARM4_CORE(ctx);
if (!buf || !len)
return 0;

u64 pid_tgid = bpf_get_current_pid_tgid();
bpf_map_update_elem(&ptr_ssl_rw_buff, &pid_tgid, &buf, 0);
struct rw_event rw = {0};
rw.buff = buf;
rw.len = len;
bpf_map_update_elem(&ptr_ssl_rw_buff, &pid_tgid, &rw, 0);
return 0;
}
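
For context, a short hypothetical caller of the `_ex` API: the fourth argument that `PT_REGS_PARM4_CORE` captures above is a pointer to the byte count, filled in by OpenSSL before the function returns, which is why `handle_rw_exit` dereferences the saved `event->len` on the return probe.

```c
/* Hypothetical application code using the _ex variants (OpenSSL 1.1.1+);
 * `ssl` is assumed to be an already-established SSL * connection. */
#include <openssl/ssl.h>

int echo_once(SSL *ssl)
{
    char buf[4096];
    size_t written = 0, readbytes = 0;

    /* arg4 is &written: the entry probe stores this pointer, and the return
     * probe reads *written to learn how many bytes actually went out. */
    if (SSL_write_ex(ssl, "hello", 5, &written) != 1)
        return -1;

    /* Same pattern for reads: the byte count lands in readbytes. */
    if (SSL_read_ex(ssl, buf, sizeof(buf), &readbytes) != 1)
        return -1;

    return (int)readbytes;
}
```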

@@ -179,4 +209,11 @@ int probe_ssl_write_return(struct pt_regs *ctx)
return (handle_rw_exit(ctx, 1));
}

SEC("uprobe/fd_attach_ssl")
int probe_fd_attach_ssl(struct pt_regs *ctx)
{
bpf_printk("fd_attach_ssl\n");
return 0;
}

char _license[] SEC("license") = "GPL";
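
`recursive_chunks` re-enters itself via `bpf_tail_call(ctx, &tailcall_map, REC_CHUNK_RB_PROG)`, but the map declaration and its user-space wiring sit outside this diff. A plausible sketch, assuming a `BPF_MAP_TYPE_PROG_ARRAY` and a libbpf skeleton handle named `skel` (both assumptions; only `tailcall_map` and `REC_CHUNK_RB_PROG` appear in this commit):

```c
/* BPF side (sketch): program array used as the tail-call jump table. */
struct {
    __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
    __uint(max_entries, 8);
    __type(key, __u32);
    __type(value, __u32);
} tailcall_map SEC(".maps");

/* Loader side (sketch): place recursive_chunks at index REC_CHUNK_RB_PROG so
 * bpf_tail_call() in the uprobe can jump back into it for the next chunk. */
__u32 idx = REC_CHUNK_RB_PROG;
__u32 prog_fd = bpf_program__fd(skel->progs.recursive_chunks);
bpf_map_update_elem(bpf_map__fd(skel->maps.tailcall_map), &idx, &prog_fd, BPF_ANY);
```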
2 changes: 1 addition & 1 deletion src/ssl_sniffer/include/ebpf/definition.h
@@ -12,7 +12,7 @@
* Chunk map
*/

#define BUFFER_ENTRY_SIZE 256*1024*6
#define BUFFER_ENTRY_SIZE 256*1024*2

/*
* Tailcalls
4 changes: 2 additions & 2 deletions src/ssl_sniffer/include/ebpf/struct_bpf.h
@@ -18,10 +18,10 @@ typedef enum {
* @param len Length of data
* @param data Data
*/
struct data_event {
__u32 pid;
struct chunk_event {
__u64 key;
int part;
__u32 pid;
__u64 ts;
char comm[TASK_COMM_LEN];
ssl_op_t op;
