feat: added chunks
rphang committed May 16, 2024
1 parent 5bae343 commit 4bed713
Showing 6 changed files with 157 additions and 37 deletions.
9 changes: 8 additions & 1 deletion src/ssl_sniffer/README.md
@@ -65,4 +65,11 @@ Tested on:

## Extra

A `bpftrace_demo.sh` script is provided to sniff any encrypted SSL/TLS traffic on specified programs. It uses `bpftrace` to compile and load the eBPF program. It is a simpler version of the `ssl_sniffer` tool with truncated output.

## Technical Details

Since we don't know in advance how large the SSL/TLS traffic will be, fixed-size structures are a poor fit: they either waste space on small packets or truncate large ones. To avoid this, `ssl_sniffer` uses a ring buffer and transfers data from the kernel to user space in constant-size chunks. These chunks are then reassembled in user space to recover the full packet (TODO).

> [!NOTE]
> This is an implementation of [Google's Chunking Trick](https://lpc.events/event/11/contributions/938/attachments/909/1788/BPF_Security_Google.pdf) at the [Linux Plumbers Conference 2021](https://lpc.events/2021/).
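
User-space reassembly is still marked TODO above; the sketch below shows one way it could look, assuming each chunk is delivered through libbpf's `ring_buffer` sample callback (registered with `ring_buffer__new()`) and that a chunk shorter than `MAX_DATA_LEN` terminates a message — an assumption, since the kernel side sends no explicit end marker. The struct mirror (exact field order should follow `struct data_event` in `include/ebpf/struct_bpf.h`) and the helper names (`handle_chunk`, `find_slot`, `MAX_INFLIGHT`) are illustrative only and not part of this commit.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TASK_COMM_LEN 16
#define MAX_DATA_LEN  1064
#define MAX_INFLIGHT  64

/* Illustrative mirror of struct data_event from include/ebpf/struct_bpf.h. */
struct data_event {
    unsigned int pid;
    unsigned long long key;   /* pid_tgid << 32 | smp_processor_id */
    int part;                 /* chunk index within one SSL_read/SSL_write call */
    unsigned long long ts;
    char comm[TASK_COMM_LEN];
    int op;
    int len;
    char data[MAX_DATA_LEN];
};

/* One in-flight message being reassembled. */
struct assembly {
    unsigned long long key;
    int next_part;
    size_t len, cap;
    char *buf;
    int in_use;
};

static struct assembly slots[MAX_INFLIGHT];

static struct assembly *find_slot(unsigned long long key)
{
    struct assembly *free_slot = NULL;
    for (int i = 0; i < MAX_INFLIGHT; i++) {
        if (slots[i].in_use && slots[i].key == key)
            return &slots[i];
        if (!slots[i].in_use && !free_slot)
            free_slot = &slots[i];
    }
    if (free_slot) {            /* start a new message for this key */
        free_slot->in_use = 1;
        free_slot->key = key;
        free_slot->next_part = 0;
        free_slot->len = 0;
    }
    return free_slot;
}

/* Ring buffer sample callback: matches libbpf's ring_buffer_sample_fn. */
int handle_chunk(void *ctx, void *data, size_t size)
{
    struct data_event *ev = data;
    struct assembly *as = find_slot(ev->key);

    if (!as || ev->part != as->next_part) {
        /* No free slot or out-of-order chunk: drop the partial message. */
        if (as)
            as->in_use = 0;
        return 0;
    }

    if (as->len + (size_t)ev->len > as->cap) {
        size_t ncap = as->len + ev->len;
        char *nbuf = realloc(as->buf, ncap);
        if (!nbuf) {            /* allocation failed: drop this message */
            free(as->buf);
            as->buf = NULL;
            as->cap = as->len = 0;
            as->in_use = 0;
            return 0;
        }
        as->buf = nbuf;
        as->cap = ncap;
    }
    memcpy(as->buf + as->len, ev->data, ev->len);
    as->len += ev->len;
    as->next_part++;

    /* Assumed heuristic: a chunk shorter than MAX_DATA_LEN ends the message. */
    if (ev->len < MAX_DATA_LEN) {
        printf("[%llu] %s(%u): %zu bytes reassembled\n",
               ev->key, ev->comm, ev->pid, as->len);
        fwrite(as->buf, 1, as->len, stdout);
        as->in_use = 0;
    }
    return 0;
}
```

A real implementation would also need to age out stale slots (e.g. when a final chunk is lost) and keep `SSL_OP_READ` and `SSL_OP_WRITE` streams separate via `op`.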
14 changes: 13 additions & 1 deletion src/ssl_sniffer/ebpf/loader.c
@@ -1,6 +1,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

#include "ebpf/loader.h"
#include "ebpf/struct_bpf.h"
@@ -75,6 +76,16 @@ int ssl_load()
fprintf(stderr, "Failed to load BPF skeleton\n");
return 1;
}

int map_prog_array_fd = bpf_map__fd(skel->maps.tailcall_map);
int prog_fd = bpf_program__fd(skel->progs.recursive_chunks);
int index = REC_CHUNK_RB_PROG;
err = bpf_map_update_elem(map_prog_array_fd, &index, &prog_fd, BPF_ANY);
if (err)
{
fprintf(stderr, "Failed to update tailcall map: %d\n", err);
return 1;
}
return 0;
}

@@ -131,7 +142,8 @@ int ssl_attach_nss(char *program_path)
static void log_event(struct data_event *event)
{
char *op = event->op == 1 ? "SSL_OP_READ" : "SSL_OP_WRITE";
fprintf(stdout, "[+] %s(%d), ts: %llu, op: %s, len: %d --> \n", event->comm, event->pid, event->ts, op, event->len);
fprintf(stdout, "--------------------------------------------------\n");
fprintf(stdout, "[+|%llu-%d] %s(%d), ts: %llu, op: %s, len: %d, --> \n", event->key, event->part, event->comm, event->pid, event->ts, op, event->len);
for (int i = 0; i < event->len; i++)
{
fprintf(stdout, "%c", event->data[i]);
127 changes: 98 additions & 29 deletions src/ssl_sniffer/ebpf/main.bpf.c
@@ -29,52 +29,122 @@
_min1 < _min2 ? _min1 : _min2; \
})

// Internal maps (mostly for ptr tracking, other maps are in headers)
// Chunk processing storage map
struct chunk_processing
{
int loop_count;
u32 pid;
u64 ts;
ssl_op_t op;
char comm[TASK_COMM_LEN];
size_t len_left;
u64 buffer;
u64 offset;
};

struct
{
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u64);
__type(value, struct chunk_processing);
__uint(max_entries, 1024);
} chunk_processing_map SEC(".maps");

// Lookup maps
struct
{
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32); // pid
__type(key, u64);
__type(value, u64); // buffer address
__uint(max_entries, 1024);
} ptr_ssl_rw_buff SEC(".maps");

SEC("uprobe")
int recursive_chunks(struct pt_regs *ctx)
{
u64 key = bpf_get_current_pid_tgid(); // PID + SMP processor ID (that should be unique enough)
key = (key << 32) | bpf_get_smp_processor_id();
struct chunk_processing *cp = bpf_map_lookup_elem(&chunk_processing_map, &key);
if (!cp)
return 0;

// do we hit limits of tail calls?
if (cp->loop_count > 30)
{
bpf_printk("recursive_chunks: Hit tail call limit\n");
bpf_map_delete_elem(&chunk_processing_map, &key);
return 0;
}

// Reserve rb
struct data_event *event = bpf_ringbuf_reserve(&rb, sizeof(struct data_event), 0);
if (!event)
{
bpf_printk("recursive_chunks: Failed to reserve ringbuf\n");
bpf_map_delete_elem(&chunk_processing_map, &key);
return 0;
}
// Read the chunk
size_t len = min((size_t)cp->len_left, (size_t)MAX_DATA_LEN);
bpf_probe_read_user(&event->data, len, (void *)(cp->buffer + cp->offset));

// Copy data
event->key = key;
event->part = cp->loop_count;
event->pid = cp->pid;
event->ts = cp->ts;
event->op = cp->op;
event->len = len;
bpf_probe_read_user(&event->comm, sizeof(event->comm), cp->comm);

// Update the chunk processing struct
cp->loop_count++;
cp->len_left -= len;
cp->offset += len;

// Submit the event
bpf_ringbuf_submit(event, 0);

if (cp->len_left != 0)
{
bpf_tail_call(ctx, &tailcall_map, REC_CHUNK_RB_PROG);
}
else
{
bpf_map_delete_elem(&chunk_processing_map, &key);
}
return 0;
}

static __always_inline int handle_rw_exit(struct pt_regs *ctx, int is_write)
{
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u64 *buf = bpf_map_lookup_elem(&ptr_ssl_rw_buff, &pid);
u64 *buf = bpf_map_lookup_elem(&ptr_ssl_rw_buff, &pid_tgid);
if (!buf)
return 0;

int resp = PT_REGS_RC_CORE(ctx);
if (resp <= 0)
return 0;
if (resp > MAX_DATA_LEN)
bpf_printk("we might lose some data (%d), need some recursive read\n", resp);
u32 read_len = min((size_t)resp, (size_t)MAX_DATA_LEN);

// Prepare to send to user space (ring buffer)
int msg_len = sizeof(struct data_event);
struct data_event *msg = bpf_ringbuf_reserve(&rb, msg_len, 0);
if (!msg)
return 0;

u64 ts = bpf_ktime_get_ns();

bpf_core_read(&msg->pid, sizeof(msg->pid), &pid);
bpf_get_current_comm(&msg->comm, TASK_COMM_LEN);
bpf_core_read(&msg->ts, sizeof(msg->ts), &ts);
msg->op = is_write ? SSL_OP_WRITE : SSL_OP_READ;
msg->len = resp;

//if (!is_write)
// bpf_probe_write_user((void *)*buf, "HTTP/1.1 200 OK\nContent-Length: 12\n\nHello World\n\00", 50);
// We can fake the data being sent back to user space but difference in read size will be detected

bpf_core_read_user(&msg->data, read_len, (void *)*buf);
// Sending to ring buffer
bpf_ringbuf_submit(msg, 0);
// Create the chunk processing struct
struct chunk_processing cp = {0};
cp.loop_count = 0;
cp.pid = pid;
cp.ts = bpf_ktime_get_ns();
cp.op = is_write ? SSL_OP_WRITE : SSL_OP_READ;
cp.len_left = resp;
cp.buffer = *buf;
cp.offset = 0;
bpf_get_current_comm(&cp.comm, sizeof(cp.comm));

// Store the chunk processing struct
u64 key = bpf_get_current_pid_tgid(); // PID + SMP processor ID
key = (key << 32) | bpf_get_smp_processor_id();
bpf_map_update_elem(&chunk_processing_map, &key, &cp, 0);

bpf_tail_call(ctx, &tailcall_map, REC_CHUNK_RB_PROG);
return 0;
}

@@ -93,8 +163,7 @@ int probe_ssl_rw_enter(struct pt_regs *ctx)
return 0;

u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
bpf_map_update_elem(&ptr_ssl_rw_buff, &pid, &buf, 0);
bpf_map_update_elem(&ptr_ssl_rw_buff, &pid_tgid, &buf, 0);
return 0;
}

23 changes: 23 additions & 0 deletions src/ssl_sniffer/include/ebpf/definition.h
@@ -0,0 +1,23 @@
#ifndef __DEF_SSL_BPF_H
#define __DEF_SSL_BPF_H

/*
* Chunk struct
*/

#define TASK_COMM_LEN 16
#define MAX_DATA_LEN 1064

/*
* Chunk map
*/

#define BUFFER_ENTRY_SIZE 256*1024*6

/*
* Tailcalls
*/

#define REC_CHUNK_RB_PROG 0

#endif
12 changes: 10 additions & 2 deletions src/ssl_sniffer/include/ebpf/maps_bpf.h
@@ -7,8 +7,7 @@
#define __MAPS_BPF_H

#include <bpf/bpf_helpers.h>

#define BUFFER_ENTRY_SIZE 256*1024
#include "definition.h"

/*
This implementation uses BPF_MAP_TYPE_RINGBUF to store the data events.
@@ -25,6 +24,15 @@ struct {
__uint(max_entries, BUFFER_ENTRY_SIZE);
} rb SEC(".maps");

// Tailcall map
struct
{
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 2);
__type(key, __u32);
__type(value, __u32); // Hold our BPF programs for the tailcalls
} tailcall_map SEC(".maps");

// Lookup maps
// FD <-> SSL CTX (TODO: Look into this)
struct {
9 changes: 5 additions & 4 deletions src/ssl_sniffer/include/ebpf/struct_bpf.h
@@ -1,26 +1,27 @@
#ifndef __STRUCTS_BPF_H
#define __STRUCTS_BPF_H

#define TASK_COMM_LEN 16
#define MAX_DATA_LEN 16536
#include "definition.h"

typedef enum {
SSL_OP_WRITE = 0,
SSL_OP_READ = 1
} ssl_op_t;

/**
* @brief Struct for passing data from kernel to user space, for each write/recv SSL operation
* @brief Chunk struct for passing data from kernel to user space, for each write/recv SSL operation.
*
* @param pid Process ID
* @param tgid Thread Group ID
* @param ts Timestamp
* @param comm Command name
* @param op Operation type
* @param len Length of data
* @param data Data
*/
struct data_event {
__u32 pid;
__u64 key;
int part;
__u64 ts;
char comm[TASK_COMM_LEN];
ssl_op_t op;
