enter eBPF; libpcap and CGO = bloat
hypd/server/bpf_endian.h (new file, 100 lines)
@@ -0,0 +1,100 @@
// Code lifted from the folks at Cilium from ebpf-go repo

#ifndef __BPF_ENDIAN__
#define __BPF_ENDIAN__

/*
 * Isolate byte #n and put it into byte #m, for __u##b type.
 * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64:
 * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx
 * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000
 * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn
 * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000
 */
#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8))

#define ___bpf_swab16(x) ((__u16)( \
	___bpf_mvb(x, 16, 0, 1) | \
	___bpf_mvb(x, 16, 1, 0)))

#define ___bpf_swab32(x) ((__u32)( \
	___bpf_mvb(x, 32, 0, 3) | \
	___bpf_mvb(x, 32, 1, 2) | \
	___bpf_mvb(x, 32, 2, 1) | \
	___bpf_mvb(x, 32, 3, 0)))

#define ___bpf_swab64(x) ((__u64)( \
	___bpf_mvb(x, 64, 0, 7) | \
	___bpf_mvb(x, 64, 1, 6) | \
	___bpf_mvb(x, 64, 2, 5) | \
	___bpf_mvb(x, 64, 3, 4) | \
	___bpf_mvb(x, 64, 4, 3) | \
	___bpf_mvb(x, 64, 5, 2) | \
	___bpf_mvb(x, 64, 6, 1) | \
	___bpf_mvb(x, 64, 7, 0)))

/* LLVM's BPF target selects the endianness of the CPU
 * it compiles on, or the user specifies (bpfel/bpfeb),
 * respectively. The used __BYTE_ORDER__ is defined by
 * the compiler, we cannot rely on __BYTE_ORDER from
 * libc headers, since it doesn't reflect the actual
 * requested byte order.
 *
 * Note, LLVM's BPF target has different __builtin_bswapX()
 * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE
 * in bpfel and bpfeb case, which means below, that we map
 * to cpu_to_be16(). We could use it unconditionally in BPF
 * case, but better not rely on it, so that this header here
 * can be used from application and BPF program side, which
 * use different targets.
 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define __bpf_ntohs(x) __builtin_bswap16(x)
# define __bpf_htons(x) __builtin_bswap16(x)
# define __bpf_constant_ntohs(x) ___bpf_swab16(x)
# define __bpf_constant_htons(x) ___bpf_swab16(x)
# define __bpf_ntohl(x) __builtin_bswap32(x)
# define __bpf_htonl(x) __builtin_bswap32(x)
# define __bpf_constant_ntohl(x) ___bpf_swab32(x)
# define __bpf_constant_htonl(x) ___bpf_swab32(x)
# define __bpf_be64_to_cpu(x) __builtin_bswap64(x)
# define __bpf_cpu_to_be64(x) __builtin_bswap64(x)
# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x)
# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define __bpf_ntohs(x) (x)
# define __bpf_htons(x) (x)
# define __bpf_constant_ntohs(x) (x)
# define __bpf_constant_htons(x) (x)
# define __bpf_ntohl(x) (x)
# define __bpf_htonl(x) (x)
# define __bpf_constant_ntohl(x) (x)
# define __bpf_constant_htonl(x) (x)
# define __bpf_be64_to_cpu(x) (x)
# define __bpf_cpu_to_be64(x) (x)
# define __bpf_constant_be64_to_cpu(x) (x)
# define __bpf_constant_cpu_to_be64(x) (x)
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif

#define bpf_htons(x) \
	(__builtin_constant_p(x) ? \
	 __bpf_constant_htons(x) : __bpf_htons(x))
#define bpf_ntohs(x) \
	(__builtin_constant_p(x) ? \
	 __bpf_constant_ntohs(x) : __bpf_ntohs(x))
#define bpf_htonl(x) \
	(__builtin_constant_p(x) ? \
	 __bpf_constant_htonl(x) : __bpf_htonl(x))
#define bpf_ntohl(x) \
	(__builtin_constant_p(x) ? \
	 __bpf_constant_ntohl(x) : __bpf_ntohl(x))
#define bpf_cpu_to_be64(x) \
	(__builtin_constant_p(x) ? \
	 __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x))
#define bpf_be64_to_cpu(x) \
	(__builtin_constant_p(x) ? \
	 __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x))

#endif /* __BPF_ENDIAN__ */
hypd/server/hyp_bpf.c (new file, 64 lines)
@@ -0,0 +1,64 @@
/*
Copyright © 2024 Steven Polley <himself@stevenpolley.net>
*/

//go:build ignore
#include "vmlinux.h"
#include "bpf_endian.h"
#include <bpf/bpf_helpers.h>

char __license[] SEC("license") = "BSD";

#define ETH_P_IP 0x0800
#define IP_FRAGMENTED 65343

// representation of knock data that gets sent to userspace
struct knock_data {
	__u32 srcip;   // 4 bytes
	__u16 dstport; // 2 bytes
	__u16 pad;     // required padding - struct must be a multiple of 4 bytes
};

// ring buffer used to send data to userspace
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1 << 24);
} rb SEC(".maps");

// force emitting struct knock_data into the ELF
const struct knock_data *unused __attribute__((unused));

// hook into the eXpress Data Path (XDP) attach point
SEC("xdp")
int xdp_prog_func(struct xdp_md *ctx) {

	// XDP gives us the raw frame with no structures - it must be parsed
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	// parse ethernet header
	struct ethhdr *eth = data;

	if ((void *)eth + sizeof(*eth) <= data_end) {
		// parse IP header
		struct iphdr *ip = data + sizeof(*eth);
		if ((void *)ip + sizeof(*ip) <= data_end) {
			if (ip->protocol == IPPROTO_UDP) {
				// parse UDP header
				struct udphdr *udp = (void *)ip + sizeof(*ip);
				if ((void *)udp + sizeof(*udp) <= data_end) {
					// pack into knock structure and send to userspace
					struct knock_data knock;
					knock.srcip = bpf_ntohl(ip->saddr);
					knock.dstport = bpf_htons(udp->dest);
					knock.pad = 0;
					bpf_ringbuf_output(&rb, &knock, sizeof(knock), BPF_RB_FORCE_WAKEUP);
				}
			}
		}
	}

	return XDP_PASS;
}
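Context for the Go changes that follow, not part of the commit itself: bpf2go (invoked by the //go:generate directive in the next hunk) compiles this C file and emits Go bindings. A minimal sketch of what those bindings presumably look like; the field types are assumptions based on bpf2go's usual output, while the identifiers are the ones the server code below actually references:

	// sketch only - these declarations are generated by bpf2go, not hand-written in this commit
	type hyp_bpfKnockData struct {
		Srcip   uint32 // mirrors __u32 srcip (already host byte order, see bpf_ntohl above)
		Dstport uint16 // mirrors __u16 dstport
		Pad     uint16 // mirrors the explicit padding field (name assumed)
	}

	type hyp_bpfObjects struct {
		XdpProgFunc *ebpf.Program // SEC("xdp") int xdp_prog_func
		Rb          *ebpf.Map     // the BPF_MAP_TYPE_RINGBUF map "rb"
	}
	// loadHyp_bpfObjects(&objs, nil) populates the struct; objs.Close() releases the kernel objects.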
@@ -5,18 +5,24 @@ Copyright © 2024 Steven Polley <himself@stevenpolley.net>
 package server

 import (
+	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"log"
+	"net"
 	"os"
 	"os/exec"
 	"time"

 	"deadbeef.codes/steven/hyp/otphyp"
-	"github.com/google/gopacket"
-	"github.com/google/gopacket/pcap"
+	"github.com/cilium/ebpf/link"
+	"github.com/cilium/ebpf/ringbuf"
+	"github.com/cilium/ebpf/rlimit"
 )

+//go:generate go run github.com/cilium/ebpf/cmd/bpf2go --type knock_data hyp_bpf hyp_bpf.c
+
 // Client is used to keep track of a client attempting to perform an authentic knock sequence
 type Client struct {
 	Progress int // index of current progress in sequence. Value of 1 means first port has been matched
@@ -31,7 +37,7 @@ type KnockSequence struct {
 }

 var (
-	clients        map[string]*Client // Contains a map of clients
+	clients        map[uint32]*Client // Contains a map of clients, key is IPv4 address
 	knockSequences []KnockSequence    // We have 3 valid knock sequences at any time to account for clock skew
 	sharedSecret   string             // base32 encoded shared secret used for totp
 )
@@ -40,46 +46,96 @@ var (
 // it sets up the pcap on the capture device and starts a goroutine
 // to rotate the knock sequence
 func PacketServer(captureDevice string) error {
+
+	iface, err := net.InterfaceByName(captureDevice)
+	if err != nil {
+		log.Fatalf("lookup network iface %q: %v", captureDevice, err)
+	}
+
 	secretBytes, err := os.ReadFile("hyp.secret")
 	if err != nil {
 		log.Fatalf("failed to read file 'hyp.secret': %v", err)
 	}
 	sharedSecret = string(secretBytes)

-	clients = make(map[string]*Client, 0)
+	clients = make(map[uint32]*Client, 0)
 	knockSequences = []KnockSequence{}

-	// Open pcap handle on device
-	handle, err := pcap.OpenLive(captureDevice, 126, true, pcap.BlockForever)
-	if err != nil {
-		return fmt.Errorf("failed to open pcap on capture device: %w", err)
-	}
-	packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
-
 	// Setup a goroutine to periodically rotate the authentic knock sequence
-	go rotateSequence(handle)
+	go rotateSequence()

-	// Read from the pcap handle until we exit
-	for packet := range packetSource.Packets() {
-		handlePacket(packet) // Do something with a packet here.
-		////////////////////////////////////
+	// Allow the current process to lock memory for eBPF resources.
+	if err := rlimit.RemoveMemlock(); err != nil {
+		log.Fatal(err)
 	}
-	return nil
+
+	// Load pre-compiled programs into the kernel.
+	objs := hyp_bpfObjects{}
+	if err := loadHyp_bpfObjects(&objs, nil); err != nil {
+		log.Fatalf("loading objects: %v", err)
+	}
+	defer objs.Close()
+
+	// Attach the program.
+	l, err := link.AttachXDP(link.XDPOptions{
+		Program:   objs.XdpProgFunc,
+		Interface: iface.Index,
+	})
+	if err != nil {
+		log.Fatalf("could not attach XDP program: %v", err)
+	}
+	defer l.Close()
+
+	log.Printf("Attached XDP program to iface %q (index %d)", iface.Name, iface.Index)
+	log.Printf("Press Ctrl-C to exit and remove the program")
+
+	rd, err := ringbuf.NewReader(objs.Rb)
+	if err != nil {
+		log.Fatalf("could not open ring buffer reader: %v", err)
+	}
+	defer rd.Close()
+
+	var event hyp_bpfKnockData
+	for {
+		record, err := rd.Read()
+		if err != nil {
+			if errors.Is(err, ringbuf.ErrClosed) {
+				log.Println("eBPF ring buffer closed, exiting...")
+				return nil
+			}
+			log.Printf("error reading from ring buffer reader: %v", err)
+			continue
+		}
+
+		if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &event); err != nil {
+			log.Printf("error parsing ringbuf event: %v", err)
+			continue
+		}
+		handleKnock(event)
+	}
 }

+// intToIP converts IPv4 number to net.IP
+func intToIP(ipNum uint32) net.IP {
+	ip := make(net.IP, 4)
+	binary.BigEndian.PutUint32(ip, ipNum)
+	return ip
+}
+
 // packets that match the BPF filter get passed to handlePacket
-func handlePacket(packet gopacket.Packet) {
-	port := binary.BigEndian.Uint16(packet.TransportLayer().TransportFlow().Dst().Raw())
-	srcip := packet.NetworkLayer().NetworkFlow().Src().String()
+func handleKnock(knockEvent hyp_bpfKnockData) {

-	client, ok := clients[srcip]
+	client, ok := clients[knockEvent.Srcip]
 	if !ok { // client doesn't exist yet
 		for i, knockSequence := range knockSequences { // identify which of the 3 authentic knock sequences is matched
 			if knockSequence.Used { // skip over sequences that are already used to prevent replay attack
 				continue
 			}
-			if port == knockSequence.PortSequence[0] {
+			if knockEvent.Dstport == knockSequence.PortSequence[0] {
 				// Create the client and mark the knock sequence as used
-				clients[srcip] = &Client{Progress: 1, Sequence: knockSequence.PortSequence}
+				clients[knockEvent.Srcip] = &Client{Progress: 1, Sequence: knockSequence.PortSequence}
 				knockSequences[i].Used = true
 			}
 		}
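An aside, not part of the diff: Srcip arrives in host byte order because the XDP program already applied bpf_ntohl, which is why intToIP re-encodes it big-endian before use. A minimal illustration inside this package, with a made-up address:

	// hypothetical value: 10.0.0.1 shows up as the host-order uint32 0x0A000001
	event := hyp_bpfKnockData{Srcip: 0x0A000001, Dstport: 1234}
	fmt.Println(intToIP(event.Srcip), event.Dstport) // prints: 10.0.0.1 1234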
@@ -89,23 +145,23 @@ func handlePacket(packet gopacket.Packet) {
 	// if it's wrong, reset progress
 	// TBD: vulnerable to sweep attack - this won't be triggered if a wrong packet doesn't match BPF filter
 	// TBD: make the sweep attack fix on by default, but configurable to be off to allow for limited BPF filter for extremely low overhead as compromise.
-	if port != client.Sequence[client.Progress] {
-		delete(clients, srcip)
-		fmt.Printf("port '%d' is in sequence, but came at unexpected order - resetting progress", port)
+	if knockEvent.Dstport != client.Sequence[client.Progress] {
+		delete(clients, knockEvent.Srcip)
+		fmt.Printf("port '%d' is in sequence, but came at unexpected order - resetting progress", knockEvent.Dstport)
 		return
 	}

 	// Client increases progress through sequence and checks if sequence is completed
 	client.Progress++
 	if client.Progress >= len(client.Sequence) {
-		delete(clients, srcip)
-		handleSuccess(srcip) // The magic function, the knock is completed
+		delete(clients, knockEvent.Srcip)
+		handleSuccess(intToIP(knockEvent.Srcip)) // The magic function, the knock is completed
 		return
 	}
 }

 // Used to rotate the authentic port knock sequence
-func rotateSequence(handle *pcap.Handle) {
+func rotateSequence() {
 	for {

 		// Generate new knock sequences with time skew support
@@ -120,12 +176,6 @@ func rotateSequence(handle *pcap.Handle) {
 		}
 		fmt.Println("New sequences:", knockSequences)

-		// Set BPF filter
-		err := setPacketFilter(handle)
-		if err != nil {
-			log.Printf("failed to change packet filter: %v", err)
-		}
-
 		// Sleep until next 30 second offset
 		time.Sleep(time.Until(time.Now().Truncate(time.Second * 30).Add(time.Second * 30)))

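Side note, not part of the diff: the Truncate/Add pattern above pins each rotation to the next 30-second wall-clock boundary, matching the TOTP step. A small illustration of the arithmetic with an arbitrary timestamp:

	// illustrative only: 12:00:41.2 truncates to 12:00:30, advances to 12:01:00,
	// so the goroutine sleeps ~18.8s and wakes aligned to the 30-second window.
	now := time.Date(2024, 1, 1, 12, 0, 41, 200e6, time.UTC)
	next := now.Truncate(30 * time.Second).Add(30 * time.Second)
	fmt.Println(next.Sub(now)) // 18.8s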
@@ -134,31 +184,10 @@ func rotateSequence(handle *pcap.Handle) {
 	}
 }

-// Given a pcap handle and list of authentic port knock sequences, configures a BPF filter
-func setPacketFilter(handle *pcap.Handle) error {
-	filter := "udp && ("
-	for i, knockSequence := range knockSequences {
-		for j, port := range knockSequence.PortSequence {
-			if i == 0 && j == 0 {
-				filter += fmt.Sprint("port ", port)
-			} else {
-				filter += fmt.Sprint(" || port ", port)
-			}
-		}
-	}
-	filter += ")"
-	err := handle.SetBPFFilter(filter)
-	if err != nil {
-		return fmt.Errorf("failed to set BPF filter '%s': %v", filter, err)
-	}
-	return nil
-}
-
 // TBD: Implement - this is a temporary routine to demonstrate an application
-func handleSuccess(srcip string) {
+func handleSuccess(srcip net.IP) {
 	fmt.Println("Success for ", srcip)

-	cmd := exec.Command("iptables", "-A", "INPUT", "-p", "tcp", "-s", srcip, "--dport", "22", "-j", "ACCEPT")
+	cmd := exec.Command("iptables", "-A", "INPUT", "-p", "tcp", "-s", fmt.Sprint(srcip), "--dport", "22", "-j", "ACCEPT")
 	err := cmd.Run()
 	if err != nil {
 		log.Printf("failed to execute iptables command for '%s': %v", srcip, err)
hypd/server/vmlinux.h (new file, 112576 lines)
File diff suppressed because it is too large