Commit 8c050664 authored by Jordan Sissel

Merge pull request #156 from elasticsearch/gofmt

gofmt -tabs=false -tabwidth=2
parents d2ba8895 ae65de48
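
The command in the commit title reformats every Go source file in place; the hunks below are its output. As a rough sketch of the rules at work — the standalone program here is invented for illustration, and only its idle-flush-time flag mirrors the one in main.go — gofmt settles code into sorted import blocks, expanded one-line if bodies, and compressed spacing inside nested expressions such as the diff's `5 * time.Second` becoming `5*time.Second`:

// Illustrative sketch only -- not a file from this repository.
package main

import (
  // gofmt keeps import specs sorted alphabetically within a block.
  "flag"
  "fmt"
  "time"
)

// Spacing inside nested expressions is compressed: as in the main.go hunk
// below, `5 * time.Second` inside a call is printed as `5*time.Second`.
var idle = flag.Duration("idle-flush-time", 5*time.Second, "example flag")

func main() {
  flag.Parse()
  // One-line forms such as `if err != nil { return }` are expanded so
  // every statement sits on its own line inside the braces.
  if *idle <= 0 {
    fmt.Println("idle timeout must be positive")
    return
  }
  fmt.Println("idle timeout:", *idle)
}
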
@@ -2,8 +2,8 @@ package main
 import (
   "encoding/json"
-  "os"
   "log"
+  "os"
   "time"
 )
......
 package main
 import (
-  "testing"
   "encoding/json"
+  "testing"
 )
 type FileConfig struct {
@@ -14,8 +14,16 @@ func TestJSONLoading(t *testing.T) {
   var f File
   err := json.Unmarshal([]byte("{ \"paths\": [ \"/var/log/fail2ban.log\" ], \"fields\": { \"type\": \"fail2ban\" } }"), &f)
-  if err != nil { t.Fatalf("json.Unmarshal failed") }
-  if len(f.Paths) != 1 { t.FailNow() }
-  if f.Paths[0] != "/var/log/fail2ban.log" { t.FailNow() }
-  if f.Fields["type"] != "fail2ban" { t.FailNow() }
+  if err != nil {
+    t.Fatalf("json.Unmarshal failed")
+  }
+  if len(f.Paths) != 1 {
+    t.FailNow()
+  }
+  if f.Paths[0] != "/var/log/fail2ban.log" {
+    t.FailNow()
+  }
+  if f.Fields["type"] != "fail2ban" {
+    t.FailNow()
+  }
 }
@@ -32,4 +32,3 @@ func is_file_renamed(file string, info os.FileInfo, fileinfo map[string]os.FileI
   }
   return false
 }
 package main
 import (
-  "os" // for File and friends
-  "log"
+  "bufio"
   "bytes"
   "io"
-  "bufio"
+  "log"
+  "os" // for File and friends
   "time"
 )
......
 package main
 import (
+  "flag"
   "log"
   "os"
-  "time"
-  "flag"
   "runtime/pprof"
+  "time"
 )
 var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
 var spool_size = flag.Uint64("spool-size", 1024, "Maximum number of events to spool before a flush is forced.")
-var idle_timeout = flag.Duration("idle-flush-time", 5 * time.Second, "Maximum time to wait for a full spool before flushing anyway")
+var idle_timeout = flag.Duration("idle-flush-time", 5*time.Second, "Maximum time to wait for a full spool before flushing anyway")
 var config_file = flag.String("config", "", "The config file to load")
 var use_syslog = flag.Bool("log-to-syslog", false, "Log to syslog instead of stdout")
 var from_beginning = flag.Bool("from-beginning", false, "Read new files from the beginning, instead of the end")
......
 package main
 import (
-  "time"
-  "path/filepath"
   "encoding/json"
-  "os"
   "log"
+  "os"
+  "path/filepath"
+  "time"
 )
 func Prospect(fileconfig FileConfig, output chan *FileEvent) {
@@ -49,7 +49,9 @@ func resume_tracking(fileconfig FileConfig, fileinfo map[string]os.FileInfo, out
     // if the file is the same inode/device as we last saw,
     // start a harvester on it at the last known position
     info, err := os.Stat(path)
-    if err != nil { continue }
+    if err != nil {
+      continue
+    }
     if is_file_same(path, info, state) {
       // same file, seek to last known position
@@ -58,7 +60,7 @@ func resume_tracking(fileconfig FileConfig, fileinfo map[string]os.FileInfo, out
     for _, pathglob := range fileconfig.Paths {
       match, _ := filepath.Match(pathglob, path)
       if match {
-        harvester := Harvester{Path: path, Fields: fileconfig.Fields, Offset: state.Offset }
+        harvester := Harvester{Path: path, Fields: fileconfig.Fields, Offset: state.Offset}
         go harvester.Harvest(output)
         break
       }
......
 package main
 import (
-  "math/rand"
   "bytes"
-  "encoding/binary"
-  "encoding/pem"
+  "compress/zlib"
   "crypto/tls"
   "crypto/x509"
-  "net"
+  "encoding/binary"
+  "encoding/pem"
+  "fmt"
   "io"
-  "os"
   "io/ioutil"
   "log"
-  "time"
-  "compress/zlib"
-  "strconv"
+  "math/rand"
+  "net"
+  "os"
   "regexp"
-  "fmt"
+  "strconv"
+  "time"
 )
 var hostname string
@@ -65,24 +65,40 @@ func Publishv1(input chan []*FileEvent,
       socket = connect(config)
     }
-    SendPayload: for {
+  SendPayload:
+    for {
       // Abort if our whole request takes longer than the configured
       // network timeout.
       socket.SetDeadline(time.Now().Add(config.timeout))
       // Set the window size to the length of this payload in events.
      _, err = socket.Write([]byte("1W"))
-      if err != nil { oops(err); continue }
+      if err != nil {
+        oops(err)
+        continue
+      }
       binary.Write(socket, binary.BigEndian, uint32(len(events)))
-      if err != nil { oops(err); continue }
+      if err != nil {
+        oops(err)
+        continue
+      }
       // Write compressed frame
       socket.Write([]byte("1C"))
-      if err != nil { oops(err); continue }
+      if err != nil {
+        oops(err)
+        continue
+      }
       binary.Write(socket, binary.BigEndian, uint32(len(compressed_payload)))
-      if err != nil { oops(err); continue }
+      if err != nil {
+        oops(err)
+        continue
+      }
       _, err = socket.Write(compressed_payload)
-      if err != nil { oops(err); continue }
+      if err != nil {
+        oops(err)
+        continue
+      }
       // read ack
       response := make([]byte, 0, 6)
@@ -127,7 +143,9 @@ func connect(config *NetworkConfig) (socket *tls.Conn) {
     tlsconfig.RootCAs = x509.NewCertPool()
     pemdata, err := ioutil.ReadFile(config.SSLCA)
-    if err != nil { log.Fatalf("Failure reading CA certificate: %s\n", err) }
+    if err != nil {
+      log.Fatalf("Failure reading CA certificate: %s\n", err)
+    }
     block, _ := pem.Decode(pemdata)
     if block == nil {
@@ -146,7 +164,7 @@ func connect(config *NetworkConfig) (socket *tls.Conn) {
   for {
     // Pick a random server from the list.
-    hostport := config.Servers[rand.Int() % len(config.Servers)]
+    hostport := config.Servers[rand.Int()%len(config.Servers)]
     submatch := hostport_re.FindSubmatch([]byte(hostport))
     if submatch == nil {
       log.Fatalf("Invalid host:port given: %s", hostport)
@@ -156,12 +174,12 @@ func connect(config *NetworkConfig) (socket *tls.Conn) {
     addresses, err := net.LookupHost(host)
     if err != nil {
-      log.Printf("DNS lookup failure \"%s\": %s\n", host, err);
+      log.Printf("DNS lookup failure \"%s\": %s\n", host, err)
       time.Sleep(1 * time.Second)
       continue
     }
-    address := addresses[rand.Int() % len(addresses)]
+    address := addresses[rand.Int()%len(addresses)]
     addressport := fmt.Sprintf("%s:%s", address, port)
     log.Printf("Connecting to %s (%s) \n", addressport, host)
@@ -198,13 +216,13 @@ func writeDataFrame(event *FileEvent, sequence uint32, output io.Writer) {
   // sequence number
   binary.Write(output, binary.BigEndian, uint32(sequence))
   // 'pair' count
-  binary.Write(output, binary.BigEndian, uint32(len(*event.Fields) + 4))
+  binary.Write(output, binary.BigEndian, uint32(len(*event.Fields)+4))
   writeKV("file", *event.Source, output)
   writeKV("host", hostname, output)
   writeKV("offset", strconv.FormatInt(event.Offset, 10), output)
   writeKV("line", *event.Text, output)
-  for k, v := range(*event.Fields) {
+  for k, v := range *event.Fields {
     writeKV(k, v, output)
   }
 }
......
@@ -34,4 +34,3 @@ func Registrar(input chan []*FileEvent) {
     }
   }
 }
 // +build !windows
 package main
 import (
   "encoding/json"
-  "os"
   "log"
+  "os"
 )
 func WriteRegistry(state map[string]*FileState, path string) {
......
@@ -2,8 +2,8 @@ package main
 import (
   "encoding/json"
-  "os"
   "log"
+  "os"
 )
 func WriteRegistry(state map[string]*FileState, path string) {
......
@@ -24,7 +24,7 @@ func Spool(input chan *FileEvent,
   next_flush_time := time.Now().Add(idle_timeout)
   for {
     select {
-    case event := <- input:
+    case event := <-input:
       //append(spool, event)
       spool[spool_i] = event
       spool_i++
@@ -40,7 +40,7 @@ func Spool(input chan *FileEvent,
        spool_i = 0
      }
-    case <- ticker.C:
+    case <-ticker.C:
       //fmt.Println("tick")
       if now := time.Now(); now.After(next_flush_time) {
         // if current time is after the next_flush_time, flush!
......
 // +build !windows
 package main
 import (
   "log"
   "log/syslog"
 )
 func configureSyslog() {
-  writer, err := syslog.New(syslog.LOG_INFO | syslog.LOG_DAEMON, "logstash-forwarder")
+  writer, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "logstash-forwarder")
   if err != nil {
     log.Fatalf("Failed to open syslog: %s\n", err)
     return
......
@@ -3,5 +3,5 @@ package main
 import "log"
 func configureSyslog() {
-  log.Printf("Logging to syslog not supported on this platform\n");
+  log.Printf("Logging to syslog not supported on this platform\n")
 }