Browse Source

Initial commit

master
anggabayu 6 years ago
commit
1aa1cc1360
  1. BIN
      .DS_Store
  2. 21
      lumberjack/LICENSE
  3. 11
      lumberjack/chown.go
  4. 19
      lumberjack/chown_linux.go
  5. 543
      lumberjack/lumberjack.go
  6. 247
      model/device.go
  7. 468
      model/message.go
  8. 62
      model/message_test.go
  9. 78
      model/server.go
  10. BIN
      parser/.DS_Store
  11. 573
      parser/devicePrs.go
  12. 445
      parser/devicePrsInternal.go
  13. 409
      parser/devicePrs_test.go
  14. BIN
      server/.DS_Store
  15. 325
      server/devicePrsNet.go
  16. BIN
      server/exe/.DS_Store
  17. 15947
      server/exe/applog.log
  18. BIN
      server/exe/applog.sqlite
  19. 60
      server/exe/config-server.hjson
  20. 160
      server/exe/config.hjson
  21. 102
      server/exe/deviceServer.go
  22. 133
      server/ruptela_test.go
  23. 172
      utility/logwriter.go
  24. 39
      utility/watcher/config.hjson
  25. 340
      utility/watcher/log_watcher.go
  26. 20
      vendor/github.com/gansidui/gotcp/LICENSE
  27. 25
      vendor/github.com/gansidui/gotcp/README.md
  28. 215
      vendor/github.com/gansidui/gotcp/conn.go
  29. 42
      vendor/github.com/gansidui/gotcp/examples/echo/client/client.go
  30. 69
      vendor/github.com/gansidui/gotcp/examples/echo/echoProtocol.go
  31. 70
      vendor/github.com/gansidui/gotcp/examples/echo/server/server.go
  32. 50
      vendor/github.com/gansidui/gotcp/examples/telnet/server/server.go
  33. 115
      vendor/github.com/gansidui/gotcp/examples/telnet/telnetProtocol.go
  34. 13
      vendor/github.com/gansidui/gotcp/protocol.go
  35. 68
      vendor/github.com/gansidui/gotcp/server.go

BIN
.DS_Store vendored

Binary file not shown.

21
lumberjack/LICENSE

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Nate Finch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

11
lumberjack/chown.go

@ -0,0 +1,11 @@
// +build !linux

package lumberjack

import (
	"os"
)

// chown is a no-op on every platform except linux (see chown_linux.go).
// The blank line after the build tag above is required for the constraint
// to be recognized by the Go toolchain.
func chown(_ string, _ os.FileInfo) error {
	return nil
}

19
lumberjack/chown_linux.go

@ -0,0 +1,19 @@
package lumberjack

import (
	"os"
	"syscall"
)

// os_Chown is a var so we can mock it out during tests.
var os_Chown = os.Chown

// chown makes name owned by the same uid/gid as the file described by info.
// It first ensures the file exists (creating/truncating it with info's mode)
// so the chown has a target.
func chown(name string, info os.FileInfo) error {
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
	if err != nil {
		return err
	}
	f.Close()
	// On linux info.Sys() is always a *syscall.Stat_t, so this assertion
	// cannot panic in practice.
	stat := info.Sys().(*syscall.Stat_t)
	return os_Chown(name, int(stat.Uid), int(stat.Gid))
}

543
lumberjack/lumberjack.go

@ -0,0 +1,543 @@
// Package lumberjack provides a rolling logger.
//
// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
// thusly:
//
// import "gopkg.in/natefinch/lumberjack.v2"
//
// The package name remains simply lumberjack, and the code resides at
// https://github.com/natefinch/lumberjack under the v2.0 branch.
//
// Lumberjack is intended to be one part of a logging infrastructure.
// It is not an all-in-one solution, but instead is a pluggable
// component at the bottom of the logging stack that simply controls the files
// to which logs are written.
//
// Lumberjack plays well with any logging package that can write to an
// io.Writer, including the standard library's log package.
//
// Lumberjack assumes that only one process is writing to the output files.
// Using the same lumberjack configuration from multiple processes on the same
// machine will result in improper behavior.
package lumberjack
import (
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
)
const (
	// backupTimeFormat is the reference layout used to embed the rotation
	// time into backup file names.
	backupTimeFormat = "2006-01-02T15-04-05.000"
	// compressSuffix is appended to compressed backup files.
	compressSuffix = ".gz"
	// defaultMaxSize is the rotation threshold (in megabytes) used when
	// Logger.MaxSize is zero.
	defaultMaxSize = 100
)

// ensure we always implement io.WriteCloser
var _ io.WriteCloser = (*Logger)(nil)

// Logger is an io.WriteCloser that writes to the specified filename.
//
// Logger opens or creates the logfile on first Write. If the file exists and
// is less than MaxSize megabytes, lumberjack will open and append to that file.
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
// by putting the current time in a timestamp in the name immediately before the
// file's extension (or the end of the filename if there's no extension). A new
// log file is then created using original filename.
//
// Whenever a write would cause the current log file exceed MaxSize megabytes,
// the current file is closed, renamed, and a new log file created with the
// original name. Thus, the filename you give Logger is always the "current" log
// file.
//
// Backups use the log file name given to Logger, in the form
// `name-timestamp.ext` where name is the filename without the extension,
// timestamp is the time at which the log was rotated formatted with the
// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
// original extension. For example, if your Logger.Filename is
// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would
// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`
//
// Cleaning Up Old Log Files
//
// Whenever a new logfile gets created, old log files may be deleted. The most
// recent files according to the encoded timestamp will be retained, up to a
// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
// with an encoded timestamp older than MaxAge days are deleted, regardless of
// MaxBackups. Note that the time encoded in the timestamp is the rotation
// time, which may differ from the last time that file was written to.
//
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
type Logger struct {
	// Filename is the file to write logs to. Backup log files will be retained
	// in the same directory. It uses <processname>-lumberjack.log in
	// os.TempDir() if empty.
	Filename string `json:"filename" yaml:"filename"`

	// MaxSize is the maximum size in megabytes of the log file before it gets
	// rotated. It defaults to 100 megabytes.
	MaxSize int `json:"maxsize" yaml:"maxsize"`

	// MaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename. Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// savings, leap seconds, etc. The default is not to remove old log files
	// based on age.
	MaxAge int `json:"maxage" yaml:"maxage"`

	// MaxBackups is the maximum number of old log files to retain. The default
	// is to retain all old log files (though MaxAge may still cause them to get
	// deleted.)
	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`

	// LocalTime determines if the time used for formatting the timestamps in
	// backup files is the computer's local time. The default is to use UTC
	// time.
	LocalTime bool `json:"localtime" yaml:"localtime"`

	// Compress determines if the rotated log files should be compressed
	// using gzip. The default is not to perform compression.
	Compress bool `json:"compress" yaml:"compress"`

	// size is the number of bytes written to the current log file.
	size int64
	// file is the currently open log file; nil until the first Write.
	file *os.File
	// mu guards size, file and all rotation state.
	mu sync.Mutex

	// millCh signals the background goroutine that compresses and removes
	// old backups; startMill ensures that goroutine is started exactly once.
	millCh    chan bool
	startMill sync.Once
}

var (
	// currentTime exists so it can be mocked out by tests.
	currentTime = time.Now

	// os_Stat exists so it can be mocked out by tests.
	os_Stat = os.Stat

	// megabyte is the conversion factor between MaxSize and bytes. It is a
	// variable so tests can mock it out and not need to write megabytes of data
	// to disk.
	megabyte = 1024 * 1024
)
// Write implements io.Writer. If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *Logger) Write(p []byte) (n int, err error) {
	l.mu.Lock()
	defer l.mu.Unlock()

	// A single write may never exceed the rotation threshold.
	writeLen := int64(len(p))
	if writeLen > l.max() {
		return 0, fmt.Errorf(
			"write length %d exceeds maximum file size %d", writeLen, l.max(),
		)
	}

	// Lazily open (or create) the log file on first use.
	if l.file == nil {
		if err = l.openExistingOrNew(len(p)); err != nil {
			return 0, err
		}
	}

	// Rotate before this write would push the file past MaxSize.
	if l.size+writeLen > l.max() {
		if err := l.rotate(); err != nil {
			return 0, err
		}
	}

	// NOTE(review): the original also executed `n, err = os.Stderr.Write(p)`
	// here and immediately overwrote both results with the file write below,
	// duplicating every log line to stderr while discarding that write's
	// outcome — almost certainly a debugging leftover, so it was removed.
	n, err = l.file.Write(p)
	l.size += int64(n)

	return n, err
}
// Close implements io.Closer, and closes the current logfile.
func (l *Logger) Close() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.close()
}

// close closes the file if it is open. Callers must hold l.mu.
func (l *Logger) close() error {
	if l.file == nil {
		return nil
	}
	// Detach the handle first so the Logger never retains a closed file.
	f := l.file
	l.file = nil
	return f.Close()
}
// Rotate causes Logger to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates compression and removal of old log
// files according to the configuration.
func (l *Logger) Rotate() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.rotate()
}

// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// post-rotation processing and removal.
//
// Callers must hold l.mu.
func (l *Logger) rotate() error {
	if err := l.close(); err != nil {
		return err
	}
	if err := l.openNew(); err != nil {
		return err
	}
	// Kick off asynchronous compression/cleanup of old backups.
	l.mill()
	return nil
}
// openNew opens a new log file for writing, moving any old log file out of the
// way. This methods assumes the file has already been closed.
//
// Callers must hold l.mu.
func (l *Logger) openNew() error {
	err := os.MkdirAll(l.dir(), 0744)
	if err != nil {
		return fmt.Errorf("can't make directories for new logfile: %s", err)
	}

	name := l.filename()
	mode := os.FileMode(0644)
	info, err := os_Stat(name)
	if err == nil {
		// Copy the mode off the old logfile.
		mode = info.Mode()
		// move the existing file
		newname := backupName(name, l.LocalTime)
		if err := os.Rename(name, newname); err != nil {
			return fmt.Errorf("can't rename log file: %s", err)
		}

		// this is a no-op anywhere but linux
		if err := chown(name, info); err != nil {
			return err
		}
	}

	// we use truncate here because this should only get called when we've moved
	// the file ourselves. if someone else creates the file in the meantime,
	// just wipe out the contents.
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
	if err != nil {
		return fmt.Errorf("can't open new logfile: %s", err)
	}
	l.file = f
	l.size = 0
	return nil
}
// backupName derives a backup filename from name by inserting the current
// rotation timestamp between the base name and the extension. The local
// time zone is used when local is true, otherwise UTC.
func backupName(name string, local bool) string {
	dir := filepath.Dir(name)
	base := filepath.Base(name)
	ext := filepath.Ext(base)
	stem := base[:len(base)-len(ext)]

	t := currentTime()
	if !local {
		t = t.UTC()
	}
	stamp := t.Format(backupTimeFormat)

	return filepath.Join(dir, stem+"-"+stamp+ext)
}
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
//
// Callers must hold l.mu.
func (l *Logger) openExistingOrNew(writeLen int) error {
	// Trigger a cleanup/compression pass over existing backups.
	l.mill()

	filename := l.filename()
	info, err := os_Stat(filename)
	if os.IsNotExist(err) {
		return l.openNew()
	}
	if err != nil {
		return fmt.Errorf("error getting log file info: %s", err)
	}

	// Appending this write would reach MaxSize: rotate instead of appending.
	if info.Size()+int64(writeLen) >= l.max() {
		return l.rotate()
	}

	file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		// if we fail to open the old log file for some reason, just ignore
		// it and open a new log file.
		return l.openNew()
	}
	l.file = file
	l.size = info.Size()
	return nil
}
// filename returns the configured log file name, falling back to
// <processname>-lumberjack.log in the OS temp directory when unset.
func (l *Logger) filename() string {
	if l.Filename == "" {
		fallback := filepath.Base(os.Args[0]) + "-lumberjack.log"
		return filepath.Join(os.TempDir(), fallback)
	}
	return l.Filename
}
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *Logger) millRunOnce() error {
	// Nothing to do when neither pruning nor compression is configured.
	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
		return nil
	}

	files, err := l.oldLogFiles()
	if err != nil {
		return err
	}

	var compress, remove []logInfo

	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
		preserved := make(map[string]bool)
		var remaining []logInfo
		for _, f := range files {
			// Only count the uncompressed log file or the
			// compressed log file, not both.
			fn := f.Name()
			if strings.HasSuffix(fn, compressSuffix) {
				fn = fn[:len(fn)-len(compressSuffix)]
			}
			preserved[fn] = true
			// files is sorted newest-first (see oldLogFiles), so once the
			// distinct-name count exceeds MaxBackups the rest is surplus.
			if len(preserved) > l.MaxBackups {
				remove = append(remove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}
	if l.MaxAge > 0 {
		// A "day" here is a fixed 24 hours, regardless of DST transitions.
		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
		cutoff := currentTime().Add(-1 * diff)

		var remaining []logInfo
		for _, f := range files {
			if f.timestamp.Before(cutoff) {
				remove = append(remove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}

	if l.Compress {
		for _, f := range files {
			if !strings.HasSuffix(f.Name(), compressSuffix) {
				compress = append(compress, f)
			}
		}
	}

	// Best effort: keep going on individual failures and report the first
	// error encountered.
	for _, f := range remove {
		errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
		if err == nil && errRemove != nil {
			err = errRemove
		}
	}
	for _, f := range compress {
		fn := filepath.Join(l.dir(), f.Name())
		errCompress := compressLogFile(fn, fn+compressSuffix)
		if err == nil && errCompress != nil {
			err = errCompress
		}
	}

	return err
}
// millRun runs in a goroutine to manage post-rotation compression and removal
// of old log files.
func (l *Logger) millRun() {
	// `for range` replaces the non-idiomatic `for _ = range` of the
	// original (the element value was unused).
	for range l.millCh {
		// what am I going to do, log this?
		_ = l.millRunOnce()
	}
}

// mill performs post-rotation compression and removal of stale log files,
// starting the mill goroutine if necessary.
func (l *Logger) mill() {
	l.startMill.Do(func() {
		l.millCh = make(chan bool, 1)
		go l.millRun()
	})
	// Non-blocking send: if a mill cycle is already pending, skip this one.
	select {
	case l.millCh <- true:
	default:
	}
}
// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted by the rotation timestamp
// encoded in the file name, newest first (see byFormatTime).
func (l *Logger) oldLogFiles() ([]logInfo, error) {
	files, err := ioutil.ReadDir(l.dir())
	if err != nil {
		return nil, fmt.Errorf("can't read log file directory: %s", err)
	}
	logFiles := []logInfo{}

	prefix, ext := l.prefixAndExt()

	for _, f := range files {
		if f.IsDir() {
			continue
		}
		// Accept plain backups (prefix-STAMP.ext)...
		if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
			logFiles = append(logFiles, logInfo{t, f})
			continue
		}
		// ...and compressed ones (prefix-STAMP.ext.gz).
		if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
			logFiles = append(logFiles, logInfo{t, f})
			continue
		}
		// error parsing means that the suffix at the end was not generated
		// by lumberjack, and therefore it's not a backup file.
	}

	sort.Sort(byFormatTime(logFiles))

	return logFiles, nil
}
// timeFromName extracts the rotation timestamp embedded in filename by
// stripping the expected prefix and extension, so unrelated file names can
// never be misinterpreted by time.Parse.
func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
	switch {
	case !strings.HasPrefix(filename, prefix):
		return time.Time{}, errors.New("mismatched prefix")
	case !strings.HasSuffix(filename, ext):
		return time.Time{}, errors.New("mismatched extension")
	}
	stamp := filename[len(prefix) : len(filename)-len(ext)]
	return time.Parse(backupTimeFormat, stamp)
}
// max returns the rotation threshold in bytes, substituting defaultMaxSize
// when MaxSize is unset.
func (l *Logger) max() int64 {
	size := l.MaxSize
	if size == 0 {
		size = defaultMaxSize
	}
	return int64(size) * int64(megabyte)
}

// dir returns the directory holding the current log file.
func (l *Logger) dir() string {
	return filepath.Dir(l.filename())
}
// prefixAndExt splits the Logger's base filename into the backup-name
// prefix (the stem plus a trailing "-") and the file extension.
func (l *Logger) prefixAndExt() (prefix, ext string) {
	base := filepath.Base(l.filename())
	ext = filepath.Ext(base)
	stem := base[:len(base)-len(ext)]
	return stem + "-", ext
}
// compressLogFile compresses the given log file, removing the
// uncompressed log file if successful.
func compressLogFile(src, dst string) (err error) {
	f, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open log file: %v", err)
	}
	// Also closed explicitly below; this deferred double-Close is a
	// harmless safety net whose error is deliberately ignored.
	defer f.Close()

	fi, err := os_Stat(src)
	if err != nil {
		return fmt.Errorf("failed to stat log file: %v", err)
	}

	// Match the source file's owner (no-op on non-linux; on linux this
	// also creates dst so the chown has a target).
	if err := chown(dst, fi); err != nil {
		return fmt.Errorf("failed to chown compressed log file: %v", err)
	}

	// If this file already exists, we presume it was created by
	// a previous attempt to compress the log file.
	gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
	if err != nil {
		return fmt.Errorf("failed to open compressed log file: %v", err)
	}
	defer gzf.Close()

	gz := gzip.NewWriter(gzf)

	// On any failure below, remove the partial dst and wrap the error.
	// err here is the named return value, assigned by the explicit
	// `return err` statements that follow.
	defer func() {
		if err != nil {
			os.Remove(dst)
			err = fmt.Errorf("failed to compress log file: %v", err)
		}
	}()

	if _, err := io.Copy(gz, f); err != nil {
		return err
	}
	if err := gz.Close(); err != nil {
		return err
	}
	if err := gzf.Close(); err != nil {
		return err
	}

	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Remove(src); err != nil {
		return err
	}

	return nil
}
// logInfo pairs a backup file's metadata with the rotation timestamp
// parsed from its name.
type logInfo struct {
	timestamp time.Time
	os.FileInfo
}

// byFormatTime implements sort.Interface, ordering backups newest-first
// by the timestamp encoded in their file names.
type byFormatTime []logInfo

func (b byFormatTime) Len() int      { return len(b) }
func (b byFormatTime) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byFormatTime) Less(i, j int) bool {
	return b[i].timestamp.After(b[j].timestamp)
}

247
model/device.go

@ -0,0 +1,247 @@
/*
Model for standard/IU records
*/
package model
import (
"bytes"
"encoding/gob"
"encoding/json"
"time"
)
/*
Standard tags for several properties
*/
const (
	// Standard record property tags (see DeviceIo* Tag fields).
	TAG_TIMESTAMP = 1
	TAG_PRIORITY  = 2
	TAG_LONGITUDE = 3
	TAG_LATITUDE  = 4
	TAG_ALTITUDE  = 5
	TAG_ANGLE     = 6
	TAG_SATELITES = 7
	TAG_SPEED     = 8
	TAG_HDOP      = 9
	TAG_EVENT_SRC = 10

	// Device connectivity states.
	DEV_ONLINE  = 1
	DEV_OFFLINE = 0

	// Ruptela protocol command codes paired with their response codes.
	// NOTE(review): CMD_RUP_SCARDZ_RESP duplicates CMD_RUP_SCARD_RESP (107)
	// and CMD_RUP_TACHOI_RESP duplicates CMD_RUP_TACHOD_RESP (111) — confirm
	// against the Ruptela protocol specification whether that is intended.
	CMD_RUP_MODULE      = 0
	CMD_RUP_RECORD      = 1
	CMD_RUP_RECORD_RESP = 100
	CMD_RUP_CONFIG      = 2
	CMD_RUP_CONFIG_RESP = 102
	CMD_RUP_VER         = 3
	CMD_RUP_VER_RESP    = 103
	CMD_RUP_FIRMW       = 4
	CMD_RUP_FIRMW_RESP  = 104
	CMD_RUP_SCARD       = 5
	CMD_RUP_SCARD_RESP  = 107
	CMD_RUP_SCARDZ      = 6
	CMD_RUP_SCARDZ_RESP = 107
	CMD_RUP_SMGPRS      = 7
	CMD_RUP_SMGPRS_RESP = 108
	CMD_RUP_DIAG        = 9
	CMD_RUP_DIAG_RESP   = 109
	CMD_RUP_TACHOC      = 10
	CMD_RUP_TACHOC_RESP = 110
	CMD_RUP_TACHOD      = 11
	CMD_RUP_TACHOD_RESP = 111
	CMD_RUP_TACHOI      = 12
	CMD_RUP_TACHOI_RESP = 111
	CMD_RUP_TRCH        = 14
	CMD_RUP_TRCH_RESP   = 114
	CMD_RUP_GARMR       = 30
	CMD_RUP_GARMR_RESP  = 130
	CMD_RUP_GARMD       = 31
	CMD_RUP_GARMD_RESP  = 131
	CMD_RUP_CONN_PARM   = 105
	CMD_RUP_ODO         = 106
)
// Device and standard tag types.
type DevTag uint16
type StdTag uint16

// IOMap maps a device-specific tag to its standard tag.
type IOMap map[DevTag]StdTag

// DeviceData is a raw command byte plus its payload as received from a device.
type DeviceData struct {
	Command byte
	Data    []byte
}

// DeviceIo8 is the device I/O record for 8-bit values.
type DeviceIo8 struct {
	Tag   StdTag
	Value int8
}

// DeviceIo16 is the device I/O record for 16-bit values.
type DeviceIo16 struct {
	Tag   StdTag
	Value int16
}

// DeviceIo32 is the device I/O record for 32-bit values.
type DeviceIo32 struct {
	Tag   StdTag
	Value int32
}

// DeviceIo64 is the device I/O record for 64-bit values.
type DeviceIo64 struct {
	Tag   StdTag
	Value int64
}

// DeviceIoReal is the device I/O record for real/double values.
type DeviceIoReal struct {
	Tag   StdTag
	Value float64
}

// DeviceIoString is the device I/O record for string values.
type DeviceIoString struct {
	Tag   StdTag
	Value string
}

// DeviceIo groups a record's I/O values by their width/type.
type DeviceIo struct {
	V8    []DeviceIo8
	V16   []DeviceIo16
	V32   []DeviceIo32
	V64   []DeviceIo64
	VReal []DeviceIoReal
	VStr  []DeviceIoString
}

// DeviceRecord is one standard IU record: a position fix plus I/O data.
// NOTE(review): the misspelled field name Satelites is kept as-is because
// it is part of the serialized (JSON/gob) interface.
type DeviceRecord struct {
	Timestamp time.Time
	Priority  byte
	Longitude float64
	Latitude  float64
	Altitude  float32
	Angle     float32
	Satelites byte
	Speed     float32
	IO        DeviceIo
}

// DeviceDescriptor holds basic device information.
type DeviceDescriptor struct {
	IMEI  uint64
	QoS   byte
	Id    uint64
	Model string
}

// DeviceRecords is a collection of data read from a single device.
type DeviceRecords struct {
	TimeReceived time.Time
	Descriptor   DeviceDescriptor
	Records      []DeviceRecord
}

// DeviceIoMapping associates a device with its tag mapping.
type DeviceIoMapping struct {
	Descriptor DeviceDescriptor
	IOMapping  map[DevTag]StdTag
}

// DisplayRequest carries data destined for a device display port.
type DisplayRequest struct {
	Timestamp time.Time
	IMEI      uint64
	PortID    byte
	Data      []byte
}

// Parser is the interface that must be implemented by each device parser.
type Parser interface {
	GetMaxPacketSize() int
	GetMinPacketSize() int
	GetBufferSize() int
	GetStream() []byte
	Payload() []byte
	GetError() error
	GetCommand() byte
	// GetRecords() *DeviceRecords
	GetIMEI() uint64
	GetClientResponse() []byte
	ExecuteAsync()
}

// RecordsConverter converts device records to and from JSON strings and
// gob byte streams.
//
// NOTE(review): receivers were renamed from the non-idiomatic `self` to the
// conventional short form `rc`; the exported interface is unchanged.
type RecordsConverter struct {
}

// ToJson serializes records into a JSON string.
func (rc RecordsConverter) ToJson(recs *DeviceRecords) (jstr string, err error) {
	bin, err := json.Marshal(recs)
	if err != nil {
		return "", err
	}
	return string(bin), nil
}

// FromJson parses a JSON string into device records.
func (rc RecordsConverter) FromJson(jstr string) (recs *DeviceRecords, err error) {
	var devRec DeviceRecords
	if err := json.Unmarshal([]byte(jstr), &devRec); err != nil {
		return nil, err
	}
	return &devRec, nil
}

// ToStream encodes device records into a gob byte stream.
func (rc RecordsConverter) ToStream(recs *DeviceRecords) (stream []byte, err error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(recs); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// FromStream decodes device records from a gob byte stream.
func (rc RecordsConverter) FromStream(stream []byte) (recs *DeviceRecords, err error) {
	var dpkt DeviceRecords
	if err := gob.NewDecoder(bytes.NewBuffer(stream)).Decode(&dpkt); err != nil {
		return nil, err
	}
	return &dpkt, nil
}

468
model/message.go

@ -0,0 +1,468 @@
//Message model
/*
Table "public.bcs_list"
Column | Type | Modifiers
----------------+-----------------------------+-------------------------------------------------------
id | bigint | not null default nextval('bcs_list_id_seq'::regclass)
bcs_message_id | bigint |
imei | character varying(25) |
sent_ts | timestamp without time zone |
delivered_ts | timestamp without time zone |
Indexes:
"bcs_list_pkey" PRIMARY KEY, btree (id)
Foreign-key constraints:
"bcs_list_bcs_message_id_fkey" FOREIGN KEY (bcs_message_id) REFERENCES bcs_message(id)
Table "public.bcs_message"
Column | Type | Modifiers
------------------+-----------------------------+----------------------------------------------------------
id | bigint | not null default nextval('bcs_message_id_seq'::regclass)
bcs_message_time | timestamp without time zone |
duration | integer |
mbuzzer | integer |
message | text |
expired | timestamp without time zone |
company_id | bigint |
Indexes:
"bcs_message_pkey" PRIMARY KEY, btree (id)
Foreign-key constraints:
"bcs_message_company_id_fkey" FOREIGN KEY (company_id) REFERENCES m_company(id)
Referenced by:
TABLE "bcs_list" CONSTRAINT "bcs_list_bcs_message_id_fkey" FOREIGN KEY (bcs_message_id) REFERENCES bcs_message(id)
*/
package model
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/ipsusila/opt"
)
//Message status report codes used in MessageStatus.StatusCode.
const (
	MessageError     = -1 // delivery failed; message may be re-queued
	MessageSent      = 1  // message handed to the device
	MessageDelivered = 2  // device acknowledged the message
)

//ISOTime encapsulate timestamp for JSON parsing
//(accepts "2006-01-02 15:04:05" in local time; see UnmarshalJSON).
type ISOTime struct {
	time.Time
}

//UserMessage retrieved from GUI
type UserMessage struct {
	Vehicles []string `json:"vehicles"` // vehicle IDs as decimal IMEI strings
	Duration int      `json:"duration"`
	NBuzzer  int      `json:"nbuzzer"`
	Message  string   `json:"message"`
	Expired  ISOTime  `json:"expired"`
}

//BroadcastMessage holds message information need to be broadcast
type BroadcastMessage struct {
	Timestamp ISOTime `json:"ts"`
	Duration  int     `json:"duration"`
	NBuzzer   int     `json:"nbuzzer"`
	Message   string  `json:"message"`
	Expired   ISOTime `json:"expired"`
}

//MessageStatus for specific IMEI
type MessageStatus struct {
	IMEI       string `json:"imei"`
	Status     string `json:"status"`
	Timestamp  string `json:"timestamp"`
	StatusCode int    `json:"-"` // MessageError/MessageSent/MessageDelivered
}

//MessageProvider send/receive message
type MessageProvider interface {
	Get(imei uint64) (*BroadcastMessage, error)
	Put(message *UserMessage) error
	Update(status *MessageStatus) error
}

// messageSet is a mutex-guarded fixed-capacity FIFO ring buffer of
// broadcast messages. readPos and writePos are kept in [0, capacity] and
// wrapped with a modulo on access (see doPut/take).
type messageSet struct {
	sync.RWMutex
	messages []*BroadcastMessage
	readPos  int
	writePos int
	capacity int
	numItems int
}

//DbMessageProvider provides message with DB storage
type DbMessageProvider struct {
	sync.RWMutex
	queueMsg map[uint64]*messageSet // pending broadcasts, keyed by IMEI
	sentMsg  map[uint64]*messageSet // sent-but-unacknowledged, keyed by IMEI
	capacity int                    // per-IMEI ring capacity

	saveToDb   bool
	loadFromDb bool
	messageAPI string // GET endpoint for pending messages (IMEI appended)
	reportAPI  string // PUT endpoint for status reports

	serviceTimeout time.Duration
	appendIMEI     bool // append IMEI to reportAPI when reporting
	serviceClient  *http.Client
}
//-------------------------------------------------------------------------------------------------
//UnmarshalJSON convert string to timestamp
//Convert from Y-m-d H:M:S, interpreted in the local time zone.
func (tm *ISOTime) UnmarshalJSON(b []byte) (err error) {
	//TODO time other than local
	raw := strings.Trim(string(b), "\"")
	parsed, perr := time.ParseInLocation("2006-01-02 15:04:05", raw, time.Local)
	if perr != nil {
		// Never leave a partial value behind on a bad timestamp.
		tm.Time = time.Time{}
		return perr
	}
	tm.Time = parsed
	return nil
}
//-------------------------------------------------------------------------------------------------

// newMessageSet creates an empty fixed-capacity FIFO ring of messages.
func newMessageSet(capacity int) *messageSet {
	sets := &messageSet{
		capacity: capacity,
		numItems: 0,
		readPos:  0,
		writePos: 0,
		messages: make([]*BroadcastMessage, capacity),
	}
	return sets
}

// doPut appends msg to the ring, returning false when the ring is full.
// NOTE: it also stamps msg.Timestamp with the current time, mutating the
// caller's message.
func (q *messageSet) doPut(msg *BroadcastMessage) bool {
	//check count (full or not?)
	q.Lock()
	defer q.Unlock()
	if q.numItems == q.capacity {
		return false
	}
	msg.Timestamp.Time = time.Now()
	// writePos stays within [0, capacity]; the modulo wraps it back to 0.
	pos := q.writePos % q.capacity
	q.messages[pos] = msg
	q.writePos = pos + 1
	q.numItems++
	return true
}

// put enqueues msg; nil messages are rejected.
func (q *messageSet) put(msg *BroadcastMessage) bool {
	//check message pointer
	if msg == nil {
		return false
	}
	return q.doPut(msg)
}

// putUnique enqueues msg only when an equal message is not already queued.
// NOTE(review): contains and doPut lock separately, so a duplicate could
// slip in between check and insert if two goroutines put concurrently —
// confirm callers serialize puts (DbMessageProvider does, under its lock).
func (q *messageSet) putUnique(msg *BroadcastMessage) bool {
	//check message pointer
	if msg == nil || q.contains(msg) {
		return false
	}
	return q.doPut(msg)
}
// take removes and returns the oldest queued message, or nil when empty.
func (q *messageSet) take() *BroadcastMessage {
	q.Lock()
	defer q.Unlock()
	//check count (empty or not?)
	if q.numItems == 0 {
		return nil
	}
	// readPos stays within [0, capacity]; the modulo wraps it back to 0.
	pos := q.readPos % q.capacity
	msg := q.messages[pos]
	q.readPos = pos + 1
	q.numItems--
	return msg
}

// get returns the oldest queued message without removing it, or nil when
// empty.
func (q *messageSet) get() *BroadcastMessage {
	q.Lock()
	defer q.Unlock()
	//check count (empty or not?)
	if q.numItems == 0 {
		return nil
	}
	pos := q.readPos % q.capacity
	return q.messages[pos]
}

// count returns the current number of queued messages.
func (q *messageSet) count() int {
	q.RLock()
	defer q.RUnlock()
	return q.numItems
}
// contains reports whether a message equal to m is currently queued.
//
// BUG FIX: the original walked the ring comparing `beg % capacity` against
// the raw writePos. Since writePos is kept in [0, capacity], the moment
// writePos reached capacity the modded position (always < capacity) could
// never equal it, and the loop spun forever while holding the read lock.
// Iterating by item count over the live window is both correct and bounded.
func (q *messageSet) contains(m *BroadcastMessage) bool {
	q.RLock()
	defer q.RUnlock()
	for i := 0; i < q.numItems; i++ {
		pos := (q.readPos + i) % q.capacity
		if m.EqualTo(q.messages[pos]) {
			return true
		}
	}
	return false
}
//-------------------------------------------------------------------------------------------------
//EqualTo return true if message is equal: all fields must match.
func (m1 *BroadcastMessage) EqualTo(m2 *BroadcastMessage) bool {
	if m1.Timestamp != m2.Timestamp || m1.Expired != m2.Expired {
		return false
	}
	if m1.Duration != m2.Duration || m1.NBuzzer != m2.NBuzzer {
		return false
	}
	return m1.Message == m2.Message
}
//-------------------------------------------------------------------------------------------------
//NewDbMessageProvider create new provider.
//
// BUG FIX: the original returned a zero-value provider, which was unusable:
// Put wrote into nil maps (runtime panic), newMessageSet(0) could never
// accept a message, and updateDb dereferenced a nil serviceClient. The
// collections and safe defaults are now initialized up front.
func NewDbMessageProvider(options *opt.Options) (*DbMessageProvider, error) {
	//make sure url ends with /
	p := &DbMessageProvider{
		queueMsg:       make(map[uint64]*messageSet),
		sentMsg:        make(map[uint64]*messageSet),
		capacity:       32, // default per-IMEI queue size; TODO confirm/override from options
		serviceTimeout: 30 * time.Second,
	}
	p.serviceClient = &http.Client{Timeout: p.serviceTimeout}
	//TODO init remaining fields (messageAPI, reportAPI, saveToDb, loadFromDb,
	//appendIMEI) from options
	return p, nil
}
// getFromDb fetches the pending broadcast message for imei from the
// message API. It returns (nil, nil) when DB loading is disabled or when
// the fetched message has already expired.
func (p *DbMessageProvider) getFromDb(imei uint64) (*BroadcastMessage, error) {
	if !p.loadFromDb {
		return nil, nil
	}
	//Get message from API
	//TODO: can it handle many connections?
	msg := &BroadcastMessage{}
	//internal address: messageAPI with the IMEI appended
	imeiStr := fmt.Sprintf("%d", imei)
	url := p.messageAPI + imeiStr
	err := p.getJSON(url, msg)
	if err != nil {
		return nil, err
	}
	// Drop messages whose expiry timestamp has already passed.
	now := time.Now()
	if msg.Expired.Before(now) {
		return nil, nil
	}
	return msg, nil
}
// putToDb persists a user message to the database.
// NOTE(review): currently a no-op even when saveToDb is set — confirm
// whether persistence is still pending implementation.
func (p *DbMessageProvider) putToDb(message *UserMessage) error {
	if !p.saveToDb {
		return nil
	}
	return nil
}

// updateDb reports a delivery-status change to the report API via HTTP PUT.
func (p *DbMessageProvider) updateDb(status *MessageStatus) error {
	url := p.reportAPI
	if p.appendIMEI {
		// Some endpoints address the resource by IMEI in the URL path.
		url += status.IMEI
	}
	err := p.putJSON(url, status)
	if err != nil {
		log.Printf("PUT request error: %v\n", err)
		return err
	}
	return nil
}
// getJSON GETs url and decodes the JSON response body into target.
// NOTE(review): the HTTP status code is not checked, so a non-200 error
// body would be decoded as if it were a message — confirm the API
// contract before relying on this.
func (p *DbMessageProvider) getJSON(url string, target interface{}) error {
	r, err := p.serviceClient.Get(url)
	if err != nil {
		return err
	}
	defer r.Body.Close()
	return json.NewDecoder(r.Body).Decode(target)
}

// putJSON PUTs data (JSON-encoded) to url and interprets the response
// body and status code.
func (p *DbMessageProvider) putJSON(url string, data interface{}) error {
	payload, err := json.Marshal(data)
	if err != nil {
		return err
	}
	reader := bytes.NewReader(payload)
	client := p.serviceClient
	req, err := http.NewRequest("PUT", url, reader)
	if err != nil {
		return err
	}
	req.ContentLength = int64(len(payload))
	req.Header.Set("Content-Type", "application/json")
	response, err := client.Do(req)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	//read content
	result, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return err
	}
	// NOTE(review): treating any body containing "no" as a failure is very
	// fragile (it also matches words like "unknown") — confirm the exact
	// error format the service returns.
	str := string(result)
	if strings.Contains(str, "no") {
		return errors.New("Failed to PUT data to service: " + str)
	}
	//request code: 200/201 means success
	code := response.StatusCode
	if code == http.StatusOK || code == http.StatusCreated {
		return nil
	}
	status := response.Status
	return errors.New("PUT error with status = " + status)
}
//-------------------------------------------------------------------------------------------------
//Get return broadcast message for given IMEI. The in-memory queue is
//consulted first (without removing the message — see take/Update); when no
//queue exists or it is empty, the database/API is tried as a fallback.
// NOTE(review): getFromDb performs an HTTP request while p's read lock is
// held, which can stall writers on a slow service — confirm acceptable.
func (p *DbMessageProvider) Get(imei uint64) (*BroadcastMessage, error) {
	p.RLock()
	defer p.RUnlock()
	//get queue for this imei
	queue, ok := p.queueMsg[imei]
	if !ok {
		return p.getFromDb(imei)
	}
	//get message (non-destructive peek)
	msg := queue.get()
	if msg == nil {
		return p.getFromDb(imei)
	}
	return msg, nil
}
//Put setup new messages: fan the user message out to the per-IMEI
//broadcast queues (creating queues on demand), then persist it.
func (p *DbMessageProvider) Put(message *UserMessage) error {
	p.Lock()
	defer p.Unlock()
	now := ISOTime{time.Now()}
	for _, vid := range message.Vehicles {
		imei, err := strconv.ParseUint(vid, 10, 64)
		if err != nil {
			// Skip unparseable vehicle IDs rather than failing the batch.
			log.Printf("Parse IMEI (%v) error: %v\n", vid, err)
			continue
		}
		// Each IMEI gets its own copy of the broadcast message.
		bm := &BroadcastMessage{
			Timestamp: now,
			Duration:  message.Duration,
			NBuzzer:   message.NBuzzer,
			Message:   message.Message,
			Expired:   message.Expired,
		}
		queue, ok := p.queueMsg[imei]
		if !ok {
			queue = newMessageSet(p.capacity)
			p.queueMsg[imei] = queue
		}
		ok = queue.putUnique(bm)
		if !ok {
			//log: queue full or duplicate message
			log.Printf("Put message error-> %v:%v", imei, bm.Message)
		}
	}
	//save to database
	return p.putToDb(message)
}
//Update message status: moves messages between the pending queue and the
//"sent" set according to status.StatusCode, then reports the change to the
//database.
func (p *DbMessageProvider) Update(status *MessageStatus) error {
	p.Lock()
	defer p.Unlock()
	imei, err := strconv.ParseUint(status.IMEI, 10, 64)
	if err != nil {
		return err
	}
	//message delivered (or failed after sending)
	if status.StatusCode == MessageDelivered || status.StatusCode == MessageError {
		//get message from "sent message" collection
		sets, ok := p.sentMsg[imei]
		if !ok {
			return errors.New("delivered message not found in sets")
		}
		msg := sets.take()
		//if status is error, return message to broadcast queue
		if msg != nil && status.StatusCode == MessageError {
			//return to sets: re-queue for another delivery attempt
			queue, ok := p.queueMsg[imei]
			if !ok {
				queue = newMessageSet(p.capacity)
				p.queueMsg[imei] = queue
			}
			queue.putUnique(msg)
		}
	} else if status.StatusCode == MessageSent {
		//message sent, get from queue
		queue, ok := p.queueMsg[imei]
		if !ok {
			return errors.New("message not found in sets")
		}
		msg := queue.take()
		if msg != nil {
			//move to "sent message" collection to await acknowledgement
			sets, ok := p.sentMsg[imei]
			if !ok {
				sets = newMessageSet(p.capacity)
				p.sentMsg[imei] = sets
			}
			sets.put(msg)
		}
	}
	//update status in database
	return p.updateDb(status)
}

62
model/message_test.go

@ -0,0 +1,62 @@
package model
import (
"fmt"
"testing"
"time"
)
//TestQueue exercises the message set with one concurrent producer and
//one consumer, then checks pointer identity of BroadcastMessage values.
func TestQueue(t *testing.T) {
	queue := newMessageSet(20)
	//buffered so neither goroutine blocks on its completion signal
	finish := make(chan bool, 2)
	fnPush := func() {
		for k := 0; k < 30; k++ {
			msg := &BroadcastMessage{
				Timestamp: ISOTime{time.Now()},
				Message:   fmt.Sprintf("Message %02d", k),
			}
			ok := queue.put(msg)
			if !ok {
				t.Logf("Push error (#n = %d)\n", queue.count())
				//BUG FIX: a bare integer is nanoseconds; use an explicit
				//unit so the producer actually backs off
				time.Sleep(20 * time.Millisecond)
			}
		}
		finish <- true
	}
	fnPop := func() {
		for k := 0; k < 50; k++ {
			msg := queue.take()
			if msg != nil {
				t.Logf("#Items %d, msg: %v -> %v", queue.count(), msg.Timestamp, msg.Message)
			} else {
				t.Logf("#num items %d", queue.count())
				//BUG FIX: was time.Sleep(1) == 1ns
				time.Sleep(time.Millisecond)
			}
		}
		finish <- true
	}
	go fnPush()
	go fnPop()
	//BUG FIX: the consumer used to close(finish) right after its own
	//send; if the producer finished later, its send panicked with
	//"send on closed channel". Receive exactly one signal per goroutine
	//instead of ranging over a closed channel.
	for i := 0; i < 2; i++ {
		t.Logf("Finish %v", <-finish)
	}
	//pointer test: copies of a pointer compare equal
	msg1 := &BroadcastMessage{
		Timestamp: ISOTime{time.Now()},
	}
	msg2 := msg1
	msg3 := msg1
	if msg2 == msg3 {
		t.Logf("Message is equal")
	} else {
		t.Logf("Message is not equal")
	}
}

78
model/server.go

@ -0,0 +1,78 @@
/*
Package for server related model
*/
package model
//Identifiers shared across the model package: server transport modes,
//control commands, supported device vendor names, and map sharding.
const (
	//server transport mode identifiers (see ServerConfig.Mode)
	SERVER_ZMQ = "zmq"
	SERVER_NET = "net"
	//server lifecycle commands
	CMD_START   = "start"
	CMD_RESTART = "restart"
	CMD_STOP    = "stop"
	//supported device vendor identifiers
	DEV_RUPTELA   = "RUPTELA"
	DEV_TELTONIKA = "TELTONIKA"
	//shard count for concurrent maps — NOTE(review): consumers are not
	//visible in this file; confirm whether a power of two is required
	MAP_SHARD_COUNT = 32
)
/*
REQ-REP pattern request.
*/
type Request struct {
	Origin string //presumably the requester's identity — confirm at call sites
	Token  string //presumably an auth token — confirm at call sites
	Type   string //request type discriminator
	Data   string //payload; format depends on Type
}

//IoMapResponse bundles a device descriptor with its device-specific
//and standard tag lists.
type IoMapResponse struct {
	Descriptor DeviceDescriptor
	DevTags    []DevTag
	StdTags    []StdTag
}

//ServerConfig holds the listening/runtime configuration for one device server.
type ServerConfig struct {
	Id        int    //server identifier
	IP        string //listen address
	Port      int    //listen port
	MaxSocket int    //maximum concurrent sockets
	Mode      string //transport mode, e.g. SERVER_ZMQ or SERVER_NET
	ChanLimit uint32
	Device    string //device vendor, e.g. DEV_RUPTELA
	//timeout in milliseconds; field name misspelled but kept as-is for
	//config/serialization compatibility
	TimeoutMilis int
}

/*
Interface for service which provides several information.
*/
type ServiceProvider interface {
	IsRegistered(imei uint64) bool
	GetDeviceIoMapping(imei uint64) (iom *DeviceIoMapping, err error)
}

//DeviceServer is the minimal lifecycle contract for a device server.
type DeviceServer interface {
	Start()
	Stop()
}
/*
GetDeviceIoMapping returns the IO mapping for the given IMEI.
Currently a stub: it builds a fixed descriptor (Id 10, model "Pro3",
QoS 1) with an empty IO map; the TODO below is to fetch the real
mapping from a server or cache. Never returns a non-nil error.
*/
func GetDeviceIoMapping(imei uint64) (iom *DeviceIoMapping, err error) {
	//-------------------------------------
	//TODO get mapping from server/cache
	iom = new(DeviceIoMapping)
	iom.Descriptor.Id = 10
	iom.Descriptor.Model = "Pro3"
	iom.Descriptor.QoS = 1
	iom.Descriptor.IMEI = imei
	//BUG FIX: IOMapping is always nil on a freshly allocated struct, so
	//the previous `if iom.IOMapping == nil` guard was dead code —
	//allocate unconditionally
	iom.IOMapping = make(IOMap)
	return iom, nil
}

BIN
parser/.DS_Store vendored

Binary file not shown.

573
parser/devicePrs.go

@ -0,0 +1,573 @@
/*
Package for parsing devicePrs byte stream to struct.
*/
package parser
import (
"database/sql"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"strings"
"time"
"bytes"
"io/ioutil"
"github.com/ipsusila/opt"
"github.com/confluentinc/confluent-kafka-go/kafka"
"../model"
)
//Broadcast-message delivery states reported to the message service
//(see updateBroadcastStatus).
const (
	statusSent      = 1
	statusDelivered = 2
	statusError     = -1
)

//timeLayout is the reference layout used for all service and database
//timestamps in this package.
const (
	timeLayout = "2006-01-02 15:04:05"
)
//NACK response, CRC = 565
var rNACK = []byte{0x00, 0x02, 0x64, 0x00, 0x02, 0x35}

//ACK response, CRC = 5052
var rACK = []byte{0x00, 0x02, 0x64, 0x01, 0x13, 0xBC}

//trasparent channel error
//NOTE(review): rEmpty is byte-identical to rACK — this looks like a
//copy-paste; confirm the intended error frame for the transparent channel
var rEmpty = []byte{0x00, 0x02, 0x64, 0x01, 0x13, 0xBC}

//tch empty: zero-payload transparent-channel frame
//(length 4, cmd 0x72, port 1, then 57,239 — presumably its CRC-16; confirm)
var tEmpty = []byte{0, 4, 114, 1, 0, 0, 57, 239}

//PortID for transparent channel
var PortID byte = 0x01

//auto-increment id
var iCounter int64

//sReplacer escapes '\', ':' and '$' in outgoing display messages; those
//characters delimit the device message format built in askForBroadcastMessage
var sReplacer = strings.NewReplacer("\\", "\\\\", ":", "\\:", "$", "\\$")
//--------------------------------------------------------------------
//DevicePrsOptions store specific parser options
type DevicePrsOptions struct {
	messageAPI     string        //GET endpoint for pending broadcast messages
	reportAPI      string        //PUT endpoint for message-status reports
	serviceTimeout time.Duration //HTTP timeout for both endpoints
	appendIMEI     bool          //if true, the IMEI is appended to reportAPI
	insertQuery    string        //SQL used by saveToDatabase
	serviceClient  *http.Client  //shared client, built in NewDevicePrsOptions
	tagOption      *opt.Options  //"iotag" config section (IO id -> tag metadata)
	BrokerServer   string        //message broker bootstrap server
	BrokerTopic    string        //broker topic for parsed records
	DeviceName     string        //device/GPS code stored with each packet
}

//ISOTime encapsulate timestamp for JSON parsing; it unmarshals strings
//formatted with timeLayout in local time.
type ISOTime struct {
	time.Time
}

//BroadcastMessage store message information to be broadcasted
type BroadcastMessage struct {
	Timestamp ISOTime `json:"ts"`       //creation time
	Duration  int     `json:"duration"` //display seconds (clamped to 1..999 when sent)
	NBuzzer   int     `json:"nbuzzer"`  //buzzer beeps (clamped to 1..9 when sent)
	Message   string  `json:"message"`  //text shown on the device display
	Expired   ISOTime `json:"expired"`  //messages past this instant are not sent
}

//MessageStatus for specific IMEI
type MessageStatus struct {
	IMEI      string `json:"imei"`
	Status    string `json:"status"`    //"sent" or "delivered"
	Timestamp string `json:"timestamp"` //formatted with timeLayout
}

//DevicePrsParser encapsulate parser configuration for DevicePrs device
type DevicePrsParser struct {
	options       *DevicePrsOptions
	db            *sql.DB //optional; nil means packets are only logged
	MaxPacketSize int
	MinPacketSize int
	BufferSize    int
	Data          []byte //raw packet bytes, kept only after CRC verification
	IMEI          uint64 //0 when the stream failed verification
	Error         error  //last parse/verify error
	Command       byte   //command id (byte offset 10 of the packet)
	//Records *model.DeviceRecords
	Records DeviceRecordHeader //records decoded by verifyStream/parseData
	procdr  *kafka.Producer    //optional producer used by sendToBroker
}
//UnmarshalJSON convert string to timestamp
//The quoted JSON string is parsed with timeLayout in the local zone;
//on failure the receiver is reset to the zero time and the parse error
//is returned.
func (tm *ISOTime) UnmarshalJSON(b []byte) (err error) {
	//TODO time other than local
	raw := strings.Trim(string(b), "\"")
	parsed, perr := time.ParseInLocation(timeLayout, raw, time.Local)
	if perr != nil {
		tm.Time = time.Time{}
		return perr
	}
	tm.Time = parsed
	return nil
}
//GetMaxPacketSize return maximum packet size for this parser
func (p *DevicePrsParser) GetMaxPacketSize() int { return p.MaxPacketSize }

//GetMinPacketSize return minimum valid packet size for this parser
func (p *DevicePrsParser) GetMinPacketSize() int { return p.MinPacketSize }

//GetBufferSize return buffer size for reading data
func (p *DevicePrsParser) GetBufferSize() int { return p.BufferSize }

//GetError return last error
func (p *DevicePrsParser) GetError() error {
	//p.Error is already nil when no error occurred, so it can be
	//returned directly (the field is an error interface, not a pointer)
	return p.Error
}
//Payload return data associated stored in
//Packet layout: length(2) + IMEI(8) + command(1) + payload[1-1009] + CRC16(2);
//the payload therefore spans bytes [11 : len-2]. Undersized packets
//yield an empty slice.
func (p *DevicePrsParser) Payload() []byte {
	if len(p.Data) < p.MinPacketSize {
		return []byte{}
	}
	return p.Data[11 : len(p.Data)-2]
}

//GetStream return data stream
func (p *DevicePrsParser) GetStream() []byte { return p.Data }

//GetIMEI return IMEI
func (p *DevicePrsParser) GetIMEI() uint64 { return p.IMEI }

//GetCommand return command associated to this packet
func (p *DevicePrsParser) GetCommand() byte { return p.Command }
//saveToDatabase persists the raw packet, hex-encoded, using the
//configured insertQuery. When no database handle is set the packet is
//only written to the log; that path always returns nil.
func (p *DevicePrsParser) saveToDatabase() error {
	imei := fmt.Sprintf("%d", p.IMEI)
	dInHex := hex.EncodeToString(p.Data)
	//TODO, make packet invalid if IMEI is not registered
	db := p.db
	if db != nil {
		//TODO: save data to database
		//`INSERT INTO "GPS_DATA"("IMEI", "DATA_LOG", "FLAG", "DATE_INS", "DESC", "GPS_CODE", "DATA_LEN")
		//VALUES($1, $2, $3, $4, $5, $6, $7)`
		now := time.Now().Local().Format(timeLayout)
		query := p.options.insertQuery
		result, err := db.Exec(query, imei, dInHex, false, now, "", p.options.DeviceName, len(p.Data))
		if err != nil {
			log.Printf("INSERT ERROR: %s -> %s; %s: %s\n", err.Error(), query, imei, dInHex)
			return err
		}
		//insert id / row count are logged best-effort; their errors are
		//deliberately ignored (not all drivers support LastInsertId)
		id, _ := result.LastInsertId()
		nrows, _ := result.RowsAffected()
		log.Printf("IMEI: %v, insert-id: %d, nrows: %d\n", imei, id, nrows)
	} else {
		//if database not defined, display packet in log
		log.Printf("IMEI: %v\n Data: %v\n", imei, dInHex)
	}
	return nil
}
//sendToBroker publishes the decoded records (as JSON) to the configured
//broker topic, keyed by the device IMEI, and blocks until the delivery
//report for this message arrives. When no producer is configured the
//packet is only noted in the log and nil is returned.
func (p *DevicePrsParser) sendToBroker() error {
	imei := fmt.Sprintf("%d", p.IMEI)
	//TODO, make packet invalid if IMEI is not registered
	procdr := p.procdr
	if procdr == nil {
		//if message broker not defined, display packet in log
		log.Printf("Message Broker not defined, IMEI: %v\n", imei)
		return nil
	}
	dataJson, err := json.Marshal(p.Records)
	if err != nil {
		//BUG FIX: this error was previously only logged and a nil
		//payload was published anyway; abort so the caller sees it
		log.Printf("Error parse to JSON (IMEI: %v): %s\n", imei, err)
		return err
	}
	//log.Printf("JSON : %v\n",string(dataJson))
	log.Printf("Number of records: %d (IMEI: %v)\n", p.Records.NumRec, imei)
	//broker
	topic := p.options.BrokerTopic
	//delivery-report listener; closes doneChan after the first report
	doneChan := make(chan bool)
	go func() {
		defer close(doneChan)
		for e := range procdr.Events() {
			switch ev := e.(type) {
			case *kafka.Message:
				m := ev
				if m.TopicPartition.Error != nil {
					log.Printf("Sent failed: %v (IMEI: %v)\n", m.TopicPartition.Error, imei)
				} else {
					log.Printf("Sent message to topic %s [%d] at offset %v (IMEI: %v)\n",
						*m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset, imei)
				}
				return
			default:
				log.Printf("Ignored event: %s (IMEI: %v)\n", ev, imei)
			}
		}
	}()
	procdr.ProduceChannel() <- &kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          dataJson,
		Headers:        []kafka.Header{{Key: imei, Value: []byte(p.Records.Tstamp.String())}},
	}
	// wait for delivery report goroutine to finish
	<-doneChan
	return nil
}
//getJSON issues an HTTP GET and decodes the JSON response body into target.
//NOTE(review): the HTTP status code is not checked, so a non-2xx JSON
//error body would be decoded into target as if it were a success —
//confirm the service contract before relying on this.
func (p *DevicePrsParser) getJSON(url string, target interface{}) error {
	r, err := p.options.serviceClient.Get(url)
	if err != nil {
		return err
	}
	defer r.Body.Close()
	return json.NewDecoder(r.Body).Decode(target)
}
//putJSON marshals data and PUTs it to url as application/json.
//HTTP 200/201 counts as success; any other status is returned as an
//error. NOTE(review): a response body containing the substring "no" is
//treated as failure even on a 2xx status — this is fragile (any body
//containing e.g. "note" trips it); confirm the service's reply contract.
//This mirrors a near-identical helper in the model package.
func (p *DevicePrsParser) putJSON(url string, data interface{}) error {
	if p.options == nil {
		return errors.New("PUT error, options not defined")
	}
	payload, err := json.Marshal(data)
	if err != nil {
		return err
	}
	reader := bytes.NewReader(payload)
	client := p.options.serviceClient
	req, err := http.NewRequest("PUT", url, reader)
	if err != nil {
		return err
	}
	req.ContentLength = int64(len(payload))
	req.Header.Set("Content-Type", "application/json")
	response, err := client.Do(req)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	//read content
	result, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return err
	}
	str := string(result)
	if strings.Contains(str, "no") {
		return errors.New("Failed to update message status: " + str)
	}
	//request code
	code := response.StatusCode
	if code == http.StatusOK || code == http.StatusCreated {
		return nil
	}
	status := response.Status
	return errors.New("PUT error with status = " + status)
}
//askForBroadcastMessage fetches the pending broadcast message for this
//parser's IMEI from the message service and renders it in the device
//display format ":t<beeps><3-digit seconds><escaped text>$".
//Returns "" without error when the message has expired (or is absent,
//since a zero-valued Expired is always in the past). The buzzer count
//is clamped to 1..9 and the display duration to 1..999 seconds. On
//success, status "sent" is reported back asynchronously.
func (p *DevicePrsParser) askForBroadcastMessage() (string, error) {
	if p.options == nil {
		return "", errors.New("GET error, options not defined")
	}
	//Get message from API
	//TODO: can it handle many connections?
	msg := &BroadcastMessage{}
	//internal service address: messageAPI with the IMEI appended
	imei := fmt.Sprintf("%d", p.IMEI)
	url := p.options.messageAPI + imei
	err := p.getJSON(url, msg)
	if err != nil {
		return "", err
	}
	now := time.Now()
	if msg.Expired.Before(now) {
		//expired: nothing to broadcast
		return "", nil
	}
	//get buzzer count
	nbeep := msg.NBuzzer
	if nbeep <= 0 {
		nbeep = 1
	} else if nbeep > 9 {
		nbeep = 9
	}
	dispSec := msg.Duration
	if dispSec <= 0 {
		dispSec = 1
	} else if dispSec > 999 {
		dispSec = 999
	}
	//escape '\', ':' and '$', which delimit the wire format
	escapedMsg := sReplacer.Replace(msg.Message)
	pesan := fmt.Sprintf(":t%d%03d%v$", nbeep, dispSec, escapedMsg)
	//log message
	log.Printf("IMEI = %d, message = %v\n", p.IMEI, pesan)
	//go update broadcast status (mark as "sent")
	go p.updateBroadcastStatus(statusSent)
	//return the message
	return pesan, nil
}
//Update broadcast status
//Reports "sent" or "delivered" for this parser's IMEI to the report API;
//statusError is only logged and nothing is sent.
func (p *DevicePrsParser) updateBroadcastStatus(status int) error {
	report := &MessageStatus{
		IMEI:      fmt.Sprintf("%d", p.IMEI),
		Timestamp: time.Now().Format(timeLayout),
	}
	switch status {
	case statusSent:
		report.Status = "sent"
	case statusDelivered:
		report.Status = "delivered"
	case statusError:
		//Do nothing besides logging; no report is submitted
		log.Printf("IMEI=%d, Message status = Error\n", p.IMEI)
	}
	if report.Status == "" || p.options == nil {
		return nil
	}
	url := p.options.reportAPI
	if p.options.appendIMEI {
		url += report.IMEI
	}
	if err := p.putJSON(url, report); err != nil {
		log.Printf("PUT request error: %v\n", err)
		return err
	}
	log.Printf("IMEI=%v, Message status = %v\n", report.IMEI, report.Status)
	return nil
}
//GetBroadcastMessage return message payload tobe broadcased to client
//
//Frame layout (big-endian):
//  [0:2]     packet length = payload length + 4
//  [2]       command 0x72 (transparent channel)
//  [3]       port id (0x01, port 'B')
//  [4:6]     zero
//  [6:n+6]   message payload
//  [n+6:n+8] CRC-16 over bytes [2:n+6]
//NOTE(review): when askForBroadcastMessage returns "" (no pending
//message), this still builds a zero-payload frame — compare the tEmpty
//constant and confirm that is the intended "empty" reply.
func (p *DevicePrsParser) GetBroadcastMessage() ([]byte, error) {
	pesan, err := p.askForBroadcastMessage()
	n := len(pesan)
	if err != nil {
		return nil, err
	}
	//construct message
	npacket := n + 4
	data := make([]byte, 8+n)
	data[0] = byte(npacket >> 8)
	data[1] = byte(npacket)
	data[2] = 0x72
	data[3] = 0x01 //PORT 'B' (TODO: configurable)
	data[4] = 0x00
	data[5] = 0x00
	//setup Payload
	payload := []byte(pesan)
	ptr := data[6:]
	copy(ptr, payload)
	//calculate CRC
	crc := crc16CCITT(data[2:(n + 6)])
	data[n+6] = byte(crc >> 8)
	data[n+7] = byte(crc)
	return data, nil
}
//GetClientResponse return appropriate response to client according to packet
//
//Record packets get ACK when verified, NACK otherwise. For
//transparent-channel packets the payload prefix selects the action:
//  GMn/GMi/GMe -> reply with the pending broadcast message frame
//  GMo         -> message displayed OK; report "delivered" asynchronously
//Any other prefix, or any parse failure, yields the empty
//transparent-channel frame (tEmpty). Unknown commands return rEmpty.
func (p *DevicePrsParser) GetClientResponse() []byte {
	//Get response to client
	//TODO other response
	switch int(p.Command) {
	case model.CMD_RUP_RECORD:
		if p.IMEI == 0 || p.Error != nil {
			return rNACK
		}
		return rACK
	case model.CMD_RUP_TRCH:
		if p.IMEI == 0 || p.Error != nil {
			return tEmpty
		}
		//parse incoming message
		tch, err := parseTchStream(p.Data, p.MinPacketSize, p.MaxPacketSize)
		if err != nil {
			return tEmpty
		}
		//check command type
		payload := string(tch.Data)
		if strings.HasPrefix(payload, "GMn") ||
			strings.HasPrefix(payload, "GMi") ||
			strings.HasPrefix(payload, "GMe") {
			//send message if initial, none or error
			data, err := p.GetBroadcastMessage()
			if err != nil {
				p.Error = err
				return tEmpty
			}
			return data
		} else if strings.HasPrefix(payload, "GMo") {
			//ok, message delivered
			go p.updateBroadcastStatus(statusDelivered)
		}
		//empty transparent channel
		return tEmpty
	}
	//empty
	return rEmpty
}
//GetRecords return parsed records
// func (p *DevicePrsParser) GetRecords() *model.DeviceRecords {
// if p.IMEI == 0 || p.Error != nil {
// return nil
// }
// //parse stream if not yet done
// if p.Records == nil {
// //Stream already verified when creating parser
// p.Records, p.Error = parseStream(p.Data, p.MinPacketSize, p.MaxPacketSize, true)
// if p.Error != nil {
// return nil
// }
// }
// return p.Records
// }
//ExecuteAsync perform task
//For record packets it persists the raw packet and publishes the decoded
//records to the broker. Returns false when either step fails; the first
//failing step's error is also kept in p.Error.
func (p *DevicePrsParser) ExecuteAsync() bool {
	status := true
	//TODO identify according to command
	switch p.GetCommand() {
	case model.CMD_RUP_RECORD:
		if err := p.saveToDatabase(); err != nil {
			status = false
			p.Error = err
		}
		//BUG FIX: the broker result was previously assigned to err and
		//silently discarded; check it like the database result
		if err := p.sendToBroker(); err != nil {
			status = false
			p.Error = err
		}
	}
	return status
}
//--------------------------------------------------------------------
//NewDevicePrsOptions create options for devicePrs
//
//Reads the DEV_RUPTELA section (message/report endpoints, timeout,
//appendIMEI, insertQuery) plus the "iotag", "messagebroker" and
//"server" sections of the configuration, and builds the shared HTTP
//client with the configured service timeout (default 10 seconds).
func NewDevicePrsOptions(options *opt.Options) *DevicePrsOptions {
	svcOpt := options.Get(model.DEV_RUPTELA)
	//default INSERT used when the configuration does not override it
	defQuery := `INSERT INTO "GPS_DATA"("IMEI", "DATA_LOG", "FLAG", "DATE_INS", "DESC", "GPS_CODE", "DATA_LEN")
	VALUES($1, $2, $3, $4, $5, $6, $7)`
	insertQuery := svcOpt.GetString("insertQuery", defQuery)
	rupOpt := &DevicePrsOptions{
		messageAPI:     svcOpt.GetString("message", ""),
		reportAPI:      svcOpt.GetString("report", ""),
		serviceTimeout: time.Duration(svcOpt.GetInt("serviceTimeout", 10)) * time.Second,
		appendIMEI:     svcOpt.GetBool("appendIMEI", false),
		insertQuery:    insertQuery,
		tagOption:      options.Get("iotag"),
		BrokerServer:   options.Get("messagebroker").GetString("brokerServer", ""),
		BrokerTopic:    options.Get("messagebroker").GetString("brokerTopic", ""),
		DeviceName:     options.Get("server").GetString("device", ""),
	}
	//create client
	rupOpt.serviceClient = &http.Client{Timeout: rupOpt.serviceTimeout}
	return rupOpt
}
//NewDevicePrs create empty parser
//Default limits: 2048-byte packets and buffer, 14-byte minimum packet.
func NewDevicePrs() *DevicePrsParser {
	return &DevicePrsParser{
		MaxPacketSize: 2048,
		MinPacketSize: 14,
		BufferSize:    2048,
	}
}
//NewDevicePrsParser create new parser. Maximum packet size is 1KB
//NOTE(review): NewDevicePrs actually configures MaxPacketSize = 2048,
//so the "1KB" above looks stale — confirm which limit is intended.
//
//The stream is verified (size field, CRC) immediately; on success the
//decoded records and the raw bytes are stored on the parser, otherwise
//p.Error is set and p.Data stays nil.
func NewDevicePrsParser(options *DevicePrsOptions, db *sql.DB, data []byte, procdr *kafka.Producer) *DevicePrsParser {
	//allocate parser with maximum and minimum packet size
	p := NewDevicePrs()
	p.options = options
	p.db = db
	p.procdr = procdr
	//verify stream
	if data != nil {
		p.IMEI, p.Command, p.Records, p.Error = verifyStream(data, p.MinPacketSize, p.MaxPacketSize, options.tagOption)
		if p.Error == nil {
			p.Data = data
		}
	} else {
		p.Error = fmt.Errorf("Stream is empty")
	}
	return p
}
//NewDevicePrsStringParser create new parser which accept HEX string.
//Maximum packet size is 1KB i.e. maximum string length is 2KB
func NewDevicePrsStringParser(options *DevicePrsOptions, db *sql.DB, data string, procdr *kafka.Producer) *DevicePrsParser {
	stream, decodeErr := hex.DecodeString(data)
	parser := NewDevicePrsParser(options, db, stream, procdr)
	if decodeErr != nil {
		//a hex-decode failure overrides any error set during verification
		parser.Error = decodeErr
	}
	return parser
}
//--------------------------------------------------------------------

445
parser/devicePrsInternal.go

@ -0,0 +1,445 @@
/*
Package for parsing devicePrs byte stream to struct.
*/
package parser
import (
"bytes"
"encoding/binary"
"fmt"
"time"
//"encoding/hex"
"strconv"
"sort"
"github.com/ipsusila/opt"
"../model"
)
//ruptelaTchHeader mirrors the fixed 18-byte big-endian header of a
//transparent-channel packet (decoded in parseTchStream).
type ruptelaTchHeader struct {
	Length    uint16 //packet length field
	IMEI      uint64
	CommandID byte
	PortID    byte
	Reserved  uint16
	Timestamp uint32 //unix seconds
}

//DeviceRecordHeader is the decoded form of one packet: the device IMEI,
//the decode time, the declared record count and the records themselves.
type DeviceRecordHeader struct {
	Imei          uint64
	Tstamp        time.Time //time the packet was parsed (set in parseData)
	NumRec        uint16    //record count declared in the packet
	DeviceRecords []DeviceRecord
}

//DeviceRecord is a single position/IO sample from the device.
type DeviceRecord struct {
	Tstamp    time.Time
	TstampInt int64 //unix seconds; the sort key used by ByTstamp
	Imei      uint64
	Longitude float64 //degrees (raw value / 1e7)
	Latitude  float64 //degrees (raw value / 1e7)
	Altitude  uint16  //raw decimeters / 10
	Angle     uint16  //raw centidegrees / 100
	//satellite count; field name misspelled but kept for
	//serialization compatibility
	Satelites  uint16
	Speed      uint16
	TagDevices map[string]TagDevice //IO values keyed by tag id
}

//TagDevice pairs an IO tag's configuration (from the "iotag" section)
//with its decoded value.
type TagDevice struct {
	TagName     string
	TagId       string
	TagDataType string
	TagVal      string //decoded IO value rendered as a decimal string
}

// ByTstamp implements sort.Interface for []DeviceRecord based on
// the TstampInt field.
type ByTstamp []DeviceRecord

func (a ByTstamp) Len() int           { return len(a) }
func (a ByTstamp) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByTstamp) Less(i, j int) bool { return a[i].TstampInt < a[j].TstampInt }
/*
verifyStream validates a raw DevicePrs packet and decodes it.

Checks, in order: minimum/maximum length, the 2-byte big-endian size
field (must equal len(data)-4, i.e. the packet minus length prefix and
CRC suffix), and the trailing CRC-16 computed over data[2:len-2]. On
success it returns the IMEI (bytes 2..9), the command id (byte 10) and
the records decoded by parseData; on failure the zero values plus an
error describing the first failed check.
*/
func verifyStream(data []byte, minSize int, maxSize int, tagOption *opt.Options) (imei uint64, cmd byte, rec DeviceRecordHeader, err error) {
	dlen := len(data)
	if dlen < minSize {
		return 0, 0, rec, fmt.Errorf("Packet size is too small (< %d)", dlen)
	} else if dlen > maxSize {
		return 0, 0, rec, fmt.Errorf("Packet size is greater than maximum allowed size (%d)", dlen)
	}
	//Extract packet size
	sz := int(binary.BigEndian.Uint16(data[:2]))
	if sz != (dlen - 4) {
		return 0, 0, rec, fmt.Errorf("Size field mismatched (!%d)", dlen)
	}
	//extract IMEI
	imei = binary.BigEndian.Uint64(data[2:10])
	//extract and verify CRC
	crc := binary.BigEndian.Uint16(data[dlen-2:])
	ccrc := crc16CCITT(data[2 : dlen-2])
	if crc != ccrc {
		return 0, 0, rec, fmt.Errorf("Invalid Ruptela packet (CRC-16 does not match)")
	}
	//parse data (record decoding happens only after the CRC check passes)
	rec = parseData(data, imei, tagOption)
	//extract command
	cmd = data[10]
	return imei, cmd, rec, nil
}
/*
parseData decodes the record section of a verified packet into a
DeviceRecordHeader.

Per-record wire format (big-endian, starting at byte 13): timestamp (4,
unix seconds), priority (2, skipped), longitude (4, 1e-7 deg), latitude
(4, 1e-7 deg), altitude (2, decimeters), angle (2, centidegrees),
satellites (1), speed (2), HDOP (1, skipped), IO-cause (1, skipped),
then four IO groups (1-, 2-, 4- and 8-byte values), each prefixed by a
1-byte count of id/value pairs. Records are sorted ascending by
timestamp before returning.

NOTE(review): `tags` is created once and assigned to every record's
TagDevices field, so all records share ONE map and later records
overwrite earlier IO values — confirm whether per-record maps were
intended.
NOTE(review): offsets are not bounds-checked; a malformed but CRC-valid
packet could panic on slicing.
*/
func parseData(data []byte, imei uint64, tagOption *opt.Options) (rec DeviceRecordHeader) {
	//allocate records
	rec.Tstamp = time.Now()
	rec.Imei = imei
	//build the tag lookup (IO id -> tag metadata) from the "iotag" config
	tagOption_ := tagOption.GetObjectArray("tags")
	lengthTags := len(tagOption_)
	tags := make(map[string]TagDevice)
	for i := 0; i < lengthTags; i++ {
		var tag TagDevice
		tag.TagName = tagOption_[i].GetString("tagName", "")
		tag.TagId = tagOption_[i].GetString("tagId", "")
		tag.TagDataType = tagOption_[i].GetString("tagDataType", "")
		tag.TagVal = ""
		tags[tag.TagId] = tag
	}
	//number of records (single byte at offset 12, widened to uint16)
	currByte := 12
	plusByte := currByte + 1
	numRec := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "numRec")
	//fmt.Printf("Number of records %d\n", numRec)
	rec.NumRec = numRec
	deviceRecords := make([]DeviceRecord, 0)
	for j := 0; j < int(numRec); j++ {
		var deviceRecord DeviceRecord
		//imei
		deviceRecord.Imei = imei
		//Timestamp (byteLength 8 only pads the scratch buffer; the
		//int32 read still consumes just the 4 real bytes)
		currByte = plusByte
		plusByte = currByte + 4
		tstampInt := convBinaryToInt32(data[currByte:plusByte], 8, "tstampInt")
		tstamp := time.Unix(int64(tstampInt), 0)
		//fmt.Printf("Timestamp %v\n",tstamp.String())
		deviceRecord.TstampInt = int64(tstampInt)
		deviceRecord.Tstamp = tstamp
		//Priority (2 bytes, skipped)
		currByte = plusByte
		plusByte = currByte + 2
		//Longitude
		currByte = plusByte
		plusByte = currByte + 4
		lonInt := convBinaryToInt32(data[currByte:plusByte], 4, "lonInt")
		lon := float64(lonInt) / 10000000
		//fmt.Printf("lon %f\n",lon)
		deviceRecord.Longitude = lon
		//Latitude
		currByte = plusByte
		plusByte = currByte + 4
		latInt := convBinaryToInt32(data[currByte:plusByte], 4, "latInt")
		lat := float64(latInt) / 10000000
		//fmt.Printf("lat %f\n",lat)
		deviceRecord.Latitude = lat
		//Altitude (decimeters -> meters)
		currByte = plusByte
		plusByte = currByte + 2
		alt := convBinaryToUint16(data[currByte:plusByte], 2, "alt")
		//fmt.Printf("alt %d\n",alt)
		deviceRecord.Altitude = alt / 10
		//Angle (centidegrees -> degrees)
		currByte = plusByte
		plusByte = currByte + 2
		angle := convBinaryToUint16(data[currByte:plusByte], 2, "angle")
		//fmt.Printf("angle %d\n",angle)
		deviceRecord.Angle = angle / 100
		//Satelite
		currByte = plusByte
		plusByte = currByte + 1
		satelites := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "satelites")
		//fmt.Printf("satelites %d\n",satelites)
		deviceRecord.Satelites = satelites
		//Speed
		currByte = plusByte
		plusByte = currByte + 2
		speed := convBinaryToUint16(data[currByte:plusByte], 2, "speed")
		//fmt.Printf("speed %d\n",speed)
		deviceRecord.Speed = speed
		//HDOP - 09 (HEX) 9 (DEC) need to be divided by 10
		currByte = plusByte
		plusByte = currByte + 1
		//IO data cause record - 09 (HEX) 9 (DEC)
		currByte = plusByte
		plusByte = currByte + 1
		//--------------------------------------------------------------------
		//read 1 Byte I/O values
		//--------------------------------------------------------------------
		//Total IO elements, which length is 1 Byte
		currByte = plusByte
		plusByte = currByte + 1
		totalOneByteIO := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "totalOneByteIO")
		//fmt.Printf("total IO1 %d\n",totalOneByteIO)
		for i := 0; i < int(totalOneByteIO); i++ {
			currByte = plusByte
			plusByte = currByte + 1
			ioID := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "io1ID")
			//fmt.Printf("io1ID %d\n",ioID)
			currByte = plusByte
			plusByte = currByte + 1
			ioVal := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "io1Val")
			//fmt.Printf("io1Val %d\n",ioVal)
			//only IO ids present in the tag configuration are kept
			tagDevice_ := tags[strconv.Itoa(int(ioID))]
			tagDevice_.TagId = strconv.Itoa(int(ioID))
			tagDevice_.TagVal = strconv.Itoa(int(ioVal))
			if tagDevice_.TagName != "" {
				tags[strconv.Itoa(int(ioID))] = tagDevice_
			}
		}
		//--------------------------------------------------------------------
		//read 2 Byte I/O values
		//--------------------------------------------------------------------
		//Total IO elements, which length is 2 Byte
		currByte = plusByte
		plusByte = currByte + 1
		totalTwoByteIO := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "totalTwoByteIO")
		//fmt.Printf("total IO2 %d\n",totalTwoByteIO)
		for i := 0; i < int(totalTwoByteIO); i++ {
			currByte = plusByte
			plusByte = currByte + 1
			ioID := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "io2ID")
			//fmt.Printf("io2ID %d\n",ioID)
			currByte = plusByte
			plusByte = currByte + 2
			ioVal := convBinaryToUint16(data[currByte:plusByte], 2, "io2Val")
			//fmt.Printf("io2Val %d\n",ioVal)
			tagDevice_ := tags[strconv.Itoa(int(ioID))]
			tagDevice_.TagId = strconv.Itoa(int(ioID))
			tagDevice_.TagVal = strconv.Itoa(int(ioVal))
			if tagDevice_.TagName != "" {
				tags[strconv.Itoa(int(ioID))] = tagDevice_
			}
		}
		//--------------------------------------------------------------------
		//read 4 Byte I/O values
		//--------------------------------------------------------------------
		//Total IO elements, which length is 4 Byte
		currByte = plusByte
		plusByte = currByte + 1
		totalFourByteIO := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "totalFourByteIO")
		//fmt.Printf("total IO4 %d\n",totalFourByteIO)
		for i := 0; i < int(totalFourByteIO); i++ {
			currByte = plusByte
			plusByte = currByte + 1
			ioID := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "io4ID")
			//fmt.Printf("io4ID %d\n",ioID)
			currByte = plusByte
			plusByte = currByte + 4
			ioVal := convBinaryToInt32(data[currByte:plusByte], 4, "io4Val")
			//fmt.Printf("io4Val %d\n",ioVal)
			tagDevice_ := tags[strconv.Itoa(int(ioID))]
			tagDevice_.TagId = strconv.Itoa(int(ioID))
			tagDevice_.TagVal = strconv.FormatInt(int64(ioVal), 10)
			if tagDevice_.TagName != "" {
				tags[strconv.Itoa(int(ioID))] = tagDevice_
			}
		}
		//--------------------------------------------------------------------
		//read 8 Byte I/O values
		//--------------------------------------------------------------------
		//Total IO elements, which length is 8 Byte
		currByte = plusByte
		plusByte = currByte + 1
		total8ByteIO := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "total8ByteIO")
		//fmt.Printf("total IO8 %d\n",total8ByteIO)
		for i := 0; i < int(total8ByteIO); i++ {
			currByte = plusByte
			plusByte = currByte + 1
			ioID := convBinaryToUint16(addOneByteInTwoByte(data[currByte:plusByte]), 2, "io8ID")
			//fmt.Printf("io8ID %d\n",ioID)
			currByte = plusByte
			plusByte = currByte + 8
			ioVal := convBinaryToInt64(data[currByte:plusByte], 8, "io8Val")
			//fmt.Printf("io8Val %d\n",ioVal)
			tagDevice_ := tags[strconv.Itoa(int(ioID))]
			tagDevice_.TagId = strconv.Itoa(int(ioID))
			tagDevice_.TagVal = strconv.FormatInt(int64(ioVal), 10)
			if tagDevice_.TagName != "" {
				tags[strconv.Itoa(int(ioID))] = tagDevice_
			}
		}
		//see NOTE(review) above: the same map is shared by every record
		deviceRecord.TagDevices = tags
		deviceRecords = append(deviceRecords, deviceRecord)
	}
	//sort ascending by date
	sort.Sort(ByTstamp(deviceRecords))
	rec.DeviceRecords = deviceRecords
	//CRC-16 (offsets advanced past it, but the value is not re-read here)
	currByte = plusByte
	plusByte = currByte + 2
	return rec
}
/*
addOneByteInTwoByte widens a 1-byte big-endian value to 2 bytes by
prepending a zero byte, so it can be decoded as an uint16.
(The previous comment wrongly described this as a uint16 conversion;
the conversion itself happens in convBinaryToUint16.)
Only the first byte of data fits in the result; extra bytes are ignored.
*/
func addOneByteInTwoByte(data []byte) []byte {
	padded := make([]byte, 2)
	//padded[0] stays 0x00; copy fills at most padded[1]
	copy(padded[1:], data)
	return padded
}
/*
convBinaryToUint16 decodes data as a big-endian uint16. The input is
first copied into a zero-filled scratch buffer of byteLength bytes
(shorter inputs are therefore padded at the end), and the first 2 bytes
of that buffer are read. A read failure (byteLength < 2) is reported on
stdout, tagged with desc, and yields 0.
*/
func convBinaryToUint16(data []byte, byteLength int, desc string) uint16 {
	scratch := make([]byte, byteLength)
	copy(scratch, data)
	var out uint16
	if err := binary.Read(bytes.NewReader(scratch), binary.BigEndian, &out); err != nil {
		fmt.Printf("binary.Read failed %v :%v\n", desc, err)
		return 0
	}
	return out
}
/*
convBinaryToInt32 decodes data as a big-endian int32. The input is first
copied into a zero-filled scratch buffer of byteLength bytes (shorter
inputs are padded at the end), and only the first 4 bytes of that buffer
are read. A read failure is reported on stdout, tagged with desc, and
yields 0.
*/
func convBinaryToInt32(data []byte, byteLength int, desc string) int32 {
	scratch := make([]byte, byteLength)
	copy(scratch, data)
	var out int32
	if err := binary.Read(bytes.NewReader(scratch), binary.BigEndian, &out); err != nil {
		fmt.Printf("binary.Read failed %v :%v\n", desc, err)
		return 0
	}
	return out
}
/*
convBinaryToInt64 decodes data as a big-endian int64. The input is first
copied into a zero-filled scratch buffer of byteLength bytes (shorter
inputs are padded at the end), and the first 8 bytes of that buffer are
read. A read failure (byteLength < 8) is reported on stdout, tagged with
desc, and yields 0.
*/
func convBinaryToInt64(data []byte, byteLength int, desc string) int64 {
	scratch := make([]byte, byteLength)
	copy(scratch, data)
	var out int64
	if err := binary.Read(bytes.NewReader(scratch), binary.BigEndian, &out); err != nil {
		fmt.Printf("binary.Read failed %v :%v\n", desc, err)
		return 0
	}
	return out
}
//parseTchStream decodes a transparent-channel packet into a
//DisplayRequest: the fixed 18-byte header (length, IMEI, command, port,
//reserved, timestamp) followed by the payload and a 2-byte CRC suffix.
func parseTchStream(data []byte, minSize int, maxSize int) (*model.DisplayRequest, error) {
	rec := &model.DisplayRequest{}
	//check length
	dataLen := len(data)
	if dataLen < minSize {
		return rec, fmt.Errorf("Packet size is too small (< %d)", dataLen)
	}
	if dataLen > maxSize {
		return rec, fmt.Errorf("Packet size is greater than maximum allowed size (%d)", dataLen)
	}
	//2+8+1+1+2+4 (len, imei, cmd, port, reserved, timestamp)
	const hdrSize = 2 + 8 + 1 + 1 + 2 + 4
	//BUG FIX: packets of 18 or 19 bytes used to pass the size checks
	//and the header read, then panic on data[18:dataLen-2]; require
	//room for the header plus the 2-byte CRC before slicing
	if dataLen < hdrSize+2 {
		return rec, fmt.Errorf("Packet size is too small for transparent channel header (%d)", dataLen)
	}
	//read packet header part
	buf := bytes.NewBuffer(data)
	pktHdr := &ruptelaTchHeader{}
	err := binary.Read(buf, binary.BigEndian, pktHdr)
	if err != nil {
		return rec, fmt.Errorf("Failed to read packet header (%v)", err)
	}
	rec.IMEI = pktHdr.IMEI
	rec.PortID = pktHdr.PortID
	rec.Timestamp = time.Unix(int64(pktHdr.Timestamp), 0)
	//payload sits between the header and the trailing CRC
	rec.Data = data[hdrSize : dataLen-2]
	return rec, nil
}
// Calculate CRC16 with CCITT algorithm
// Based on C/C++ code in DevicePrs protocol documents
// CRC calculation omits Length(2-bytes) and CRC itself (2-bytes)
// Bit-by-bit LSB-first implementation: reversed polynomial 0x8408,
// zero initial value, no final XOR.
func crc16CCITT(data []byte) uint16 {
	const poly uint16 = 0x8408 //reversed 0x1021
	var crc uint16             //initialized as zero
	for _, b := range data {
		crc ^= uint16(b)
		for bit := 0; bit < 8; bit++ {
			carry := crc & 1
			crc >>= 1
			if carry != 0 {
				crc ^= poly
			}
		}
	}
	return crc
}

409
parser/devicePrs_test.go

@ -0,0 +1,409 @@
package parser
import (
"bytes"
"encoding/binary"
"encoding/gob"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"testing"
"github.com/ipsusila/opt"
"time"
"../model"
)
//TestCRC2 builds a complete transparent-channel display frame for a
//sample message and prints it (no assertions; output is for inspection).
func TestCRC2(t *testing.T) {
	const pesan = ":t2005Custom Message\\: Hello Server$"
	payload := []byte(pesan)
	n := len(payload)
	//frame: [len(2)][0x72][port][00 00][payload][crc(2)]
	npacket := n + 4
	packet := make([]byte, 8+n)
	packet[0] = byte(npacket >> 8)
	packet[1] = byte(npacket)
	packet[2] = 0x72
	packet[3] = 0x01 //PORT 'B'
	packet[4] = 0x00
	packet[5] = 0x00
	copy(packet[6:], payload)
	crc := crc16CCITT(packet[2 : n+6])
	packet[n+6] = byte(crc >> 8)
	packet[n+7] = byte(crc)
	fmt.Printf("Data: %X", packet)
}
//TestCRC compares CRC-16 over three candidate byte ranges of a sample
//packet and verifies that the correct range (length prefix and CRC
//suffix excluded) reproduces the packet's trailing CRC bytes.
func TestCRC(t *testing.T) {
	//data := []byte{0x00, 0x02, 0x64, 0x01, 0x13, 0xBC}
	data := []byte{0x00, 0x05, 0x72, 0x01, 0x00, 0x00, 0x66, 0xC5, 0x44}
	n1 := len(data)
	//correct range: skip the 2-byte length prefix and the 2-byte CRC suffix
	d1 := data[2 : n1-2]
	fmt.Printf("Data: %X\n", d1)
	crc1 := crc16CCITT(d1)
	//wrong: includes the length prefix
	d2 := data[0 : n1-2]
	fmt.Printf("Data: %X\n", d2)
	crc2 := crc16CCITT(d2)
	//wrong: includes the prefix and the CRC itself
	d3 := data //[0 : n1-2]
	fmt.Printf("Data: %X\n", d3)
	crc3 := crc16CCITT(d3)
	fmt.Printf("#ACK CRC=%X, %X %X\n", crc1, byte(crc1>>8), byte(crc1))
	fmt.Printf("#ACK CRC=%X\n", crc2)
	fmt.Printf("#ACK CRC=%X\n", crc3)
	//IMPROVEMENT: the test previously only printed. The packet's last
	//two bytes (0xC5, 0x44) are its big-endian CRC, so crc1 must match.
	if crc1 != 0xC544 {
		t.Errorf("CRC over payload = %X, want C544", crc1)
	}
}
// func xxTestParser(t *testing.T) {
// //Parse data from DevicePrs manual
// var p model.Parser = NewDevicePrsStringParser(nil, nil,
// "033500000C076B5C208F01011E5268CEF20000196E3A3A0AEF3E934F3E2D780000000007000000005268CEFD0000196E3A3A0AEF3E934F3"+
// "E2D780000000007000000005268CF080000196E3A3A0AEF3E934F3E2D780000000007000000005268CF130000196E3A3A0AEF3E934F3E2D7"+
// "80000000007000000005268CF1E0000196E3A3A0AEF3E934F3E2D780000000007000000005268CF290000196E3A3A0AEF3E934F3E2D78000"+
// "0000007000000005268CF340000196E3A3A0AEF3E934F3E2D780000000007000000005268CF3F0000196E3A3A0AEF3E934F3E2D780000000"+
// "007000000005268CF4A0000196E3A3A0AEF3E934F3E2D780000000007000000005268CF550000196E3A3A0AEF3E934F3E2D7800000000070"+
// "00000005268CF600000196E3A3A0AEF3E934F3E2D780000000007000000005268CF6B0000196E3A3A0AEF3E934F3E2D78000000000700000"+
// "0005268CF730000196E36630AEF42CE4F6D0BF40400022208000000005268CF7E0000196E36B60AEF42BE4F6D0BF400000000070000000052"+
// "68CF890000196E36B60AEF42BE4F6D0BF40000000007000000005268CF940000196E36B60AEF42BE4F6D0BF40000000007000000005268CF"+
// "9F0000196E36B60AEF42BE4F6D0BF40000000007000000005268CFAA0000196E36B60AEF42BE4F6D0BF40000000007000000005268CFB50"+
// "000196E36B60AEF42BE4F6D0BF40000000007000000005268CFC00000196E36B60AEF42BE4F6D0BF40000000007000000005268CFCB00001"+
// "96E36B60AEF42BE4F6D0BF40000000007000000005268CFD60000196E36B60AEF42BE4F6D0BF40000000007000000005268CFD70000196E"+
// "3C710AEF5EFF4F690BF40400011708000000005268CFE20000196E3B980AEF601A4F690BF40000000007000000005268CFED0000196E3B980"+
// "AEF601A4F690BF40000000007000000005268CFF80000196E3B980AEF601A4F690BF40000000007000000005268D0030000196E3B980AEF60"+
// "1A4F690BF40000000007000000005268D00E0000196E3B980AEF601A4F690BF40000000007000000005268D0190000196E3B980AEF601A4F6"+
// "90BF40000000007000000005268D0240000196E3B980AEF601A4F690BF400000000070000000046E2")
// //convert string to binary HEX
// if p.GetError() != nil {
// t.Fatal(p.GetError())
// }
// pkt := p.GetRecords()
// //display packet as JSON
// b, err := json.Marshal(pkt)
// if err != nil {
// t.Fatal(err)
// }
// fmt.Println(string(b))
// // ---------------------------------------------------------
// data2, err2 := hex.DecodeString(
// "007900000b1a2a5585c30100024e9c036900000f101733208ff45e07b31b570a001009090605011b1a020003001c01ad01021d338e1600000" +
// "2960000601a41014bc16d004e9c038400000f104fdf20900d20075103b00a001308090605011b1a020003001c01ad01021d33b11600000296" +
// "0000601a41014bc1ea0028f9")
// //convert string to binary HEX
// if err2 != nil {
// t.Fatal(err2)
// }
// p = NewDevicePrsParser(nil, nil, data2)
// if p.GetError() != nil {
// t.Fatal(p.GetError())
// }
// pkt = p.GetRecords()
// //get response CRC
// rep := p.GetClientResponse()
// crc := crc16CCITT(rep[2 : len(rep)-2])
// fmt.Println("#ACK CRC=", crc)
// //display packet as JSON
// b, err = json.Marshal(pkt)
// if err != nil {
// t.Fatal(err)
// }
// origPkt := string(b)
// fmt.Println(origPkt)
// //test for map
// m := make(map[byte]int)
// m[0] = 10
// m[11] = 20
// m[2] = 30
// fmt.Println(m)
// ioM := new(model.IoMapResponse)
// ioM.Descriptor.IMEI = 10
// ioM.Descriptor.Id = 11
// ioM.Descriptor.Model = "ABC"
// ioM.Descriptor.QoS = 4
// ioM.DevTags = []model.DevTag{0, 1, 2, 3}
// ioM.StdTags = []model.StdTag{10, 11, 2, 4}
// b, err = json.Marshal(ioM)
// if err != nil {
// t.Fatal(err)
// }
// jstr := string(b)
// fmt.Println(jstr)
// var io2 model.IoMapResponse
// err = json.Unmarshal([]byte(jstr), &io2)
// fmt.Println(io2)
// var b1 byte = 200
// x := int8(b1)
// y := byte(x)
// fmt.Printf("%d -> %d -> %d\n", b1, x, y)
// by := data2[len(data2)-2:]
// fmt.Printf("Byte = %+v, %d\n", by, binary.BigEndian.Uint16(by))
// p = NewDevicePrsParser(nil, nil, []byte{})
// fmt.Println("Parser ", p)
// p = NewDevicePrsParser(nil, nil, nil)
// fmt.Println("Parser ", p)
// if p.GetError() != nil {
// fmt.Println(p.GetError())
// }
// rep = p.GetClientResponse()
// crc = crc16CCITT(rep[2 : len(rep)-2])
// fmt.Println("#NACK CRC=", crc)
// imei, cmd, _ := verifyStream(data2, p.GetMinPacketSize(), p.GetMaxPacketSize())
// fmt.Printf("IMEI = %X, cmd = %d\n", imei, cmd)
// //test encoding/decoding
// var network bytes.Buffer // Stand-in for a network connection
// enc := gob.NewEncoder(&network) // Will write to network.
// dec := gob.NewDecoder(&network) // Will read from network.
// // Encode (send) some values.
// err = enc.Encode(pkt)
// if err != nil {
// t.Fatal("encode error:", err)
// }
// //fmt.Printf("Buffer = %+v\n", network)
// // Decode (receive) and print the values.
// var dpkt model.DeviceRecords
// err = dec.Decode(&dpkt)
// if err != nil {
// t.Fatal("decode error 1:", err)
// }
// b, err = json.Marshal(&dpkt)
// decPkt := string(b)
// fmt.Printf("%+v\n", decPkt)
// if origPkt != decPkt {
// t.Fatal("Encode/Decode records doesnot match.")
// }
// //test original converter
// conv := model.RecordsConverter{}
// stream, err := conv.ToStream(pkt)
// if err != nil {
// t.Fatal("To stream error: ", err)
// }
// fmt.Printf("Stream length = %d bytes, orig = %d, json = %d\n",
// len(stream), len(data2), len(origPkt))
// //fmt.Printf("Stream = %+v\n", string(stream))
// pkt2, err := conv.FromStream(stream)
// if err != nil {
// t.Fatal("From stream error: ", err)
// }
// b, err = json.Marshal(&pkt2)
// decPkt2 := string(b)
// if origPkt != decPkt2 {
// t.Fatal("Encode/Decode records doesnot match.")
// }
// fmt.Printf("%+v\n", decPkt2)
// nack := []byte{0x00, 0x02, 0x64, 0x00, 0x02, 0x35}
// crc1 := crc16CCITT(nack[2 : len(nack)-2])
// fmt.Printf("CRC = %x\n", crc1)
// tsch := []byte{0x00, 0x05, 0x72, 0x01, 0x00, 0x00, 0x66, 0x19, 0xF0}
// crc1 = crc16CCITT(tsch[2 : len(tsch)-2])
// fmt.Printf("CRC = %x\n", crc1)
// str := hex.EncodeToString(tsch)
// fmt.Printf("Hex = %+v\n", str)
// data3, _ := hex.DecodeString(
// "0011000315A07F44865A0E01000053EA01DF65AD6D")
// crc1 = crc16CCITT(data3[2 : len(data3)-2])
// fmt.Printf("CRC = %x, %d, %d\n", crc1, len(data3), len(data2))
// }
// TestTch decodes a sample TCH stream, runs it through the DevicePrs
// parser, and then builds an outgoing TCH frame (LEN + header + payload
// + CRC16-CCITT) around an empty message.
func TestTch(t *testing.T) {
	str := "0011000315A07F44865A0E01000053EA01DF65AD6D"
	data, err := hex.DecodeString(str)
	if err != nil {
		// Fix: this decode error was previously ignored.
		t.Fatalf("hex decode error: %v", err)
	}
	// CRC covers the frame without the 2-byte length prefix and the
	// 2-byte trailing checksum.
	crc1 := crc16CCITT(data[2 : len(data)-2])
	t.Logf("CRC = %X", crc1)
	p := NewDevicePrsParser(nil, nil, data)
	if p.Error != nil {
		t.Fatalf("Parser error: %v", p.Error)
	}
	t.Logf("IMEI=%v, command=%d", p.IMEI, p.Command)
	t.Logf("Struct = %+v", p)
	tch, err := parseTchStream(p.Data, p.MinPacketSize, p.MaxPacketSize)
	if err != nil {
		t.Fatalf("Parse TCH error: %v", err)
	}
	t.Logf("TCH = %+v\n", tch)
	t.Logf("Data = %v", string(tch.Data))
	// Build an outgoing frame around an (empty) message.
	pesan := ""
	n := len(pesan)
	// Length field counts the payload plus 4 header/CRC bytes.
	npacket := n + 4
	data = make([]byte, 8+n)
	data[0] = byte(npacket >> 8)
	data[1] = byte(npacket)
	data[2] = 0x72
	data[3] = 0x01 //PORT 'B' (TODO: configurable)
	data[4] = 0x00
	data[5] = 0x00
	// Copy payload after the 6-byte header.
	payload := []byte(pesan)
	copy(data[6:], payload)
	// CRC over everything except the length prefix and the CRC slot.
	crc := crc16CCITT(data[2:(n + 6)])
	data[n+6] = byte(crc >> 8)
	data[n+7] = byte(crc)
	t.Logf("Data = %v\n", data)
}
// getJSON performs an HTTP GET on url and decodes the JSON response
// body into target. A 10-second client timeout is applied.
func getJSON(url string, target interface{}) error {
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return json.NewDecoder(resp.Body).Decode(target)
}
// TestJSON fetches a broadcast message for a known IMEI from the REST
// service and logs the decoded result (requires network access).
func TestJSON(t *testing.T) {
	const imei = "868324028509698"
	url := "http://202.47.70.196/oslog/api/BroadcastPrivate/getbroadcastmessage/" + imei
	msg := &BroadcastMessage{}
	if err := getJSON(url, msg); err != nil {
		t.Errorf("Error get JSON: %v", err)
		t.FailNow()
	}
	t.Logf("JSON = %+v", msg)
}
// TestOptions parses an embedded hjson configuration (server, device,
// gps storage and log sections) with opt.FromText, logs the resulting
// option tree, and prints the current time formatted with timeLayout in
// both the default and local zones.
func TestOptions(t *testing.T) {
	// Sample configuration text; comments inside are Indonesian
	// ("dalam detik" = "in seconds").
	cfgText := `
{
server: {
id: conoco01
listenAddr: ":8081"
acceptTimeout: 10 # timeout (dalam detik)
writeTimeout: 10 # timeout (dalam detik)
maxReceive: 50
maxSend: 50
device: ruptela
}
ruptela: {
message: "http://localhost:8081/api/"
report: "http://localhost:8081/reportAPI"
appendIMEI: false
serviceTimeout: 10 # REST Service timeout (dalam detik)
}
gpsdata: {
storage: sql
sql: {
driver: postgres
connection: "user=isi-user password=isi-password dbname=OSLOGREC_MTRACK host=127.0.0.1 port=5432 connect_timeout=30 sslmode=disable"
maxIdle: 10 #Max jumlah koneksi idle
maxOpen: 10 #Max jumlah koneksi open
maxLifetime: 60 #Maximum lifetime (dalam detik)
insertQuery: INSERT INTO "GPS_DATA"("IMEI", "DATA_LOG", "FLAG", "DATE_INS", "DESC", "GPS_CODE", "DATA_LEN") VALUES($1, $2, $3, $4, $5, $6, $7)
}
}
log: {
#Known types: console, file, sql
type: console, file, sql
console: {
# use default options
}
#for file (uncomment type)
# type: file
file: {
name: ./applog.log
append: true
}
#for sql (uncomment type)
# SQLite -> driver: sqlite3
# PostgreSQL -> driver: postgres
# type: sql
sql: {
driver: sqlite3
connection: ./applog.sqlite
maxIdle: 10 #Max jumlah koneksi idle
maxOpen: 10 #Max jumlah koneksi open
maxLifetime: 60 #Maximum lifetime (dalam detik)
createQuery: CREATE TABLE applog(id INTEGER PRIMARY KEY AUTOINCREMENT, ts DATETIME, app VARCHAR(100), content TEXT)
insertQuery: INSERT INTO applog(ts, app, content) VALUES(?, ?, ?)
}
}
}
`
	options, err := opt.FromText(cfgText, "hjson")
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	t.Logf("%+v", options)
	// timeLayout is declared elsewhere in this package.
	now := time.Now().Format(timeLayout)
	nowLocal := time.Now().Local().Format(timeLayout)
	t.Logf("Now= %v, local= %v\n", now, nowLocal)
	//test container
	/*
		c1, key := options.GetContainer("server")
		t.Logf("Key = %s, Server = %+v", key, c1)
		c2, key := options.GetContainer("server.id")
		t.Logf("Key = %s, Server.id = %+v", key, c2)
		c3, key := options.GetContainer("log.sql")
		t.Logf("Key = %s, Log.sql = %+v", key, c3)
	*/
}

BIN
server/.DS_Store vendored

Binary file not shown.

325
server/devicePrsNet.go

@ -0,0 +1,325 @@
//
// DevicePrs Devie Server based on Go net package
// specify Mode=net in model.ServerConfig
//
package server
import (
"database/sql"
"encoding/binary"
"errors"
"io"
"log"
"net"
"os"
"os/signal"
"syscall"
"time"
"encoding/hex"
"sync"
"math"
"github.com/gansidui/gotcp"
"github.com/ipsusila/opt"
"github.com/confluentinc/confluent-kafka-go/kafka"
//"../model"
"../parser"
)
//-------------------------------------------------------------------
// serverId identifies this server instance in log output; it is filled
// in from the configuration when the server starts.
var serverId = ""

// ConnectionDevice tracks per-connection state: the device IMEI, the
// last raw packet and the time processing of the connection began.
type ConnectionDevice struct {
	imei        uint64
	data        []byte
	timeProcess time.Time
}

// MapConnection is a mutex-guarded map from remote address to the
// state of the device connected from that address.
type MapConnection struct {
	addr map[string]ConnectionDevice
	mux  sync.Mutex
}

// AddUpdt inserts or replaces the state stored under key.
func (c *MapConnection) AddUpdt(key string, connectionDevice ConnectionDevice) {
	c.mux.Lock()
	defer c.mux.Unlock()
	c.addr[key] = connectionDevice
}

// Del removes the state stored under key (no-op when absent).
func (c *MapConnection) Del(key string) {
	c.mux.Lock()
	defer c.mux.Unlock()
	delete(c.addr, key)
}

// Value returns the state stored under key, or the zero value when the
// key is unknown.
func (c *MapConnection) Value(key string) ConnectionDevice {
	c.mux.Lock()
	defer c.mux.Unlock()
	return c.addr[key]
}

// connectionDevices holds the state of every live client connection.
var connectionDevices = MapConnection{addr: make(map[string]ConnectionDevice)}
// DevicePrsServerPacket wraps a raw device frame exchanged via gotcp.
type DevicePrsServerPacket struct {
	buff []byte
}

// NewDevicePrsServerPacket wraps data in a packet (data is not copied).
func NewDevicePrsServerPacket(data []byte) *DevicePrsServerPacket {
	return &DevicePrsServerPacket{buff: data}
}

// Serialize returns the raw bytes of the packet.
func (p *DevicePrsServerPacket) Serialize() []byte {
	return p.buff
}
//-------------------------------------------------------------------
//DevicePrsServerNet info
// DevicePrsServerNet is the net-mode DevicePrs TCP server: it ties the
// parsed configuration to the underlying gotcp server and its callback.
type DevicePrsServerNet struct {
	Options  *opt.Options             // parsed application configuration
	Server   *gotcp.Server            // underlying TCP server (assigned in Start)
	Callback *DevicePrsServerCallback // connection/message handler (assigned in Start)
}
//Stop tcp server
// Stop shuts down the underlying gotcp server. It assumes Start has
// already run (rup.Server is non-nil).
func (rup *DevicePrsServerNet) Stop() {
	rup.Server.Stop()
}
//Start the server
// Start reads configuration, opens the GPS database and the Kafka
// producer, launches the gotcp server on the configured address and
// then blocks until SIGINT/SIGTERM arrives, at which point the server
// is stopped.
func (rup *DevicePrsServerNet) Start() {
	//---- options ---------------------------------------
	serverOpt := rup.Options.Get("server")
	maxSend := uint32(serverOpt.GetInt("maxSend", 40))
	maxReceive := uint32(serverOpt.GetInt("maxReceive", 40))
	addr := serverOpt.GetString("listenAddr", ":8081")
	acceptTimeout := time.Duration(serverOpt.GetInt("acceptTimeout", 2)) * time.Second
	writeTimeout := time.Duration(serverOpt.GetInt("writeTimeout", 5)) * time.Second
	//database options
	gpsData := rup.Options.Get("gpsdata")
	storage := gpsData.GetString("storage", "sql")
	dbOpt := gpsData.Get(storage)
	dbDriver := dbOpt.GetString("driver", "postgres")
	dbInfo := dbOpt.GetString("connection", "")
	dbMaxIdle := dbOpt.GetInt("maxIdle", 0)
	dbMaxOpen := dbOpt.GetInt("maxOpen", 5)
	dbMaxLifetime := time.Duration(dbOpt.GetInt("maxLifetime", 60)) * time.Second
	//devicePrs options
	rupOpt := parser.NewDevicePrsOptions(rup.Options)
	//server ID (used in log messages)
	serverId = serverOpt.GetString("id", "undefined (check configuration)")
	//listen to TCP
	tcpAddr, err := net.ResolveTCPAddr("tcp4", addr)
	if err != nil {
		// Fix: include the underlying error (was omitted before).
		log.Printf("Error resolving address %v: %v\n", addr, err)
		return
	}
	listener, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		log.Printf("Error listening to tcp address %v: %v\n", tcpAddr, err)
		return
	}
	//open database connection
	db, err := sql.Open(dbDriver, dbInfo)
	if err != nil {
		log.Printf("Error opening db %v: %v\n", dbDriver, err)
		return
	}
	defer db.Close()
	err = db.Ping()
	if err != nil {
		log.Printf("DB connection error: %v\n", err)
		return
	}
	//connection pooling
	db.SetMaxIdleConns(dbMaxIdle)
	db.SetMaxOpenConns(dbMaxOpen)
	db.SetConnMaxLifetime(dbMaxLifetime)
	//create producer for the message broker
	broker := rupOpt.BrokerServer
	procdr, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": broker})
	if err != nil {
		// NOTE: os.Exit skips deferred cleanup (db.Close) by design here.
		log.Printf("Failed to create producer: %s\n", err)
		os.Exit(1)
	}
	log.Printf("Created Producer %v\n", procdr)
	// creates a server
	config := &gotcp.Config{
		PacketSendChanLimit:    maxSend,
		PacketReceiveChanLimit: maxReceive,
	}
	proto := &DevicePrsServerProtocol{
		maxPacketSize: parser.NewDevicePrs().MaxPacketSize,
	}
	callback := &DevicePrsServerCallback{
		options:      rupOpt,
		gpsDb:        db,
		writeTimeout: writeTimeout,
		procdr:       procdr,
	}
	srv := gotcp.NewServer(config, callback, proto)
	rup.Server = srv
	rup.Callback = callback
	// starts service
	go srv.Start(listener, acceptTimeout)
	log.Printf("listening: %v\n", listener.Addr())
	// catch system signals; the channel must be buffered so the
	// signal package never drops a notification (see signal.Notify docs)
	chSig := make(chan os.Signal, 1)
	signal.Notify(chSig, syscall.SIGINT, syscall.SIGTERM)
	ch := <-chSig
	log.Printf("Signal: %v\n", ch)
	// stops service
	srv.Stop()
}
//-------------------------------------------------------------------
//DevicePrsServerCallback holds server callback info
// It implements gotcp's connection callbacks (OnConnect / OnMessage /
// OnClose).
type DevicePrsServerCallback struct {
	options      *parser.DevicePrsOptions // parser/device configuration
	gpsDb        *sql.DB                  // GPS data storage connection
	writeTimeout time.Duration            // timeout for async packet writes
	procdr       *kafka.Producer          // Kafka producer passed to the parser
}
//OnConnect is called when new connection established
// It registers the connection address with an as-yet-unknown IMEI.
func (cb *DevicePrsServerCallback) OnConnect(c *gotcp.Conn) bool {
	addr := c.GetRawConn().RemoteAddr()
	c.PutExtraData(addr)
	log.Printf("%v New connection(client: %v)\n", serverId, addr)
	// track this connection; IMEI is filled in by the first packet
	connDevice := ConnectionDevice{
		imei:        0,
		timeProcess: time.Now(),
	}
	connectionDevices.AddUpdt(addr.String(), connDevice)
	return true
}
//OnMessage is called when packet readed
// OnMessage parses the received frame, remembers the connection's IMEI
// from its first packet, schedules the asynchronous database insert and
// replies with the parser's client response (ACK/NACK) when scheduling
// succeeded.
func (cb *DevicePrsServerCallback) OnMessage(c *gotcp.Conn, p gotcp.Packet) bool {
	addr := c.GetRawConn().RemoteAddr()
	// downcast to the devicePrs packet and grab the raw bytes
	data := p.(*DevicePrsServerPacket).Serialize()
	// connection state (holds the IMEI seen on the first frame)
	connectionDevice := connectionDevices.Value(addr.String())
	// create parser; a verification failure is logged but processing
	// continues (original behavior)
	par := parser.NewDevicePrsParser(cb.options, cb.gpsDb, data, cb.procdr)
	if err := par.GetError(); err != nil {
		log.Printf("Error verifying packet(client: %v): %v, Err=%v\n",
			addr, hex.EncodeToString(data), err)
	}
	// remember the IMEI from the first packet of this connection
	imei := connectionDevice.imei
	if imei == 0 {
		imei = par.IMEI
		connectionDevice.imei = par.IMEI
		connectionDevice.data = data
		connectionDevices.AddUpdt(addr.String(), connectionDevice)
	}
	duration := time.Since(connectionDevice.timeProcess)
	log.Printf("Data received(client: %v, IMEI: %v) with duration %v s\n", addr, imei, math.Round(duration.Seconds()*100)/100)
	// schedule the database insert; acknowledge only when scheduled
	statusInsert := par.ExecuteAsync()
	if statusInsert {
		// send response to client (ACK or NACK)
		rep := par.GetClientResponse()
		c.AsyncWritePacket(NewDevicePrsServerPacket(rep), cb.writeTimeout)
		log.Printf("Sent ACK (client: %v, IMEI: %v)\n", addr, imei)
	}
	return true
}
//OnClose is called when connection being closed
// It drops the connection's entry and logs the total connection time.
func (cb *DevicePrsServerCallback) OnClose(c *gotcp.Conn) {
	key := c.GetRawConn().RemoteAddr().String()
	dev := connectionDevices.Value(key)
	elapsed := time.Since(dev.timeProcess)
	connectionDevices.Del(key)
	log.Printf("%v Close connection (client: %v, IMEI: %v) with duration %v s\n", serverId, c.GetExtraData(), dev.imei, math.Round(elapsed.Seconds()*100)/100)
}
//-------------------------------------------------------------------
//DevicePrsServerProtocol holds parser
// It implements gotcp's Protocol interface; ReadPacket frames the
// incoming stream using the 2-byte big-endian length prefix.
type DevicePrsServerProtocol struct {
	maxPacketSize int // upper bound accepted for a frame's length field
}
//ReadPacket called everytime data is available
// ReadPacket reads one framed packet from the connection. Frame layout:
// 2-byte big-endian payload length, payload, 2-byte CRC16. The length
// field is validated against the configured maximum packet size.
func (p *DevicePrsServerProtocol) ReadPacket(conn *net.TCPConn) (gotcp.Packet, error) {
	//TODO handle packet other than Device Record
	// read the 2-byte length prefix
	lengthBytes := make([]byte, 2)
	if _, err := io.ReadFull(conn, lengthBytes); err != nil {
		return nil, err
	}
	limit := uint16(p.maxPacketSize)
	length := binary.BigEndian.Uint16(lengthBytes)
	if length > limit {
		// Fix: error strings are lowercase by Go convention (ST1005).
		return nil, errors.New("the size of packet is larger than the limit")
	}
	log.Printf("data packet length(client: %v) : %d", conn.RemoteAddr(), length)
	//Packet structure: LEN + DATA + CRC16
	buff := make([]byte, 2+length+2)
	copy(buff[0:2], lengthBytes)
	// read body ( buff = lengthBytes + body )
	if _, err := io.ReadFull(conn, buff[2:]); err != nil {
		return nil, err
	}
	return NewDevicePrsServerPacket(buff), nil
}

BIN
server/exe/.DS_Store vendored

Binary file not shown.

15947
server/exe/applog.log

File diff suppressed because it is too large Load Diff

BIN
server/exe/applog.sqlite

Binary file not shown.

60
server/exe/config-server.hjson

@ -0,0 +1,60 @@
{
server: {
id: teltonika9000
listenAddr: ":9000"
acceptTimeout: 10 # timeout (dalam detik)
writeTimeout: 10 # timeout (dalam detik)
maxReceive: 50
maxSend: 50
device: ruptela
}
ruptela: {
message: "http://192.168.70.200/oslog/api/BroadcastPrivate/getbroadcastmessage/"
report: "http://192.168.70.200/oslog/api/BroadcastPrivate/putbroadcastmessage"
appendIMEI: false
serviceTimeout: 10 # REST Service timeout (dalam detik)
}
gpsdata: {
storage: sql
sql: {
driver: postgres
connection: "user=postgres password=s3mb1l4n dbname=OSLOGRECV2 host=192.168.70.196 port=5432 connect_timeout=30 sslmode=disable"
maxIdle: 10 #Max jumlah koneksi idle
maxOpen: 10 #Max jumlah koneksi open
maxLifetime: 60 #Maximum lifetime (dalam detik)
insertQuery: INSERT INTO "GPS_DATA"("IMEI", "DATA_LOG", "FLAG", "DATE_INS", "DESC", "GPS_CODE", "DATA_LEN") VALUES($1, $2, $3, $4, $5, $6, $7)
}
}
log: {
#Known types: console, file, sql
type: console, file
console: {
# use default options
}
#for file (uncomment type)
# type: file
file: {
name: /home/vagrant/receiver/teltonika9000/applog.log
append: true
}
#for sql (uncomment type)
# SQLite -> driver: sqlite3
# PostgreSQL -> driver: postgres
# type: sql
sql: {
driver: sqlite3
connection: ./applog.sqlite
maxIdle: 10 #Max jumlah koneksi idle
maxOpen: 10 #Max jumlah koneksi open
maxLifetime: 60 #Maximum lifetime (dalam detik)
createQuery: CREATE TABLE applog(id INTEGER PRIMARY KEY AUTOINCREMENT, ts DATETIME, app VARCHAR(100), content TEXT)
insertQuery: INSERT INTO applog(ts, app, content) VALUES(?, ?, ?)
}
}
}

160
server/exe/config.hjson

@ -0,0 +1,160 @@
{
server: {
id: ruptela
listenAddr: ":4000"
acceptTimeout: 10 # timeout (dalam detik)
writeTimeout: 10 # timeout (dalam detik)
maxReceive: 50
maxSend: 50
device: RUPTELA
}
ruptela: {
message: "http://localhost:8081/api/"
report: "http://localhost:8081/reportAPI"
appendIMEI: false
serviceTimeout: 10 # REST Service timeout (dalam detik)
}
gpsdata: {
storage: sql
sql: {
driver: postgres
connection: "user=postgres password=s3mb1l4n dbname=OSLOGREC host=127.0.0.1 port=5432 connect_timeout=30 sslmode=disable"
maxIdle: 10 #Max jumlah koneksi idle
maxOpen: 10 #Max jumlah koneksi open
maxLifetime: 60 #Maximum lifetime (dalam detik)
insertQuery: INSERT INTO "GPS_DATA"("IMEI", "DATA_LOG", "FLAG", "DATE_INS", "DESC", "GPS_CODE", "DATA_LEN") VALUES($1, $2, $3, $4, $5, $6, $7)
}
}
log: {
#Known types: console, file, sql
type: console, file
console: {
# use default options
}
#for file (uncomment type)
# type: file
file: {
name: /Users/baymac/Documents/work/IU/oslog/sourcecode/oslog.id/putu/jyoti_teltonika/server/exe/applog.log
append: true
}
#for sql (uncomment type)
# SQLite -> driver: sqlite3
# PostgreSQL -> driver: postgres
# type: sql
sql: {
driver: sqlite3
connection: ./applog.sqlite
maxIdle: 10 #Max jumlah koneksi idle
maxOpen: 10 #Max jumlah koneksi open
maxLifetime: 60 #Maximum lifetime (dalam detik)
createQuery: CREATE TABLE applog(id INTEGER PRIMARY KEY AUTOINCREMENT, ts DATETIME, app VARCHAR(100), content TEXT)
insertQuery: INSERT INTO applog(ts, app, content) VALUES(?, ?, ?)
}
}
messagebroker: {
brokerServer: localhost:9092
brokerTopic: ruptela4000
}
iotag: {
tags: [
{
tagName: Din1
tagId: 2
tagDataType: int
},
{
tagName: Din2
tagId: 3
tagDataType: int
},
{
tagName: Din3
tagId: 4
tagDataType: int
},
{
tagName: Din4
tagId: 5
tagDataType: int
},
{
tagName: Ain1
tagId: 22
tagDataType: int
},
{
tagName: Ain2
tagId: 23
tagDataType: int
},
{
tagName: Dist
tagId: 77
tagDataType: float32
},
{
tagName: DistTotal
tagId: 65
tagDataType: float64
},
{
tagName: GsmSignalSensor
tagId: 27
tagDataType: int
},
{
tagName: DeepSleepSensor
tagId: 200
tagDataType: int
},
{
tagName: HarshBreaking
tagId: 135
tagDataType: int
},
{
tagName: HarshAcceleration
tagId: 136
tagDataType: int
},
{
tagName: BatteryVolt
tagId: 30
tagDataType: int
},
{
tagName: PowerVolt
tagId: 29
tagDataType: int
},
{
tagName: Temp
tagId: 78
tagDataType: int
},
{
tagName: Temp1
tagId: 79
tagDataType: int
},
{
tagName: Temp2
tagId: 80
tagDataType: int
},
{
tagName: Rfid
tagId: 171
tagDataType: string
}
]
}
}

102
server/exe/deviceServer.go

@ -0,0 +1,102 @@
//
// Device server which handle communication to various devices (currently implements RUPTELA)
//
package main
import (
"flag"
"log"
"runtime"
"os"
"io/ioutil"
"strconv"
"path/filepath"
"github.com/ipsusila/opt"
//"../../model"
"../../server"
"../../lumberjack"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
)
// exPath holds the directory of the running executable (with trailing
// separator); it is set in main and used to locate config/PID files.
// Fix: removed the stray trailing semicolon (gofmt).
var exPath = ""
// startServer launches the net-based DevicePrs server with the given
// configuration and blocks until it terminates.
func startServer(options *opt.Options) {
	svc := &server.DevicePrsServerNet{Options: options}
	svc.Start()
}
// main wires up configuration loading, rotating file logging and a PID
// file, then runs the device server until it exits.
func main() {
	// use all available CPU cores
	runtime.GOMAXPROCS(runtime.NumCPU())
	ex, err := os.Executable()
	if err != nil {
		panic(err)
	}
	exPath = filepath.Dir(ex) + "/"
	// default configuration file next to the executable; fall back to
	// the working directory when it does not exist there.
	// Fix: use os.Stat instead of opening (and deferring Close on a
	// possibly-failed handle for) a file that was never read.
	confPath := exPath + "config.hjson"
	if _, err := os.Stat(confPath); err != nil {
		confPath = "config.hjson"
	}
	//parse configuration file
	cfgFile := flag.String("conf", confPath, "Configuration file")
	flag.Parse()
	//load options
	config, err := opt.FromFile(*cfgFile, opt.FormatAuto)
	if err != nil {
		log.Printf("Error while loading configuration file %v -> %v\n", *cfgFile, err)
		return
	}
	//display server ID (displayed in console)
	log.Printf("Server ID: %v\n", config.Get("server").GetString("id", "undefined (check configuration)"))
	// route the standard logger to a size-rotated file via lumberjack
	log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime)
	logName := config.Get("log").Get("file").GetString("name", "")
	log.SetOutput(&lumberjack.Logger{
		Filename:   logName,
		MaxSize:    10, // megabytes
		MaxBackups: 3,
		MaxAge:     3, //days
		Compress:   true,
	})
	// write a PID file so external tooling can manage the process;
	// a Remove error simply means no stale file existed.
	pidFilePath := exPath + "receiver-" + config.Get("server").GetString("id", "undefined (check configuration)") + "-pid"
	if err := os.Remove(pidFilePath); err != nil {
		log.Printf("PID file will be created")
	}
	pidStr := strconv.Itoa(os.Getpid())
	log.Printf("PID: %v\n", pidStr)
	if err := ioutil.WriteFile(pidFilePath, []byte(pidStr), 0644); err != nil {
		log.Printf("Error while setup write PID file: %v\n", err)
	} else {
		log.Printf("PID file created")
	}
	//Run application
	startServer(config)
}

133
server/ruptela_test.go

@ -0,0 +1,133 @@
package server
import (
"encoding/hex"
"fmt"
"net"
"testing"
"time"
"../model"
)
/*
sendData connects to the configured server, writes data, waits for one
response and reports the elapsed round-trip time (nanoseconds) on dur,
or -1 on any failure.
*/
func sendData(conf *model.ServerConfig, data []byte, dur chan<- int64) error {
	//simulate client
	stTime := time.Now()
	addr := fmt.Sprintf("%s:%d", conf.IP, conf.Port)
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		fmt.Println("Error: ", err)
		dur <- -1
		return err
	}
	defer conn.Close()
	nw, err := conn.Write(data)
	if err != nil {
		fmt.Printf("Write error: %+v\n", err)
		dur <- -1
		return err
	}
	// Fix: "writen" -> "written"
	fmt.Println("Packet written = ", nw)
	//read response (up to 20 bytes)
	rep := make([]byte, 20)
	nr, err := conn.Read(rep)
	if err != nil {
		fmt.Printf("Read error: %+v\n", err)
		dur <- -1
		return err
	}
	fmt.Println("Response -> ", rep[:nr])
	// time.Since is the idiomatic form of time.Now().Sub(stTime)
	dur <- time.Since(stTime).Nanoseconds()
	return nil
}
// TestInvalid sends a malformed frame to a (likely unreachable) local
// server and drains the duration channel.
func TestInvalid(t *testing.T) {
	conf := model.ServerConfig{IP: "127.0.0.1", Port: 8095}
	dur := make(chan int64, 1)
	bad := []byte{0x08, 0x00, 0x01, 0x02}
	sendData(&conf, bad, dur)
	<-dur
}
//start sending data
// TestRuptelaReceiver fires NTEST rounds of three sample packets at a
// local server concurrently and reports min/avg/max round-trip times.
func TestRuptelaReceiver(t *testing.T) {
	//send data
	data1, _ := hex.DecodeString(
		"007900000b1a2a5585c30100024e9c036900000f101733208ff45e07b31b570a001009090605011b1a020003001c01ad01021d338e1600000" +
			"2960000601a41014bc16d004e9c038400000f104fdf20900d20075103b00a001308090605011b1a020003001c01ad01021d33b11600000296" +
			"0000601a41014bc1ea0028f9")
	data2, _ := hex.DecodeString(
		"033500000C076B5C208F01011E5268CEF20000196E3A3A0AEF3E934F3E2D780000000007000000005268CEFD0000196E3A3A0AEF3E934F3" +
			"E2D780000000007000000005268CF080000196E3A3A0AEF3E934F3E2D780000000007000000005268CF130000196E3A3A0AEF3E934F3E2D7" +
			"80000000007000000005268CF1E0000196E3A3A0AEF3E934F3E2D780000000007000000005268CF290000196E3A3A0AEF3E934F3E2D78000" +
			"0000007000000005268CF340000196E3A3A0AEF3E934F3E2D780000000007000000005268CF3F0000196E3A3A0AEF3E934F3E2D780000000" +
			"007000000005268CF4A0000196E3A3A0AEF3E934F3E2D780000000007000000005268CF550000196E3A3A0AEF3E934F3E2D7800000000070" +
			"00000005268CF600000196E3A3A0AEF3E934F3E2D780000000007000000005268CF6B0000196E3A3A0AEF3E934F3E2D78000000000700000" +
			"0005268CF730000196E36630AEF42CE4F6D0BF40400022208000000005268CF7E0000196E36B60AEF42BE4F6D0BF400000000070000000052" +
			"68CF890000196E36B60AEF42BE4F6D0BF40000000007000000005268CF940000196E36B60AEF42BE4F6D0BF40000000007000000005268CF" +
			"9F0000196E36B60AEF42BE4F6D0BF40000000007000000005268CFAA0000196E36B60AEF42BE4F6D0BF40000000007000000005268CFB50" +
			"000196E36B60AEF42BE4F6D0BF40000000007000000005268CFC00000196E36B60AEF42BE4F6D0BF40000000007000000005268CFCB00001" +
			"96E36B60AEF42BE4F6D0BF40000000007000000005268CFD60000196E36B60AEF42BE4F6D0BF40000000007000000005268CFD70000196E" +
			"3C710AEF5EFF4F690BF40400011708000000005268CFE20000196E3B980AEF601A4F690BF40000000007000000005268CFED0000196E3B980" +
			"AEF601A4F690BF40000000007000000005268CFF80000196E3B980AEF601A4F690BF40000000007000000005268D0030000196E3B980AEF60" +
			"1A4F690BF40000000007000000005268D00E0000196E3B980AEF601A4F690BF40000000007000000005268D0190000196E3B980AEF601A4F6" +
			"90BF40000000007000000005268D0240000196E3B980AEF601A4F690BF400000000070000000046E2")
	data3, _ := hex.DecodeString(
		"0011000315A07F44865A0E01000053EA01DF65AD6D")
	const NTEST = 1
	conf := model.ServerConfig{IP: "127.0.0.1", Port: 8081}
	ch := make(chan int64, NTEST*3)
	n := 0
	for i := 0; i < NTEST; i++ {
		go sendData(&conf, data1, ch)
		n++
		go sendData(&conf, data2, ch)
		n++
		go sendData(&conf, data3, ch)
		n++
	}
	var ma int64 = 0
	var mi int64 = 10000000000000
	var tot float64 = 0
	// Fix: drain all n results (previously only NTEST*2 of the NTEST*3
	// goroutine results were read) and average over the valid samples.
	valid := 0
	for i := 0; i < n; i++ {
		ti := <-ch
		if ti < 0 {
			// failed send; excluded from the statistics
			continue
		}
		valid++
		if ti < mi {
			mi = ti
		}
		if ti > ma {
			ma = ti
		}
		tot += float64(ti)
	}
	const MILIS = 1000000
	avg := 0.0
	if valid > 0 {
		avg = tot / float64(valid)
	}
	fmt.Printf("Total time = %+v ms, avg = %+v ms, mi = %+v ms, ma = %+v ms\n",
		tot/MILIS, avg/MILIS, float64(mi)/MILIS, float64(ma)/MILIS)
}

172
utility/logwriter.go

@ -0,0 +1,172 @@
package utility
import (
"database/sql"
"io"
"os"
"time"
"strings"
"github.com/ipsusila/opt"
)
//SQLWriter write log to database i.e. SQLite
type SQLWriter struct {
	db          *sql.DB // open log database handle
	insertQuery string  // statement run for every Write: (ts, app, content)
	createQuery string  // table-creation statement, executed best-effort at open
	appID       string  // application identifier stored with each log row
}
//LogWriter support multiple output
// Writes fan out to every configured sink; console output (stderr) is
// handled separately via the console flag.
type LogWriter struct {
	writers []io.WriteCloser // file/sql sinks
	console bool             // also echo each write to stderr
}
//NewLogWriter create multiple writer
// NewLogWriter builds a LogWriter from the "log" section of options.
// Supported sink types (comma separated in log.type): console, file,
// sql. The application id is looked up in server.id, then log.id, then
// top-level id.
func NewLogWriter(options *opt.Options) (*LogWriter, error) {
	logOpt := options.Get("log")
	//backward compatibility, get from server
	appID := options.Get("server").GetString("id", "")
	if appID == "" {
		//if failed, get from `log.id`
		appID = logOpt.GetString("id", "")
	}
	if appID == "" {
		//if failed, get from `id`
		appID = options.GetString("id", "*undefined*")
	}
	outTypes := logOpt.GetString("type", "")
	typeItems := strings.Split(outTypes, ",")
	writers := []io.WriteCloser{}
	console := false
	// closeAll releases writers opened so far; used on error paths.
	// Fix: previously already-opened writers leaked when a later sink
	// failed to open.
	closeAll := func() {
		for _, w := range writers {
			w.Close()
		}
	}
	for _, item := range typeItems {
		item = strings.TrimSpace(item)
		switch item {
		case "console":
			console = true
		case "file":
			fileOpt := logOpt.Get("file")
			name := fileOpt.GetString("name", "")
			appendExisting := fileOpt.GetBool("append", true)
			flag := os.O_WRONLY | os.O_CREATE
			if appendExisting {
				flag |= os.O_APPEND
			} else {
				flag |= os.O_TRUNC
			}
			f, err := os.OpenFile(name, flag, 0666)
			if err != nil {
				closeAll()
				return nil, err
			}
			writers = append(writers, f)
		case "sql":
			w, err := newSQLWriter(logOpt, appID)
			if err != nil {
				closeAll()
				return nil, err
			}
			writers = append(writers, w)
		}
	}
	lw := &LogWriter{
		writers: writers,
		console: console,
	}
	return lw, nil
}
//NewSQLWriter open log writer to sql
// newSQLWriter opens the database configured under log.sql, verifies
// connectivity, applies the pooling limits, best-effort runs the create
// query and returns a ready SQLWriter.
func newSQLWriter(logOpt *opt.Options, appID string) (*SQLWriter, error) {
	sqlOpt := logOpt.Get("sql")
	driver := sqlOpt.GetString("driver", "")
	dsn := sqlOpt.GetString("connection", "")
	createQuery := sqlOpt.GetString("createQuery", "")
	insertQuery := sqlOpt.GetString("insertQuery", "")
	maxIdle := sqlOpt.GetInt("maxIdle", 0)
	maxOpen := sqlOpt.GetInt("maxOpen", 5)
	maxLifetime := time.Duration(sqlOpt.GetInt("maxLifetime", 60)) * time.Second
	//open database connection and verify it
	db, err := sql.Open(driver, dsn)
	if err != nil {
		return nil, err
	}
	if err = db.Ping(); err != nil {
		db.Close()
		return nil, err
	}
	//connection pooling
	db.SetMaxIdleConns(maxIdle)
	db.SetMaxOpenConns(maxOpen)
	db.SetConnMaxLifetime(maxLifetime)
	// create the log table; the error is deliberately ignored since the
	// table usually exists already
	//TODO, verify query (security, untrusted source)
	db.Exec(createQuery)
	return &SQLWriter{
		db:          db,
		createQuery: createQuery,
		insertQuery: insertQuery,
		appID:       appID,
	}, nil
}
//Write to multiple log
// Write fans data out to stderr (when console is enabled) and to every
// configured writer. NOTE(review): the returned (n, err) reflect only
// the LAST write performed; earlier failures are not reported and every
// sink is attempted regardless — best-effort semantics by design.
func (lw *LogWriter) Write(data []byte) (n int, err error) {
	//now := time.Now().Format("[2006-01-02 15:04:05]")
	if lw.console {
		n, err = os.Stderr.Write(data)
	}
	for _, w := range lw.writers {
		n, err = w.Write(data)
	}
	return n, err
}
//Close all writer
// Close shuts every underlying writer; individual close errors are
// ignored and nil is always returned.
func (lw *LogWriter) Close() error {
	for i := range lw.writers {
		lw.writers[i].Close()
	}
	return nil
}
//Write data with timestamp (ts, log)
// Write inserts one log row (timestamp, app id, message) and returns
// len(data) on success so callers treating this as io.Writer see a full
// write.
// NOTE(review): Result.LastInsertId is not supported by every driver
// (e.g. lib/pq returns an error unconditionally) — with such drivers
// every Write would report an error even though the row was inserted;
// verify against the configured driver.
func (w *SQLWriter) Write(data []byte) (int, error) {
	//TODO, verify query (security, untrusted source)
	result, err := w.db.Exec(w.insertQuery, time.Now(), w.appID, string(data))
	if err != nil {
		return 0, err
	}
	n := len(data)
	_, err = result.LastInsertId()
	if err != nil {
		return n, err
	}
	nrows, err := result.RowsAffected()
	if err != nil || nrows == 0 {
		// nrows == 0 with a nil err returns (n, nil): silent success
		return n, err
	}
	return n, nil
}
// Close releases the underlying database connection.
func (w *SQLWriter) Close() error {
	return w.db.Close()
}

39
utility/watcher/config.hjson

@ -0,0 +1,39 @@
{
watch: {
paths: ["../../", "doesnotexist"]
maxFile: 5
pattern: server
verbose: true
catchSignal: false
}
log: {
#Known types: console, file, sql
type: console, sql
console: {
# use default options
}
#for file (uncomment type)
# type: file
file: {
name: ./applog.log
append: true
}
#for sql (uncomment type)
# SQLite -> driver: sqlite3
# PostgreSQL -> driver: postgres
# type: sql
sql: {
driver: sqlite3
connection: ./applog.sqlite
maxIdle: 10 #Max jumlah koneksi idle
maxOpen: 10 #Max jumlah koneksi open
maxLifetime: 60 #Maximum lifetime (dalam detik)
createQuery: CREATE TABLE applog(id INTEGER PRIMARY KEY AUTOINCREMENT, ts DATETIME, app VARCHAR(100), content TEXT)
insertQuery: INSERT INTO applog(ts, app, content) VALUES(?, ?, ?)
}
}
}

340
utility/watcher/log_watcher.go

@ -0,0 +1,340 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"path/filepath"
"regexp"
"sync"
"syscall"
"github.com/fsnotify/fsnotify"
"github.com/ipsusila/opt"
"oslog.id/putu/jyoti/utility"
_ "github.com/mattn/go-sqlite3"
)
// watcherOptions holds the parsed "watch" configuration section.
type watcherOptions struct {
	watchPaths  []string       // directories to watch for file events
	maxLastFile int            // max files kept before the oldest is deleted
	pattern     string         // raw filename pattern (regexp source)
	verbose     bool           // log ADDED/REMOVED events
	catchSignal bool           // exit on SIGINT/SIGTERM instead of 'q' on stdin
	regPattern  *regexp.Regexp // compiled pattern; nil matches everything
}
// node is a singly-linked list element holding a file name and its
// resolved absolute path.
type node struct {
	name     string
	fullName string
	next     *node
}

// list is a mutex-protected FIFO of watched files, oldest first.
type list struct {
	sync.Mutex
	numItems int
	head     *node
	tail     *node
}

// clear empties the list.
// Fix: now takes the lock like every other operation on list; it was
// previously the only mutator without synchronization.
func (l *list) clear() {
	l.Lock()
	defer l.Unlock()
	l.head = nil
	l.tail = nil
	l.numItems = 0
}

// count returns the number of items currently in the list.
func (l *list) count() int {
	l.Lock()
	defer l.Unlock()
	return l.numItems
}

// newNode builds a node for name, resolving its absolute path.
func (l *list) newNode(name string) (*node, error) {
	fullName, err := filepath.Abs(name)
	if err != nil {
		return nil, err
	}
	nd := &node{
		name:     name,
		fullName: fullName,
	}
	return nd, nil
}

// push appends name at the tail of the list.
func (l *list) push(name string) error {
	l.Lock()
	defer l.Unlock()
	nd, err := l.newNode(name)
	if err != nil {
		return err
	}
	if l.tail == nil {
		l.head = nd
		l.tail = nd
	} else {
		l.tail.next = nd
		l.tail = nd
	}
	l.numItems++
	return nil
}

// peek returns the oldest node without removing it (nil when empty).
func (l *list) peek() *node {
	l.Lock()
	defer l.Unlock()
	if l.head == nil {
		return nil
	}
	return l.head
}

// safePop removes and returns the head; the caller must hold the lock.
func (l *list) safePop() *node {
	if l.head == nil {
		return nil
	}
	nd := l.head
	l.head = nd.next
	if l.head == nil {
		l.tail = nil
	}
	l.numItems--
	return nd
}

// pop removes and returns the oldest node (nil when empty).
func (l *list) pop() *node {
	l.Lock()
	defer l.Unlock()
	return l.safePop()
}

// remove deletes the node whose absolute path matches name and returns
// it, or nil when not found (or the path cannot be resolved).
func (l *list) remove(name string) *node {
	l.Lock()
	defer l.Unlock()
	//create item for absolute-path comparison
	item, err := l.newNode(name)
	if err != nil {
		return nil
	}
	//empty list, or the head is the match
	if l.head == nil || item == nil {
		return nil
	} else if l.head.fullName == item.fullName {
		return l.safePop()
	}
	//walk pairs (current, next) looking for the match
	nd := l.head
	next := l.head.next
	for next != nil {
		if next.fullName == item.fullName {
			nd.next = next.next
			l.numItems--
			if next == l.tail {
				l.tail = nd
			}
			return next
		}
		//advance
		nd = next
		next = nd.next
	}
	return nil
}

// iterate calls fn on every node, oldest first, while holding the lock;
// fn must not call back into list methods (would deadlock).
func (l *list) iterate(fn func(nd *node)) {
	l.Lock()
	defer l.Unlock()
	nd := l.head
	for nd != nil {
		fn(nd)
		nd = nd.next
	}
}
// startWatching watches options.watchPaths with fsnotify and keeps at most
// options.maxLastFile created files matching options.regPattern, deleting
// the oldest tracked file once the threshold is exceeded.
//
// It blocks until shutdown is requested: via SIGINT/SIGTERM when
// options.catchSignal is set, otherwise by typing 'q'/'Q' on stdin.
func startWatching(options *watcherOptions) {
	// FIFO of tracked files (oldest first) and of files whose deletion
	// failed (reported at shutdown).
	items := &list{}
	errItems := &list{}
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	done := make(chan bool, 1)
	mustExit := make(chan bool, 1)
	// Process events until told to exit.
	go func() {
		defer close(done)
		for {
			select {
			case ev := <-watcher.Events:
				isCreate := ev.Op&fsnotify.Create == fsnotify.Create
				isDelete := ev.Op&fsnotify.Remove == fsnotify.Remove
				if isCreate {
					// Track new matching items; once the threshold is
					// exceeded, delete the oldest tracked file.
					if options.regPattern == nil || options.regPattern.MatchString(ev.Name) {
						// FIX: the push error (e.g. filepath.Abs failure) was silently dropped.
						if err := items.push(ev.Name); err != nil {
							log.Printf("Error %v while tracking %v", err, ev.Name)
						}
						if options.verbose {
							log.Printf("Item %v ADDED", ev.Name)
						}
						if items.count() > options.maxLastFile {
							// FIX: guard against a nil pop (concurrent removal).
							if node := items.pop(); node != nil {
								if err := os.Remove(node.fullName); err != nil {
									errItems.push(node.name)
									log.Printf("Error %v while deleting %v", err, node.fullName)
								}
							}
						}
					}
				} else if isDelete {
					if options.verbose {
						log.Printf("Item %v REMOVED", ev.Name)
					}
					// If manually deleted: remove from the tracked list.
					items.remove(ev.Name)
				}
			case err := <-watcher.Errors:
				log.Println("Watcher error:", err)
			case <-mustExit:
				return
			}
		}
	}()
	// Start watching the configured paths.
	nwatched := 0
	for _, sPath := range options.watchPaths {
		err = watcher.Add(sPath)
		if err != nil {
			log.Printf("Error watching `%s` -> %v", sPath, err)
		} else {
			nwatched++
			if options.verbose {
				log.Printf("Watching %s...", sPath)
			}
		}
	}
	if nwatched == 0 {
		// Nothing could be watched: tell the event goroutine to finish.
		close(mustExit)
	} else if options.catchSignal {
		signalChan := make(chan os.Signal, 1)
		signal.Notify(signalChan, os.Interrupt, syscall.SIGTERM)
		go func() {
			for range signalChan {
				fmt.Println("Received an interrupt, stopping watcher...")
				close(mustExit)
				return
			}
		}()
	} else {
		// Wait for 'q'/'Q' on stdin.
		buf := make([]byte, 1)
		for {
			n, err := os.Stdin.Read(buf)
			if err != nil {
				log.Println(err)
				// FIX: precedence bug. The original condition
				//   n > 0 && buf[0] == 'q' || buf[0] == 'Q'
				// parsed as (n > 0 && buf[0] == 'q') || buf[0] == 'Q',
				// so a stale 'Q' in buf could trigger exit with n == 0.
			} else if n > 0 && (buf[0] == 'q' || buf[0] == 'Q') {
				close(mustExit)
				break
			}
		}
	}
	// Wait for the event goroutine, then release the watcher.
	<-done
	watcher.Close()
	// Report files whose deletion failed ("due to", typo fixed).
	if errItems.count() > 0 {
		log.Printf("The following item(s) were not removed due to error")
		errItems.iterate(func(nd *node) {
			log.Printf("%v", nd.fullName)
		})
	}
	log.Printf("Watcher %d DONE", items.count())
}
// main loads the HJSON configuration, redirects the standard logger to the
// configured log writer, builds the watcher options and runs the watcher.
func main() {
	cfgFile := flag.String("conf", "config.hjson", "Configuration file")
	flag.Parse()

	// Load the configuration file.
	config, err := opt.FromFile(*cfgFile, opt.FormatAuto)
	if err != nil {
		log.Printf("Error while loading configuration file %v -> %v\n", *cfgFile, err)
		return
	}
	// Display the application ID on the console before logging is redirected.
	log.Printf("Application ID: %v\n", config.GetString("id", "undefined (check configuration)"))

	// Route the standard logger through the configured writer.
	lw, err := utility.NewLogWriter(config)
	if err != nil {
		log.Printf("Error while setup logging: %v\n", err)
		return
	}
	log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime)
	log.SetOutput(lw)
	defer lw.Close()

	// Build watcher options from the "watch" section of the configuration.
	wcfg := config.Get("watch")
	options := &watcherOptions{
		watchPaths:  wcfg.GetStringArray("paths"),
		maxLastFile: wcfg.GetInt("maxFile", 50),
		pattern:     wcfg.GetString("pattern", ""),
		verbose:     wcfg.GetBool("verbose", false),
		catchSignal: wcfg.GetBool("catchSignal", false),
	}
	// Echo the full configuration when verbose.
	if options.verbose {
		log.Printf("%s", config.AsJSON())
	}
	// Compile the optional file-name filter.
	if len(options.pattern) > 0 {
		options.regPattern = regexp.MustCompile(options.pattern)
	}
	startWatching(options)
}

20
vendor/github.com/gansidui/gotcp/LICENSE generated vendored

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2014 Jie Li
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

25
vendor/github.com/gansidui/gotcp/README.md generated vendored

@ -0,0 +1,25 @@
gotcp
================
A Go package for quickly building tcp servers
Usage
================
### Install
~~~
go get github.com/gansidui/gotcp
~~~
### Examples
* [echo](https://github.com/gansidui/gotcp/tree/master/examples/echo)
* [telnet](https://github.com/gansidui/gotcp/tree/master/examples/telnet)
Document
================
[Doc](http://godoc.org/github.com/gansidui/gotcp)

215
vendor/github.com/gansidui/gotcp/conn.go generated vendored

@ -0,0 +1,215 @@
package gotcp
import (
"errors"
"net"
"sync"
"sync/atomic"
"time"
)
// Error values returned by Conn operations.
var (
	ErrConnClosing   = errors.New("use of closed network connection") // connection is closed or closing
	ErrWriteBlocking = errors.New("write packet was blocking")        // send channel full or timed out
	ErrReadBlocking  = errors.New("read packet was blocking")
)
// Conn wraps a raw TCP connection and pumps packets through buffered
// send/receive channels. Conn exposes a set of callbacks for the various
// events that occur on a connection (see ConnCallback).
type Conn struct {
	srv               *Server
	conn              *net.TCPConn  // the raw connection
	extraData         interface{}   // to save extra data
	closeOnce         sync.Once     // close the conn, once, per instance
	closeFlag         int32         // close flag: 0 open, 1 closed (accessed atomically)
	closeChan         chan struct{} // close channel
	packetSendChan    chan Packet   // packet send channel
	packetReceiveChan chan Packet   // packet receive channel
}
// ConnCallback is an interface of methods that are used as callbacks on a connection.
type ConnCallback interface {
	// OnConnect is called when the connection was accepted.
	// If the return value is false, the connection is closed.
	OnConnect(*Conn) bool

	// OnMessage is called when the connection receives a packet.
	// If the return value is false, the connection is closed.
	OnMessage(*Conn, Packet) bool

	// OnClose is called when the connection is closed.
	OnClose(*Conn)
}
// newConn returns a wrapper of the raw conn; channel capacities are taken
// from the server configuration.
func newConn(conn *net.TCPConn, srv *Server) *Conn {
	return &Conn{
		srv:               srv,
		conn:              conn,
		closeChan:         make(chan struct{}),
		packetSendChan:    make(chan Packet, srv.config.PacketSendChanLimit),
		packetReceiveChan: make(chan Packet, srv.config.PacketReceiveChanLimit),
	}
}
// GetExtraData gets the extra data from the Conn.
func (c *Conn) GetExtraData() interface{} {
	return c.extraData
}

// PutExtraData puts the extra data with the Conn.
func (c *Conn) PutExtraData(data interface{}) {
	c.extraData = data
}

// GetRawConn returns the raw net.TCPConn from the Conn.
func (c *Conn) GetRawConn() *net.TCPConn {
	return c.conn
}
// Close closes the connection exactly once: it sets the close flag, closes
// the close/send/receive channels (goroutines still using them are expected
// to recover from the resulting panic), closes the raw socket, and finally
// notifies the OnClose callback.
func (c *Conn) Close() {
	c.closeOnce.Do(func() {
		atomic.StoreInt32(&c.closeFlag, 1)
		close(c.closeChan)
		close(c.packetSendChan)
		close(c.packetReceiveChan)
		c.conn.Close()
		c.srv.callback.OnClose(c)
	})
}
// IsClosed indicates whether or not the connection is closed.
func (c *Conn) IsClosed() bool {
	return atomic.LoadInt32(&c.closeFlag) == 1
}
// AsyncWritePacket queues p on the send channel without blocking the
// caller for longer than timeout; a zero timeout means "try once, fail
// fast". It returns ErrConnClosing when the connection is (or becomes)
// closed and ErrWriteBlocking when the packet cannot be queued in time.
func (c *Conn) AsyncWritePacket(p Packet, timeout time.Duration) (err error) {
	if c.IsClosed() {
		return ErrConnClosing
	}
	// Sending on packetSendChan panics if Close has already closed it;
	// translate that panic into ErrConnClosing.
	defer func() {
		if recover() != nil {
			err = ErrConnClosing
		}
	}()
	if timeout == 0 {
		select {
		case c.packetSendChan <- p:
			return nil
		default:
			return ErrWriteBlocking
		}
	}
	select {
	case c.packetSendChan <- p:
		return nil
	case <-c.closeChan:
		return ErrConnClosing
	case <-time.After(timeout):
		return ErrWriteBlocking
	}
}
// Do runs the connection: it fires OnConnect and, unless the callback
// rejects the connection, starts the handle/read/write loops, each tracked
// by the server's wait group.
func (c *Conn) Do() {
	if !c.srv.callback.OnConnect(c) {
		return
	}
	asyncDo(c.handleLoop, c.srv.waitGroup)
	asyncDo(c.readLoop, c.srv.waitGroup)
	asyncDo(c.writeLoop, c.srv.waitGroup)
}
// readLoop reads packets from the raw connection and forwards them to
// packetReceiveChan until the server exits, the connection closes, or a
// read fails. The deferred recover absorbs the panic from sending on a
// channel that Close has already closed.
func (c *Conn) readLoop() {
	defer func() {
		recover()
		c.Close()
	}()
	for {
		// Non-blocking exit check before each (blocking) read.
		select {
		case <-c.srv.exitChan:
			return
		case <-c.closeChan:
			return
		default:
		}
		p, err := c.srv.protocol.ReadPacket(c.conn)
		if err != nil {
			return
		}
		c.packetReceiveChan <- p
	}
}
// writeLoop drains packetSendChan and writes serialized packets to the raw
// connection until the server exits, the connection closes, or a write
// fails. The deferred recover absorbs any late panic during shutdown.
func (c *Conn) writeLoop() {
	defer func() {
		recover()
		c.Close()
	}()
	for {
		select {
		case <-c.srv.exitChan:
			return
		case <-c.closeChan:
			return
		case p := <-c.packetSendChan:
			// Re-check the flag: a receive on the just-closed channel
			// yields a nil packet.
			if c.IsClosed() {
				return
			}
			if _, err := c.conn.Write(p.Serialize()); err != nil {
				return
			}
		}
	}
}
// handleLoop dispatches received packets to the OnMessage callback until
// the server exits, the connection closes, or the callback returns false.
// The deferred recover absorbs any late panic during shutdown.
func (c *Conn) handleLoop() {
	defer func() {
		recover()
		c.Close()
	}()
	for {
		select {
		case <-c.srv.exitChan:
			return
		case <-c.closeChan:
			return
		case p := <-c.packetReceiveChan:
			// Re-check the flag: a receive on the just-closed channel
			// yields a nil packet.
			if c.IsClosed() {
				return
			}
			if !c.srv.callback.OnMessage(c, p) {
				return
			}
		}
	}
}
func asyncDo(fn func(), wg *sync.WaitGroup) {
wg.Add(1)
go func() {
fn()
wg.Done()
}()
}

42
vendor/github.com/gansidui/gotcp/examples/echo/client/client.go generated vendored

@ -0,0 +1,42 @@
package main
import (
"fmt"
"log"
"net"
"time"
"github.com/gansidui/gotcp/examples/echo"
)
// main dials the echo server on 127.0.0.1:8989 and plays three "hello"
// ping/pong rounds, printing each reply.
func main() {
	tcpAddr, err := net.ResolveTCPAddr("tcp4", "127.0.0.1:8989")
	checkError(err)
	conn, err := net.DialTCP("tcp", nil, tcpAddr)
	checkError(err)
	echoProtocol := &echo.EchoProtocol{}
	// ping <--> pong
	for i := 0; i < 3; i++ {
		// write a length-prefixed "hello" packet
		conn.Write(echo.NewEchoPacket([]byte("hello"), false).Serialize())
		// read the echoed reply (reply is silently skipped on read error)
		p, err := echoProtocol.ReadPacket(conn)
		if err == nil {
			echoPacket := p.(*echo.EchoPacket)
			fmt.Printf("Server reply:[%v] [%v]\n", echoPacket.GetLength(), string(echoPacket.GetBody()))
		}
		time.Sleep(2 * time.Second)
	}
	conn.Close()
}
// checkError aborts the program when err is non-nil.
func checkError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}

69
vendor/github.com/gansidui/gotcp/examples/echo/echoProtocol.go generated vendored

@ -0,0 +1,69 @@
package echo
import (
"encoding/binary"
"errors"
"io"
"net"
"github.com/gansidui/gotcp"
)
// EchoPacket is a length-prefixed packet: the first 4 bytes hold the
// big-endian body length, the remainder is the body itself.
type EchoPacket struct {
	buff []byte
}

// Serialize returns the raw wire bytes (length prefix + body).
func (p *EchoPacket) Serialize() []byte {
	return p.buff
}

// GetLength decodes the 4-byte big-endian body length.
func (p *EchoPacket) GetLength() uint32 {
	return binary.BigEndian.Uint32(p.buff[0:4])
}

// GetBody returns the packet body (everything after the prefix).
func (p *EchoPacket) GetBody() []byte {
	return p.buff[4:]
}

// NewEchoPacket wraps buff in an EchoPacket. When hasLengthField is true,
// buff already carries the 4-byte prefix; otherwise one is prepended.
func NewEchoPacket(buff []byte, hasLengthField bool) *EchoPacket {
	pkt := &EchoPacket{}
	if hasLengthField {
		pkt.buff = buff
		return pkt
	}
	framed := make([]byte, 4+len(buff))
	binary.BigEndian.PutUint32(framed[0:4], uint32(len(buff)))
	copy(framed[4:], buff)
	pkt.buff = framed
	return pkt
}
// EchoProtocol reads length-prefixed echo packets from a TCP connection.
type EchoProtocol struct {
}

// ReadPacket reads one packet: a 4-byte big-endian body length followed by
// the body. Bodies larger than 1024 bytes are rejected with an error.
func (this *EchoProtocol) ReadPacket(conn *net.TCPConn) (gotcp.Packet, error) {
	var (
		lengthBytes []byte = make([]byte, 4)
		length      uint32
	)
	// read length
	if _, err := io.ReadFull(conn, lengthBytes); err != nil {
		return nil, err
	}
	if length = binary.BigEndian.Uint32(lengthBytes); length > 1024 {
		return nil, errors.New("the size of packet is larger than the limit")
	}
	buff := make([]byte, 4+length)
	copy(buff[0:4], lengthBytes)
	// read body ( buff = lengthBytes + body )
	if _, err := io.ReadFull(conn, buff[4:]); err != nil {
		return nil, err
	}
	return NewEchoPacket(buff, true), nil
}

70
vendor/github.com/gansidui/gotcp/examples/echo/server/server.go generated vendored

@ -0,0 +1,70 @@
package main
import (
"fmt"
"log"
"net"
"os"
"os/signal"
"runtime"
"syscall"
"time"
"github.com/gansidui/gotcp"
"github.com/gansidui/gotcp/examples/echo"
)
// Callback implements gotcp.ConnCallback for the echo server.
type Callback struct{}

// OnConnect stores the remote address as the connection's extra data.
func (this *Callback) OnConnect(c *gotcp.Conn) bool {
	addr := c.GetRawConn().RemoteAddr()
	c.PutExtraData(addr)
	fmt.Println("OnConnect:", addr)
	return true
}

// OnMessage logs the packet and echoes it straight back to the client.
func (this *Callback) OnMessage(c *gotcp.Conn, p gotcp.Packet) bool {
	echoPacket := p.(*echo.EchoPacket)
	fmt.Printf("OnMessage:[%v] [%v]\n", echoPacket.GetLength(), string(echoPacket.GetBody()))
	c.AsyncWritePacket(echo.NewEchoPacket(echoPacket.Serialize(), true), time.Second)
	return true
}

// OnClose logs the stored remote address when the connection closes.
func (this *Callback) OnClose(c *gotcp.Conn) {
	fmt.Println("OnClose:", c.GetExtraData())
}
// main starts the echo server on :8989 and stops it on SIGINT/SIGTERM.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	// creates a tcp listener
	tcpAddr, err := net.ResolveTCPAddr("tcp4", ":8989")
	checkError(err)
	listener, err := net.ListenTCP("tcp", tcpAddr)
	checkError(err)
	// creates a server
	config := &gotcp.Config{
		PacketSendChanLimit:    20,
		PacketReceiveChanLimit: 20,
	}
	srv := gotcp.NewServer(config, &Callback{}, &echo.EchoProtocol{})
	// starts the accept loop (1s accept timeout so Stop is noticed promptly)
	go srv.Start(listener, time.Second)
	fmt.Println("listening:", listener.Addr())
	// catches system signals
	chSig := make(chan os.Signal)
	signal.Notify(chSig, syscall.SIGINT, syscall.SIGTERM)
	fmt.Println("Signal: ", <-chSig)
	// stops service
	srv.Stop()
}
// checkError aborts the program when err is non-nil.
func checkError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}

50
vendor/github.com/gansidui/gotcp/examples/telnet/server/server.go generated vendored

@ -0,0 +1,50 @@
package main
import (
"fmt"
"log"
"net"
"os"
"os/signal"
"runtime"
"syscall"
"time"
"github.com/gansidui/gotcp"
"github.com/gansidui/gotcp/examples/telnet"
)
// main starts the telnet server on port 23 and stops it on SIGINT/SIGTERM.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	// creates a tcp listener (port 23 typically requires elevated privileges)
	tcpAddr, err := net.ResolveTCPAddr("tcp4", ":23")
	checkError(err)
	listener, err := net.ListenTCP("tcp", tcpAddr)
	checkError(err)
	// creates a server
	config := &gotcp.Config{
		PacketSendChanLimit:    20,
		PacketReceiveChanLimit: 20,
	}
	srv := gotcp.NewServer(config, &telnet.TelnetCallback{}, &telnet.TelnetProtocol{})
	// starts the accept loop (1s accept timeout so Stop is noticed promptly)
	go srv.Start(listener, time.Second)
	fmt.Println("listening:", listener.Addr())
	// catches system signals
	chSig := make(chan os.Signal)
	signal.Notify(chSig, syscall.SIGINT, syscall.SIGTERM)
	fmt.Println("Signal: ", <-chSig)
	// stops service
	srv.Stop()
}
// checkError aborts the program when err is non-nil.
func checkError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}

115
vendor/github.com/gansidui/gotcp/examples/telnet/telnetProtocol.go generated vendored

@ -0,0 +1,115 @@
package telnet
import (
"bytes"
"fmt"
"net"
"strings"
"github.com/gansidui/gotcp"
)
var (
	endTag = []byte("\r\n") // Telnet command's end tag
)

// TelnetPacket is a single telnet command: a parsed type plus its payload.
type TelnetPacket struct {
	pType string // command type, e.g. "echo", "login", "quit", "unknow"
	pData []byte // command payload
}

// Serialize returns the payload followed by the telnet end tag.
//
// FIX: the original appended endTag directly to an alias of p.pData; when
// that slice had spare capacity, the append wrote "\r\n" into the caller's
// backing array. A fresh buffer is allocated instead.
func (p *TelnetPacket) Serialize() []byte {
	buf := make([]byte, 0, len(p.pData)+len(endTag))
	buf = append(buf, p.pData...)
	buf = append(buf, endTag...)
	return buf
}

// GetType returns the command type.
func (p *TelnetPacket) GetType() string {
	return p.pType
}

// GetData returns the command payload.
func (p *TelnetPacket) GetData() []byte {
	return p.pData
}

// NewTelnetPacket builds a TelnetPacket from a type and payload.
func NewTelnetPacket(pType string, pData []byte) *TelnetPacket {
	return &TelnetPacket{
		pType: pType,
		pData: pData,
	}
}
// TelnetProtocol parses CRLF-terminated telnet commands from a connection.
type TelnetProtocol struct {
}

// ReadPacket accumulates bytes until a "\r\n" is seen, then splits the
// command on spaces: "type data" yields that pair (extra words are
// dropped), a bare word becomes either a "quit" packet or an "unknow"
// packet carrying the raw command.
func (this *TelnetProtocol) ReadPacket(conn *net.TCPConn) (gotcp.Packet, error) {
	fullBuf := bytes.NewBuffer([]byte{})
	for {
		data := make([]byte, 1024)
		readLengh, err := conn.Read(data)
		if err != nil { //EOF, or worse
			return nil, err
		}
		if readLengh == 0 { // Connection maybe closed by the client
			return nil, gotcp.ErrConnClosing
		} else {
			fullBuf.Write(data[:readLengh])
			index := bytes.Index(fullBuf.Bytes(), endTag)
			if index > -1 {
				command := fullBuf.Next(index)
				fullBuf.Next(2) // skip the "\r\n" itself
				//fmt.Println(string(command))
				commandList := strings.Split(string(command), " ")
				if len(commandList) > 1 {
					return NewTelnetPacket(commandList[0], []byte(commandList[1])), nil
				} else {
					if commandList[0] == "quit" {
						return NewTelnetPacket("quit", command), nil
					} else {
						// NOTE(review): "unknow" (sic) is the type TelnetCallback
						// switches on; keep the spelling in sync if ever fixed.
						return NewTelnetPacket("unknow", command), nil
					}
				}
			}
		}
	}
}
// TelnetCallback implements gotcp.ConnCallback for the telnet server.
type TelnetCallback struct {
}

// OnConnect greets the client and stores its remote address as extra data.
func (this *TelnetCallback) OnConnect(c *gotcp.Conn) bool {
	addr := c.GetRawConn().RemoteAddr()
	c.PutExtraData(addr)
	fmt.Println("OnConnect:", addr)
	c.AsyncWritePacket(NewTelnetPacket("unknow", []byte("Welcome to this Telnet Server")), 0)
	return true
}

// OnMessage dispatches on the command type parsed by TelnetProtocol;
// returning false for "quit" makes gotcp close the connection.
func (this *TelnetCallback) OnMessage(c *gotcp.Conn, p gotcp.Packet) bool {
	packet := p.(*TelnetPacket)
	command := packet.GetData()
	commandType := packet.GetType()
	switch commandType {
	case "echo":
		c.AsyncWritePacket(NewTelnetPacket("echo", command), 0)
	case "login":
		c.AsyncWritePacket(NewTelnetPacket("login", []byte(string(command)+" has login")), 0)
	case "quit":
		return false
	default:
		c.AsyncWritePacket(NewTelnetPacket("unknow", []byte("unknow command")), 0)
	}
	return true
}

// OnClose logs the stored remote address when the connection closes.
func (this *TelnetCallback) OnClose(c *gotcp.Conn) {
	fmt.Println("OnClose:", c.GetExtraData())
}

13
vendor/github.com/gansidui/gotcp/protocol.go generated vendored

@ -0,0 +1,13 @@
package gotcp
import (
"net"
)
// Packet is anything that can be serialized to wire bytes.
type Packet interface {
	Serialize() []byte
}

// Protocol reads one Packet from a TCP connection.
type Protocol interface {
	ReadPacket(conn *net.TCPConn) (Packet, error)
}

68
vendor/github.com/gansidui/gotcp/server.go generated vendored

@ -0,0 +1,68 @@
package gotcp
import (
"net"
"sync"
"time"
)
// Config carries the per-connection channel capacities used by the server.
type Config struct {
	PacketSendChanLimit    uint32 // the limit of packet send channel
	PacketReceiveChanLimit uint32 // the limit of packet receive channel
}
// Server accepts TCP connections and hands them to Conn instances that
// share its configuration, callback and protocol.
type Server struct {
	config    *Config         // server configuration
	callback  ConnCallback    // message callbacks in connection
	protocol  Protocol        // customize packet protocol
	exitChan  chan struct{}   // notify all goroutines to shutdown
	waitGroup *sync.WaitGroup // wait for all goroutines
}
// NewServer creates a server with the given configuration, connection
// callback and packet protocol.
func NewServer(config *Config, callback ConnCallback, protocol Protocol) *Server {
	return &Server{
		config:    config,
		callback:  callback,
		protocol:  protocol,
		exitChan:  make(chan struct{}),
		waitGroup: &sync.WaitGroup{},
	}
}
// Start starts service: it accepts connections in a loop until Stop closes
// exitChan. acceptTimeout bounds each Accept call so the exit check runs
// at least that often; each accepted connection is served on a goroutine
// tracked by the server wait group. The listener is closed on return.
func (s *Server) Start(listener *net.TCPListener, acceptTimeout time.Duration) {
	s.waitGroup.Add(1)
	defer func() {
		listener.Close()
		s.waitGroup.Done()
	}()
	for {
		select {
		case <-s.exitChan:
			return
		default:
		}
		// Deadline makes AcceptTCP return periodically so exitChan is noticed.
		listener.SetDeadline(time.Now().Add(acceptTimeout))
		conn, err := listener.AcceptTCP()
		if err != nil {
			// NOTE(review): deadline timeouts and real accept errors are
			// both swallowed here.
			continue
		}
		s.waitGroup.Add(1)
		go func() {
			newConn(conn, s).Do()
			s.waitGroup.Done()
		}()
	}
}
// Stop stops service: it signals every goroutine to shut down via exitChan
// and blocks until they have all finished.
func (s *Server) Stop() {
	close(s.exitChan)
	s.waitGroup.Wait()
}
Loading…
Cancel
Save