WIP more stuff

zervo 2025-08-13 09:44:02 +02:00
parent 2ab930d892
commit 80d722d536
14 changed files with 357 additions and 22 deletions

View file

@@ -27,7 +27,7 @@ func main() {
log.Fatalf("Failed to load config: %v", err)
}
fmt.Printf("Starting server on port %d...\n", cfg.Port)
fmt.Printf("Starting server on port %d...\n", cfg.WebPort)
if err := server.Start(cfg); err != nil {
log.Fatalf("Server error: %v", err)
}

go.mod
View file

@@ -12,6 +12,7 @@ require (
github.com/aymerick/douceur v0.2.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/flosch/pongo2/v4 v4.0.2 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/gomarkdown/markdown v0.0.0-20240328165702-4d01890c35c0 // indirect
github.com/google/uuid v1.6.0 // indirect

go.sum
View file

@@ -19,6 +19,8 @@ github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0Hw=
github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomarkdown/markdown v0.0.0-20240328165702-4d01890c35c0 h1:4gjrh/PN2MuWCCElk8/I4OCKRKWCCo2zEct3VKCbibU=

View file

@@ -6,10 +6,10 @@ import (
"gopkg.in/yaml.v3"
)
var defaultCfg = Config{
var defaultCfg = ServerConfig{
ServerName: "My Fileserver",
Port: 8080,
Directories: []Directory{
WebPort: 8080,
Directories: []DirectoryConfig{
{
Id: "example",
DisplayName: "Example Directory",
@@ -19,28 +19,28 @@ var defaultCfg = Config{
}
// DirectoryConfig represents a configuration for a directory to be served.
type Directory struct {
type DirectoryConfig struct {
Id string `yaml:"id"`
DisplayName string `yaml:"display_name"`
Description string `yaml:"description"`
Path string `yaml:"path"`
}
// Config represents a configuration for the fileserver.
type Config struct {
ServerName string `yaml:"server_name"`
Port int `yaml:"port"`
Directories []Directory `yaml:"directories"`
// ServerConfig represents a configuration for the fileserver.
type ServerConfig struct {
ServerName string `yaml:"server_name"`
WebPort int `yaml:"web_port"`
Directories []DirectoryConfig `yaml:"directories"`
}
// LoadConfig loads configuration from a YAML file.
func LoadConfig(path string) (*Config, error) {
func LoadConfig(path string) (*ServerConfig, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var cfg Config
var cfg ServerConfig
if err := yaml.Unmarshal(data, &cfg); err != nil {
return nil, err
}
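For reference, a YAML config matching the tags above might look like this (the description and path values are illustrative; the other values come from defaultCfg):

server_name: "My Fileserver"
web_port: 8080
directories:
  - id: "example"
    display_name: "Example Directory"
    description: "An example directory"
    path: "/srv/files/example"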

View file

@@ -0,0 +1,25 @@
package filesystem
// SearchFilter is a filter to apply to search operations.
type SearchFilter func(*FileNode) bool
// FileIndexer holds and interacts with a filesystem tree.
type FileIndexer interface {
// GetNode returns the node at the given path.
GetNode(path string) (*FileNode, error)
// Search returns all child nodes matching the given filter.
Search(filter SearchFilter) []*FileNode
// GetPath returns the filesystem path for the given node.
GetPath(node *FileNode) string
// Reload reloads the entire tree from the root node.
Reload() error
// ReloadFrom reloads from a specific node in the tree.
ReloadFrom(node *FileNode) error
// Count returns the number of nodes in the indexer tree.
Count() int
}
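As a usage sketch (not part of this commit), a caller in the same package might combine Search and GetPath like this; findLargeFiles and minSize are hypothetical names:

// findLargeFiles lists the paths of all regular files of at least minSize bytes.
func findLargeFiles(idx FileIndexer, minSize int64) []string {
	var paths []string
	for _, n := range idx.Search(func(n *FileNode) bool {
		return !n.IsDir && n.Size >= minSize
	}) {
		paths = append(paths, idx.GetPath(n))
	}
	return paths
}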

View file

@@ -0,0 +1,163 @@
package filesystem
import (
"fmt"
"os"
"path/filepath"
"sync"
)
// Compile-time assertion that MemoryIndexer implements FileIndexer.
var _ FileIndexer = (*MemoryIndexer)(nil)
// MemoryIndexer is an in-memory implementation of FileIndexer.
type MemoryIndexer struct {
root string
tree *FileNode
flat map[string]*FileNode
mu sync.RWMutex
}
// NewMemoryIndexer loads and returns an in-memory FileIndexer.
func NewMemoryIndexer(root string) (*MemoryIndexer, error) {
	// Clean the root so flat-map keys and GetPath results agree.
	root = filepath.Clean(root)
	idx := &MemoryIndexer{root: root}
	flat := make(map[string]*FileNode)
	tree, err := idx.buildTree(root, nil, flat)
	if err != nil {
		return nil, fmt.Errorf("load file index tree root: %v", err)
	}
	idx.tree = tree
	idx.flat = flat
	return idx, nil
}
// buildTree builds a FileNode tree in memory from a filesystem path.
func (m *MemoryIndexer) buildTree(path string, parent *FileNode, flatMap map[string]*FileNode) (*FileNode, error) {
info, err := os.Stat(path)
if err != nil {
return nil, fmt.Errorf("node stat: %v", err)
}
node := &FileNode{
Name: info.Name(),
IsDir: info.IsDir(),
Size: info.Size(),
ModTime: info.ModTime(),
Parent: parent,
}
flatMap[path] = node
if info.IsDir() {
children, err := os.ReadDir(path)
if err != nil {
return nil, fmt.Errorf("get children: %v", err)
}
for _, c := range children {
childPath := filepath.Join(path, c.Name())
childNode, err := m.buildTree(childPath, node, flatMap)
if err != nil {
fmt.Printf("Skipping child node due to failure: %v", err)
continue
}
node.Children = append(node.Children, childNode)
}
}
return node, nil
}
// GetNode retrieves a node from the indexer based on its path.
func (m *MemoryIndexer) GetNode(path string) (*FileNode, error) {
m.mu.RLock()
defer m.mu.RUnlock()
if node, ok := m.flat[path]; ok {
return node, nil
}
return nil, os.ErrNotExist
}
// Search retrieves all nodes that match the given SearchFilter.
func (m *MemoryIndexer) Search(filter SearchFilter) []*FileNode {
m.mu.RLock()
defer m.mu.RUnlock()
var matched []*FileNode
for _, node := range m.flat {
if filter(node) {
matched = append(matched, node)
}
}
return matched
}
// GetPath reconstructs the filesystem path of a node via reverse tree traversal.
// The result is rooted at the indexer root, matching the keys of the flat map.
func (m *MemoryIndexer) GetPath(node *FileNode) string {
	var parts []string
	for n := node; n.Parent != nil; n = n.Parent {
		parts = append([]string{n.Name}, parts...)
	}
	return filepath.Join(append([]string{m.root}, parts...)...)
}
// Reload reloads the entire indexer tree into memory from the filesystem.
func (m *MemoryIndexer) Reload() error {
m.mu.Lock()
defer m.mu.Unlock()
newFlat := make(map[string]*FileNode)
newTree, err := m.buildTree(m.root, nil, newFlat)
if err != nil {
return fmt.Errorf("load file index tree root: %v", err)
}
m.flat = newFlat
m.tree = newTree
return nil
}
// ReloadFrom reloads the given node and everything below it into memory from the filesystem.
func (m *MemoryIndexer) ReloadFrom(node *FileNode) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	path := m.GetPath(node)
	newFlat := make(map[string]*FileNode)
	newNode, err := m.buildTree(path, node.Parent, newFlat)
	if err != nil {
		return fmt.Errorf("load file index tree node: %v", err)
	}
	// Drop the stale flat-map entries for the old subtree before merging the rebuilt one.
	m.pruneFlat(node, path)
	if node.Parent != nil {
		for i, child := range node.Parent.Children {
			if child == node {
				node.Parent.Children[i] = newNode
				break
			}
		}
	} else {
		// Reloading from the root replaces the whole tree.
		m.tree = newNode
	}
	for k, v := range newFlat {
		m.flat[k] = v
	}
	return nil
}

// pruneFlat removes the node at the given path and all of its descendants from the flat map.
func (m *MemoryIndexer) pruneFlat(node *FileNode, path string) {
	delete(m.flat, path)
	for _, child := range node.Children {
		m.pruneFlat(child, filepath.Join(path, child.Name))
	}
}
// Count returns the total number of nodes in the indexer tree.
func (m *MemoryIndexer) Count() int {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.flat)
}
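A minimal end-to-end sketch of the indexer, assuming a hypothetical module path and root directory:

package main

import (
	"fmt"
	"log"

	"example.com/fileserver/filesystem" // hypothetical module path
)

func main() {
	idx, err := filesystem.NewMemoryIndexer("/srv/files") // assumed root directory
	if err != nil {
		log.Fatalf("build index: %v", err)
	}
	fmt.Printf("indexed %d nodes\n", idx.Count())

	// Nodes are keyed by their full on-disk path, rooted at the indexer root.
	if node, err := idx.GetNode("/srv/files/docs"); err == nil {
		if err := idx.ReloadFrom(node); err != nil {
			log.Printf("reload subtree: %v", err)
		}
	}
}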

View file

@@ -0,0 +1,13 @@
package filesystem
import "time"
// FileNode represents an item in a filesystem tree.
type FileNode struct {
Name string
IsDir bool
Size int64
ModTime time.Time
Parent *FileNode
Children []*FileNode
}
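Since every node links both up (Parent) and down (Children), recursive aggregation over a subtree is straightforward. A hypothetical helper in the same package:

// totalSize returns the combined size in bytes of a node and all of its descendants.
func totalSize(n *FileNode) int64 {
	sum := n.Size
	for _, c := range n.Children {
		sum += totalSize(c)
	}
	return sum
}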

View file

@@ -0,0 +1,131 @@
package filesystem
import (
"bufio"
"fmt"
"os"
"runtime"
"strconv"
"strings"
"sync"
"github.com/fsnotify/fsnotify"
)
// FallbackWatcherQuota is the number of watches assumed when the system limit cannot be determined.
const FallbackWatcherQuota = 8192
// WatchManager manages filesystem watchers and triggers indexer reloads.
type WatchManager struct {
watcher *fsnotify.Watcher
root string
maxWatches int
usedWatches int
indexers []FileIndexer
watched map[string]*FileNode
pending []*FileNode
mu sync.Mutex
}
// NewWatchManager creates a manager that allocates as many watches as possible,
// up to the given quota. If the given quota is 0 or less, the system quota is used.
func NewWatchManager(indexers []FileIndexer, userQuota int) (*WatchManager, error) {
w, err := fsnotify.NewWatcher()
if err != nil {
return nil, fmt.Errorf("create watcher: %v", err)
}
systemLimit := detectSystemWatchLimit()
quota := systemLimit
if userQuota > 0 && userQuota < quota {
quota = userQuota
}
wm := &WatchManager{
watcher: w,
maxWatches: quota,
indexers: indexers,
watched: make(map[string]*FileNode),
pending: make([]*FileNode, 0),
}
fmt.Printf("[watcher] system max_user_watches=%d, using quota=%d", systemLimit, quota)
return wm, nil
}
// addWatchLocked adds a watch for the given path. The caller must hold the mutex.
func (wm *WatchManager) addWatchLocked(path string, node *FileNode) error {
	if _, isWatched := wm.watched[path]; isWatched {
		return nil
	}
	if wm.usedWatches >= wm.maxWatches {
		wm.pending = append(wm.pending, node)
		return fmt.Errorf("quota exhausted, deferring watch of %s", path)
	}
	if err := wm.watcher.Add(path); err != nil {
		return fmt.Errorf("add watch: %v", err)
	}
	wm.watched[path] = node
	wm.usedWatches++
	fmt.Printf("[watcher] watching %s (used %d/%d)\n", path, wm.usedWatches, wm.maxWatches)
	return nil
}
// watchRecursive attempts to watch the node and its children, with the given number
// of available watches. The FileIndexer that manages the node must also be passed.
func (wm *WatchManager) watchRecursive(indexer FileIndexer, node *FileNode, available int) error {
	wm.mu.Lock()
	defer wm.mu.Unlock()
	_, err := wm.watchRecursiveLocked(indexer, node, available)
	return err
}

// watchRecursiveLocked walks the subtree while the caller holds the mutex.
// It returns how many watches remain available after the walk.
func (wm *WatchManager) watchRecursiveLocked(indexer FileIndexer, node *FileNode, available int) (int, error) {
	if !node.IsDir {
		return available, nil
	}
	path := indexer.GetPath(node)
	if _, isWatched := wm.watched[path]; !isWatched {
		if err := wm.addWatchLocked(path, node); err != nil {
			fmt.Printf("[watcher] watch failed: %v\n", err)
		} else {
			available--
		}
	}
	for _, child := range node.Children {
		if available <= 0 {
			// Quota exhausted: defer remaining directories until watches free up.
			if child.IsDir {
				wm.pending = append(wm.pending, child)
			}
			continue
		}
		var err error
		if available, err = wm.watchRecursiveLocked(indexer, child, available); err != nil {
			return available, err
		}
	}
	return available, nil
}
func detectSystemWatchLimit() int {
if runtime.GOOS != "linux" {
fmt.Printf("[watcher] cannot read system watcher quota, defaulting to %d.\n", FallbackWatcherQuota)
return FallbackWatcherQuota
}
f, err := os.Open("/proc/sys/fs/inotify/max_user_watches")
if err != nil {
fmt.Printf("[watcher] cannot read system watcher quota, defaulting to %d.\n", FallbackWatcherQuota)
return FallbackWatcherQuota
}
defer f.Close()
scanner := bufio.NewScanner(f)
if scanner.Scan() {
if v, err := strconv.Atoi(strings.TrimSpace(scanner.Text())); err == nil && v > 0 {
return v
}
}
fmt.Printf("[watcher] cannot read system watcher quota, defaulting to %d.\n", FallbackWatcherQuota)
return FallbackWatcherQuota
}
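A sketch of how the pieces might be wired together inside the package; watchIndexedTree is a hypothetical helper and the 4096 cap is illustrative:

// watchIndexedTree indexes root and registers watches over the resulting tree.
func watchIndexedTree(root string) (*WatchManager, error) {
	idx, err := NewMemoryIndexer(root)
	if err != nil {
		return nil, err
	}
	wm, err := NewWatchManager([]FileIndexer{idx}, 4096)
	if err != nil {
		return nil, err
	}
	if err := wm.watchRecursive(idx, idx.tree, wm.maxWatches); err != nil {
		return nil, err
	}
	return wm, nil
}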

View file

@@ -6,7 +6,7 @@ import (
)
// BrowseRoute registers all routes under '/b' (aka browse).
func BrowseRoute(app *iris.Application, cfg *config.Config) {
func BrowseRoute(app *iris.Application, cfg *config.ServerConfig) {
party := app.Party("/b")
party.Get("/{directory}/{path:path}")
}

View file

@@ -7,7 +7,7 @@ import (
)
// DirectoriesRoute registers all routes under '/directories'.
func DirectoriesRoute(app *iris.Application, cfg *config.Config) {
func DirectoriesRoute(app *iris.Application, cfg *config.ServerConfig) {
party := app.Party("/directories")
party.Get("/", func(ctx iris.Context) {

View file

@@ -16,7 +16,7 @@ import (
)
// Start starts the server.
func Start(cfg *config.Config) error {
func Start(cfg *config.ServerConfig) error {
app := iris.New()
templates, err := fs.Sub(fileserver.TemplateFS, "templates")
@@ -61,7 +61,7 @@ func Start(cfg *config.Config) error {
ctx.SendFile(filePath, filename)
})
return app.Listen(":" + fmt.Sprint(cfg.Port))
return app.Listen(":" + fmt.Sprint(cfg.WebPort))
}
// index redirects request from '/' to '/directories'.

View file

@@ -11,7 +11,7 @@ import (
type browsepage struct {
}
func BrowseView(ctx iris.Context, cfg *config.Config) {
func BrowseView(ctx iris.Context, cfg *config.ServerConfig) {
ctx.CompressWriter(true)
ctx.ViewData("", data.LayoutData{
ServerName: cfg.ServerName,

View file

@@ -9,11 +9,11 @@ import (
)
type directorypage struct {
Directories []config.Directory
Directories []config.DirectoryConfig
}
// DirectoriesView renders the view for '/directories'.
func DirectoriesView(ctx iris.Context, cfg *config.Config) {
func DirectoriesView(ctx iris.Context, cfg *config.ServerConfig) {
ctx.CompressWriter(true)
ctx.ViewData("", data.LayoutData{
ServerName: cfg.ServerName,

View file

@@ -9,12 +9,12 @@ import (
// ValidateDirectories ensures the given directories meet the required criteria to be served.
// Any directory that does not meet the requirements is dropped from the returned slice.
func ValidateDirectories(dirs []config.Directory) {
for i, dir := range dirs {
func ValidateDirectories(dirs []config.DirectoryConfig) []config.DirectoryConfig {
	// Filter in place, reusing the backing array of the input slice.
	valid := dirs[:0]
	for _, dir := range dirs {
		exists, err := util.FileExists(dir.Path)
		if !exists {
			fmt.Printf("WARNING: The directory '%s' does not exist and will be delisted. %v\n", dir.Id, err)
			continue
		}
		valid = append(valid, dir)
	}
	return valid
}
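A hypothetical call site, written unqualified since the validator's package name is not shown in the diff; assigning the result back drops delisted directories from the config:

cfg, err := config.LoadConfig("config.yml") // assumed config path
if err != nil {
	log.Fatalf("Failed to load config: %v", err)
}
cfg.Directories = ValidateDirectories(cfg.Directories)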