Mirror of https://github.com/superseriousbusiness/gotosocial.git
	* start replacing client + federator + media workers with new worker + queue types
* refactor federatingDB.Delete(), drop queued messages when deleting account / status
* move all queue purging to the processor workers
* undo toolchain updates
* code comments, ensure dereferencer worker pool gets started
* update gruf libraries in readme
* start the job scheduler separately to the worker pools
* reshuffle ordering of server.go + remove duplicate worker start / stop
* update go-list version
* fix vendoring
* move queue invalidation to before wiping / deletion, to ensure queued work is not dropped
* add logging to worker processing functions in testrig, don't start workers in unexpected places
* update go-structr to add (+then rely on) QueueCtx{} type
* ensure more worker pools get started properly in tests
* fix remaining broken tests relying on worker queue logic
* fix account test suite queue popping logic, ensure noop workers do not pull from queue
* move back accidentally shuffled account deletion order
* ensure error (non-nil!) gets passed in refactored federatingDB{}.Delete()
* silently drop deletes from accounts not permitted to do so
* don't warn log on forwarded deletes
* make if else clauses easier to parse
* use getFederatorMsg()
* improved code comment
* improved code comment re: requesting account delete checks
* remove boolean result from worker start / stop since false = already running or already stopped
* remove optional passed-in http.client
* remove worker starting from the admin CLI commands (we don't need to handle side-effects)
* update prune cli to start scheduler but not all of the workers
* fix rebase issues
* remove redundant return statements
* i'm sorry sir linter
268 lines · 7.8 KiB · Go
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package testrig

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"mime/multipart"
	"net/url"
	"os"
	"time"

	"codeberg.org/gruf/go-byteutil"
	"codeberg.org/gruf/go-kv/format"
	"github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
	"github.com/superseriousbusiness/gotosocial/internal/log"
	"github.com/superseriousbusiness/gotosocial/internal/messages"
	tlprocessor "github.com/superseriousbusiness/gotosocial/internal/processing/timeline"
	"github.com/superseriousbusiness/gotosocial/internal/processing/workers"
	"github.com/superseriousbusiness/gotosocial/internal/state"
	"github.com/superseriousbusiness/gotosocial/internal/timeline"
	"github.com/superseriousbusiness/gotosocial/internal/typeutils"
)

// Starts workers on the provided state using noop processing functions.
// Useful when you *don't* want to trigger side effects in a test.
func StartNoopWorkers(state *state.State) {
	state.Workers.Client.Process = func(ctx context.Context, msg *messages.FromClientAPI) error { return nil }
	state.Workers.Federator.Process = func(ctx context.Context, msg *messages.FromFediAPI) error { return nil }

	state.Workers.Client.Init(messages.ClientMsgIndices())
	state.Workers.Federator.Init(messages.FederatorMsgIndices())
	state.Workers.Delivery.Init(nil)

	// Specifically do NOT start the workers
	// as caller may require queue contents.
	// (i.e. don't want workers pulling)
	// _ = state.Workers.Client.Start(1)
	// _ = state.Workers.Federator.Start(1)
	// _ = state.Workers.Dereference.Start(1)
	// _ = state.Workers.Media.Start(1)
	//
	// (except for the scheduler, that's fine)
	_ = state.Workers.Scheduler.Start()
}

// Starts workers on the provided state using processing functions from the given
// workers processor. Useful when you *do* want to trigger side effects in a test.
func StartWorkers(state *state.State, processor *workers.Processor) {
	state.Workers.Client.Process = func(ctx context.Context, msg *messages.FromClientAPI) error {
		log.Debugf(ctx, "Workers{}.Client{}.Process(%s)", dump(msg))
		return processor.ProcessFromClientAPI(ctx, msg)
	}

	state.Workers.Federator.Process = func(ctx context.Context, msg *messages.FromFediAPI) error {
		log.Debugf(ctx, "Workers{}.Federator{}.Process(%s)", dump(msg))
		return processor.ProcessFromFediAPI(ctx, msg)
	}

	state.Workers.Client.Init(messages.ClientMsgIndices())
	state.Workers.Federator.Init(messages.FederatorMsgIndices())
	state.Workers.Delivery.Init(nil)

	_ = state.Workers.Scheduler.Start()
	state.Workers.Client.Start(1)
	state.Workers.Federator.Start(1)
	state.Workers.Dereference.Start(1)
	state.Workers.Media.Start(1)
}

// Stops the scheduler and worker pools running on the provided state.
func StopWorkers(state *state.State) {
	_ = state.Workers.Scheduler.Stop()
	state.Workers.Client.Stop()
	state.Workers.Federator.Stop()
	state.Workers.Dereference.Stop()
	state.Workers.Media.Stop()
}

// Starts the home and list timeline managers on the provided state,
// panicking if either manager fails to start.
func StartTimelines(state *state.State, filter *visibility.Filter, converter *typeutils.Converter) {
	state.Timelines.Home = timeline.NewManager(
		tlprocessor.HomeTimelineGrab(state),
		tlprocessor.HomeTimelineFilter(state, filter),
		tlprocessor.HomeTimelineStatusPrepare(state, converter),
		tlprocessor.SkipInsert(),
	)
	if err := state.Timelines.Home.Start(); err != nil {
		panic(fmt.Sprintf("error starting home timeline: %s", err))
	}

	state.Timelines.List = timeline.NewManager(
		tlprocessor.ListTimelineGrab(state),
		tlprocessor.ListTimelineFilter(state, filter),
		tlprocessor.ListTimelineStatusPrepare(state, converter),
		tlprocessor.SkipInsert(),
	)
	if err := state.Timelines.List.Start(); err != nil {
		panic(fmt.Sprintf("error starting list timeline: %s", err))
	}
}

// EqualRequestURIs checks whether inputs have equal request URIs,
// handling cases of url.URL{}, *url.URL{}, string, *string.
func EqualRequestURIs(u1, u2 any) bool {
	var uri1, uri2 string

	requestURI := func(in string) (string, error) {
		u, err := url.Parse(in)
		if err != nil {
			return "", err
		}
		return u.RequestURI(), nil
	}

	switch u1 := u1.(type) {
	case url.URL:
		uri1 = u1.RequestURI()
	case *url.URL:
		uri1 = u1.RequestURI()
	case *string:
		var err error
		uri1, err = requestURI(*u1)
		if err != nil {
			return false
		}
	case string:
		var err error
		uri1, err = requestURI(u1)
		if err != nil {
			return false
		}
	default:
		panic("unsupported type")
	}

	switch u2 := u2.(type) {
	case url.URL:
		uri2 = u2.RequestURI()
	case *url.URL:
		uri2 = u2.RequestURI()
	case *string:
		var err error
		uri2, err = requestURI(*u2)
		if err != nil {
			return false
		}
	case string:
		var err error
		uri2, err = requestURI(u2)
		if err != nil {
			return false
		}
	default:
		panic("unsupported type")
	}

	return uri1 == uri2
}

// CreateMultipartFormData is a handy function for taking a fieldname and a filename, and creating a multipart form bytes buffer
// with the file contents set in the given fieldname. The extraFields param can be used to add extra FormFields to the request, as necessary.
// The returned bytes.Buffer b can be used like so:
//
//	httptest.NewRequest(http.MethodPost, "https://example.org/whateverpath", bytes.NewReader(b.Bytes()))
//
// The returned *multipart.Writer w can be used to set the content type of the request, like so:
//
//	req.Header.Set("Content-Type", w.FormDataContentType())
func CreateMultipartFormData(fieldName string, fileName string, extraFields map[string][]string) (bytes.Buffer, *multipart.Writer, error) {
	var b bytes.Buffer

	w := multipart.NewWriter(&b)
	var fw io.Writer

	if fileName != "" {
		file, err := os.Open(fileName)
		if err != nil {
			return b, nil, err
		}
		if fw, err = w.CreateFormFile(fieldName, file.Name()); err != nil {
			return b, nil, err
		}
		if _, err = io.Copy(fw, file); err != nil {
			return b, nil, err
		}
	}

	for k, vs := range extraFields {
		for _, v := range vs {
			if err := w.WriteField(k, v); err != nil {
				return b, nil, err
			}
		}
	}

	if err := w.Close(); err != nil {
		return b, nil, err
	}
	return b, w, nil
}

// URLMustParse tries to parse the given URL and panics if it can't.
// Should only be used in tests.
func URLMustParse(stringURL string) *url.URL {
	u, err := url.Parse(stringURL)
	if err != nil {
		panic(err)
	}
	return u
}

// TimeMustParse tries to parse the given time as RFC3339, and panics if it can't.
// Should only be used in tests.
func TimeMustParse(timeString string) time.Time {
	t, err := time.Parse(time.RFC3339, timeString)
	if err != nil {
		panic(err)
	}
	return t
}

// WaitFor calls condition every 200ms, returning true
// when condition() returns true, or false after 5s.
//
// It's useful for when you're waiting for something to
// happen, but you don't know exactly how long it will take,
// and you want to fail if the thing doesn't happen within 5s.
func WaitFor(condition func() bool) bool {
	tick := time.NewTicker(200 * time.Millisecond)
	defer tick.Stop()

	timeout := time.NewTimer(5 * time.Second)
	defer timeout.Stop()

	for {
		select {
		case <-tick.C:
			if condition() {
				return true
			}
		case <-timeout.C:
			return false
		}
	}
}

// dump returns debug output of 'v'.
func dump(v any) string {
	var buf byteutil.Buffer
	format.Append(&buf, v)
	return buf.String()
}
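
Below is a brief, hypothetical usage sketch (not part of the file above) of how a test might combine CreateMultipartFormData, httptest, and WaitFor, following the usage hints in the doc comments; the test name, endpoint path, and fixture path are illustrative assumptions only.

// Hypothetical test illustrating the helpers in testrig/util.go.
package testrig_test

import (
	"bytes"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/superseriousbusiness/gotosocial/testrig"
)

func TestUploadSketch(t *testing.T) {
	// Build a multipart body carrying a file plus one extra form field.
	// "./testdata/example.jpg" is an assumed fixture path for illustration.
	b, w, err := testrig.CreateMultipartFormData(
		"attachment", "./testdata/example.jpg",
		map[string][]string{"description": {"a test image"}},
	)
	if err != nil {
		t.Fatal(err)
	}

	// Wire the body and content type into a test request, as the
	// CreateMultipartFormData doc comment describes.
	req := httptest.NewRequest(http.MethodPost, "https://example.org/api/v1/media", bytes.NewReader(b.Bytes()))
	req.Header.Set("Content-Type", w.FormDataContentType())

	// ... pass req to the handler under test, then poll for the
	// asynchronous side effect, failing if it never happens:
	if !testrig.WaitFor(func() bool {
		// condition checking that processing finished would go here
		return true
	}) {
		t.Fatal("timed out waiting for media processing")
	}
}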