Add asynchronous function execution feature

duanhf2012
2023-02-22 09:53:50 +08:00
parent 0ebbe0e31d
commit 8111b12da5
11 changed files with 1674 additions and 24 deletions

91
concurrent/concurrent.go Normal file

@@ -0,0 +1,91 @@
package concurrent

import (
	"errors"
	"runtime"

	"github.com/duanhf2012/origin/log"
)

const defaultMaxTaskChannelNum = 1000000

type IConcurrent interface {
	OpenConcurrentByNumCPU(cpuMul float32)
	OpenConcurrent(minGoroutineNum int32, maxGoroutineNum int32, maxTaskChannelNum int)
	AsyncDoByQueue(queueId int64, fn func(), cb func(err error))
	AsyncDo(f func(), cb func(err error))
}

type Concurrent struct {
	dispatch

	tasks     chan task
	cbChannel chan func(error)
}

/*
cpuNumMul is a multiple of the CPU count.
Recommendation: (1) use 1 for CPU-bound work; (2) use 2 or higher for I/O-bound work.
*/
func (c *Concurrent) OpenConcurrentByNumCPU(cpuNumMul float32) {
	goroutineNum := int32(float32(runtime.NumCPU())*cpuNumMul + 1)
	c.OpenConcurrent(goroutineNum, goroutineNum, defaultMaxTaskChannelNum)
}

func (c *Concurrent) OpenConcurrent(minGoroutineNum int32, maxGoroutineNum int32, maxTaskChannelNum int) {
	c.tasks = make(chan task, maxTaskChannelNum)
	c.cbChannel = make(chan func(error), maxTaskChannelNum)

	// start the dispatcher goroutine
	c.dispatch.open(minGoroutineNum, maxGoroutineNum, c.tasks, c.cbChannel)
}
func (c *Concurrent) AsyncDo(f func(), cb func(err error)) {
	c.AsyncDoByQueue(0, f, cb)
}

func (c *Concurrent) AsyncDoByQueue(queueId int64, fn func(), cb func(err error)) {
	if cap(c.tasks) == 0 {
		panic("not open concurrent")
	}

	if fn == nil && cb == nil {
		log.SStack("fn and cb is nil")
		return
	}

	// a channel's length can never exceed its capacity, so the full check must use >=
	if len(c.tasks) >= cap(c.tasks) {
		log.SError("tasks channel is full")
		if cb != nil {
			c.pushAsyncDoCallbackEvent(func(err error) {
				cb(errors.New("tasks channel is full"))
			})
		}
		return
	}

	if fn == nil {
		// no work to run, only a callback to deliver
		c.pushAsyncDoCallbackEvent(cb)
		return
	}

	c.tasks <- task{queueId, fn, cb}
}
func (c *Concurrent) Close() {
	if cap(c.tasks) == 0 {
		return
	}

	log.SRelease("wait close concurrent")
	c.dispatch.close()
	log.SRelease("concurrent has successfully exited")
}

func (c *Concurrent) GetCallBackChannel() chan func(error) {
	return c.cbChannel
}
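
For orientation, here is a minimal usage sketch of the API added in this file. It is not code from this commit: the standalone main and especially the final drain step are assumptions about how GetCallBackChannel and DoCallback (defined in concurrent/dispatch.go below) are meant to be used together.

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/concurrent"
)

func main() {
	var c concurrent.Concurrent
	// roughly one worker goroutine per CPU core
	c.OpenConcurrentByNumCPU(1)

	// fn runs on a worker goroutine; cb is queued on the callback channel when fn returns
	c.AsyncDo(func() {
		fmt.Println("blocking work runs off the caller's goroutine")
	}, func(err error) {
		fmt.Println("work finished, err =", err)
	})

	// callbacks are not invoked automatically: the owner of the Concurrent
	// drains GetCallBackChannel() and runs each callback via DoCallback
	c.DoCallback(<-c.GetCallBackChannel())

	c.Close()
}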

195
concurrent/dispatch.go Normal file

@@ -0,0 +1,195 @@
package concurrent

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/duanhf2012/origin/log"
	"github.com/duanhf2012/origin/util/queue"
)

var idleTimeout = 2 * time.Second

type dispatch struct {
	minConcurrentNum int32
	maxConcurrentNum int32

	queueIdChannel chan int64
	workerQueue    chan task
	tasks          chan task

	idle      bool
	workerNum int32
	cbChannel chan func(error)

	mapTaskQueueSession map[int64]*queue.Deque[task]

	waitWorker   sync.WaitGroup
	waitDispatch sync.WaitGroup
}

func (d *dispatch) open(minGoroutineNum int32, maxGoroutineNum int32, tasks chan task, cbChannel chan func(error)) {
	d.minConcurrentNum = minGoroutineNum
	d.maxConcurrentNum = maxGoroutineNum
	d.tasks = tasks
	d.mapTaskQueueSession = make(map[int64]*queue.Deque[task], 1024)
	d.workerQueue = make(chan task)
	d.cbChannel = cbChannel
	d.queueIdChannel = make(chan int64, cap(tasks))

	d.waitDispatch.Add(1)
	go d.run()
}
func (d *dispatch) run() {
	defer d.waitDispatch.Done()

	timeout := time.NewTimer(idleTimeout)
	for {
		// queue-finish events take priority over new tasks
		select {
		case queueId := <-d.queueIdChannel:
			d.processqueueEvent(queueId)
		default:
			select {
			case t, ok := <-d.tasks:
				if !ok {
					return
				}
				d.processTask(&t)
			case queueId := <-d.queueIdChannel:
				d.processqueueEvent(queueId)
			case <-timeout.C:
				d.processTimer()
				if atomic.LoadInt32(&d.minConcurrentNum) == -1 && len(d.tasks) == 0 {
					// shutting down with no pending tasks: poll faster so workers retire quickly
					idleTimeout = time.Millisecond * 10
				}
				timeout.Reset(idleTimeout)
			}
		}

		// shutdown path: close() set minConcurrentNum to -1; once every worker
		// has exited, signal close() with a nil callback and stop dispatching
		if atomic.LoadInt32(&d.minConcurrentNum) == -1 && d.workerNum == 0 {
			d.waitWorker.Wait()
			d.cbChannel <- nil
			return
		}
	}
}
func (d *dispatch) processTimer() {
	// no work arrived since the last tick: retire one surplus worker
	if d.idle && d.workerNum > d.minConcurrentNum {
		d.processIdle()
	}

	d.idle = true
}

func (d *dispatch) processqueueEvent(queueId int64) {
	d.idle = false

	queueSession := d.mapTaskQueueSession[queueId]
	if queueSession == nil {
		return
	}

	// the task that just finished is still at the front of its queue; drop it
	queueSession.PopFront()
	if queueSession.Len() == 0 {
		return
	}

	// start the next task queued under the same queueId
	t := queueSession.Front()
	d.executeTask(&t)
}

func (d *dispatch) executeTask(t *task) {
	select {
	case d.workerQueue <- *t:
		return
	default:
		// no worker is free; grow the pool if it is below maxConcurrentNum
		if d.workerNum < d.maxConcurrentNum {
			var work worker
			work.start(&d.waitWorker, t, d)
			return
		}
	}

	// the pool is at its limit: block until a worker becomes free
	d.workerQueue <- *t
}

func (d *dispatch) processTask(t *task) {
	d.idle = false

	// queued task: tasks sharing a non-zero queueId run one at a time, in order
	if t.queueId != 0 {
		queueSession := d.mapTaskQueueSession[t.queueId]
		if queueSession == nil {
			queueSession = &queue.Deque[task]{}
			d.mapTaskQueueSession[t.queueId] = queueSession
		}

		// nothing from this queue is currently running, so execute immediately
		if queueSession.Len() == 0 {
			d.executeTask(t)
		}
		queueSession.PushBack(*t)
		return
	}

	// ordinary task
	d.executeTask(t)
}

func (d *dispatch) processIdle() {
	// hand an empty task to one idle worker; it is the signal to exit
	select {
	case d.workerQueue <- task{}:
		d.workerNum--
	default:
	}
}

func (d *dispatch) pushQueueTaskFinishEvent(queueId int64) {
	d.queueIdChannel <- queueId
}

func (d *dispatch) pushAsyncDoCallbackEvent(cb func(err error)) {
	if cb == nil {
		// no callback requested
		return
	}

	d.cbChannel <- cb
}
func (d *dispatch) close() {
	atomic.StoreInt32(&d.minConcurrentNum, -1)

	// keep draining and running callbacks until run() signals completion with a nil callback
	for {
		cb := <-d.cbChannel
		if cb == nil {
			break
		}
		cb(nil)
	}

	d.waitDispatch.Wait()
}

func (d *dispatch) DoCallback(cb func(err error)) {
	defer func() {
		if r := recover(); r != nil {
			buf := make([]byte, 4096)
			l := runtime.Stack(buf, false)
			errString := fmt.Sprint(r)
			log.SError("core dump info[", errString, "]\n", string(buf[:l]))
		}
	}()

	cb(nil)
}
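
The queueId machinery above is what gives AsyncDoByQueue its ordering guarantee: tasks sharing a non-zero queueId are chained through mapTaskQueueSession and executed one at a time in submission order, while tasks under different queueIds may run in parallel. A sketch of that call pattern follows; savePlayerStep and the player id are illustrative names, and the callback-draining loop is again an assumed consumption pattern rather than code from this commit.

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/concurrent"
)

// savePlayerStep stands in for some blocking I/O (a DB write, for example)
func savePlayerStep(playerId int64, step int) {
	fmt.Println("saved player", playerId, "step", step)
}

func main() {
	var c concurrent.Concurrent
	c.OpenConcurrent(2, 8, 1024)

	playerId := int64(10001)
	for i := 0; i < 3; i++ {
		step := i
		// same queueId: the three writes run strictly one after another, in order;
		// work submitted under a different queueId is free to run in parallel
		c.AsyncDoByQueue(playerId, func() {
			savePlayerStep(playerId, step)
		}, func(err error) {
			fmt.Println("step", step, "done, err =", err)
		})
	}

	for i := 0; i < 3; i++ {
		c.DoCallback(<-c.GetCallBackChannel())
	}
	c.Close()
}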

78
concurrent/worker.go Normal file

@@ -0,0 +1,78 @@
package concurrent

import (
	"errors"
	"fmt"
	"runtime"
	"sync"

	"github.com/duanhf2012/origin/log"
)

type task struct {
	queueId int64
	fn      func()
	cb      func(err error)
}

type worker struct {
	*dispatch
}

// a task with a nil fn is the exit signal sent by dispatch.processIdle
func (t *task) isExistTask() bool {
	return t.fn == nil
}

func (w *worker) start(waitGroup *sync.WaitGroup, t *task, d *dispatch) {
	w.dispatch = d
	d.workerNum += 1

	waitGroup.Add(1)
	go w.run(waitGroup, *t)
}
func (w *worker) run(waitGroup *sync.WaitGroup, t task) {
	defer waitGroup.Done()

	w.exec(&t)
	for {
		tw := <-w.workerQueue
		if tw.isExistTask() {
			// exit goroutine
			log.SRelease("worker goroutine exit")
			return
		}
		w.exec(&tw)
	}
}

func (w *worker) exec(t *task) {
	defer func() {
		if r := recover(); r != nil {
			buf := make([]byte, 4096)
			l := runtime.Stack(buf, false)
			errString := fmt.Sprint(r)

			// rewrap the callback so the caller receives the panic as an error
			cb := t.cb
			t.cb = func(err error) {
				cb(errors.New(errString))
			}
			w.endCallFun(t)
			log.SError("core dump info[", errString, "]\n", string(buf[:l]))
		}
	}()

	t.fn()
	w.endCallFun(t)
}

func (w *worker) endCallFun(t *task) {
	w.pushAsyncDoCallbackEvent(t.cb)
	if t.queueId != 0 {
		w.pushQueueTaskFinishEvent(t.queueId)
	}
}
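
worker.exec converts a panic inside fn into an error handed to the submitter's cb, so a crashing task does not kill the worker goroutine or the process. A sketch of what the caller would observe, assuming the same callback-draining pattern as in the earlier sketches:

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/concurrent"
)

func main() {
	var c concurrent.Concurrent
	c.OpenConcurrent(1, 1, 128)

	c.AsyncDo(func() {
		panic("boom") // recovered in worker.exec; the worker keeps running
	}, func(err error) {
		// err carries the panic value, wrapped via errors.New in exec's recover block
		fmt.Println("task failed:", err)
	})

	c.DoCallback(<-c.GetCallBackChannel())
	c.Close()
}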