You need to do these imports (some or all — as many as you need). The more you import, the bigger the binary will be, which is why you, as the user, get to choose. So you could import just the drive backend and just the sync commands if you wanted.
You can make librclone.RPC("sync/sync", ...) return once the command has been placed in the execution queue — that is, before the command execution has completed — by adding _async=true. If you do this, you probably also want to create a _group and then track execution with core/stats. I made a small example here: https://forum.rclone.org/t/c-code-for-rclone-copy/33990/11
I suggest we help each other build a small example using the most-used options — like _config, _filters, _group and core/stats — on a sync of local folders. That will be a good starting point for both you and @shenv56, and will cover most of the requests I have seen on the forum.
Here's my example so far (I'll add filter and config soon), which I'd be happy to contribute, but I have some questions about async polling using job/status:
Suppose my polling interval is > 1 minute and from the docs "The job can be queried for up to 1 minute after it has finished.". Should I be exiting on both finished=True from the job/status call as well as a 404 on the jobStatus call and assume then the job is finished, since I can't check past a minute?
package main
import (
"encoding/json"
"fmt"
"time"
"github.com/rclone/rclone/librclone/librclone"
_ "github.com/rclone/rclone/fs/sync"
_ "github.com/rclone/rclone/backend/all"
)
// syncRequest is the JSON body for a "sync/copy" (or "sync/sync") RPC call.
// The underscore-prefixed fields are rclone global parameters: _group tags
// the transfer so it can be queried via core/stats, and _async makes the
// RPC return a job id immediately instead of blocking until completion.
type syncRequest struct {
SrcFs string `json:"srcFs"` // source remote or local path
DstFs string `json:"dstFs"` // destination remote or local path
Group string `json:"_group"` // stats group name for core/stats
Async bool `json:"_async"` // run the job asynchronously
}
// syncResponse holds the reply from an _async RPC call: the id of the
// queued job, used afterwards to poll "job/status".
type syncResponse struct {
JobID int64 `json:"jobid"`
}
// statusRequest is the JSON body for a "job/status" RPC call, identifying
// which job to query.
type statusRequest struct {
JobID int64 `json:"jobid"`
}
// statusResponse is the subset of the "job/status" reply this example uses.
// Finished reports whether the job has completed; Success whether it
// completed without error.
type statusResponse struct {
Finished bool `json:"finished"`
Success bool `json:"success"`
}
// statsRequest is the JSON body for a "core/stats" RPC call, selecting the
// stats group (the _group set on the sync request) to report on.
type statsRequest struct {
Group string `json:"group"`
}
// statsResponse is the subset of the "core/stats" reply this example uses.
type statsResponse struct {
Bytes int64 `json:"bytes"` // total bytes transferred
Speed float64 `json:"speed"` // average speed in bytes/s
Transfers int64 `json:"transfers"` // number of files transferred
ElapsedTime float64 `json:"elapsedTime"` // elapsed time in seconds
Errors int64 `json:"errors"` // number of errors
}
func main() {
librclone.Initialize()
syncRequest := syncRequest{
SrcFs: "<absolute_path>",
DstFs: ":s3,env_auth=false,access_key_id=<access>,secret_access_key=<secret>,endpoint='<endpoint>':<bucket>",
Group: "MyTransfer",
Async: true,
}
syncRequestJSON, err := json.Marshal(syncRequest)
if err != nil {
fmt.Println(err)
}
out, status := librclone.RPC("sync/copy", string(syncRequestJSON))
if status != 200 {
fmt.Println("Error: Got status : %d and output %q", status, out)
}
var syncResponse syncResponse
err = json.Unmarshal([]byte(out), &syncResponse)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("Job Id of Async Job: %d\n", syncResponse.JobID)
statusRequest := statusRequest{JobID: syncResponse.JobID}
statusRequestJSON, err := json.Marshal(statusRequest)
if err != nil {
fmt.Println(err)
}
var statusResponse statusResponse
statusTries := 0
for !statusResponse.Finished {
out, status := librclone.RPC("job/status", string(statusRequestJSON))
fmt.Println(out)
if status == 404 {
break
}
err = json.Unmarshal([]byte(out), &statusResponse)
if err != nil {
fmt.Println(err)
break
}
time.Sleep(10 * time.Second)
statusTries++
fmt.Printf("Polled status of job %d, %d times\n", statusRequest.JobID, statusTries)
}
if !statusResponse.Success {
fmt.Println("Job finished but did not have status success.")
return
}
statsRequest := statsRequest{Group: "MyTransfer"}
statsRequestJSON, err := json.Marshal(statsRequest)
if err != nil {
fmt.Println(err)
}
out, _ = librclone.RPC("core/stats", string(statsRequestJSON))
var stats statsResponse
err = json.Unmarshal([]byte(out), &stats)
if err != nil {
fmt.Println(err)
}
fmt.Printf("Transferred %d bytes and %d files\n", stats.Bytes, stats.Transfers)
}```