local UIManager = require("ui/uimanager")
local logger = require("logger")
local socketutil = require("socketutil")

-- Push/Pull
local PROGRESS_TIMEOUTS = { 2, 5 }
-- Login/Register
local AUTH_TIMEOUTS = { 5, 10 }
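-- Each pair is consumed below as socketutil:set_timeout(block, total),
-- i.e., seconds allowed per individual socket operation vs. for the
-- request as a whole (going by socketutil's block_timeout/total_timeout
-- naming).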

local KOSyncClient = {
    service_spec = nil,
    custom_url = nil,
}

function KOSyncClient:new(o)
    if o == nil then o = {} end
    setmetatable(o, self)
    self.__index = self
    if o.init then o:init() end
    return o
end
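-- Minimal construction sketch (the spec path and URL are illustrative,
-- not taken from this module):
--   local client = KOSyncClient:new{
--       service_spec = "path/to/api.json",
--       custom_url = "https://sync.example.com/",
--   }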

function KOSyncClient:init()
    local Spore = require("Spore")
    self.client = Spore.new_from_spec(self.service_spec, {
        base_url = self.custom_url,
    })
    -- Register our middlewares in-memory: pre-seeding package.loaded with a
    -- stub table makes the subsequent require() return that table instead of
    -- searching for a module on disk, so we can attach each middleware's
    -- call function directly.
    package.loaded["Spore.Middleware.GinClient"] = {}
    require("Spore.Middleware.GinClient").call = function(_, req)
        req.headers["accept"] = "application/vnd.koreader.v1+json"
    end
    package.loaded["Spore.Middleware.KOSyncAuth"] = {}
    require("Spore.Middleware.KOSyncAuth").call = function(args, req)
        req.headers["x-auth-user"] = args.username
        req.headers["x-auth-key"] = args.userkey
    end
    package.loaded["Spore.Middleware.AsyncHTTP"] = {}
    require("Spore.Middleware.AsyncHTTP").call = function(args, req)
        -- Disable async HTTP if the Turbo looper is missing
        if not UIManager.looper then return end
        req:finalize()
        local result
        require("httpclient"):new():request({
            url = req.url,
            method = req.method,
            body = req.env.spore.payload,
            on_headers = function(headers)
                for header, value in pairs(req.headers) do
                    if type(header) == "string" then
                        headers:add(header, value)
                    end
                end
            end,
        }, function(res)
            result = res
            -- The Turbo HTTP client uses `code` instead of `status`;
            -- copy it over so that Spore can understand the response.
            result.status = res.code
            coroutine.resume(args.thread)
        end)
        return coroutine.create(function() coroutine.yield(result) end)
    end
end

function KOSyncClient:register(username, password)
    self.client:reset_middlewares()
    self.client:enable("Format.JSON")
    self.client:enable("GinClient")
    socketutil:set_timeout(AUTH_TIMEOUTS[1], AUTH_TIMEOUTS[2])
    local ok, res = pcall(function()
        return self.client:register({
            username = username,
            password = password,
        })
    end)
    socketutil:reset_timeout()
    if ok then
        return res.status == 201, res.body
    else
        logger.dbg("KOSyncClient:register failure:", res)
        return false, res.body
    end
end

function KOSyncClient:authorize(username, password)
    self.client:reset_middlewares()
    self.client:enable("Format.JSON")
    self.client:enable("GinClient")
    self.client:enable("KOSyncAuth", {
        username = username,
        userkey = password,
    })
    socketutil:set_timeout(AUTH_TIMEOUTS[1], AUTH_TIMEOUTS[2])
    local ok, res = pcall(function()
        return self.client:authorize()
    end)
    socketutil:reset_timeout()
    if ok then
        return res.status == 200, res.body
    else
        logger.dbg("KOSyncClient:authorize failure:", res)
        return false, res.body
    end
end
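-- Sketch of the blocking auth flow (values illustrative; userkey is
-- whatever hashed form the caller already uses):
--   local created, body = client:register("alice", userkey)
--   local authorized = client:authorize("alice", userkey)
-- Both calls return within roughly the AUTH_TIMEOUTS window even when
-- the server is unreachable.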

function KOSyncClient:update_progress(
    username,
    password,
    document,
    progress,
    percentage,
    device,
    device_id,
    callback)
    self.client:reset_middlewares()
    self.client:enable("Format.JSON")
    self.client:enable("GinClient")
    self.client:enable("KOSyncAuth", {
        username = username,
        userkey = password,
    })
    -- Set *very* tight timeouts to avoid blocking for too long...
    socketutil:set_timeout(PROGRESS_TIMEOUTS[1], PROGRESS_TIMEOUTS[2])
    local co = coroutine.create(function()
        local ok, res = pcall(function()
            return self.client:update_progress({
                document = document,
                -- Progress may be a page number (paged documents) or a
                -- position string (e.g., an EPUB DocFragment path); always
                -- send it as a string so the server sees a single type.
                progress = tostring(progress),
                percentage = percentage,
                device = device,
                device_id = device_id,
            })
        end)
        if ok then
            callback(res.status == 200, res.body)
        else
            logger.dbg("KOSyncClient:update_progress failure:", res)
            callback(false, res.body)
        end
    end)
    self.client:enable("AsyncHTTP", {thread = co})
    coroutine.resume(co)
    if UIManager.looper then UIManager:setInputTimeout() end
    socketutil:reset_timeout()
end
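-- Non-blocking push sketch (all values illustrative):
--   client:update_progress("alice", userkey, doc_digest, 42, 0.13,
--       "MyReader", device_id, function(ok, body)
--           -- ok is true on HTTP 200
--       end)
-- Note that `progress` is stringified before sending, so a page number
-- works as well as a position string.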

function KOSyncClient:get_progress(
    username,
    password,
    document,
    callback)
    self.client:reset_middlewares()
    self.client:enable("Format.JSON")
    self.client:enable("GinClient")
    self.client:enable("KOSyncAuth", {
        username = username,
        userkey = password,
    })
    socketutil:set_timeout(PROGRESS_TIMEOUTS[1], PROGRESS_TIMEOUTS[2])
    local co = coroutine.create(function()
        local ok, res = pcall(function()
            return self.client:get_progress({
                document = document,
            })
        end)
        if ok then
            callback(res.status == 200, res.body)
        else
            logger.dbg("KOSyncClient:get_progress failure:", res)
            callback(false, res.body)
        end
    end)
    self.client:enable("AsyncHTTP", {thread = co})
    coroutine.resume(co)
    if UIManager.looper then UIManager:setInputTimeout() end
    socketutil:reset_timeout()
end
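-- Matching pull sketch (values illustrative):
--   client:get_progress("alice", userkey, doc_digest, function(ok, body)
--       -- on success, body carries the server's stored progress record
--   end)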

return KOSyncClient