I'm writing an HTTP file server with Netty that supports multi-threaded file downloads. When I used only HttpServerCodec() and built the whole response in memory, everything worked perfectly until large files triggered an "OOM: direct buffer memory" error, so I switched to ChunkedWriteHandler().
The problem now is that the browser (new Edge) either says it can't download the file or downloads a file with zero size. I have no idea what is wrong and would appreciate any help.
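For context, before the switch the handler copied the whole file into the response buffer, roughly like this (a simplified reconstruction based on the lines I've left commented out in the handler further down; the class name here is made up):

import io.netty.channel.ChannelFutureListener
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.handler.codec.http.*
import java.io.File
import java.io.RandomAccessFile

// Old approach: copy the entire file into one response buffer.
class OldRequestHandler(val basePath: String) : SimpleChannelInboundHandler<HttpRequest>() {
    override fun channelRead0(ctx: ChannelHandlerContext, msg: HttpRequest) {
        val file = File(basePath + msg.uri())
        if (!file.isFile) return
        val rfile = RandomAccessFile(file, "r")
        val response = DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK)
        response.headers().set("Content-Type", "application/octet-stream")
        response.headers().set("Content-Length", "${rfile.length()}")
        // The whole file is copied into the response's (direct) buffer at once,
        // which is what caused "OOM: direct buffer memory" for large files.
        response.content().writeBytes(rfile.channel, 0L, rfile.length().toInt())
        rfile.close() // content has been copied, so the file handle can be closed
        ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE)
    }
}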
With the chunked version, the log shows that the transfer completes immediately, with no time actually spent sending data:
[main] INFO Main - Pick up path C:/Temp
[main] INFO dd.oliver.htp.HtpServer - Server start at 2333
[nioEventLoopGroup-3-1] INFO dd.oliver.htp.RequestHandler - Request C:/Temp/d.test.oliverdd
[nioEventLoopGroup-3-1] INFO dd.oliver.htp.RequestHandler - [id: 0xe5ce2ec6, L:/0:0:0:0:0:0:0:1:2333 - R:/0:0:0:0:0:0:0:1:63040] Transfer complete.
Here is my code, which is based on the Netty example.
This is the ChannelInitializer:
import io.netty.channel.ChannelInitializer
import io.netty.channel.socket.SocketChannel
import io.netty.handler.codec.http.HttpObjectAggregator
import io.netty.handler.codec.http.HttpServerCodec
import io.netty.handler.stream.ChunkedWriteHandler

class HtpChannelInitializer(val basePath: String) : ChannelInitializer<SocketChannel>() {
    override fun initChannel(ch: SocketChannel) {
        ch.pipeline().addLast("HttpCodec", HttpServerCodec())
        ch.pipeline().addLast("HttpAggregator", HttpObjectAggregator(65536))
        ch.pipeline().addLast("HttpChunked", ChunkedWriteHandler())
        ch.pipeline().addLast("RequestHandle", RequestHandler(basePath))
    }
}
This is the RequestHandler:
import io.netty.channel.*
import io.netty.handler.codec.http.*
import io.netty.handler.codec.http.HttpVersion.HTTP_1_0
import io.netty.handler.stream.ChunkedFile
import org.slf4j.LoggerFactory
import java.io.File
import java.io.RandomAccessFile
private val logger = LoggerFactory.getLogger(RequestHandler::class.java)
class RequestHandler(val basePath: String) : SimpleChannelInboundHandler<HttpRequest>() {
...
    override fun channelReadComplete(ctx: ChannelHandlerContext) {
        ctx.flush()
    }

    override fun channelRead0(ctx: ChannelHandlerContext, msg: HttpRequest) {
        val path = basePath + msg.uri() // msg.uri() example: / or /a/b or /a/b/c.txt
        logger.info("Request $path")
        val file = File(path)
        if (file.isFile) {
            val rfile = RandomAccessFile(file, "r")
            // Response line
            val response = DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK)
            // Headers
            response.headers().set("Accept-Ranges", "bytes")
            response.headers().set("Content-Disposition", "attachment; filename=\"${file.name}\"")
            response.headers().set("Content-Type", "application/octet-stream")
            response.headers().set("Content-Length", "${rfile.length()}")
            if (!(msg.headers().contains("Connection") && msg.headers().get("Connection") == "keep-alive")) {
                response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE)
            } else if (msg.protocolVersion() == HTTP_1_0) {
                response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE)
            }
            // Content
            // response.content().writeBytes(rfile.channel, 0L, rfile.length().toInt())
            // ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE)
            ctx.write(response)
            val sendFileFuture = ctx.write(
                HttpChunkedInput(ChunkedFile(rfile, 0, rfile.length(), 8192)),
                ctx.newProgressivePromise()
            )
            sendFileFuture.addListener(object : ChannelProgressiveFutureListener {
                override fun operationProgressed(
                    future: ChannelProgressiveFuture,
                    progress: Long,
                    total: Long
                ) {
                    if (total < 0) { // total unknown
                        logger.info(future.channel().toString() + " Transfer progress: " + progress)
                    } else {
                        logger.info(
                            future.channel().toString() + " Transfer progress: " + progress + " / " + total
                        )
                    }
                }

                override fun operationComplete(future: ChannelProgressiveFuture) {
                    logger.info(future.channel().toString() + " Transfer complete.")
                }
            })
            if (!(msg.headers().contains("Connection") && msg.headers().get("Connection") == "close")) {
                sendFileFuture.addListener(ChannelFutureListener.CLOSE)
            }
        }
    }
...
}
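For completeness, HtpServer is just a standard ServerBootstrap setup; simplified, it does roughly this (the constructor parameters and names other than HtpChannelInitializer are assumptions):

import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioServerSocketChannel

class HtpServer(val basePath: String, val port: Int) {
    fun start() {
        val bossGroup = NioEventLoopGroup(1)   // accepts connections
        val workerGroup = NioEventLoopGroup()  // handles I/O for accepted channels
        try {
            val bootstrap = ServerBootstrap()
                .group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel::class.java)
                .childHandler(HtpChannelInitializer(basePath))
            // Bind and block until the server channel is closed.
            val channel = bootstrap.bind(port).sync().channel()
            channel.closeFuture().sync()
        } finally {
            bossGroup.shutdownGracefully()
            workerGroup.shutdownGracefully()
        }
    }
}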