[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/...
From: |
mldonkey-commits |
Subject: |
[Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/... |
Date: |
Thu, 11 Jan 2007 12:24:18 +0000 |
CVSROOT: /sources/mldonkey
Module name: mldonkey
Changes by: spiralvoice <spiralvoice> 07/01/11 12:24:18
Modified files:
distrib : ChangeLog
src/networks/donkey: donkeyFiles.ml donkeyOptions.ml
Log message:
patch #5665
CVSWeb URLs:
http://cvs.savannah.gnu.org/viewcvs/mldonkey/distrib/ChangeLog?cvsroot=mldonkey&r1=1.1147&r2=1.1148
http://cvs.savannah.gnu.org/viewcvs/mldonkey/src/networks/donkey/donkeyFiles.ml?cvsroot=mldonkey&r1=1.25&r2=1.26
http://cvs.savannah.gnu.org/viewcvs/mldonkey/src/networks/donkey/donkeyOptions.ml?cvsroot=mldonkey&r1=1.59&r2=1.60
Patches:
Index: distrib/ChangeLog
===================================================================
RCS file: /sources/mldonkey/mldonkey/distrib/ChangeLog,v
retrieving revision 1.1147
retrieving revision 1.1148
diff -u -b -r1.1147 -r1.1148
--- distrib/ChangeLog 11 Jan 2007 12:16:18 -0000 1.1147
+++ distrib/ChangeLog 11 Jan 2007 12:24:18 -0000 1.1148
@@ -15,6 +15,14 @@
=========
2007/01/11
+5665: EDK: Support compressed upload, implement file read cache (TripleM)
+new options:
+- ED2K_upload_compression to enable compressed upload, default true
+- ED2K_upload_compression_threshold, default 2000 bytes
+ Size difference in bytes between one zone (180 kBytes) and its compressed
+ counterpart, which has to occur to send compressed parts instead of plain.
+- ED2K_upload_compression_level, Zlib compression level, default 9
+- ED2K_upload_compression_table_size, default 20
5669: HTML: Add HTML headers to prohibit browser-side caching (Schlumpf)
5671: Configure: Fix question whether to compile lablgtk, same as patch 5401
5675: Updated Mozilla protocol handler to version 1.10
Index: src/networks/donkey/donkeyFiles.ml
===================================================================
RCS file: /sources/mldonkey/mldonkey/src/networks/donkey/donkeyFiles.ml,v
retrieving revision 1.25
retrieving revision 1.26
diff -u -b -r1.25 -r1.26
--- src/networks/donkey/donkeyFiles.ml 8 Jan 2007 15:12:10 -0000 1.25
+++ src/networks/donkey/donkeyFiles.ml 11 Jan 2007 12:24:18 -0000 1.26
@@ -61,6 +61,51 @@
where nseconds = Fifo.length upload_clients
*)
+exception Cache_table_hit of string * string
+type cache_entry = {
+ md4 : Md4.t;
+ begin_offset : int64;
+ end_offset : int64;
+ cached_part : string;
+ comp_part : string
+ }
+let cache_table_index = ref 0
+let cache_table_size = !!upload_compression_table_size
+let ( cache_table : cache_entry Weak.t ) = Weak.create cache_table_size
+let cached_load file begin_offset end_offset compress=
+ try
+ for i = 0 to cache_table_size-1 do
+ match Weak.get cache_table i with
+ Some
({md4=md4;begin_offset=bo;end_offset=eo;cached_part=cached_file;comp_part=cached_comp})
when (md4 = file.file_md4) && (bo=begin_offset) && (eo=end_offset) ->
+ if !verbose_upload then
+ lprintf_nl "Cache Hit for %s (%Ld,%Ld)" (file_best_name file)
begin_offset end_offset;
+ if (compress && (String.length cached_comp > 0)) || not compress
then raise (Cache_table_hit(cached_file,cached_comp))
+ | _ -> ()
+ done;
+ let entry_length = Int64.to_int(end_offset -- begin_offset) in
+ let cached_file = String.create entry_length in
+ Unix32.read (file_fd file) begin_offset cached_file 0 entry_length;
+ let cached_comp = if compress then
+ Zlib.compress_string ~level:!!upload_compression_level cached_file
+ else
+ ""
+ in
+ cache_table_index := (!cache_table_index + 1) mod cache_table_size;
+ let (entry : cache_entry)={
+ md4=file.file_md4;
+ begin_offset=begin_offset;
+ end_offset=end_offset;
+ cached_part=cached_file;
+ comp_part=cached_comp} in
+ Weak.set cache_table !cache_table_index (Some entry);
+ if !verbose_upload then
+ lprintf_nl "Cache Miss for %s (%Ld,%Ld) orig.len %d comp.len %d"
(file_best_name file) begin_offset end_offset (String.length cached_file)
(String.length cached_comp);
+ Some (cached_file,cached_comp)
+ with
+ | Cache_table_hit (cached_file,cached_comp) -> Some
(cached_file,cached_comp)
+ | End_of_file -> if !verbose then lprintf_nl
+ "End_of_file in cached_load file %s size %Ld begin %Ld end %Ld"
(file_best_name file) (file_size file) begin_offset end_offset; None
+
module NewUpload = struct
@@ -74,25 +119,22 @@
M.CloseSlotReq Q.t)
*)
- let rec send_small_block c sock file begin_pos len_int =
-(* lprintf "send_small_block %d\n" len_int; *)
-(* let len_int = Int32.to_int len in *)
+ let rec send_small_block_plain c sock file begin_offset cfile pos len_int
sixtyfour =
try
if !verbose_upload then
- lprintf_nl "Sending %s to %s, begin %Ld len %d"
+ lprintf_nl "Sending plain %s to %s, begin_offset %Ld pos %d len %d"
(file_best_name file) (full_client_identifier c)
- (begin_pos) (len_int);
+ (begin_offset) (pos) (len_int);
- if file_is_largefile file && c.client_emule_proto.emule_largefiles <>
1 then raise Donkey_large_file;
let msg =
(
let module M = DonkeyProtoClient in
let module B = M.Bloc in
M.BlocReq {
B.md4 = file.file_md4;
- B.usesixtyfour = (begin_pos ++ (Int64.of_int len_int)) >
old_max_emule_file_size;
- B.start_pos = begin_pos;
- B.end_pos = begin_pos ++ (Int64.of_int len_int);
+ B.usesixtyfour = sixtyfour;
+ B.start_pos = begin_offset ++ (Int64.of_int pos);
+ B.end_pos = begin_offset ++ (Int64.of_int(pos + len_int));
B.bloc_str = "";
B.bloc_begin = 0;
B.bloc_len = 0;
@@ -103,7 +145,7 @@
let upload_buffer = String.create (slen + len_int) in
String.blit s 0 upload_buffer 0 slen;
DonkeyProtoCom.new_string msg upload_buffer;
- Unix32.read (file_fd file) begin_pos upload_buffer slen len_int;
+ String.blit cfile pos upload_buffer slen len_int;
let uploaded = Int64.of_int len_int in
count_upload c uploaded;
CommonUploads.consume_bandwidth len_int;
@@ -116,32 +158,86 @@
write_string sock upload_buffer;
check_end_upload c sock
with
- | End_of_file -> lprintf_nl "Can not send file %s to %s, file removed?"
- (file_best_name file) (full_client_identifier c)
- | Donkey_large_file -> lprintf_nl "File %s is too large for %s."
+ | e -> if !verbose then lprintf_nl
+ "Exception %s in send_small_block_plain" (Printexc2.to_string e)
+
+ let rec send_small_block_compressed c sock file begin_offset ccomp pos
len_int pay_len sixtyfour =
+ try
+ if !verbose_upload then
+ lprintf_nl "Sending compressed %s to %s, begin_offset %Ld pos %d len
%d"
(file_best_name file) (full_client_identifier c)
+ (begin_offset) (pos) (len_int);
+
+ let msg =
+ (
+ let module M = DonkeyProtoClient in
+ let module B = M.EmuleCompressedPart in
+ M.EmuleCompressedPart {
+ B.md4 = file.file_md4;
+ B.usesixtyfour = sixtyfour;
+ B.statpos = begin_offset;
+ B.newsize = Int64.of_int pay_len;
+ B.bloc = "";
+ }
+ ) in
+ let s = client_msg_to_string c.client_emule_proto msg in
+ let slen = String.length s in
+ let upload_buffer = String.create (slen + len_int) in
+ String.blit s 0 upload_buffer 0 slen;
+ DonkeyProtoCom.new_string msg upload_buffer;
+ String.blit ccomp pos upload_buffer slen len_int;
+ CommonUploads.consume_bandwidth len_int;
+
+ write_string sock upload_buffer;
+ check_end_upload c sock
+ with
| e -> if !verbose then lprintf_nl
- "Exception %s in send_small_block" (Printexc2.to_string e)
+ "Exception %s in send_small_block_compressed"
(Printexc2.to_string e)
let rec send_client_block c sock per_client =
-(* lprintf "send_client_block\n"; *)
+ try
if per_client > 0 && CommonUploads.can_write_len sock max_msg_size then
match c.client_upload with
- | Some ({ up_chunks = current_chunk :: chunks } as up) ->
+ | Some ({ up_chunks = (begin_offset,end_offset) :: chunks } as up) ->
+ if file_is_largefile up.up_file &&
c.client_emule_proto.emule_largefiles <> 1 then begin
+ DonkeyOneFile.remove_client_slot c;
+ lprintf_nl "File %s is too large for %s." (file_best_name
up.up_file) (full_client_identifier c);
+ end else
if up.up_file.file_shared = None then begin
(* Is there a message to warn that a file is not shared anymore ? *)
- c.client_upload <- None;
+ DonkeyOneFile.remove_client_slot c;
end else
- let max_len = up.up_end_chunk -- up.up_pos in
- let max_len = Int64.to_int max_len in
- let msg_block_size_int = min msg_block_size_int per_client in
- if max_len <= msg_block_size_int then
+ let compress = !!upload_compression &&
(c.client_emule_proto.emule_compression <> 0) in
+ let cfile,ccomp = match cached_load up.up_file begin_offset
end_offset compress with
+ Some (cached_file,cached_comp) -> cached_file,cached_comp
+ | _ -> "",""
+ in
+ let compressed = compress && ((String.length ccomp) +
!!upload_compression_threshold < (String.length cfile)) in
+ let pay_len = if compressed then (String.length ccomp) else
(String.length cfile) in
+ let pos = Int64.to_int (up.up_pos -- begin_offset) in
+ let max_len = pay_len - pos in
+ let allowed_msg_block_size_int = min msg_block_size_int per_client
in
+ let sixtyfour = end_offset >= old_max_emule_file_size in
+ if max_len <= allowed_msg_block_size_int then
(* last block from chunk *)
begin
- send_small_block c sock up.up_file up.up_pos max_len;
+ if compressed then
+ begin
+ send_small_block_compressed c sock up.up_file begin_offset
ccomp pos max_len pay_len sixtyfour;
+ let uploaded = end_offset -- begin_offset in
+ count_upload c uploaded;
+ (match up.up_file.file_shared with None -> ()
+ | Some impl ->
+ shared_must_update_downloaded (as_shared impl);
+ impl.impl_shared_uploaded <-
+ impl.impl_shared_uploaded ++ uploaded)
+ end
+ else
+ send_small_block_plain c sock up.up_file begin_offset cfile
pos max_len sixtyfour
+ ;
if !verbose_upload then
- lprintf_nl "End of chunk (%d) %Ld %s" max_len
up.up_end_chunk (file_best_name up.up_file);
- up.up_flying_chunks <- up.up_flying_chunks @ [current_chunk];
+ lprintf_nl "End of chunk %Ld %Ld %s" begin_offset
end_offset (file_best_name up.up_file);
+ up.up_flying_chunks <- up.up_flying_chunks @
[(begin_offset,end_offset)];
up.up_chunks <- chunks;
let per_client = per_client - max_len in
match chunks with
@@ -158,14 +254,25 @@
end
else
(* small block from chunk *)
+ if allowed_msg_block_size_int >= msg_block_size_int then
+ begin
+ if compressed then
begin
- send_small_block c sock up.up_file up.up_pos
- msg_block_size_int;
+ send_small_block_compressed c sock up.up_file
begin_offset ccomp pos msg_block_size_int pay_len sixtyfour;
+ end
+ else
+ begin
+ send_small_block_plain c sock up.up_file begin_offset
cfile pos msg_block_size_int sixtyfour;
+ end
+ ;
up.up_pos <- up.up_pos ++ (Int64.of_int msg_block_size_int);
let per_client = per_client-msg_block_size_int in
send_client_block c sock per_client
end
| _ -> ()
+ with
+ | e -> if !verbose then lprintf_nl
+ "Exception %s in send_client_block" (Printexc2.to_string e)
let upload_to_client c size =
(* lprintf "upload_to_client %d\n" size; *)
Index: src/networks/donkey/donkeyOptions.ml
===================================================================
RCS file: /sources/mldonkey/mldonkey/src/networks/donkey/donkeyOptions.ml,v
retrieving revision 1.59
retrieving revision 1.60
diff -u -b -r1.59 -r1.60
--- src/networks/donkey/donkeyOptions.ml 8 Jan 2007 15:12:10 -0000
1.59
+++ src/networks/donkey/donkeyOptions.ml 11 Jan 2007 12:24:18 -0000
1.60
@@ -192,6 +192,47 @@
"Uploaded zones (1 zone = 180 kBytes) needed to enable the dynamic upload
lifetime"
int_option 10
+let upload_compression = define_expert_option donkey_section
["upload_compression"]
+ "Enables compressed upload as part of the protocol"
+ bool_option true
+
+let upload_compression_threshold = define_expert_option donkey_section
["upload_compression_threshold"]
+ "Size difference in bytes between one zone (180 kBytes) and its compressed
+ counterpart, which has to occur to send compressed parts instead of plain."
+ int_option 2000
+
+let _ =
+ option_hook upload_compression_threshold (fun _ ->
+ if !!upload_compression_threshold < 0 then
+ upload_compression_threshold =:= 0
+ )
+
+let upload_compression_level = define_expert_option donkey_section
["upload_compression_level"]
+ "Level of the used zlib compression. Allowed values are between 0 and 9; higher
+ level means better compression, but higher CPU usage too. (eMule's default
+ compression level for compressed parts is 9)"
+ int_option 9
+
+let _ =
+ option_hook upload_compression_level (fun _ ->
+ if !!upload_compression_level < 0
+ || !!upload_compression_level > 9 then
+ upload_compression_level =:= 9
+ )
+
+let upload_compression_table_size = define_expert_option donkey_section
["upload_compression_table_size"]
+ "Size of the cache table in entries (ca. 2 * 180 kBytes). Zones have to be
+ compressed at once, but only parts of them are sent at a time (10 kBytes).
+ To reduce disk access and repeated compression to a minimum, size should be
+ at least the number of total upload slots. Restart of core is required."
+ int_option 20
+
+let _ =
+ option_hook upload_compression_table_size (fun _ ->
+ if !!upload_compression_table_size < 1 then
+ upload_compression_table_size =:= 1
+ )
+
let connected_server_timeout = define_expert_option donkey_section
["connected_server_timeout"]
"How long can a silent server stay connected"
float_option 1800.
- [Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/..., mldonkey-commits, 2007/01/08
- [Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/..., mldonkey-commits, 2007/01/08
- [Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/..., mldonkey-commits, 2007/01/08
- [Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/...,
mldonkey-commits <=
- [Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/..., mldonkey-commits, 2007/01/15
- [Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/..., mldonkey-commits, 2007/01/21
- [Mldonkey-commits] mldonkey distrib/ChangeLog src/networks/donkey/..., mldonkey-commits, 2007/01/28